Commit 61bd1e85 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6: (53 commits)
  [SCSI] libosd: OSD2r05: on-the-wire changes for latest OSD2 revision 5.
  [SCSI] libosd: OSD2r05: OSD_CRYPTO_KEYID_SIZE will grow 20 => 32 bytes
  [SCSI] libosd: OSD2r05: Prepare for rev5 attribute list changes
  [SCSI] libosd: fix potential ERR_PTR dereference in osd_initiator.c
  [SCSI] mpt2sas : bump driver version to 01.100.02.00
  [SCSI] mpt2sas: fix hotplug event processing
  [SCSI] mpt2sas : release diagnotic buffers prior host reset
  [SCSI] mpt2sas : Broadcast Primative AEN bug fix
  [SCSI] mpt2sas : Identify Dell series-7 adapters at driver load time
  [SCSI] mpt2sas : driver name needs to be in the MPT2IOCINFO ioctl
  [SCSI] mpt2sas : running out of message frames
  [SCSI] mpt2sas : fix oops when firmware sends large sense buffer size
  [SCSI] mpt2sas : the sanity check in base_interrupt needs to be on dword boundary
  [SCSI] mpt2sas : unique ioctl magic number
  [SCSI] fix sign extension with 1.5TB usb-storage LBD=y
  [SCSI] ipr: Fix sleeping function called with interrupts disabled
  [SCSI] fcoe: fip: add multicast filter to receive FIP advertisements.
  [SCSI] libfc: Fix compilation warnings with allmodconfig
  [SCSI] fcoe: fix spelling typos and bad comments
  [SCSI] fcoe: don't export functions that are internal to fcoe
  ...
parents 8c0c3f7f e9da4d7f
......@@ -599,6 +599,7 @@ static struct scsi_host_template iscsi_iser_sht = {
.eh_abort_handler = iscsi_eh_abort,
.eh_device_reset_handler= iscsi_eh_device_reset,
.eh_target_reset_handler= iscsi_eh_target_reset,
.target_alloc = iscsi_target_alloc,
.use_clustering = DISABLE_CLUSTERING,
.proc_name = "iscsi_iser",
.this_id = -1,
......
......@@ -97,9 +97,7 @@ static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun)
ccw_device_set_online(adapter->ccw_device);
zfcp_erp_wait(adapter);
wait_event(adapter->erp_done_wqh,
!(atomic_read(&unit->status) &
ZFCP_STATUS_UNIT_SCSI_WORK_PENDING));
flush_work(&unit->scsi_work);
down(&zfcp_data.config_sema);
zfcp_unit_put(unit);
......@@ -279,6 +277,7 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
atomic_set(&unit->refcount, 0);
init_waitqueue_head(&unit->remove_wq);
INIT_WORK(&unit->scsi_work, zfcp_scsi_scan);
unit->port = port;
unit->fcp_lun = fcp_lun;
......@@ -525,6 +524,8 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
zfcp_fc_nameserver_init(adapter);
if (!zfcp_adapter_scsi_register(adapter))
return 0;
......@@ -553,7 +554,6 @@ void zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
cancel_work_sync(&adapter->scan_work);
cancel_work_sync(&adapter->stat_work);
cancel_delayed_work_sync(&adapter->nsp.work);
zfcp_adapter_scsi_unregister(adapter);
sysfs_remove_group(&adapter->ccw_device->dev.kobj,
&zfcp_sysfs_adapter_attrs);
......@@ -671,8 +671,7 @@ void zfcp_port_dequeue(struct zfcp_port *port)
list_del(&port->list);
write_unlock_irq(&zfcp_data.config_lock);
if (port->rport)
fc_remote_port_delete(port->rport);
port->rport = NULL;
port->rport->dd_data = NULL;
zfcp_adapter_put(port->adapter);
sysfs_remove_group(&port->sysfs_device.kobj, &zfcp_sysfs_port_attrs);
device_unregister(&port->sysfs_device);
......
......@@ -108,7 +108,6 @@ static int zfcp_ccw_set_online(struct ccw_device *ccw_device)
/* initialize request counter */
BUG_ON(!zfcp_reqlist_isempty(adapter));
adapter->req_no = 0;
zfcp_fc_nameserver_init(adapter);
zfcp_erp_modify_adapter_status(adapter, "ccsonl1", NULL,
ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);
......
......@@ -4,7 +4,7 @@
* Userspace interface for accessing the
* Access Control Lists / Control File Data Channel
*
* Copyright IBM Corporation 2008
* Copyright IBM Corporation 2008, 2009
*/
#define KMSG_COMPONENT "zfcp"
......@@ -197,6 +197,7 @@ static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command,
retval = -ENXIO;
goto free_buffer;
}
zfcp_adapter_get(adapter);
retval = zfcp_cfdc_sg_setup(data->command, fsf_cfdc->sg,
data_user->control_file);
......
......@@ -255,7 +255,6 @@ enum zfcp_wka_status {
/* logical unit status */
#define ZFCP_STATUS_UNIT_SHARED 0x00000004
#define ZFCP_STATUS_UNIT_READONLY 0x00000008
#define ZFCP_STATUS_UNIT_SCSI_WORK_PENDING 0x00000020
/* FSF request status (this does not have a common part) */
#define ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT 0x00000002
......@@ -530,6 +529,7 @@ struct zfcp_unit {
struct zfcp_erp_action erp_action; /* pending error recovery */
atomic_t erp_counter;
struct zfcp_latencies latencies;
struct work_struct scsi_work;
};
/* FSF request */
......
......@@ -719,6 +719,7 @@ static void zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *act)
zfcp_qdio_close(adapter);
zfcp_fsf_req_dismiss_all(adapter);
adapter->fsf_req_seq_no = 0;
zfcp_fc_wka_port_force_offline(&adapter->nsp);
/* all ports and units are closed */
zfcp_erp_modify_adapter_status(adapter, "erascl1", NULL,
ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR);
......@@ -1176,48 +1177,6 @@ static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
}
}
struct zfcp_erp_add_work {
struct zfcp_unit *unit;
struct work_struct work;
};
static void zfcp_erp_scsi_scan(struct work_struct *work)
{
struct zfcp_erp_add_work *p =
container_of(work, struct zfcp_erp_add_work, work);
struct zfcp_unit *unit = p->unit;
struct fc_rport *rport = unit->port->rport;
if (rport && rport->port_state == FC_PORTSTATE_ONLINE)
scsi_scan_target(&rport->dev, 0, rport->scsi_target_id,
scsilun_to_int((struct scsi_lun *)&unit->fcp_lun), 0);
atomic_clear_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status);
zfcp_unit_put(unit);
wake_up(&unit->port->adapter->erp_done_wqh);
kfree(p);
}
static void zfcp_erp_schedule_work(struct zfcp_unit *unit)
{
struct zfcp_erp_add_work *p;
p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p) {
dev_err(&unit->port->adapter->ccw_device->dev,
"Registering unit 0x%016Lx on port 0x%016Lx failed\n",
(unsigned long long)unit->fcp_lun,
(unsigned long long)unit->port->wwpn);
return;
}
zfcp_unit_get(unit);
atomic_set_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status);
INIT_WORK(&p->work, zfcp_erp_scsi_scan);
p->unit = unit;
if (!queue_work(zfcp_data.work_queue, &p->work))
zfcp_unit_put(unit);
}
static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
{
struct zfcp_adapter *adapter = act->adapter;
......@@ -1226,11 +1185,11 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
switch (act->action) {
case ZFCP_ERP_ACTION_REOPEN_UNIT:
flush_work(&port->rport_work);
if ((result == ZFCP_ERP_SUCCEEDED) && !unit->device) {
if (!(atomic_read(&unit->status) &
ZFCP_STATUS_UNIT_SCSI_WORK_PENDING))
zfcp_erp_schedule_work(unit);
zfcp_unit_get(unit);
if (scsi_queue_work(unit->port->adapter->scsi_host,
&unit->scsi_work) <= 0)
zfcp_unit_put(unit);
}
zfcp_unit_put(unit);
break;
......@@ -1352,6 +1311,11 @@ static int zfcp_erp_thread(void *data)
while (!(atomic_read(&adapter->status) &
ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL)) {
zfcp_rec_dbf_event_thread_lock("erthrd1", adapter);
ignore = down_interruptible(&adapter->erp_ready_sem);
zfcp_rec_dbf_event_thread_lock("erthrd2", adapter);
write_lock_irqsave(&adapter->erp_lock, flags);
next = adapter->erp_ready_head.next;
write_unlock_irqrestore(&adapter->erp_lock, flags);
......@@ -1363,10 +1327,6 @@ static int zfcp_erp_thread(void *data)
if (zfcp_erp_strategy(act) != ZFCP_ERP_DISMISSED)
zfcp_erp_wakeup(adapter);
}
zfcp_rec_dbf_event_thread_lock("erthrd1", adapter);
ignore = down_interruptible(&adapter->erp_ready_sem);
zfcp_rec_dbf_event_thread_lock("erthrd2", adapter);
}
atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status);
......
......@@ -106,6 +106,7 @@ extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fsf_plogi *);
extern void zfcp_test_link(struct zfcp_port *);
extern void zfcp_fc_link_test_work(struct work_struct *);
extern void zfcp_fc_nameserver_init(struct zfcp_adapter *);
extern void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *);
/* zfcp_fsf.c */
extern int zfcp_fsf_open_port(struct zfcp_erp_action *);
......@@ -158,6 +159,7 @@ extern void zfcp_scsi_rport_work(struct work_struct *);
extern void zfcp_scsi_schedule_rport_register(struct zfcp_port *);
extern void zfcp_scsi_schedule_rport_block(struct zfcp_port *);
extern void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *);
extern void zfcp_scsi_scan(struct work_struct *);
/* zfcp_sysfs.c */
extern struct attribute_group zfcp_sysfs_unit_attrs;
......
......@@ -98,13 +98,6 @@ static void zfcp_wka_port_offline(struct work_struct *work)
struct zfcp_wka_port *wka_port =
container_of(dw, struct zfcp_wka_port, work);
/* Don't wait forvever. If the wka_port is too busy take it offline
through a new call later */
if (!wait_event_timeout(wka_port->completion_wq,
atomic_read(&wka_port->refcount) == 0,
HZ >> 1))
return;
mutex_lock(&wka_port->mutex);
if ((atomic_read(&wka_port->refcount) != 0) ||
(wka_port->status != ZFCP_WKA_PORT_ONLINE))
......@@ -142,6 +135,14 @@ void zfcp_fc_nameserver_init(struct zfcp_adapter *adapter)
INIT_DELAYED_WORK(&wka_port->work, zfcp_wka_port_offline);
}
void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *wka)
{
cancel_delayed_work_sync(&wka->work);
mutex_lock(&wka->mutex);
wka->status = ZFCP_WKA_PORT_OFFLINE;
mutex_unlock(&wka->mutex);
}
static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
struct fcp_rscn_element *elem)
{
......@@ -372,7 +373,8 @@ static void zfcp_fc_adisc_handler(unsigned long data)
if (adisc->els.status) {
/* request rejected or timed out */
zfcp_erp_port_forced_reopen(port, 0, "fcadh_1", NULL);
zfcp_erp_port_forced_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
"fcadh_1", NULL);
goto out;
}
......@@ -431,11 +433,6 @@ void zfcp_fc_link_test_work(struct work_struct *work)
container_of(work, struct zfcp_port, test_link_work);
int retval;
if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_UNBLOCKED)) {
zfcp_port_put(port);
return; /* port erp is running and will update rport status */
}
zfcp_port_get(port);
port->rport_task = RPORT_DEL;
zfcp_scsi_rport_work(&port->rport_work);
......@@ -542,6 +539,9 @@ static void zfcp_validate_port(struct zfcp_port *port)
{
struct zfcp_adapter *adapter = port->adapter;
if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_NOESC))
return;
atomic_clear_mask(ZFCP_STATUS_COMMON_NOESC, &port->status);
if ((port->supported_classes != 0) ||
......@@ -602,10 +602,8 @@ static int zfcp_scan_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft, int max_entries)
if (acc->wwpn == fc_host_port_name(adapter->scsi_host))
continue;
port = zfcp_get_port_by_wwpn(adapter, acc->wwpn);
if (port) {
zfcp_port_get(port);
if (port)
continue;
}
port = zfcp_port_enqueue(adapter, acc->wwpn,
ZFCP_STATUS_COMMON_NOESC, d_id);
......@@ -637,7 +635,8 @@ int zfcp_scan_ports(struct zfcp_adapter *adapter)
max_entries = chain ? ZFCP_GPN_FT_MAX_ENTRIES : ZFCP_GPN_FT_ENTRIES;
max_bytes = chain ? ZFCP_GPN_FT_MAX_SIZE : ZFCP_CT_SIZE_ONE_PAGE;
if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT)
if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT &&
fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
return 0;
ret = zfcp_wka_port_get(&adapter->nsp);
......
......@@ -172,12 +172,16 @@ static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, char *id,
struct fsf_link_down_info *link_down)
{
struct zfcp_adapter *adapter = req->adapter;
unsigned long flags;
if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
return;
atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
read_lock_irqsave(&zfcp_data.config_lock, flags);
zfcp_scsi_schedule_rports_block(adapter);
read_unlock_irqrestore(&zfcp_data.config_lock, flags);
if (!link_down)
goto out;
......@@ -645,30 +649,30 @@ static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
}
}
static int zfcp_fsf_req_sbal_get(struct zfcp_adapter *adapter)
__releases(&adapter->req_q_lock)
__acquires(&adapter->req_q_lock)
static int zfcp_fsf_sbal_check(struct zfcp_adapter *adapter)
{
struct zfcp_qdio_queue *req_q = &adapter->req_q;
long ret;
if (atomic_read(&req_q->count) <= -REQUEST_LIST_SIZE)
return -EIO;
if (atomic_read(&req_q->count) > 0)
return 0;
spin_lock_bh(&adapter->req_q_lock);
if (atomic_read(&req_q->count))
return 1;
spin_unlock_bh(&adapter->req_q_lock);
return 0;
}
static int zfcp_fsf_req_sbal_get(struct zfcp_adapter *adapter)
{
long ret;
atomic_dec(&req_q->count);
spin_unlock_bh(&adapter->req_q_lock);
ret = wait_event_interruptible_timeout(adapter->request_wq,
atomic_read(&req_q->count) >= 0,
5 * HZ);
spin_lock_bh(&adapter->req_q_lock);
atomic_inc(&req_q->count);
zfcp_fsf_sbal_check(adapter), 5 * HZ);
if (ret > 0)
return 0;
if (!ret)
atomic_inc(&adapter->qdio_outb_full);
spin_lock_bh(&adapter->req_q_lock);
return -EIO;
}
......@@ -766,8 +770,9 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter,
static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
{
struct zfcp_adapter *adapter = req->adapter;
unsigned long flags;
int idx;
unsigned long flags;
int idx;
int with_qtcb = (req->qtcb != NULL);
/* put allocated FSF request into hash table */
spin_lock_irqsave(&adapter->req_list_lock, flags);
......@@ -789,7 +794,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
}
/* Don't increase for unsolicited status */
if (req->qtcb)
if (with_qtcb)
adapter->fsf_req_seq_no++;
adapter->req_no++;
......@@ -1253,13 +1258,13 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter,
spin_lock_bh(&adapter->req_q_lock);
if (zfcp_fsf_req_sbal_get(adapter))
goto out;
goto out_unlock;
req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_CONFIG_DATA,
0, NULL);
if (IS_ERR(req)) {
retval = PTR_ERR(req);
goto out;
goto out_unlock;
}
sbale = zfcp_qdio_sbale_req(req);
......@@ -1278,14 +1283,16 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter,
zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
retval = zfcp_fsf_req_send(req);
out:
spin_unlock_bh(&adapter->req_q_lock);
if (!retval)
wait_event(req->completion_wq,
req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
zfcp_fsf_req_free(req);
return retval;
out_unlock:
spin_unlock_bh(&adapter->req_q_lock);
return retval;
}
......@@ -1352,13 +1359,13 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *adapter,
spin_lock_bh(&adapter->req_q_lock);
if (zfcp_fsf_req_sbal_get(adapter))
goto out;
goto out_unlock;
req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA, 0,
NULL);
if (IS_ERR(req)) {
retval = PTR_ERR(req);
goto out;
goto out_unlock;
}
if (data)
......@@ -1371,14 +1378,18 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *adapter,
req->handler = zfcp_fsf_exchange_port_data_handler;
zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
retval = zfcp_fsf_req_send(req);
out:
spin_unlock_bh(&adapter->req_q_lock);
if (!retval)
wait_event(req->completion_wq,
req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
zfcp_fsf_req_free(req);
return retval;
out_unlock:
spin_unlock_bh(&adapter->req_q_lock);
return retval;
}
static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
......@@ -2472,8 +2483,6 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags)
static void zfcp_fsf_control_file_handler(struct zfcp_fsf_req *req)
{
if (req->qtcb->header.fsf_status != FSF_GOOD)
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
}
/**
......
......@@ -171,7 +171,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
write_unlock_irqrestore(&adapter->abort_lock, flags);
zfcp_scsi_dbf_event_abort("lte1", adapter, scpnt, NULL,
old_req_id);
return SUCCESS;
return FAILED; /* completion could be in progress */
}
old_req->data = NULL;
......@@ -486,10 +486,12 @@ static void zfcp_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
*/
static void zfcp_scsi_dev_loss_tmo_callbk(struct fc_rport *rport)
{
struct zfcp_port *port = rport->dd_data;
struct zfcp_port *port;
write_lock_irq(&zfcp_data.config_lock);
port->rport = NULL;
port = rport->dd_data;
if (port)
port->rport = NULL;
write_unlock_irq(&zfcp_data.config_lock);
}
......@@ -503,9 +505,18 @@ static void zfcp_scsi_dev_loss_tmo_callbk(struct fc_rport *rport)
*/
static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport)
{
struct zfcp_port *port = rport->dd_data;
struct zfcp_port *port;
write_lock_irq(&zfcp_data.config_lock);
port = rport->dd_data;
if (port)
zfcp_port_get(port);
write_unlock_irq(&zfcp_data.config_lock);
zfcp_erp_port_reopen(port, 0, "sctrpi1", NULL);
if (port) {
zfcp_erp_port_reopen(port, 0, "sctrpi1", NULL);
zfcp_port_put(port);
}
}
static void zfcp_scsi_rport_register(struct zfcp_port *port)
......@@ -534,8 +545,10 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port)
static void zfcp_scsi_rport_block(struct zfcp_port *port)
{
if (port->rport)
fc_remote_port_delete(port->rport);
struct fc_rport *rport = port->rport;
if (rport)
fc_remote_port_delete(rport);
}
void zfcp_scsi_schedule_rport_register(struct zfcp_port *port)
......@@ -583,6 +596,23 @@ void zfcp_scsi_rport_work(struct work_struct *work)
}
void zfcp_scsi_scan(struct work_struct *work)
{
struct zfcp_unit *unit = container_of(work, struct zfcp_unit,
scsi_work);
struct fc_rport *rport;
flush_work(&unit->port->rport_work);
rport = unit->port->rport;
if (rport && rport->port_state == FC_PORTSTATE_ONLINE)
scsi_scan_target(&rport->dev, 0, rport->scsi_target_id,
scsilun_to_int((struct scsi_lun *)
&unit->fcp_lun), 0);
zfcp_unit_put(unit);
}
struct fc_function_template zfcp_transport_functions = {
.show_starget_port_id = 1,
.show_starget_port_name = 1,
......
......@@ -254,12 +254,21 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
write_lock_irq(&zfcp_data.config_lock);
unit = zfcp_get_unit_by_lun(port, fcp_lun);
if (unit && (atomic_read(&unit->refcount) == 0)) {
zfcp_unit_get(unit);
atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);
list_move(&unit->list, &unit_remove_lh);
} else
unit = NULL;
if (unit) {
write_unlock_irq(&zfcp_data.config_lock);
/* wait for possible timeout during SCSI probe */
flush_work(&unit->scsi_work);
write_lock_irq(&zfcp_data.config_lock);
if (atomic_read(&unit->refcount) == 0) {
zfcp_unit_get(unit);
atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE,
&unit->status);
list_move(&unit->list, &unit_remove_lh);
} else {
unit = NULL;
}
}
write_unlock_irq(&zfcp_data.config_lock);
......
......@@ -34,7 +34,7 @@
#include "cxgb3i_offload.h"
#include "cxgb3i_ddp.h"
#define CXGB3I_SCSI_QDEPTH_DFLT 128
#define CXGB3I_SCSI_HOST_QDEPTH 1024
#define CXGB3I_MAX_TARGET CXGB3I_MAX_CONN
#define CXGB3I_MAX_LUN 512
#define ISCSI_PDU_NONPAYLOAD_MAX \
......
......@@ -120,20 +120,26 @@ static void clear_ddp_map(struct cxgb3i_ddp_info *ddp, unsigned int tag,
}
static inline int ddp_find_unused_entries(struct cxgb3i_ddp_info *ddp,
int start, int max, int count,
unsigned int start, unsigned int max,
unsigned int count,
struct cxgb3i_gather_list *gl)
{
unsigned int i, j;
unsigned int i, j, k;
/* not enough entries */
if ((max - start) < count)
return -EBUSY;
max -= count;
spin_lock(&ddp->map_lock);
for (i = start; i <= max;) {
for (j = 0; j < count; j++) {
if (ddp->gl_map[i + j])
for (i = start; i < max;) {
for (j = 0, k = i; j < count; j++, k++) {
if (ddp->gl_map[k])
break;
}
if (j == count) {
for (j = 0; j < count; j++)
ddp->gl_map[i + j] = gl;
for (j = 0, k = i; j < count; j++, k++)
ddp->gl_map[k] = gl;
spin_unlock(&ddp->map_lock);
return i;
}
......@@ -354,7 +360,7 @@ int cxgb3i_ddp_tag_reserve(struct t3cdev *tdev, unsigned int tid,
struct cxgb3i_ddp_info *ddp = tdev->ulp_iscsi;
struct pagepod_hdr hdr;
unsigned int npods;
int idx = -1, idx_max;
int idx = -1;
int err = -ENOMEM;
u32 sw_tag = *tagp;
u32 tag;
......@@ -367,17 +373,17 @@ int cxgb3i_ddp_tag_reserve(struct t3cdev *tdev, unsigned int tid,
}
npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
idx_max = ddp->nppods - npods + 1;
if (ddp->idx_last == ddp->nppods)
idx = ddp_find_unused_entries(ddp, 0, idx_max, npods, gl);
idx = ddp_find_unused_entries(ddp, 0, ddp->nppods, npods, gl);
else {
idx = ddp_find_unused_entries(ddp, ddp->idx_last + 1,
idx_max, npods, gl);
if (idx < 0 && ddp->idx_last >= npods)
ddp->nppods, npods, gl);
if (idx < 0 && ddp->idx_last >= npods) {
idx = ddp_find_unused_entries(ddp, 0,
ddp->idx_last - npods + 1,
min(ddp->idx_last + npods, ddp->nppods),
npods, gl);
}
}
if (idx < 0) {
ddp_log_debug("xferlen %u, gl %u, npods %u NO DDP.\n",
......
......@@ -876,13 +876,14 @@ static struct scsi_host_template cxgb3i_host_template = {
.proc_name = "cxgb3i",
.queuecommand = iscsi_queuecommand,
.change_queue_depth = iscsi_change_queue_depth,
.can_queue = CXGB3I_SCSI_QDEPTH_DFLT - 1,
.can_queue = CXGB3I_SCSI_HOST_QDEPTH,
.sg_tablesize = SG_ALL,
.max_sectors = 0xFFFF,
.cmd_per_lun = CXGB3I_SCSI_QDEPTH_DFLT,
.cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
.eh_abort_handler = iscsi_eh_abort,
.eh_device_reset_handler = iscsi_eh_device_reset,
.eh_target_reset_handler = iscsi_eh_target_reset,
.target_alloc = iscsi_target_alloc,
.use_clustering = DISABLE_CLUSTERING,
.this_id = -1,
};
......
......@@ -1737,7 +1737,7 @@ int cxgb3i_c3cn_send_pdus(struct s3_conn *c3cn, struct sk_buff *skb)
c3cn_tx_debug("c3cn 0x%p, snd %u - %u > %u.\n",
c3cn, c3cn->write_seq, c3cn->snd_una,
cxgb3_snd_win);
err = -EAGAIN;
err = -ENOBUFS;
goto out_err;
}
......@@ -1775,6 +1775,8 @@ int cxgb3i_c3cn_send_pdus(struct s3_conn *c3cn, struct sk_buff *skb)
out_err:
if (copied == 0 && err == -EPIPE)
copied = c3cn->err ? c3cn->err : -EPIPE;
else
copied = err;
goto done;
}
......
......@@ -400,17 +400,18 @@ int cxgb3i_conn_xmit_pdu(struct iscsi_task *task)
return 0;
}
if (err < 0 && err != -EAGAIN) {
kfree_skb(skb);
cxgb3i_tx_debug("itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n",
task->itt, skb, skb->len, skb->data_len, err);
iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err);
iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED);
if (err == -EAGAIN || err == -ENOBUFS) {
/* reset skb to send when we are called again */
tdata->skb = skb;
return err;
}
/* reset skb to send when we are called again */
tdata->skb = skb;
return -EAGAIN;
kfree_skb(skb);
cxgb3i_tx_debug("itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n",
task->itt, skb, skb->len, skb->data_len, err);
iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err);
iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED);
return err;
}
int cxgb3i_pdu_init(void)
......
......@@ -57,7 +57,7 @@ DEFINE_RWLOCK(fcoe_hostlist_lock);
DEFINE_TIMER(fcoe_timer, NULL, 0, 0);
DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu);
/* Function Prototyes */
/* Function Prototypes */
static int fcoe_reset(struct Scsi_Host *shost);
static int fcoe_xmit(struct fc_lport *, struct fc_frame *);
static int fcoe_rcv(struct sk_buff *, struct net_device *,
......@@ -138,7 +138,6 @@ static struct scsi_host_template fcoe_shost_template = {
/**
* fcoe_lport_config() - sets up the fc_lport
* @lp: ptr to the fc_lport
* @shost: ptr to the parent scsi host
*
* Returns: 0 for success
*/
......@@ -256,6 +255,7 @@ static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev)
rtnl_lock();
memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
dev_unicast_add(fc->real_dev, flogi_maddr, ETH_ALEN);
dev_mc_add(fc->real_dev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0);
rtnl_unlock();
/*
......@@ -380,7 +380,7 @@ static int fcoe_if_destroy(struct net_device *netdev)
dev_mc_delete(fc->real_dev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0);
rtnl_unlock();
/* Free the per-CPU revieve threads */
/* Free the per-CPU receive threads */
fcoe_percpu_clean(lp);
/* Free existing skbs */
......@@ -720,7 +720,7 @@ static void fcoe_percpu_thread_destroy(unsigned int cpu)
}
#else
/*
* This a non-SMP scenario where the singluar Rx thread is
* This a non-SMP scenario where the singular Rx thread is
* being removed. Free all skbs and stop the thread.
*/
spin_lock_bh(&p->fcoe_rx_list.lock);
......@@ -777,7 +777,7 @@ static struct notifier_block fcoe_cpu_notifier = {
* @skb: the receive skb
* @dev: associated net device
* @ptype: context
* @odldev: last device
* @olddev: last device
*
* this function will receive the packet and build fc frame and pass it up
*
......@@ -884,7 +884,6 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *dev,
kfree_skb(skb);
return -1;
}
EXPORT_SYMBOL_GPL(fcoe_rcv);
/**
* fcoe_start_io() - pass to netdev to start xmit for fcoe
......@@ -905,7 +904,7 @@ static inline int fcoe_start_io(struct sk_buff *skb)
}
/**
* fcoe_get_paged_crc_eof() - in case we need alloc a page for crc_eof
* fcoe_get_paged_crc_eof() - in case we need to alloc a page for crc_eof
* @skb: the skb to be xmitted
* @tlen: total len
*
......@@ -947,7 +946,7 @@ static int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen)
/**
* fcoe_fc_crc() - calculates FC CRC in this fcoe skb
* @fp: the fc_frame containg data to be checksummed
* @fp: the fc_frame containing data to be checksummed
*
* This uses crc32() to calculate the crc for fc frame
* Return : 32 bit crc
......@@ -1011,7 +1010,7 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
wlen = skb->len / FCOE_WORD_TO_BYTE;
if (!lp->link_up) {
kfree(skb);
kfree_skb(skb);
return 0;
}
......@@ -1062,7 +1061,7 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
cp = NULL;
}
/* adjust skb netowrk/transport offsets to match mac/fcoe/fc */
/* adjust skb network/transport offsets to match mac/fcoe/fc */
skb_push(skb, elen + hlen);
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
......@@ -1123,7 +1122,6 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
return 0;
}
EXPORT_SYMBOL_GPL(fcoe_xmit);
/**
* fcoe_percpu_receive_thread() - recv thread per cpu
......@@ -1296,17 +1294,16 @@ void fcoe_watchdog(ulong vp)
/**
* fcoe_check_wait_queue() - put the skb into fcoe pending xmit queue
* @lp: the fc_port for this skb
* @skb: the associated skb to be xmitted
* fcoe_check_wait_queue() - attempt to clear the transmit backlog
* @lp: the fc_lport
*
* This empties the wait_queue, dequeue the head of the wait_queue queue
* and calls fcoe_start_io() for each packet, if all skb have been
* transmitted, return qlen or -1 if a error occurs, then restore
* wait_queue and try again later.
* wait_queue and try again later.
*
* The wait_queue is used when the skb transmit fails. skb will go
* in the wait_queue which will be emptied by the time function OR
* in the wait_queue which will be emptied by the timer function or
* by the next skb transmit.
*
* Returns: 0 for success
......@@ -1355,10 +1352,6 @@ static int fcoe_check_wait_queue(struct fc_lport *lp)
*/
static void fcoe_dev_setup()
{
/*
* here setup a interface specific wd time to
* monitor the link state
*/
register_netdevice_notifier(&fcoe_notifier);
}
......@@ -1437,10 +1430,9 @@ static int fcoe_device_notification(struct notifier_block *notifier,
/**
* fcoe_if_to_netdev() - parse a name buffer to get netdev
* @ifname: fixed array for output parsed ifname
* @buffer: incoming buffer to be copied
*
* Returns: NULL or ptr to netdeive
* Returns: NULL or ptr to net_device
*/
static struct net_device *fcoe_if_to_netdev(const char *buffer)
{
......@@ -1458,7 +1450,7 @@ static struct net_device *fcoe_if_to_netdev(const char *buffer)
}
/**
* fcoe_netdev_to_module_owner() - finds out the nic drive moddule of the netdev
* fcoe_netdev_to_module_owner() - finds out the driver module of the netdev
* @netdev: the target netdev
*
* Returns: ptr to the struct module, NULL for failure
......@@ -1488,7 +1480,7 @@ fcoe_netdev_to_module_owner(const struct net_device *netdev)
* Holds the Ethernet driver module by try_module_get() for
* the corresponding netdev.
*
* Returns: 0 for succsss
* Returns: 0 for success
*/
static int fcoe_ethdrv_get(const struct net_device *netdev)
{
......@@ -1510,7 +1502,7 @@ static int fcoe_ethdrv_get(const struct net_device *netdev)
* Releases the Ethernet driver module by module_put for
* the corresponding netdev.
*
* Returns: 0 for succsss
* Returns: 0 for success
*/
static int fcoe_ethdrv_put(const struct net_device *netdev)
{
......@@ -1528,7 +1520,7 @@ static int fcoe_ethdrv_put(const struct net_device *netdev)
/**
* fcoe_destroy() - handles the destroy from sysfs
* @buffer: expcted to be a eth if name
* @buffer: expected to be an eth if name
* @kp: associated kernel param
*
* Returns: 0 for success
......@@ -1565,7 +1557,7 @@ static int fcoe_destroy(const char *buffer, struct kernel_param *kp)
/**
* fcoe_create() - Handles the create call from sysfs
* @buffer: expcted to be a eth if name
* @buffer: expected to be an eth if name
* @kp: associated kernel param
*
* Returns: 0 for success
......@@ -1652,7 +1644,6 @@ int fcoe_link_ok(struct fc_lport *lp)
return rc;
}
EXPORT_SYMBOL_GPL(fcoe_link_ok);
/**
* fcoe_percpu_clean() - Clear the pending skbs for an lport
......@@ -1684,7 +1675,6 @@ void fcoe_percpu_clean(struct fc_lport *lp)
spin_unlock_bh(&pp->fcoe_rx_list.lock);
}
}
EXPORT_SYMBOL_GPL(fcoe_percpu_clean);
/**
* fcoe_clean_pending_queue() - Dequeue a skb and free it
......@@ -1705,7 +1695,6 @@ void fcoe_clean_pending_queue(struct fc_lport *lp)
}
spin_unlock_bh(&fc->fcoe_pending_queue.lock);
}
EXPORT_SYMBOL_GPL(fcoe_clean_pending_queue);
/**
* fcoe_reset() - Resets the fcoe
......@@ -1719,11 +1708,10 @@ int fcoe_reset(struct Scsi_Host *shost)
fc_lport_reset(lport);
return 0;
}
EXPORT_SYMBOL_GPL(fcoe_reset);
/**
* fcoe_hostlist_lookup_softc() - find the corresponding lport by a given device
* @device: this is currently ptr to net_device
* @dev: this is currently ptr to net_device
*
* Returns: NULL or the located fcoe_softc
*/
......@@ -1757,11 +1745,10 @@ struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev)
return (fc) ? fc->ctlr.lp : NULL;
}
EXPORT_SYMBOL_GPL(fcoe_hostlist_lookup);
/**
* fcoe_hostlist_add() - Add a lport to lports list
* @lp: ptr to the fc_lport to badded
* @lp: ptr to the fc_lport to be added
*
* Returns: 0 for success
*/
......@@ -1778,11 +1765,10 @@ int fcoe_hostlist_add(const struct fc_lport *lp)
}
return 0;
}
EXPORT_SYMBOL_GPL(fcoe_hostlist_add);
/**
* fcoe_hostlist_remove() - remove a lport from lports list
* @lp: ptr to the fc_lport to badded
* @lp: ptr to the fc_lport to be removed
*
* Returns: 0 for success
*/
......@@ -1798,7 +1784,6 @@ int fcoe_hostlist_remove(const struct fc_lport *lp)
return 0;
}
EXPORT_SYMBOL_GPL(fcoe_hostlist_remove);
/**
* fcoe_init() - fcoe module loading initialization
......
......@@ -122,7 +122,7 @@ static void fcoe_ctlr_reset_fcfs(struct fcoe_ctlr *fip)
}
/**
* fcoe_ctrl_destroy() - Disable and tear-down the FCoE controller.
* fcoe_ctlr_destroy() - Disable and tear-down the FCoE controller.
* @fip: FCoE controller.
*
* This is called by FCoE drivers before freeing the &fcoe_ctlr.
......
......@@ -3654,6 +3654,7 @@ static int ipr_slave_configure(struct scsi_device *sdev)
{
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
struct ipr_resource_entry *res;
struct ata_port *ap = NULL;
unsigned long lock_flags = 0;
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
......@@ -3672,12 +3673,16 @@ static int ipr_slave_configure(struct scsi_device *sdev)
}
if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
sdev->allow_restart = 1;
if (ipr_is_gata(res) && res->sata_port) {
if (ipr_is_gata(res) && res->sata_port)
ap = res->sata_port->ap;
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
if (ap) {
scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
ata_sas_slave_configure(sdev, res->sata_port->ap);
} else {
ata_sas_slave_configure(sdev, ap);
} else
scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
}
return 0;
}
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
return 0;
......
......@@ -463,7 +463,7 @@ static int iscsi_sw_tcp_pdu_init(struct iscsi_task *task,
}
if (err) {
iscsi_conn_failure(conn, err);
/* got invalid offset/len */
return -EIO;
}
return 0;
......@@ -851,6 +851,7 @@ static struct scsi_host_template iscsi_sw_tcp_sht = {
.use_clustering = DISABLE_CLUSTERING,
.slave_alloc = iscsi_sw_tcp_slave_alloc,
.slave_configure = iscsi_sw_tcp_slave_configure,
.target_alloc = iscsi_target_alloc,
.proc_name = "iscsi_tcp",
.this_id = -1,
};
......
......@@ -113,6 +113,11 @@ void fc_disc_stop_rports(struct fc_disc *disc)
lport->tt.rport_logoff(rport);
}
list_for_each_entry_safe(rdata, next, &disc->rogue_rports, peers) {
rport = PRIV_TO_RPORT(rdata);
lport->tt.rport_logoff(rport);
}
mutex_unlock(&disc->disc_mutex);
}
......@@ -131,23 +136,32 @@ static void fc_disc_rport_callback(struct fc_lport *lport,
{
struct fc_rport_libfc_priv *rdata = rport->dd_data;
struct fc_disc *disc = &lport->disc;
int found = 0;
FC_DEBUG_DISC("Received a %d event for port (%6x)\n", event,
rport->port_id);
if (event == RPORT_EV_CREATED) {
switch (event) {
case RPORT_EV_CREATED:
if (disc) {
found = 1;
mutex_lock(&disc->disc_mutex);
list_add_tail(&rdata->peers, &disc->rports);
mutex_unlock(&disc->disc_mutex);
}
break;
case RPORT_EV_LOGO:
case RPORT_EV_FAILED:
case RPORT_EV_STOP:
mutex_lock(&disc->disc_mutex);
mutex_lock(&rdata->rp_mutex);
if (rdata->trans_state == FC_PORTSTATE_ROGUE)
list_del(&rdata->peers);
mutex_unlock(&rdata->rp_mutex);
mutex_unlock(&disc->disc_mutex);
break;
default:
break;
}
if (!found)
FC_DEBUG_DISC("The rport (%6x) is not maintained "
"by the discovery layer\n", rport->port_id);
}
/**
......@@ -439,6 +453,7 @@ static int fc_disc_new_target(struct fc_disc *disc,
rdata = rport->dd_data;
rdata->ops = &fc_disc_rport_ops;
rdata->rp_state = RPORT_ST_INIT;
list_add_tail(&rdata->peers, &disc->rogue_rports);
lport->tt.rport_login(rport);
}
}
......@@ -461,21 +476,29 @@ static void fc_disc_del_target(struct fc_disc *disc, struct fc_rport *rport)
/**
* fc_disc_done() - Discovery has been completed
* @disc: FC discovery context
* Locking Note: This function expects that the disc mutex is locked before
* it is called. The discovery callback is then made with the lock released,
* and the lock is re-taken before returning from this function
*/
static void fc_disc_done(struct fc_disc *disc)
{
struct fc_lport *lport = disc->lport;
enum fc_disc_event event;
FC_DEBUG_DISC("Discovery complete for port (%6x)\n",
fc_host_port_id(lport->host));
disc->disc_callback(lport, disc->event);
event = disc->event;
disc->event = DISC_EV_NONE;
if (disc->requested)
fc_disc_gpn_ft_req(disc);
else
disc->pending = 0;
mutex_unlock(&disc->disc_mutex);
disc->disc_callback(lport, event);
mutex_lock(&disc->disc_mutex);
}
/**
......@@ -622,6 +645,8 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
rdata = rport->dd_data;
rdata->ops = &fc_disc_rport_ops;
rdata->local_port = lport;
list_add_tail(&rdata->peers,
&disc->rogue_rports);
lport->tt.rport_login(rport);
} else
FC_DBG("Failed to allocate memory for "
......@@ -681,8 +706,8 @@ static void fc_disc_timeout(struct work_struct *work)
* @fp: response frame
* @lp_arg: Fibre Channel host port instance
*
* Locking Note: This function expects that the disc_mutex is locked
* before it is called.
* Locking Note: This function is called without disc mutex held, and
* should do all its processing with the mutex held
*/
static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
void *disc_arg)
......@@ -695,11 +720,13 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
unsigned int len;
int error;
mutex_lock(&disc->disc_mutex);
FC_DEBUG_DISC("Received a GPN_FT response on port (%6x)\n",
fc_host_port_id(disc->lport->host));
if (IS_ERR(fp)) {
fc_disc_error(disc, fp);
mutex_unlock(&disc->disc_mutex);
return;
}
......@@ -744,6 +771,8 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
disc->seq_count++;
}
fc_frame_free(fp);
mutex_unlock(&disc->disc_mutex);
}
/**
......@@ -757,7 +786,6 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
static void fc_disc_single(struct fc_disc *disc, struct fc_disc_port *dp)
{
struct fc_lport *lport;
struct fc_rport *rport;
struct fc_rport *new_rport;
struct fc_rport_libfc_priv *rdata;
......@@ -766,15 +794,12 @@ static void fc_disc_single(struct fc_disc *disc, struct fc_disc_port *dp)
if (dp->ids.port_id == fc_host_port_id(lport->host))
goto out;
rport = lport->tt.rport_lookup(lport, dp->ids.port_id);
if (rport)
fc_disc_del_target(disc, rport);
new_rport = lport->tt.rport_create(dp);
if (new_rport) {
rdata = new_rport->dd_data;
rdata->ops = &fc_disc_rport_ops;
kfree(dp);
list_add_tail(&rdata->peers, &disc->rogue_rports);
lport->tt.rport_login(new_rport);
}
return;
......@@ -836,6 +861,7 @@ int fc_disc_init(struct fc_lport *lport)
INIT_DELAYED_WORK(&disc->disc_work, fc_disc_timeout);
mutex_init(&disc->disc_mutex);
INIT_LIST_HEAD(&disc->rports);
INIT_LIST_HEAD(&disc->rogue_rports);
disc->lport = lport;
disc->delay = FC_DISC_DELAY;
......
......@@ -41,7 +41,7 @@ static struct fc_seq *fc_elsct_send(struct fc_lport *lport,
void *arg, u32 timer_msec)
{
enum fc_rctl r_ctl;
u32 did;
u32 did = FC_FID_NONE;
enum fc_fh_type fh_type;
int rc;
......
......@@ -713,7 +713,7 @@ static void fc_fcp_reduce_can_queue(struct fc_lport *lp)
static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg)
{
struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)arg;
struct fc_lport *lp;
struct fc_lport *lport = fsp->lp;
struct fc_frame_header *fh;
struct fcp_txrdy *dd;
u8 r_ctl;
......@@ -724,9 +724,8 @@ static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg)
fh = fc_frame_header_get(fp);
r_ctl = fh->fh_r_ctl;
lp = fsp->lp;
if (!(lp->state & LPORT_ST_READY))
if (!(lport->state & LPORT_ST_READY))
goto out;
if (fc_fcp_lock_pkt(fsp))
goto out;
......@@ -779,7 +778,7 @@ static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg)
if (IS_ERR(fp))
fc_fcp_error(fsp, fp);
else if (rc == -ENOMEM)
fc_fcp_reduce_can_queue(lp);
fc_fcp_reduce_can_queue(lport);
}
static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
......
......@@ -618,6 +618,11 @@ int fc_fabric_logoff(struct fc_lport *lport)
{
lport->tt.disc_stop_final(lport);
mutex_lock(&lport->lp_mutex);
if (lport->dns_rp)
lport->tt.rport_logoff(lport->dns_rp);
mutex_unlock(&lport->lp_mutex);
lport->tt.rport_flush_queue();
mutex_lock(&lport->lp_mutex);
fc_lport_enter_logo(lport);
mutex_unlock(&lport->lp_mutex);
cancel_delayed_work_sync(&lport->retry_work);
......@@ -639,7 +644,12 @@ EXPORT_SYMBOL(fc_fabric_logoff);
*/
int fc_lport_destroy(struct fc_lport *lport)
{
mutex_lock(&lport->lp_mutex);
lport->state = LPORT_ST_NONE;
lport->link_up = 0;
lport->tt.frame_send = fc_frame_drop;
mutex_unlock(&lport->lp_mutex);
lport->tt.fcp_abort_io(lport);
lport->tt.exch_mgr_reset(lport, 0, 0);
return 0;
......@@ -1032,17 +1042,19 @@ static void fc_lport_rft_id_resp(struct fc_seq *sp, struct fc_frame *fp,
FC_DEBUG_LPORT("Received a RFT_ID response\n");
if (IS_ERR(fp)) {
fc_lport_error(lport, fp);
goto err;
}
if (lport->state != LPORT_ST_RFT_ID) {
FC_DBG("Received a RFT_ID response, but in state %s\n",
fc_lport_state(lport));
if (IS_ERR(fp))
goto err;
goto out;
}
if (IS_ERR(fp)) {
fc_lport_error(lport, fp);
goto err;
}
fh = fc_frame_header_get(fp);
ct = fc_frame_payload_get(fp, sizeof(*ct));
......@@ -1084,17 +1096,19 @@ static void fc_lport_rpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
FC_DEBUG_LPORT("Received a RPN_ID response\n");
if (IS_ERR(fp)) {
fc_lport_error(lport, fp);
goto err;
}
if (lport->state != LPORT_ST_RPN_ID) {
FC_DBG("Received a RPN_ID response, but in state %s\n",
fc_lport_state(lport));
if (IS_ERR(fp))
goto err;
goto out;
}
if (IS_ERR(fp)) {
fc_lport_error(lport, fp);
goto err;
}
fh = fc_frame_header_get(fp);
ct = fc_frame_payload_get(fp, sizeof(*ct));
if (fh && ct && fh->fh_type == FC_TYPE_CT &&
......@@ -1134,17 +1148,19 @@ static void fc_lport_scr_resp(struct fc_seq *sp, struct fc_frame *fp,
FC_DEBUG_LPORT("Received a SCR response\n");
if (IS_ERR(fp)) {
fc_lport_error(lport, fp);
goto err;
}
if (lport->state != LPORT_ST_SCR) {
FC_DBG("Received a SCR response, but in state %s\n",
fc_lport_state(lport));
if (IS_ERR(fp))
goto err;
goto out;
}
if (IS_ERR(fp)) {
fc_lport_error(lport, fp);
goto err;
}
op = fc_frame_payload_op(fp);
if (op == ELS_LS_ACC)
fc_lport_enter_ready(lport);
......@@ -1360,17 +1376,19 @@ static void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
FC_DEBUG_LPORT("Received a LOGO response\n");
if (IS_ERR(fp)) {
fc_lport_error(lport, fp);
goto err;
}
if (lport->state != LPORT_ST_LOGO) {
FC_DBG("Received a LOGO response, but in state %s\n",
fc_lport_state(lport));
if (IS_ERR(fp))
goto err;
goto out;
}
if (IS_ERR(fp)) {
fc_lport_error(lport, fp);
goto err;
}
op = fc_frame_payload_op(fp);
if (op == ELS_LS_ACC)
fc_lport_enter_reset(lport);
......@@ -1400,10 +1418,6 @@ static void fc_lport_enter_logo(struct fc_lport *lport)
fc_lport_state_enter(lport, LPORT_ST_LOGO);
/* DNS session should be closed so we can release it here */
if (lport->dns_rp)
lport->tt.rport_logoff(lport->dns_rp);
fp = fc_frame_alloc(lport, sizeof(*logo));
if (!fp) {
fc_lport_error(lport, fp);
......@@ -1444,17 +1458,19 @@ static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
FC_DEBUG_LPORT("Received a FLOGI response\n");
if (IS_ERR(fp)) {
fc_lport_error(lport, fp);
goto err;
}
if (lport->state != LPORT_ST_FLOGI) {
FC_DBG("Received a FLOGI response, but in state %s\n",
fc_lport_state(lport));
if (IS_ERR(fp))
goto err;
goto out;
}
if (IS_ERR(fp)) {
fc_lport_error(lport, fp);
goto err;
}
fh = fc_frame_header_get(fp);
did = ntoh24(fh->fh_d_id);
if (fc_frame_payload_op(fp) == ELS_LS_ACC && did != 0) {
......
......@@ -267,6 +267,10 @@ static void fc_rport_work(struct work_struct *work)
"(%6x).\n", ids.port_id);
event = RPORT_EV_FAILED;
}
if (rport->port_id != FC_FID_DIR_SERV)
if (rport_ops->event_callback)
rport_ops->event_callback(lport, rport,
RPORT_EV_FAILED);
put_device(&rport->dev);
rport = new_rport;
rdata = new_rport->dd_data;
......@@ -325,11 +329,20 @@ int fc_rport_login(struct fc_rport *rport)
int fc_rport_logoff(struct fc_rport *rport)
{
struct fc_rport_libfc_priv *rdata = rport->dd_data;
struct fc_lport *lport = rdata->local_port;
mutex_lock(&rdata->rp_mutex);
FC_DEBUG_RPORT("Remove port (%6x)\n", rport->port_id);
if (rdata->rp_state == RPORT_ST_NONE) {
FC_DEBUG_RPORT("(%6x): Port (%6x) in NONE state,"
" not removing", fc_host_port_id(lport->host),
rport->port_id);
mutex_unlock(&rdata->rp_mutex);
goto out;
}
fc_rport_enter_logo(rport);
/*
......@@ -349,6 +362,7 @@ int fc_rport_logoff(struct fc_rport *rport)
mutex_unlock(&rdata->rp_mutex);
out:
return 0;
}
......@@ -430,6 +444,7 @@ static void fc_rport_error(struct fc_rport *rport, struct fc_frame *fp)
case RPORT_ST_PRLI:
case RPORT_ST_LOGO:
rdata->event = RPORT_EV_FAILED;
fc_rport_state_enter(rport, RPORT_ST_NONE);
queue_work(rport_event_queue,
&rdata->event_work);
break;
......@@ -494,7 +509,7 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
struct fc_rport *rport = rp_arg;
struct fc_rport_libfc_priv *rdata = rport->dd_data;
struct fc_lport *lport = rdata->local_port;
struct fc_els_flogi *plp;
struct fc_els_flogi *plp = NULL;
unsigned int tov;
u16 csp_seq;
u16 cssp_seq;
......@@ -505,17 +520,19 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
FC_DEBUG_RPORT("Received a PLOGI response from port (%6x)\n",
rport->port_id);
if (IS_ERR(fp)) {
fc_rport_error_retry(rport, fp);
goto err;
}
if (rdata->rp_state != RPORT_ST_PLOGI) {
FC_DBG("Received a PLOGI response, but in state %s\n",
fc_rport_state(rport));
if (IS_ERR(fp))
goto err;
goto out;
}
if (IS_ERR(fp)) {
fc_rport_error_retry(rport, fp);
goto err;
}
op = fc_frame_payload_op(fp);
if (op == ELS_LS_ACC &&
(plp = fc_frame_payload_get(fp, sizeof(*plp))) != NULL) {
......@@ -614,17 +631,19 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
FC_DEBUG_RPORT("Received a PRLI response from port (%6x)\n",
rport->port_id);
if (IS_ERR(fp)) {
fc_rport_error_retry(rport, fp);
goto err;
}
if (rdata->rp_state != RPORT_ST_PRLI) {
FC_DBG("Received a PRLI response, but in state %s\n",
fc_rport_state(rport));
if (IS_ERR(fp))
goto err;
goto out;
}
if (IS_ERR(fp)) {
fc_rport_error_retry(rport, fp);
goto err;
}
op = fc_frame_payload_op(fp);
if (op == ELS_LS_ACC) {
pp = fc_frame_payload_get(fp, sizeof(*pp));
......@@ -646,6 +665,7 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
} else {
FC_DBG("Bad ELS response\n");
rdata->event = RPORT_EV_FAILED;
fc_rport_state_enter(rport, RPORT_ST_NONE);
queue_work(rport_event_queue, &rdata->event_work);
}
......@@ -678,23 +698,26 @@ static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
FC_DEBUG_RPORT("Received a LOGO response from port (%6x)\n",
rport->port_id);
if (IS_ERR(fp)) {
fc_rport_error_retry(rport, fp);
goto err;
}
if (rdata->rp_state != RPORT_ST_LOGO) {
FC_DEBUG_RPORT("Received a LOGO response, but in state %s\n",
fc_rport_state(rport));
if (IS_ERR(fp))
goto err;
goto out;
}
if (IS_ERR(fp)) {
fc_rport_error_retry(rport, fp);
goto err;
}
op = fc_frame_payload_op(fp);
if (op == ELS_LS_ACC) {
fc_rport_enter_rtv(rport);
} else {
FC_DBG("Bad ELS response\n");
rdata->event = RPORT_EV_LOGO;
fc_rport_state_enter(rport, RPORT_ST_NONE);
queue_work(rport_event_queue, &rdata->event_work);
}
......@@ -764,17 +787,19 @@ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
FC_DEBUG_RPORT("Received a RTV response from port (%6x)\n",
rport->port_id);
if (IS_ERR(fp)) {
fc_rport_error(rport, fp);
goto err;
}
if (rdata->rp_state != RPORT_ST_RTV) {
FC_DBG("Received a RTV response, but in state %s\n",
fc_rport_state(rport));
if (IS_ERR(fp))
goto err;
goto out;
}
if (IS_ERR(fp)) {
fc_rport_error(rport, fp);
goto err;
}
op = fc_frame_payload_op(fp);
if (op == ELS_LS_ACC) {
struct fc_els_rtv_acc *rtv;
......@@ -1007,6 +1032,8 @@ static void fc_rport_recv_plogi_req(struct fc_rport *rport,
default:
FC_DEBUG_RPORT("incoming PLOGI from %x in unexpected "
"state %d\n", sid, rdata->rp_state);
fc_frame_free(fp);
return;
break;
}
......@@ -1098,6 +1125,8 @@ static void fc_rport_recv_prli_req(struct fc_rport *rport,
reason = ELS_RJT_NONE;
break;
default:
fc_frame_free(rx_fp);
return;
break;
}
len = fr_len(rx_fp) - sizeof(*fh);
......@@ -1227,6 +1256,11 @@ static void fc_rport_recv_prlo_req(struct fc_rport *rport, struct fc_seq *sp,
"while in state %s\n", ntoh24(fh->fh_s_id),
fc_rport_state(rport));
if (rdata->rp_state == RPORT_ST_NONE) {
fc_frame_free(fp);
return;
}
rjt_data.fp = NULL;
rjt_data.reason = ELS_RJT_UNAB;
rjt_data.explan = ELS_EXPL_NONE;
......@@ -1256,7 +1290,13 @@ static void fc_rport_recv_logo_req(struct fc_rport *rport, struct fc_seq *sp,
"while in state %s\n", ntoh24(fh->fh_s_id),
fc_rport_state(rport));
if (rdata->rp_state == RPORT_ST_NONE) {
fc_frame_free(fp);
return;
}
rdata->event = RPORT_EV_LOGO;
fc_rport_state_enter(rport, RPORT_ST_NONE);
queue_work(rport_event_queue, &rdata->event_work);
lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
......
......@@ -1463,6 +1463,16 @@ int iscsi_change_queue_depth(struct scsi_device *sdev, int depth)
}
EXPORT_SYMBOL_GPL(iscsi_change_queue_depth);
int iscsi_target_alloc(struct scsi_target *starget)
{
struct iscsi_cls_session *cls_session = starget_to_session(starget);
struct iscsi_session *session = cls_session->dd_data;
starget->can_queue = session->scsi_cmds_max;
return 0;
}
EXPORT_SYMBOL_GPL(iscsi_target_alloc);
void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session)
{
struct iscsi_session *session = cls_session->dd_data;
......
......@@ -1036,8 +1036,11 @@ int iscsi_tcp_task_xmit(struct iscsi_task *task)
rc = conn->session->tt->init_pdu(task, r2t->data_offset + r2t->sent,
r2t->data_count);
if (rc)
if (rc) {
iscsi_conn_failure(conn, ISCSI_ERR_XMIT_FAILED);
return rc;
}
r2t->sent += r2t->data_count;
goto flush;
}
......
......@@ -443,6 +443,7 @@ struct lpfc_hba {
uint32_t hba_flag; /* hba generic flags */
#define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */
#define DEFER_ERATT 0x4 /* Deferred error attention in progress */
struct lpfc_dmabuf slim2p;
MAILBOX_t *mbox;
......@@ -723,4 +724,3 @@ lpfc_sli_read_hs(struct lpfc_hba *phba)
return;
}
......@@ -184,6 +184,8 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba *);
struct lpfc_iocbq * lpfc_sli_get_iocbq(struct lpfc_hba *);
void lpfc_sli_release_iocbq(struct lpfc_hba *, struct lpfc_iocbq *);
uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *);
void lpfc_sli_cancel_iocbs(struct lpfc_hba *, struct list_head *, uint32_t,
uint32_t);
void lpfc_reset_barrier(struct lpfc_hba * phba);
int lpfc_sli_brdready(struct lpfc_hba *, uint32_t);
......
......@@ -47,7 +47,7 @@
#include "lpfc_debugfs.h"
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
/**
/*
* debugfs interface
*
* To access this interface the user should:
......@@ -95,7 +95,7 @@ module_param(lpfc_debugfs_max_slow_ring_trc, int, 0);
MODULE_PARM_DESC(lpfc_debugfs_max_slow_ring_trc,
"Set debugfs slow ring trace depth");
int lpfc_debugfs_mask_disc_trc;
static int lpfc_debugfs_mask_disc_trc;
module_param(lpfc_debugfs_mask_disc_trc, int, 0);
MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
"Set debugfs discovery trace mask");
......@@ -127,7 +127,7 @@ static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
static unsigned long lpfc_debugfs_start_time = 0L;
/**
* lpfc_debugfs_disc_trc_data - Dump discovery logging to a buffer.
* lpfc_debugfs_disc_trc_data - Dump discovery logging to a buffer
* @vport: The vport to gather the log info from.
* @buf: The buffer to dump log into.
* @size: The maximum amount of data to process.
......@@ -187,7 +187,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
}
/**
* lpfc_debugfs_slow_ring_trc_data - Dump slow ring logging to a buffer.
* lpfc_debugfs_slow_ring_trc_data - Dump slow ring logging to a buffer
* @phba: The HBA to gather the log info from.
* @buf: The buffer to dump log into.
* @size: The maximum amount of data to process.
......@@ -250,7 +250,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
static int lpfc_debugfs_last_hbq = -1;
/**
* lpfc_debugfs_hbqinfo_data - Dump host buffer queue info to a buffer.
* lpfc_debugfs_hbqinfo_data - Dump host buffer queue info to a buffer
* @phba: The HBA to gather host buffer info from.
* @buf: The buffer to dump log into.
* @size: The maximum amount of data to process.
......@@ -369,7 +369,7 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
static int lpfc_debugfs_last_hba_slim_off;
/**
* lpfc_debugfs_dumpHBASlim_data - Dump HBA SLIM info to a buffer.
* lpfc_debugfs_dumpHBASlim_data - Dump HBA SLIM info to a buffer
* @phba: The HBA to gather SLIM info from.
* @buf: The buffer to dump log into.
* @size: The maximum amount of data to process.
......@@ -399,8 +399,7 @@ lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
len += snprintf(buf+len, size-len, "HBA SLIM\n");
lpfc_memcpy_from_slim(buffer,
((uint8_t *)phba->MBslimaddr) + lpfc_debugfs_last_hba_slim_off,
1024);
phba->MBslimaddr + lpfc_debugfs_last_hba_slim_off, 1024);
ptr = (uint32_t *)&buffer[0];
off = lpfc_debugfs_last_hba_slim_off;
......@@ -426,7 +425,7 @@ lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
}
/**
* lpfc_debugfs_dumpHostSlim_data - Dump host SLIM info to a buffer.
* lpfc_debugfs_dumpHostSlim_data - Dump host SLIM info to a buffer
* @phba: The HBA to gather Host SLIM info from.
* @buf: The buffer to dump log into.
* @size: The maximum amount of data to process.
......@@ -501,7 +500,7 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
}
/**
* lpfc_debugfs_nodelist_data - Dump target node list to a buffer.
* lpfc_debugfs_nodelist_data - Dump target node list to a buffer
* @vport: The vport to gather target node info from.
* @buf: The buffer to dump log into.
* @size: The maximum amount of data to process.
......@@ -599,7 +598,7 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
#endif
/**
* lpfc_debugfs_disc_trc - Store discovery trace log.
* lpfc_debugfs_disc_trc - Store discovery trace log
* @vport: The vport to associate this trace string with for retrieval.
* @mask: Log entry classification.
* @fmt: Format string to be displayed when dumping the log.
......@@ -643,7 +642,7 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
}
/**
* lpfc_debugfs_slow_ring_trc - Store slow ring trace log.
* lpfc_debugfs_slow_ring_trc - Store slow ring trace log
* @phba: The phba to associate this trace string with for retrieval.
* @fmt: Format string to be displayed when dumping the log.
* @data1: 1st data parameter to be applied to @fmt.
......@@ -682,7 +681,7 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
/**
* lpfc_debugfs_disc_trc_open - Open the discovery trace log.
* lpfc_debugfs_disc_trc_open - Open the discovery trace log
* @inode: The inode pointer that contains a vport pointer.
* @file: The file pointer to attach the log output.
*
......@@ -732,7 +731,7 @@ lpfc_debugfs_disc_trc_open(struct inode *inode, struct file *file)
}
/**
* lpfc_debugfs_slow_ring_trc_open - Open the Slow Ring trace log.
* lpfc_debugfs_slow_ring_trc_open - Open the Slow Ring trace log
* @inode: The inode pointer that contains a vport pointer.
* @file: The file pointer to attach the log output.
*
......@@ -782,7 +781,7 @@ lpfc_debugfs_slow_ring_trc_open(struct inode *inode, struct file *file)
}
/**
* lpfc_debugfs_hbqinfo_open - Open the hbqinfo debugfs buffer.
* lpfc_debugfs_hbqinfo_open - Open the hbqinfo debugfs buffer
* @inode: The inode pointer that contains a vport pointer.
* @file: The file pointer to attach the log output.
*
......@@ -824,7 +823,7 @@ lpfc_debugfs_hbqinfo_open(struct inode *inode, struct file *file)
}
/**
* lpfc_debugfs_dumpHBASlim_open - Open the Dump HBA SLIM debugfs buffer.
* lpfc_debugfs_dumpHBASlim_open - Open the Dump HBA SLIM debugfs buffer
* @inode: The inode pointer that contains a vport pointer.
* @file: The file pointer to attach the log output.
*
......@@ -866,7 +865,7 @@ lpfc_debugfs_dumpHBASlim_open(struct inode *inode, struct file *file)
}
/**
* lpfc_debugfs_dumpHostSlim_open - Open the Dump Host SLIM debugfs buffer.
* lpfc_debugfs_dumpHostSlim_open - Open the Dump Host SLIM debugfs buffer
* @inode: The inode pointer that contains a vport pointer.
* @file: The file pointer to attach the log output.
*
......@@ -993,7 +992,7 @@ lpfc_debugfs_dumpDataDif_write(struct file *file, const char __user *buf,
/**
* lpfc_debugfs_nodelist_open - Open the nodelist debugfs file.
* lpfc_debugfs_nodelist_open - Open the nodelist debugfs file
* @inode: The inode pointer that contains a vport pointer.
* @file: The file pointer to attach the log output.
*
......@@ -1035,7 +1034,7 @@ lpfc_debugfs_nodelist_open(struct inode *inode, struct file *file)
}
/**
* lpfc_debugfs_lseek - Seek through a debugfs file.
* lpfc_debugfs_lseek - Seek through a debugfs file
* @file: The file pointer to seek through.
* @off: The offset to seek to or the amount to seek by.
* @whence: Indicates how to seek.
......@@ -1073,7 +1072,7 @@ lpfc_debugfs_lseek(struct file *file, loff_t off, int whence)
}
/**
* lpfc_debugfs_read - Read a debugfs file.
* lpfc_debugfs_read - Read a debugfs file
* @file: The file pointer to read from.
* @buf: The buffer to copy the data to.
* @nbytes: The number of bytes to read.
......@@ -1098,7 +1097,7 @@ lpfc_debugfs_read(struct file *file, char __user *buf,
}
/**
* lpfc_debugfs_release - Release the buffer used to store debugfs file data.
* lpfc_debugfs_release - Release the buffer used to store debugfs file data
* @inode: The inode pointer that contains a vport pointer. (unused)
* @file: The file pointer that contains the buffer to release.
*
......@@ -1210,7 +1209,7 @@ static atomic_t lpfc_debugfs_hba_count;
#endif
/**
* lpfc_debugfs_initialize - Initialize debugfs for a vport.
* lpfc_debugfs_initialize - Initialize debugfs for a vport
* @vport: The vport pointer to initialize.
*
* Description:
......@@ -1434,7 +1433,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
}
/**
* lpfc_debugfs_terminate - Tear down debugfs infrastructure for this vport.
* lpfc_debugfs_terminate - Tear down debugfs infrastructure for this vport
* @vport: The vport pointer to remove from debugfs.
*
* Description:
......
......@@ -99,6 +99,7 @@ struct lpfc_nodelist {
#define NLP_USG_FREE_ACK_BIT 0x8 /* Indicate ndlp memory free invoked */
struct timer_list nlp_delayfunc; /* Used for delayed ELS cmds */
struct lpfc_hba *phba;
struct fc_rport *rport; /* Corresponding FC transport
port structure */
struct lpfc_vport *vport;
......
......@@ -78,7 +78,7 @@ lpfc_terminate_rport_io(struct fc_rport *rport)
return;
}
phba = ndlp->vport->phba;
phba = ndlp->phba;
lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
"rport terminate: sid:x%x did:x%x flg:x%x",
......@@ -276,7 +276,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
}
/**
* lpfc_alloc_fast_evt: Allocates data structure for posting event.
* lpfc_alloc_fast_evt - Allocates data structure for posting event
* @phba: Pointer to hba context object.
*
* This function is called from the functions which need to post
......@@ -303,7 +303,7 @@ lpfc_alloc_fast_evt(struct lpfc_hba *phba) {
}
/**
* lpfc_free_fast_evt: Frees event data structure.
* lpfc_free_fast_evt - Frees event data structure
* @phba: Pointer to hba context object.
 * @evt: Event object which needs to be freed.
*
......@@ -319,7 +319,7 @@ lpfc_free_fast_evt(struct lpfc_hba *phba,
}
/**
* lpfc_send_fastpath_evt: Posts events generated from fast path.
* lpfc_send_fastpath_evt - Posts events generated from fast path
* @phba: Pointer to hba context object.
* @evtp: Event data structure.
*
......@@ -1858,13 +1858,18 @@ lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
NLP_STE_UNUSED_NODE);
}
/**
* lpfc_initialize_node: Initialize all fields of node object.
* lpfc_initialize_node - Initialize all fields of node object
* @vport: Pointer to Virtual Port object.
* @ndlp: Pointer to FC node object.
* @did: FC_ID of the node.
* This function is always called when node object need to
* be initialized. It initializes all the fields of the node
* object.
*
 * This function is always called whenever a node object needs to be
 * initialized. It initializes all the fields of the node object. Although
 * the reference to phba from @ndlp could be obtained indirectly through its
 * reference to @vport, @ndlp takes a direct reference to phba here, because
 * the life-span of @ndlp may extend beyond that of @vport (the final release
 * of the ndlp is determined by its reference count) and operations on @ndlp
 * still need the phba reference.
**/
static inline void
lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
......@@ -1877,6 +1882,7 @@ lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
ndlp->nlp_DID = did;
ndlp->vport = vport;
ndlp->phba = vport->phba;
ndlp->nlp_sid = NLP_NO_SID;
kref_init(&ndlp->kref);
NLP_INT_NODE_ACT(ndlp);
......@@ -2086,7 +2092,6 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
struct lpfc_sli *psli;
struct lpfc_sli_ring *pring;
struct lpfc_iocbq *iocb, *next_iocb;
IOCB_t *icmd;
uint32_t rpi, i;
lpfc_fabric_abort_nport(ndlp);
......@@ -2122,19 +2127,9 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
}
}
while (!list_empty(&completions)) {
iocb = list_get_first(&completions, struct lpfc_iocbq, list);
list_del_init(&iocb->list);
if (!iocb->iocb_cmpl)
lpfc_sli_release_iocbq(phba, iocb);
else {
icmd = &iocb->iocb;
icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
(iocb->iocb_cmpl)(phba, iocb, iocb);
}
}
/* Cancel all the IOCBs from the completions list */
lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
IOERR_SLI_ABORTED);
return 0;
}
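Several hunks in this patch (here, in lpfc_free_tx() further down, and in lpfc_els_abort()) replace the same open-coded drain loop with a single lpfc_sli_cancel_iocbs() call. The sketch below reconstructs what that helper presumably does from the loop being removed; treat it as an approximation of the new common routine, not the verbatim lpfc_sli.c code.

#include <linux/list.h>

/* Walk the detached completion list, failing each iocb back through its
 * completion routine with the given status, or releasing it outright if
 * no completion routine was attached. */
void lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
			   uint32_t ulpstatus, uint32_t ulpword4)
{
	struct lpfc_iocbq *iocb, *next_iocb;

	list_for_each_entry_safe(iocb, next_iocb, iocblist, list) {
		list_del_init(&iocb->list);
		if (!iocb->iocb_cmpl) {
			lpfc_sli_release_iocbq(phba, iocb);
		} else {
			iocb->iocb.ulpStatus = ulpstatus;
			iocb->iocb.un.ulpWord[4] = ulpword4;
			(iocb->iocb_cmpl)(phba, iocb, iocb);
		}
	}
}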
......@@ -2186,9 +2181,13 @@ lpfc_unreg_all_rpis(struct lpfc_vport *vport)
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
mbox->context1 = NULL;
rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
if (rc == MBX_NOT_FINISHED) {
if (rc != MBX_TIMEOUT)
mempool_free(mbox, phba->mbox_mem_pool);
}
if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
"1836 Could not issue "
"unreg_login(all_rpis) status %d\n", rc);
}
}
......@@ -2206,12 +2205,14 @@ lpfc_unreg_default_rpis(struct lpfc_vport *vport)
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
mbox->context1 = NULL;
rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
if (rc == MBX_NOT_FINISHED) {
if (rc != MBX_TIMEOUT)
mempool_free(mbox, phba->mbox_mem_pool);
if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
"1815 Could not issue "
"unreg_did (default rpis)\n");
mempool_free(mbox, phba->mbox_mem_pool);
}
"unreg_did (default rpis) status %d\n",
rc);
}
}
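Because the diff view interleaves the old and new lines, the resulting error path is easier to read spelled out. A sketch of the reworked tail of lpfc_unreg_default_rpis() (lpfc_unreg_all_rpis above changes the same way), assembled from the hunk: the mailbox is freed except on MBX_TIMEOUT, presumably because the deferred mbox_cmpl handler set just above will release it later, and both timeout and not-finished outcomes are now logged with the status code.

	rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
	if (rc != MBX_TIMEOUT)
		mempool_free(mbox, phba->mbox_mem_pool);
	if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
				 "1815 Could not issue "
				 "unreg_did (default rpis) status %d\n",
				 rc);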
......@@ -2470,14 +2471,13 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
if (ndlp->nlp_flag & NLP_RCV_PLOGI)
return NULL;
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
spin_unlock_irq(shost->host_lock);
/* Since this node is marked for discovery,
* delay timeout is not needed.
*/
lpfc_cancel_retry_delay_tmo(vport, ndlp);
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
spin_unlock_irq(shost->host_lock);
} else
ndlp = NULL;
} else {
......@@ -2740,19 +2740,9 @@ lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
}
spin_unlock_irq(&phba->hbalock);
while (!list_empty(&completions)) {
iocb = list_get_first(&completions, struct lpfc_iocbq, list);
list_del_init(&iocb->list);
if (!iocb->iocb_cmpl)
lpfc_sli_release_iocbq(phba, iocb);
else {
icmd = &iocb->iocb;
icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
(iocb->iocb_cmpl) (phba, iocb, iocb);
}
}
/* Cancel all the IOCBs from the completions list */
lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
IOERR_SLI_ABORTED);
}
static void
......@@ -3173,7 +3163,7 @@ lpfc_nlp_release(struct kref *kref)
lpfc_nlp_remove(ndlp->vport, ndlp);
/* clear the ndlp active flag for all release cases */
phba = ndlp->vport->phba;
phba = ndlp->phba;
spin_lock_irqsave(&phba->ndlp_lock, flags);
NLP_CLR_NODE_ACT(ndlp);
spin_unlock_irqrestore(&phba->ndlp_lock, flags);
......@@ -3181,7 +3171,7 @@ lpfc_nlp_release(struct kref *kref)
/* free ndlp memory for final ndlp release */
if (NLP_CHK_FREE_REQ(ndlp)) {
kfree(ndlp->lat_data);
mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool);
mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
}
}
......@@ -3204,7 +3194,7 @@ lpfc_nlp_get(struct lpfc_nodelist *ndlp)
* ndlp reference count that is in the process of being
* released.
*/
phba = ndlp->vport->phba;
phba = ndlp->phba;
spin_lock_irqsave(&phba->ndlp_lock, flags);
if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) {
spin_unlock_irqrestore(&phba->ndlp_lock, flags);
......@@ -3240,7 +3230,7 @@ lpfc_nlp_put(struct lpfc_nodelist *ndlp)
"node put: did:x%x flg:x%x refcnt:x%x",
ndlp->nlp_DID, ndlp->nlp_flag,
atomic_read(&ndlp->kref.refcount));
phba = ndlp->vport->phba;
phba = ndlp->phba;
spin_lock_irqsave(&phba->ndlp_lock, flags);
/* Check the ndlp memory free acknowledge flag to avoid the
* possible race condition that kref_put got invoked again
......
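The get/put paths above now reach the per-HBA ndlp_lock through the cached ndlp->phba pointer rather than through ndlp->vport, so a node can still be pinned or released safely after its vport has gone away. A minimal sketch of the get-side pattern, using the NLP_CHK_* macros visible in the hunk; this is an illustration, not the verbatim lpfc_nlp_get() body.

struct lpfc_nodelist *nlp_get_sketch(struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = ndlp->phba;	/* vport-independent reference */
	unsigned long flags;

	spin_lock_irqsave(&phba->ndlp_lock, flags);
	if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) {
		/* Node is inactive or its final free has been acknowledged:
		 * do not resurrect it with a new reference. */
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		return NULL;
	}
	kref_get(&ndlp->kref);
	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
	return ndlp;
}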
......@@ -27,7 +27,7 @@
#define LOG_FCP 0x40 /* FCP traffic history */
#define LOG_NODE 0x80 /* Node table events */
#define LOG_TEMP 0x100 /* Temperature sensor events */
#define LOG_BG 0x200 /* BlockBuard events */
#define LOG_BG 0x200 /* BlockGuard events */
#define LOG_MISC 0x400 /* Miscellaneous events */
#define LOG_SLI 0x800 /* SLI events */
#define LOG_FCP_ERROR 0x1000 /* log errors, not underruns */
......
......@@ -41,7 +41,7 @@
/**
* lpfc_mem_alloc: create and allocate all PCI and memory pools
* lpfc_mem_alloc - create and allocate all PCI and memory pools
* @phba: HBA to allocate pools for
*
* Description: Creates and allocates PCI pools lpfc_scsi_dma_buf_pool,
......@@ -136,12 +136,12 @@ lpfc_mem_alloc(struct lpfc_hba * phba)
}
/**
* lpfc_mem_free: Frees all PCI and memory allocated by lpfc_mem_alloc
* lpfc_mem_free - Frees all PCI and memory allocated by lpfc_mem_alloc
* @phba: HBA to free memory for
*
* Description: Frees PCI pools lpfc_scsi_dma_buf_pool, lpfc_mbuf_pool,
* lpfc_hbq_pool. Frees kmalloc-backed mempools for LPFC_MBOXQ_t and
* lpfc_nodelist. Also frees the VPI bitmask.
* lpfc_nodelist. Also frees the VPI bitmask
*
* Returns: None
**/
......@@ -212,7 +212,7 @@ lpfc_mem_free(struct lpfc_hba * phba)
}
/**
* lpfc_mbuf_alloc: Allocate an mbuf from the lpfc_mbuf_pool PCI pool
* lpfc_mbuf_alloc - Allocate an mbuf from the lpfc_mbuf_pool PCI pool
* @phba: HBA which owns the pool to allocate from
* @mem_flags: indicates if this is a priority (MEM_PRI) allocation
* @handle: used to return the DMA-mapped address of the mbuf
......@@ -249,7 +249,7 @@ lpfc_mbuf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
}
/**
* __lpfc_mem_free: Free an mbuf from the lpfc_mbuf_pool PCI pool (locked)
* __lpfc_mbuf_free - Free an mbuf from the lpfc_mbuf_pool PCI pool (locked)
* @phba: HBA which owns the pool to return to
* @virt: mbuf to free
* @dma: the DMA-mapped address of the lpfc_mbuf_pool to be freed
......@@ -278,7 +278,7 @@ __lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
}
/**
* lpfc_mem_free: Free an mbuf from the lpfc_mbuf_pool PCI pool (unlocked)
* lpfc_mbuf_free - Free an mbuf from the lpfc_mbuf_pool PCI pool (unlocked)
* @phba: HBA which owns the pool to return to
* @virt: mbuf to free
* @dma: the DMA-mapped address of the lpfc_mbuf_pool to be freed
......@@ -291,7 +291,6 @@ __lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
* Returns: None
**/
void
lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
{
unsigned long iflags;
......@@ -303,7 +302,7 @@ lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
}
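The mbuf helpers documented above hand out and return DMA-mapped buffers from the lpfc_mbuf_pool PCI pool; the locked/unlocked variants differ only in whether the caller is expected to already hold the host lock. A stripped-down sketch of the basic alloc/free pairing, assuming the pool lives in phba->lpfc_mbuf_pool and ignoring the MEM_PRI safety-pool fallback; the function names are illustrative.

#include <linux/pci.h>

void *mbuf_alloc_sketch(struct lpfc_hba *phba, dma_addr_t *handle)
{
	/* Returns a DMA-coherent buffer and writes its bus address to *handle. */
	return pci_pool_alloc(phba->lpfc_mbuf_pool, GFP_KERNEL, handle);
}

void mbuf_free_sketch(struct lpfc_hba *phba, void *virt, dma_addr_t dma)
{
	pci_pool_free(phba->lpfc_mbuf_pool, virt, dma);
}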
/**
* lpfc_els_hbq_alloc: Allocate an HBQ buffer
* lpfc_els_hbq_alloc - Allocate an HBQ buffer
* @phba: HBA to allocate HBQ buffer for
*
* Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hbq_pool PCI
......@@ -335,7 +334,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
}
/**
* lpfc_mem_hbq_free: Frees an HBQ buffer allocated with lpfc_els_hbq_alloc
* lpfc_mem_hbq_free - Frees an HBQ buffer allocated with lpfc_els_hbq_alloc
* @phba: HBA buffer was allocated for
* @hbqbp: HBQ container returned by lpfc_els_hbq_alloc
*
......@@ -355,7 +354,7 @@ lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp)
}
/**
* lpfc_in_buf_free: Free a DMA buffer
* lpfc_in_buf_free - Free a DMA buffer
* @phba: HBA buffer is associated with
* @mp: Buffer to free
*
......
......@@ -192,7 +192,6 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
struct lpfc_iocbq *iocb, *next_iocb;
IOCB_t *cmd;
/* Abort outstanding I/O on NPort <nlp_DID> */
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY,
......@@ -223,19 +222,10 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
}
spin_unlock_irq(&phba->hbalock);
while (!list_empty(&completions)) {
iocb = list_get_first(&completions, struct lpfc_iocbq, list);
cmd = &iocb->iocb;
list_del_init(&iocb->list);
if (!iocb->iocb_cmpl)
lpfc_sli_release_iocbq(phba, iocb);
else {
cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
(iocb->iocb_cmpl) (phba, iocb, iocb);
}
}
/* Cancel all the IOCBs from the completions list */
lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
IOERR_SLI_ABORTED);
lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
return 0;
}
......
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2008 Emulex. All rights reserved. *
* Copyright (C) 2004-2009 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
......@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
#define LPFC_DRIVER_VERSION "8.3.0"
#define LPFC_DRIVER_VERSION "8.3.1"
#define LPFC_DRIVER_NAME "lpfc"
#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
......@@ -26,4 +26,4 @@
#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
LPFC_DRIVER_VERSION
#define LPFC_COPYRIGHT "Copyright(c) 2004-2008 Emulex. All rights reserved."
#define LPFC_COPYRIGHT "Copyright(c) 2004-2009 Emulex. All rights reserved."
......@@ -206,7 +206,7 @@ lpfc_unique_wwpn(struct lpfc_hba *phba, struct lpfc_vport *new_vport)
}
/**
* lpfc_discovery_wait: Wait for driver discovery to quiesce.
* lpfc_discovery_wait - Wait for driver discovery to quiesce
* @vport: The virtual port for which this call is being executed.
*
* This driver calls this routine specifically from lpfc_vport_delete
......@@ -741,7 +741,7 @@ lpfc_destroy_vport_work_array(struct lpfc_hba *phba, struct lpfc_vport **vports)
/**
* lpfc_vport_reset_stat_data: Reset the statistical data for the vport.
* lpfc_vport_reset_stat_data - Reset the statistical data for the vport
* @vport: Pointer to vport object.
*
* This function resets the statistical data for the vport. This function
......@@ -763,8 +763,7 @@ lpfc_vport_reset_stat_data(struct lpfc_vport *vport)
/**
* lpfc_alloc_bucket: Allocate data buffer required for collecting
* statistical data.
* lpfc_alloc_bucket - Allocate data buffer required for statistical data
* @vport: Pointer to vport object.
*
* This function allocates data buffer required for all the FC
......@@ -797,8 +796,7 @@ lpfc_alloc_bucket(struct lpfc_vport *vport)
}
/**
* lpfc_free_bucket: Free data buffer required for collecting
* statistical data.
* lpfc_free_bucket - Free data buffer required for statistical data
* @vport: Pointer to vport object.
*
 * The function frees the statistical data buffer of all the FC
......
......@@ -50,7 +50,7 @@
#endif
#define MPT2SAS_DEV_NAME "mpt2ctl"
#define MPT2_MAGIC_NUMBER 'm'
#define MPT2_MAGIC_NUMBER 'L'
#define MPT2_IOCTL_DEFAULT_TIMEOUT (10) /* in seconds */
/**
......@@ -295,8 +295,9 @@ struct mpt2_ioctl_btdh_mapping {
/* status bits for ioc->diag_buffer_status */
#define MPT2_DIAG_BUFFER_IS_REGISTERED (0x01)
#define MPT2_DIAG_BUFFER_IS_RELEASED (0x02)
#define MPT2_DIAG_BUFFER_IS_REGISTERED (0x01)
#define MPT2_DIAG_BUFFER_IS_RELEASED (0x02)
#define MPT2_DIAG_BUFFER_IS_DIAG_RESET (0x04)
/* application flags for mpt2_diag_register, mpt2_diag_query */
#define MPT2_APP_FLAGS_APP_OWNED (0x0001)
......
......@@ -1291,10 +1291,8 @@ static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
if (--starget->target_blocked == 0) {
SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
"unblocking target at zero depth\n"));
} else {
blk_plug_device(sdev->request_queue);
} else
return 0;
}
}
if (scsi_target_is_busy(starget)) {
......
......@@ -149,6 +149,7 @@ enum fc_rctl {
* Well-known fabric addresses.
*/
enum fc_well_known_fid {
FC_FID_NONE = 0x000000, /* No destination */
FC_FID_BCAST = 0xffffff, /* broadcast */
FC_FID_FLOGI = 0xfffffe, /* fabric login */
FC_FID_FCTRL = 0xfffffd, /* fabric controller */
......