Commit 2af33e5a authored by James Smart, committed by Martin K. Petersen

scsi: lpfc: Remove SANDiags related code

The SANDiags feature is unused, so remove the related code.

Link: https://lore.kernel.org/r/20220819011736.14141-6-jsmart2021@gmail.com
Co-developed-by: Justin Tee <justin.tee@broadcom.com>
Signed-off-by: Justin Tee <justin.tee@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 71ddeeaf
@@ -68,8 +68,6 @@ struct lpfc_sli2_slim;
 #define LPFC_MIN_TGT_QDEPTH	10
 #define LPFC_MAX_TGT_QDEPTH	0xFFFF
-#define LPFC_MAX_BUCKET_COUNT 20	/* Maximum no. of buckets for stat data
-					   collection. */
 /*
  * Following time intervals are used of adjusting SCSI device
  * queue depths when there are driver resource error or Firmware
@@ -732,8 +730,6 @@ struct lpfc_vport {
 	struct lpfc_debugfs_trc *disc_trc;
 	atomic_t disc_trc_cnt;
 #endif
-	uint8_t stat_data_enabled;
-	uint8_t stat_data_blocked;
 	struct list_head rcv_buffer_list;
 	unsigned long rcv_buffer_time_stamp;
 	uint32_t vport_flag;
@@ -1436,13 +1432,6 @@ struct lpfc_hba {
 	 */
 #define QUE_BUFTAG_BIT  (1<<31)
 	uint32_t buffer_tag_count;
-	/* data structure used for latency data collection */
-#define LPFC_NO_BUCKET       0
-#define LPFC_LINEAR_BUCKET   1
-#define LPFC_POWER2_BUCKET   2
-	uint8_t bucket_type;
-	uint32_t bucket_base;
-	uint32_t bucket_step;
 	/* Maximum number of events that can be outstanding at any time*/
 #define LPFC_MAX_EVT_COUNT 512
...
(The diff for one file is collapsed and not shown.)
@@ -149,7 +149,6 @@ struct lpfc_nodelist {
 	uint32_t cmd_qdepth;
 	unsigned long last_change_time;
 	unsigned long *active_rrqs_xri_bitmap;
-	struct lpfc_scsicmd_bkt *lat_data; /* Latency data */
 	uint32_t fc4_prli_sent;
 	/* flags to keep ndlp alive until special conditions are met */
......
@@ -4787,22 +4787,6 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	    new_state == NLP_STE_UNMAPPED_NODE)
 		lpfc_nlp_reg_node(vport, ndlp);
-	if ((new_state == NLP_STE_MAPPED_NODE) &&
-	    (vport->stat_data_enabled)) {
-		/*
-		 * A new target is discovered, if there is no buffer for
-		 * statistical data collection allocate buffer.
-		 */
-		ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
-					 sizeof(struct lpfc_scsicmd_bkt),
-					 GFP_KERNEL);
-
-		if (!ndlp->lat_data)
-			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
-				"0286 lpfc_nlp_state_cleanup failed to "
-				"allocate statistical data buffer DID "
-				"0x%x\n", ndlp->nlp_DID);
-	}
 	/*
 	 * If the node just added to Mapped list was an FCP target,
 	 * but the remote port registration failed or assigned a target
@@ -6647,7 +6631,6 @@ lpfc_nlp_release(struct kref *kref)
 	ndlp->fc4_xpt_flags = 0;
 	/* free ndlp memory for final ndlp release */
-	kfree(ndlp->lat_data);
 	if (ndlp->phba->sli_rev == LPFC_SLI_REV4)
 		mempool_free(ndlp->active_rrqs_xri_bitmap,
 				ndlp->phba->active_rrq_pool);
......
@@ -111,62 +111,6 @@ lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
 #define LPFC_INVALID_REFTAG ((u32)-1)
 
-/**
- * lpfc_update_stats - Update statistical data for the command completion
- * @vport: The virtual port on which this call is executing.
- * @lpfc_cmd: lpfc scsi command object pointer.
- *
- * This function is called when there is a command completion and this
- * function updates the statistical data for the command completion.
- **/
-static void
-lpfc_update_stats(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
-{
-	struct lpfc_hba *phba = vport->phba;
-	struct lpfc_rport_data *rdata;
-	struct lpfc_nodelist *pnode;
-	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
-	unsigned long flags;
-	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
-	unsigned long latency;
-	int i;
-
-	if (!vport->stat_data_enabled ||
-	    vport->stat_data_blocked ||
-	    (cmd->result))
-		return;
-
-	latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
-	rdata = lpfc_cmd->rdata;
-	pnode = rdata->pnode;
-
-	spin_lock_irqsave(shost->host_lock, flags);
-	if (!pnode ||
-	    !pnode->lat_data ||
-	    (phba->bucket_type == LPFC_NO_BUCKET)) {
-		spin_unlock_irqrestore(shost->host_lock, flags);
-		return;
-	}
-
-	if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
-		i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
-			phba->bucket_step;
-		/* check array subscript bounds */
-		if (i < 0)
-			i = 0;
-		else if (i >= LPFC_MAX_BUCKET_COUNT)
-			i = LPFC_MAX_BUCKET_COUNT - 1;
-	} else {
-		for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
-			if (latency <= (phba->bucket_base +
-				((1<<i)*phba->bucket_step)))
-				break;
-	}
-
-	pnode->lat_data[i].cmd_count++;
-	spin_unlock_irqrestore(shost->host_lock, flags);
-}
-
 /**
  * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
  * @phba: The Hba for which this call is being executed.
@@ -4335,8 +4279,6 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
 				 cmd->retries, scsi_get_resid(cmd));
 	}
-	lpfc_update_stats(vport, lpfc_cmd);
-
 	if (vport->cfg_max_scsicmpl_time &&
 	    time_after(jiffies, lpfc_cmd->start_time +
 		       msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
@@ -4617,7 +4559,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 				 scsi_get_resid(cmd));
 	}
-	lpfc_update_stats(vport, lpfc_cmd);
 	if (vport->cfg_max_scsicmpl_time &&
 	    time_after(jiffies, lpfc_cmd->start_time +
 		       msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
......
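For reference, the latency bucketing that the removed lpfc_update_stats() implemented maps a completion latency in milliseconds to one of LPFC_MAX_BUCKET_COUNT histogram slots, either linearly (fixed-width buckets of bucket_step ms starting at bucket_base) or by powers of two (bucket i covers latencies up to bucket_base + 2^i * bucket_step). The standalone sketch below only restates that index selection outside the driver; the helper name, the enum, and the example values are illustrative assumptions, not lpfc code.

#include <stdio.h>

#define MAX_BUCKET_COUNT 20	/* mirrors the removed LPFC_MAX_BUCKET_COUNT */

enum bucket_type { NO_BUCKET, LINEAR_BUCKET, POWER2_BUCKET };

/*
 * Illustrative restatement of the removed index selection: linear buckets
 * are bucket_step ms wide starting at bucket_base; power-of-two buckets
 * cover latencies up to bucket_base + (1 << i) * bucket_step.
 */
static int bucket_index(enum bucket_type type, unsigned long latency_ms,
			unsigned long base, unsigned long step)
{
	int i;

	if (type == LINEAR_BUCKET) {
		i = (int)((latency_ms + step - 1 - base) / step);
		if (i < 0)			/* clamp to the valid range */
			i = 0;
		else if (i >= MAX_BUCKET_COUNT)
			i = MAX_BUCKET_COUNT - 1;
		return i;
	}

	/* power-of-two: first bucket whose upper bound covers the latency */
	for (i = 0; i < MAX_BUCKET_COUNT - 1; i++)
		if (latency_ms <= base + ((1UL << i) * step))
			break;
	return i;
}

int main(void)
{
	/* with base 0 and step 10 ms, a 35 ms completion lands in
	 * linear bucket 4 and power-of-two bucket 2 */
	printf("linear: %d\n", bucket_index(LINEAR_BUCKET, 35, 0, 10));
	printf("power2: %d\n", bucket_index(POWER2_BUCKET, 35, 0, 10));
	return 0;
}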
@@ -126,10 +126,6 @@ struct fcp_cmnd {
 };
 
-struct lpfc_scsicmd_bkt {
-	uint32_t cmd_count;
-};
-
 #define LPFC_SCSI_DMA_EXT_SIZE	264
 #define LPFC_BPL_SIZE		1024
 #define MDAC_DIRECT_CMD		0x22
......
@@ -809,74 +809,3 @@ lpfc_destroy_vport_work_array(struct lpfc_hba *phba, struct lpfc_vport **vports)
 	kfree(vports);
 }
-
-/**
- * lpfc_vport_reset_stat_data - Reset the statistical data for the vport
- * @vport: Pointer to vport object.
- *
- * This function resets the statistical data for the vport. This function
- * is called with the host_lock held
- **/
-void
-lpfc_vport_reset_stat_data(struct lpfc_vport *vport)
-{
-	struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
-
-	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
-		if (ndlp->lat_data)
-			memset(ndlp->lat_data, 0, LPFC_MAX_BUCKET_COUNT *
-				sizeof(struct lpfc_scsicmd_bkt));
-	}
-}
-
-/**
- * lpfc_alloc_bucket - Allocate data buffer required for statistical data
- * @vport: Pointer to vport object.
- *
- * This function allocates data buffer required for all the FC
- * nodes of the vport to collect statistical data.
- **/
-void
-lpfc_alloc_bucket(struct lpfc_vport *vport)
-{
-	struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
-
-	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
-		kfree(ndlp->lat_data);
-		ndlp->lat_data = NULL;
-
-		if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
-			ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
-						 sizeof(struct lpfc_scsicmd_bkt),
-						 GFP_ATOMIC);
-
-			if (!ndlp->lat_data)
-				lpfc_printf_vlog(vport, KERN_ERR,
-					LOG_TRACE_EVENT,
-					"0287 lpfc_alloc_bucket failed to "
-					"allocate statistical data buffer DID "
-					"0x%x\n", ndlp->nlp_DID);
-		}
-	}
-}
-
-/**
- * lpfc_free_bucket - Free data buffer required for statistical data
- * @vport: Pointer to vport object.
- *
- * Th function frees statistical data buffer of all the FC
- * nodes of the vport.
- **/
-void
-lpfc_free_bucket(struct lpfc_vport *vport)
-{
-	struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
-
-	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
-		kfree(ndlp->lat_data);
-		ndlp->lat_data = NULL;
-	}
-}
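For reference, the removed vport helpers above managed a small per-target histogram: allocate a zeroed array of LPFC_MAX_BUCKET_COUNT counters when a node is mapped, zero it in place on reset, and free it when the node is released. The minimal userspace sketch below summarizes that lifecycle under stated assumptions; the struct and function names are stand-ins, not lpfc code.

#include <stdlib.h>
#include <string.h>

#define MAX_BUCKET_COUNT 20		/* mirrors the removed LPFC_MAX_BUCKET_COUNT */

struct cmd_bucket {			/* stand-in for struct lpfc_scsicmd_bkt */
	unsigned int cmd_count;
};

struct node {				/* stand-in for the per-target lpfc_nodelist */
	struct cmd_bucket *lat_data;
};

/* allocate a fresh zeroed histogram (as the removed lpfc_alloc_bucket did) */
static int node_alloc_buckets(struct node *n)
{
	free(n->lat_data);
	n->lat_data = calloc(MAX_BUCKET_COUNT, sizeof(*n->lat_data));
	return n->lat_data ? 0 : -1;
}

/* zero the counters in place (as the removed lpfc_vport_reset_stat_data did) */
static void node_reset_buckets(struct node *n)
{
	if (n->lat_data)
		memset(n->lat_data, 0, MAX_BUCKET_COUNT * sizeof(*n->lat_data));
}

/* release the histogram (as the removed lpfc_free_bucket did) */
static void node_free_buckets(struct node *n)
{
	free(n->lat_data);
	n->lat_data = NULL;
}

int main(void)
{
	struct node n = { 0 };

	if (node_alloc_buckets(&n) == 0) {
		n.lat_data[3].cmd_count++;	/* record one completion in bucket 3 */
		node_reset_buckets(&n);
		node_free_buckets(&n);
	}
	return 0;
}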
@@ -115,8 +115,4 @@ struct vport_cmd_tag {
 void lpfc_vport_set_state(struct lpfc_vport *vport,
 			  enum fc_vport_state new_state);
 
-void lpfc_vport_reset_stat_data(struct lpfc_vport *);
-void lpfc_alloc_bucket(struct lpfc_vport *);
-void lpfc_free_bucket(struct lpfc_vport *);
-
 #endif /* H_LPFC_VPORT */