Commit e84d3437 authored by Martin K. Petersen

Merge branch '6.8/s/mpi3mr2' into 6.8/scsi-staging

mpi3mr driver updates from Chandrakanth Patil at Broadcom:

  scsi: mpi3mr: Update driver version to 8.5.1.0.0
  scsi: mpi3mr: Support for preallocation of SGL BSG data buffers part-3
  scsi: mpi3mr: Support for preallocation of SGL BSG data buffers part-2
  scsi: mpi3mr: Support for preallocation of SGL BSG data buffers part-1
  scsi: mpi3mr: Fetch correct device dev handle for status reply descriptor
  scsi: mpi3mr: Block PEL Enable Command on Controller Reset and Unrecoverable State
  scsi: mpi3mr: Clean up block devices post controller reset
  scsi: mpi3mr: Refresh sdev queue depth after controller reset
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parents f200dad9 d0a60e3e
@@ -55,8 +55,8 @@ extern struct list_head mrioc_list;
extern int prot_mask;
extern atomic64_t event_counter;
#define MPI3MR_DRIVER_VERSION "8.5.0.0.50"
#define MPI3MR_DRIVER_RELDATE "22-November-2023"
#define MPI3MR_DRIVER_VERSION "8.5.1.0.0"
#define MPI3MR_DRIVER_RELDATE "5-December-2023"
#define MPI3MR_DRIVER_NAME "mpi3mr"
#define MPI3MR_DRIVER_LICENSE "GPL"
@@ -218,14 +218,16 @@ extern atomic64_t event_counter;
* @length: SGE length
* @rsvd: Reserved
* @rsvd1: Reserved
* @sgl_type: sgl type
* @sub_type: sgl sub type
* @type: sgl type
*/
struct mpi3mr_nvme_pt_sge {
u64 base_addr;
u32 length;
__le64 base_addr;
__le32 length;
u16 rsvd;
u8 rsvd1;
u8 sgl_type;
u8 sub_type:4;
u8 type:4;
};
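For illustration only, a minimal sketch (not part of this commit) of how the reworked descriptor might be filled, assuming a hypothetical helper name: with the usual little-endian bitfield layout, sub_type is the low nibble and type the high nibble of what used to be the sgl_type byte, the type nibble carries the NVMe SGL descriptor type using the MPI3MR_NVMESGL_* values added later in this commit, and the address/length are now stored little-endian.

/* Hypothetical helper: fill one NVMe pass-through SGE. */
static void mpi3mr_fill_nvme_pt_sge(struct mpi3mr_nvme_pt_sge *sge,
		dma_addr_t dma_addr, u32 length, bool last)
{
	memset(sge, 0, sizeof(*sge));
	sge->base_addr = cpu_to_le64(dma_addr);
	sge->length = cpu_to_le32(length);
	/* High nibble of the old sgl_type byte: NVMe SGL descriptor type. */
	sge->type = last ? MPI3MR_NVMESGL_LAST_SEGMENT :
			MPI3MR_NVMESGL_DATA_SEGMENT;
}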
/**
@@ -247,6 +249,8 @@ struct mpi3mr_buf_map {
u32 kern_buf_len;
dma_addr_t kern_buf_dma;
u8 data_dir;
u16 num_dma_desc;
struct dma_memory_desc *dma_desc;
};
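The new members reference struct dma_memory_desc, which is not shown in this hunk. Judging from how the fields are used in the allocation code below (size, addr, dma_addr), it is essentially a small descriptor along these lines; see the driver header for the authoritative definition.

/* Shape implied by the usage below. */
struct dma_memory_desc {
	u32 size;		/* length of the buffer in bytes */
	void *addr;		/* kernel virtual address */
	dma_addr_t dma_addr;	/* bus address handed to the controller */
};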
/* IOC State definitions */
@@ -477,6 +481,10 @@ struct mpi3mr_throttle_group_info {
/* HBA port flags */
#define MPI3MR_HBA_PORT_FLAG_DIRTY 0x01
/* IOCTL data transfer sge*/
#define MPI3MR_NUM_IOCTL_SGE 256
#define MPI3MR_IOCTL_SGE_SIZE (8 * 1024)
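For scale: these values preallocate 256 segments of 8 KiB each, i.e. 2 MiB of pooled DMA-able IOCTL/BSG data buffer space per controller, plus one 4 KiB chain buffer and one 4 KiB management-reply buffer allocated further down in this commit.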
/**
* struct mpi3mr_hba_port - HBA's port information
* @port_id: Port number
@@ -1042,6 +1050,11 @@ struct scmd_priv {
* @sas_node_lock: Lock to protect SAS node list
* @hba_port_table_list: List of HBA Ports
* @enclosure_list: List of Enclosure objects
* @ioctl_dma_pool: DMA pool for IOCTL data buffers
* @ioctl_sge: DMA buffer descriptors for IOCTL data
* @ioctl_chain_sge: DMA buffer descriptor for IOCTL chain
* @ioctl_resp_sge: DMA buffer descriptor for Mgmt cmd response
* @ioctl_sges_allocated: Flag for IOCTL SGEs allocated or not
*/
struct mpi3mr_ioc {
struct list_head list;
@@ -1227,6 +1240,12 @@ struct mpi3mr_ioc {
spinlock_t sas_node_lock;
struct list_head hba_port_table_list;
struct list_head enclosure_list;
struct dma_pool *ioctl_dma_pool;
struct dma_memory_desc ioctl_sge[MPI3MR_NUM_IOCTL_SGE];
struct dma_memory_desc ioctl_chain_sge;
struct dma_memory_desc ioctl_resp_sge;
bool ioctl_sges_allocated;
};
/**
This diff is collapsed.
@@ -1058,6 +1058,114 @@ enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_ioc *mrioc)
return MRIOC_STATE_RESET_REQUESTED;
}
/**
* mpi3mr_free_ioctl_dma_memory - free memory for ioctl dma
* @mrioc: Adapter instance reference
*
* Free the DMA memory allocated for IOCTL handling purpose.
*
* Return: None
*/
static void mpi3mr_free_ioctl_dma_memory(struct mpi3mr_ioc *mrioc)
{
struct dma_memory_desc *mem_desc;
u16 i;
if (!mrioc->ioctl_dma_pool)
return;
for (i = 0; i < MPI3MR_NUM_IOCTL_SGE; i++) {
mem_desc = &mrioc->ioctl_sge[i];
if (mem_desc->addr) {
dma_pool_free(mrioc->ioctl_dma_pool,
mem_desc->addr,
mem_desc->dma_addr);
mem_desc->addr = NULL;
}
}
dma_pool_destroy(mrioc->ioctl_dma_pool);
mrioc->ioctl_dma_pool = NULL;
mem_desc = &mrioc->ioctl_chain_sge;
if (mem_desc->addr) {
dma_free_coherent(&mrioc->pdev->dev, mem_desc->size,
mem_desc->addr, mem_desc->dma_addr);
mem_desc->addr = NULL;
}
mem_desc = &mrioc->ioctl_resp_sge;
if (mem_desc->addr) {
dma_free_coherent(&mrioc->pdev->dev, mem_desc->size,
mem_desc->addr, mem_desc->dma_addr);
mem_desc->addr = NULL;
}
mrioc->ioctl_sges_allocated = false;
}
/**
* mpi3mr_alloc_ioctl_dma_memory - Alloc memory for ioctl dma
* @mrioc: Adapter instance reference
*
* This function allocates dmaable memory required to handle the
* application issued MPI3 IOCTL requests.
*
* Return: None
*/
static void mpi3mr_alloc_ioctl_dma_memory(struct mpi3mr_ioc *mrioc)
{
struct dma_memory_desc *mem_desc;
u16 i;
mrioc->ioctl_dma_pool = dma_pool_create("ioctl dma pool",
&mrioc->pdev->dev,
MPI3MR_IOCTL_SGE_SIZE,
MPI3MR_PAGE_SIZE_4K, 0);
if (!mrioc->ioctl_dma_pool) {
ioc_err(mrioc, "ioctl_dma_pool: dma_pool_create failed\n");
goto out_failed;
}
for (i = 0; i < MPI3MR_NUM_IOCTL_SGE; i++) {
mem_desc = &mrioc->ioctl_sge[i];
mem_desc->size = MPI3MR_IOCTL_SGE_SIZE;
mem_desc->addr = dma_pool_zalloc(mrioc->ioctl_dma_pool,
GFP_KERNEL,
&mem_desc->dma_addr);
if (!mem_desc->addr)
goto out_failed;
}
mem_desc = &mrioc->ioctl_chain_sge;
mem_desc->size = MPI3MR_PAGE_SIZE_4K;
mem_desc->addr = dma_alloc_coherent(&mrioc->pdev->dev,
mem_desc->size,
&mem_desc->dma_addr,
GFP_KERNEL);
if (!mem_desc->addr)
goto out_failed;
mem_desc = &mrioc->ioctl_resp_sge;
mem_desc->size = MPI3MR_PAGE_SIZE_4K;
mem_desc->addr = dma_alloc_coherent(&mrioc->pdev->dev,
mem_desc->size,
&mem_desc->dma_addr,
GFP_KERNEL);
if (!mem_desc->addr)
goto out_failed;
mrioc->ioctl_sges_allocated = true;
return;
out_failed:
ioc_warn(mrioc, "cannot allocate DMA memory for the mpt commands\n"
"from the applications, application interface for MPT command is disabled\n");
mpi3mr_free_ioctl_dma_memory(mrioc);
}
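The BSG-facing part of the series (in the collapsed diff above) consumes these buffers so that application data transfers no longer require per-request coherent allocations. A rough sketch of the idea, using a hypothetical helper name and ignoring the chain/response buffers and partial-segment bookkeeping; it assumes a single in-flight BSG request owns the pool.

/* Hypothetical illustration: hand out preallocated segments to a BSG buffer. */
static int mpi3mr_attach_ioctl_sges(struct mpi3mr_ioc *mrioc,
		struct mpi3mr_buf_map *drv_buf, u32 xfer_len)
{
	u16 needed = DIV_ROUND_UP(xfer_len, MPI3MR_IOCTL_SGE_SIZE);

	if (!mrioc->ioctl_sges_allocated || needed > MPI3MR_NUM_IOCTL_SGE)
		return -ENOMEM;

	/* Segments are taken from the start of the ioctl_sge array, on the
	 * assumption that only one BSG request is serviced at a time.
	 */
	drv_buf->dma_desc = mrioc->ioctl_sge;
	drv_buf->num_dma_desc = needed;
	return 0;
}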
/**
* mpi3mr_clear_reset_history - clear reset history
* @mrioc: Adapter instance reference
@@ -3874,6 +3982,9 @@ int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc)
}
}
dprint_init(mrioc, "allocating ioctl dma buffers\n");
mpi3mr_alloc_ioctl_dma_memory(mrioc);
if (!mrioc->init_cmds.reply) {
retval = mpi3mr_alloc_reply_sense_bufs(mrioc);
if (retval) {
@@ -4293,6 +4404,7 @@ void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
struct mpi3mr_intr_info *intr_info;
mpi3mr_free_enclosure_list(mrioc);
mpi3mr_free_ioctl_dma_memory(mrioc);
if (mrioc->sense_buf_pool) {
if (mrioc->sense_buf)
@@ -1047,8 +1047,9 @@ void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc)
list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
list) {
if ((tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) &&
tgtdev->host_exposed && tgtdev->starget &&
tgtdev->starget->hostdata) {
tgtdev->is_hidden &&
tgtdev->host_exposed && tgtdev->starget &&
tgtdev->starget->hostdata) {
tgt_priv = tgtdev->starget->hostdata;
tgt_priv->dev_removed = 1;
atomic_set(&tgt_priv->block_io, 0);
@@ -1064,14 +1065,24 @@ void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc)
mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, true);
mpi3mr_tgtdev_put(tgtdev);
} else if (tgtdev->is_hidden & tgtdev->host_exposed) {
dprint_reset(mrioc, "hiding target device with perst_id(%d)\n",
tgtdev->perst_id);
mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
}
}
tgtdev = NULL;
list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
if ((tgtdev->dev_handle != MPI3MR_INVALID_DEV_HANDLE) &&
!tgtdev->is_hidden && !tgtdev->host_exposed)
mpi3mr_report_tgtdev_to_host(mrioc, tgtdev->perst_id);
!tgtdev->is_hidden) {
if (!tgtdev->host_exposed)
mpi3mr_report_tgtdev_to_host(mrioc,
tgtdev->perst_id);
else if (tgtdev->starget)
starget_for_each_device(tgtdev->starget,
(void *)tgtdev, mpi3mr_update_sdev);
}
}
}
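The starget_for_each_device() call added above is what implements the "Refresh sdev queue depth after controller reset" change: for every SCSI device under a still-exposed target it invokes the driver's existing mpi3mr_update_sdev callback. Simplified, and not a copy of the real callback (which may also refresh other device-type specific limits), the effect boils down to reapplying the firmware-reported queue depth:

/* Simplified stand-in for the per-sdev callback used above. */
static void example_update_sdev(struct scsi_device *sdev, void *data)
{
	struct mpi3mr_tgt_dev *tgtdev = data;

	if (!tgtdev)
		return;
	/* Pick up a queue depth that may have changed across the reset. */
	scsi_change_queue_depth(sdev, tgtdev->q_depth);
}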
@@ -3194,6 +3205,7 @@ void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc,
tg = stgt_priv_data->throttle_group;
throttle_enabled_dev =
stgt_priv_data->io_throttle_enabled;
dev_handle = stgt_priv_data->dev_handle;
}
}
if (unlikely((data_len_blks >= mrioc->io_throttle_data_length) &&
@@ -491,6 +491,8 @@ struct mpi3_nvme_encapsulated_error_reply {
#define MPI3MR_NVME_DATA_FORMAT_PRP 0
#define MPI3MR_NVME_DATA_FORMAT_SGL1 1
#define MPI3MR_NVME_DATA_FORMAT_SGL2 2
#define MPI3MR_NVMESGL_DATA_SEGMENT 0x00
#define MPI3MR_NVMESGL_LAST_SEGMENT 0x03
/* MPI3: task management related definitions */
struct mpi3_scsi_task_mgmt_request {