Commit e96277a5 authored by Martin K. Petersen

Merge branch '6.5/scsi-staging' into 6.5/scsi-fixes

Pull in the currently staged SCSI fixes for 6.5.
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parents 06c2afb8 a97ccaa4
@@ -442,7 +442,6 @@ struct blk_revalidate_zone_args {
 	unsigned long	*conv_zones_bitmap;
 	unsigned long	*seq_zones_wlock;
 	unsigned int	nr_zones;
-	sector_t	zone_sectors;
 	sector_t	sector;
 };
@@ -456,38 +455,34 @@ static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
 	struct gendisk *disk = args->disk;
 	struct request_queue *q = disk->queue;
 	sector_t capacity = get_capacity(disk);
+	sector_t zone_sectors = q->limits.chunk_sectors;
+
+	/* Check for bad zones and holes in the zone report */
+	if (zone->start != args->sector) {
+		pr_warn("%s: Zone gap at sectors %llu..%llu\n",
+			disk->disk_name, args->sector, zone->start);
+		return -ENODEV;
+	}
+
+	if (zone->start >= capacity || !zone->len) {
+		pr_warn("%s: Invalid zone start %llu, length %llu\n",
+			disk->disk_name, zone->start, zone->len);
+		return -ENODEV;
+	}
 
 	/*
 	 * All zones must have the same size, with the exception on an eventual
 	 * smaller last zone.
 	 */
-	if (zone->start == 0) {
-		if (zone->len == 0 || !is_power_of_2(zone->len)) {
-			pr_warn("%s: Invalid zoned device with non power of two zone size (%llu)\n",
-				disk->disk_name, zone->len);
-			return -ENODEV;
-		}
-
-		args->zone_sectors = zone->len;
-		args->nr_zones = (capacity + zone->len - 1) >> ilog2(zone->len);
-	} else if (zone->start + args->zone_sectors < capacity) {
-		if (zone->len != args->zone_sectors) {
+	if (zone->start + zone->len < capacity) {
+		if (zone->len != zone_sectors) {
 			pr_warn("%s: Invalid zoned device with non constant zone size\n",
 				disk->disk_name);
 			return -ENODEV;
 		}
-	} else {
-		if (zone->len > args->zone_sectors) {
-			pr_warn("%s: Invalid zoned device with larger last zone size\n",
-				disk->disk_name);
-			return -ENODEV;
-		}
-	}
-
-	/* Check for holes in the zone report */
-	if (zone->start != args->sector) {
-		pr_warn("%s: Zone gap at sectors %llu..%llu\n",
-			disk->disk_name, args->sector, zone->start);
+	} else if (zone->len > zone_sectors) {
+		pr_warn("%s: Invalid zoned device with larger last zone size\n",
+			disk->disk_name);
 		return -ENODEV;
 	}
@@ -526,11 +521,13 @@ static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
  * @disk:	Target disk
  * @update_driver_data:	Callback to update driver data on the frozen disk
  *
- * Helper function for low-level device drivers to (re) allocate and initialize
- * a disk request queue zone bitmaps. This functions should normally be called
- * within the disk ->revalidate method for blk-mq based drivers. For BIO based
- * drivers only q->nr_zones needs to be updated so that the sysfs exposed value
- * is correct.
+ * Helper function for low-level device drivers to check and (re) allocate and
+ * initialize a disk request queue zone bitmaps. This functions should normally
+ * be called within the disk ->revalidate method for blk-mq based drivers.
+ * Before calling this function, the device driver must already have set the
+ * device zone size (chunk_sector limit) and the max zone append limit.
+ * For BIO based drivers, this function cannot be used. BIO based device drivers
+ * only need to set disk->nr_zones so that the sysfs exposed value is correct.
  * If the @update_driver_data callback function is not NULL, the callback is
  * executed with the device request queue frozen after all zones have been
  * checked.
@@ -539,9 +536,9 @@ int blk_revalidate_disk_zones(struct gendisk *disk,
 		void (*update_driver_data)(struct gendisk *disk))
 {
 	struct request_queue *q = disk->queue;
-	struct blk_revalidate_zone_args args = {
-		.disk		= disk,
-	};
+	sector_t zone_sectors = q->limits.chunk_sectors;
+	sector_t capacity = get_capacity(disk);
+	struct blk_revalidate_zone_args args = { };
 	unsigned int noio_flag;
 	int ret;
@@ -550,13 +547,31 @@ int blk_revalidate_disk_zones(struct gendisk *disk,
 	if (WARN_ON_ONCE(!queue_is_mq(q)))
 		return -EIO;
 
-	if (!get_capacity(disk))
-		return -EIO;
+	if (!capacity)
+		return -ENODEV;
+
+	/*
+	 * Checks that the device driver indicated a valid zone size and that
+	 * the max zone append limit is set.
+	 */
+	if (!zone_sectors || !is_power_of_2(zone_sectors)) {
+		pr_warn("%s: Invalid non power of two zone size (%llu)\n",
+			disk->disk_name, zone_sectors);
+		return -ENODEV;
+	}
+
+	if (!q->limits.max_zone_append_sectors) {
+		pr_warn("%s: Invalid 0 maximum zone append limit\n",
+			disk->disk_name);
+		return -ENODEV;
+	}
 
 	/*
 	 * Ensure that all memory allocations in this context are done as if
 	 * GFP_NOIO was specified.
 	 */
+	args.disk = disk;
+	args.nr_zones = (capacity + zone_sectors - 1) >> ilog2(zone_sectors);
 	noio_flag = memalloc_noio_save();
 	ret = disk->fops->report_zones(disk, 0, UINT_MAX,
 				       blk_revalidate_zone_cb, &args);
@@ -570,7 +585,7 @@ int blk_revalidate_disk_zones(struct gendisk *disk,
 	 * If zones where reported, make sure that the entire disk capacity
 	 * has been checked.
 	 */
-	if (ret > 0 && args.sector != get_capacity(disk)) {
+	if (ret > 0 && args.sector != capacity) {
 		pr_warn("%s: Missing zones from sector %llu\n",
 			disk->disk_name, args.sector);
 		ret = -ENODEV;
@@ -583,7 +598,6 @@ int blk_revalidate_disk_zones(struct gendisk *disk,
 	 */
 	blk_mq_freeze_queue(q);
 	if (ret > 0) {
-		blk_queue_chunk_sectors(q, args.zone_sectors);
 		disk->nr_zones = args.nr_zones;
 		swap(disk->seq_zones_wlock, args.seq_zones_wlock);
 		swap(disk->conv_zones_bitmap, args.conv_zones_bitmap);
...
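Taken together, the blk-zoned.c hunks above invert the initialization order: the zone size and zone append limit become inputs to blk_revalidate_disk_zones() rather than outputs. A minimal sketch of the resulting driver-side contract, assuming a hypothetical blk-mq driver (my_zoned_revalidate and its parameters are illustrative, not from this commit; the null_blk, virtio-blk, NVMe and sd hunks below all follow this pattern):

```c
#include <linux/blkdev.h>

/*
 * Hypothetical example of the calling convention documented above:
 * the driver sets the queue limits first, then lets the block layer
 * validate the zone report against them.
 */
static int my_zoned_revalidate(struct gendisk *disk, sector_t zone_sectors,
			       unsigned int max_append_sectors)
{
	struct request_queue *q = disk->queue;

	/*
	 * The zone size (a power of two) and the zone append limit must
	 * be set before calling blk_revalidate_disk_zones(), which now
	 * fails with -ENODEV if either is missing.
	 */
	blk_queue_chunk_sectors(q, zone_sectors);
	blk_queue_max_zone_append_sectors(q, max_append_sectors);

	return blk_revalidate_disk_zones(disk, NULL);
}
```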
@@ -162,21 +162,15 @@ int null_register_zoned_dev(struct nullb *nullb)
 	disk_set_zoned(nullb->disk, BLK_ZONED_HM);
 	blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
 	blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);
-
-	if (queue_is_mq(q)) {
-		int ret = blk_revalidate_disk_zones(nullb->disk, NULL);
-
-		if (ret)
-			return ret;
-	} else {
-		blk_queue_chunk_sectors(q, dev->zone_size_sects);
-		nullb->disk->nr_zones = bdev_nr_zones(nullb->disk->part0);
-	}
+	blk_queue_chunk_sectors(q, dev->zone_size_sects);
+	nullb->disk->nr_zones = bdev_nr_zones(nullb->disk->part0);
 
 	blk_queue_max_zone_append_sectors(q, dev->zone_size_sects);
 	disk_set_max_open_zones(nullb->disk, dev->zone_max_open);
 	disk_set_max_active_zones(nullb->disk, dev->zone_max_active);
 
+	if (queue_is_mq(q))
+		return blk_revalidate_disk_zones(nullb->disk, NULL);
+
 	return 0;
 }
...
@@ -751,7 +751,6 @@ static int virtblk_probe_zoned_device(struct virtio_device *vdev,
 {
 	u32 v, wg;
 	u8 model;
-	int ret;
 
 	virtio_cread(vdev, struct virtio_blk_config,
 		     zoned.model, &model);
@@ -806,6 +805,7 @@ static int virtblk_probe_zoned_device(struct virtio_device *vdev,
 			vblk->zone_sectors);
 		return -ENODEV;
 	}
+	blk_queue_chunk_sectors(q, vblk->zone_sectors);
 	dev_dbg(&vdev->dev, "zone sectors = %u\n", vblk->zone_sectors);
 
 	if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
@@ -814,26 +814,22 @@ static int virtblk_probe_zoned_device(struct virtio_device *vdev,
 		blk_queue_max_discard_sectors(q, 0);
 	}
 
-	ret = blk_revalidate_disk_zones(vblk->disk, NULL);
-	if (!ret) {
-		virtio_cread(vdev, struct virtio_blk_config,
-			     zoned.max_append_sectors, &v);
-		if (!v) {
-			dev_warn(&vdev->dev, "zero max_append_sectors reported\n");
-			return -ENODEV;
-		}
-		if ((v << SECTOR_SHIFT) < wg) {
-			dev_err(&vdev->dev,
-				"write granularity %u exceeds max_append_sectors %u limit\n",
-				wg, v);
-			return -ENODEV;
-		}
-		blk_queue_max_zone_append_sectors(q, v);
-		dev_dbg(&vdev->dev, "max append sectors = %u\n", v);
+	virtio_cread(vdev, struct virtio_blk_config,
+		     zoned.max_append_sectors, &v);
+	if (!v) {
+		dev_warn(&vdev->dev, "zero max_append_sectors reported\n");
+		return -ENODEV;
 	}
 
-	return ret;
+	if ((v << SECTOR_SHIFT) < wg) {
+		dev_err(&vdev->dev,
+			"write granularity %u exceeds max_append_sectors %u limit\n",
+			wg, v);
+		return -ENODEV;
+	}
+
+	blk_queue_max_zone_append_sectors(q, v);
+	dev_dbg(&vdev->dev, "max append sectors = %u\n", v);
+
+	return blk_revalidate_disk_zones(vblk->disk, NULL);
 }
 
 #else
...
@@ -10,12 +10,11 @@
 int nvme_revalidate_zones(struct nvme_ns *ns)
 {
 	struct request_queue *q = ns->queue;
-	int ret;
 
-	ret = blk_revalidate_disk_zones(ns->disk, NULL);
-	if (!ret)
-		blk_queue_max_zone_append_sectors(q,
-						  ns->ctrl->max_zone_append);
-	return ret;
+	blk_queue_chunk_sectors(q, ns->zsze);
+	blk_queue_max_zone_append_sectors(q, ns->ctrl->max_zone_append);
+
+	return blk_revalidate_disk_zones(ns->disk, NULL);
 }
 
 static int nvme_set_max_append(struct nvme_ctrl *ctrl)
...
@@ -2618,7 +2618,7 @@ struct aac_hba_info {
 struct aac_aifcmd {
 	__le32 command;		/* Tell host what type of notify this is */
 	__le32 seqnum;		/* To allow ordering of reports (if necessary) */
-	u8 data[1];		/* Undefined length (from kernel viewpoint) */
+	u8 data[];		/* Undefined length (from kernel viewpoint) */
 };
 
 /**
...
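The aacraid hunk replaces the old one-element-array idiom with a C99 flexible array member, so the compiler (and fortified string helpers) can see the real bounds of the trailing payload. A small, self-contained sketch of the sizing difference, with illustrative names not taken from the driver:

```c
#include <stdlib.h>

/* Header followed by a variable-length payload, as in struct aac_aifcmd. */
struct aif_example {
	unsigned int command;
	unsigned int seqnum;
	unsigned char data[];	/* flexible array member, was data[1] */
};

static struct aif_example *aif_alloc(size_t payload_len)
{
	/*
	 * sizeof(*cmd) now counts only the header, so the allocation is
	 * simply header + payload; the old data[1] form needed the
	 * error-prone "sizeof(struct) + len - 1" adjustment.
	 */
	return calloc(1, sizeof(struct aif_example) + payload_len);
}
```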
@@ -465,7 +465,7 @@ int fnic_trace_buf_init(void)
 	fnic_max_trace_entries = (trace_max_pages * PAGE_SIZE)/
 					  FNIC_ENTRY_SIZE_BYTES;
 
-	fnic_trace_buf_p = (unsigned long)vzalloc(trace_max_pages * PAGE_SIZE);
+	fnic_trace_buf_p = (unsigned long)vcalloc(trace_max_pages, PAGE_SIZE);
 	if (!fnic_trace_buf_p) {
 		printk(KERN_ERR PFX "Failed to allocate memory "
 				  "for fnic_trace_buf_p\n");
...
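This fnic change, like the two qla2xxx vmalloc_array() conversions further down, swaps an open-coded size multiplication for an allocator that checks the product for overflow. A brief kernel-style sketch of the idea (the helper name is made up):

```c
#include <linux/vmalloc.h>

/*
 * Illustrative only: vcalloc() returns NULL if pages * page_size would
 * overflow, whereas vzalloc(pages * page_size) can silently wrap and
 * under-allocate. Both zero the memory; vmalloc_array() is the
 * non-zeroing, overflow-checked variant used in the qla2xxx hunks.
 */
static void *trace_buf_alloc(size_t pages, size_t page_size)
{
	return vcalloc(pages, page_size);
}
```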
@@ -6944,7 +6944,9 @@ lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
 	if (rc)
 		return;
 	/* Reset HBA FCF states after successful unregister FCF */
+	spin_lock_irq(&phba->hbalock);
 	phba->fcf.fcf_flag = 0;
+	spin_unlock_irq(&phba->hbalock);
 	phba->fcf.current_rec.flag = 0;
 
 	/*
...
@@ -4462,7 +4462,6 @@ struct qla_hw_data {
 
 	/* n2n */
 	struct fc_els_flogi plogi_els_payld;
-#define LOGIN_TEMPLATE_SIZE (sizeof(struct fc_els_flogi) - 4)
 
 	void *swl;
...
@@ -8434,7 +8434,7 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
 		ql_dbg(ql_dbg_init, vha, 0x0163,
 		    "-> fwdt%u template allocate template %#x words...\n",
 		    j, risc_size);
-		fwdt->template = vmalloc(risc_size * sizeof(*dcode));
+		fwdt->template = vmalloc_array(risc_size, sizeof(*dcode));
 		if (!fwdt->template) {
 			ql_log(ql_log_warn, vha, 0x0164,
 			    "-> fwdt%u failed allocate template.\n", j);
@@ -8689,7 +8689,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
 		ql_dbg(ql_dbg_init, vha, 0x0173,
 		    "-> fwdt%u template allocate template %#x words...\n",
 		    j, risc_size);
-		fwdt->template = vmalloc(risc_size * sizeof(*dcode));
+		fwdt->template = vmalloc_array(risc_size, sizeof(*dcode));
 		if (!fwdt->template) {
 			ql_log(ql_log_warn, vha, 0x0174,
 			    "-> fwdt%u failed allocate template.\n", j);
...
@@ -3073,7 +3073,8 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
 	memset(ptr, 0, sizeof(struct els_plogi_payload));
 	memset(resp_ptr, 0, sizeof(struct els_plogi_payload));
 	memcpy(elsio->u.els_plogi.els_plogi_pyld->data,
-	    &ha->plogi_els_payld.fl_csp, LOGIN_TEMPLATE_SIZE);
+	    (void *)&ha->plogi_els_payld + offsetof(struct fc_els_flogi, fl_csp),
+	    sizeof(ha->plogi_els_payld) - offsetof(struct fc_els_flogi, fl_csp));
 
 	elsio->u.els_plogi.els_cmd = els_opcode;
 	elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;
@@ -3911,7 +3912,7 @@ qla2x00_start_sp(srb_t *sp)
 	pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
 	if (!pkt) {
-		rval = EAGAIN;
+		rval = -EAGAIN;
 		ql_log(ql_log_warn, vha, 0x700c,
 		    "qla2x00_alloc_iocbs failed.\n");
 		goto done;
...
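The PLOGI template copy above drops the hard-coded LOGIN_TEMPLATE_SIZE (sizeof minus 4) in favor of lengths derived from the structure layout itself via offsetof(). A standalone sketch of the idiom with a stand-in struct (the field names are illustrative, not the real fc_els_flogi layout):

```c
#include <stddef.h>
#include <string.h>

/*
 * Stand-in for struct fc_els_flogi: skip the leading command word and
 * copy from the first template field through the end of the struct.
 */
struct flogi_like {
	unsigned int fl_cmd;		/* not part of the template */
	unsigned int fl_csp[4];		/* template starts here */
	unsigned char fl_wwpn[8];
	unsigned char fl_wwnn[8];
};

static void copy_login_template(void *dst, const struct flogi_like *src)
{
	size_t off = offsetof(struct flogi_like, fl_csp);

	/*
	 * The length tracks the struct definition, so it cannot drift out
	 * of sync the way a hand-computed "- 4" constant can.
	 */
	memcpy(dst, (const unsigned char *)src + off, sizeof(*src) - off);
}
```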
@@ -841,11 +841,6 @@ static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;
 static int submit_queues = DEF_SUBMIT_QUEUES;	/* > 1 for multi-queue (mq) */
 static int poll_queues; /* iouring iopoll interface.*/
 
-static DEFINE_RWLOCK(atomic_rw);
-static DEFINE_RWLOCK(atomic_rw2);
-
-static rwlock_t *ramdisk_lck_a[2];
-
 static char sdebug_proc_name[] = MY_NAME;
 static const char *my_name = MY_NAME;
 
@@ -6818,9 +6813,6 @@ static int __init scsi_debug_init(void)
 	int k, ret, hosts_to_add;
 	int idx = -1;
 
-	ramdisk_lck_a[0] = &atomic_rw;
-	ramdisk_lck_a[1] = &atomic_rw2;
-
 	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
 		pr_warn("ndelay must be less than 1 second, ignored\n");
 		sdebug_ndelay = 0;
...
@@ -831,7 +831,6 @@ int sd_zbc_revalidate_zones(struct scsi_disk *sdkp)
 	struct request_queue *q = disk->queue;
 	u32 zone_blocks = sdkp->early_zone_info.zone_blocks;
 	unsigned int nr_zones = sdkp->early_zone_info.nr_zones;
-	u32 max_append;
 	int ret = 0;
 	unsigned int flags;
 
@@ -876,6 +875,11 @@ int sd_zbc_revalidate_zones(struct scsi_disk *sdkp)
 		goto unlock;
 	}
 
+	blk_queue_chunk_sectors(q,
+			logical_to_sectors(sdkp->device, zone_blocks));
+	blk_queue_max_zone_append_sectors(q,
+			q->limits.max_segments << PAGE_SECTORS_SHIFT);
+
 	ret = blk_revalidate_disk_zones(disk, sd_zbc_revalidate_zones_cb);
 
 	memalloc_noio_restore(flags);
@@ -888,12 +892,6 @@ int sd_zbc_revalidate_zones(struct scsi_disk *sdkp)
 		goto unlock;
 	}
 
-	max_append = min_t(u32, logical_to_sectors(sdkp->device, zone_blocks),
-			   q->limits.max_segments << PAGE_SECTORS_SHIFT);
-
-	max_append = min_t(u32, max_append, queue_max_hw_sectors(q));
-
-	blk_queue_max_zone_append_sectors(q, max_append);
-
 	sd_zbc_print_zones(sdkp);
 
 unlock:
...
@@ -8520,6 +8520,41 @@ static int ufshcd_device_params_init(struct ufs_hba *hba)
 	return ret;
 }
 
+static void ufshcd_set_timestamp_attr(struct ufs_hba *hba)
+{
+	int err;
+	struct ufs_query_req *request = NULL;
+	struct ufs_query_res *response = NULL;
+	struct ufs_dev_info *dev_info = &hba->dev_info;
+	struct utp_upiu_query_v4_0 *upiu_data;
+
+	if (dev_info->wspecversion < 0x400)
+		return;
+
+	ufshcd_hold(hba);
+
+	mutex_lock(&hba->dev_cmd.lock);
+
+	ufshcd_init_query(hba, &request, &response,
+			  UPIU_QUERY_OPCODE_WRITE_ATTR,
+			  QUERY_ATTR_IDN_TIMESTAMP, 0, 0);
+
+	request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
+
+	upiu_data = (struct utp_upiu_query_v4_0 *)&request->upiu_req;
+
+	put_unaligned_be64(ktime_get_real_ns(), &upiu_data->osf3);
+
+	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+
+	if (err)
+		dev_err(hba->dev, "%s: failed to set timestamp %d\n",
+			__func__, err);
+
+	mutex_unlock(&hba->dev_cmd.lock);
+	ufshcd_release(hba);
+}
+
 /**
  * ufshcd_add_lus - probe and add UFS logical units
  * @hba: per-adapter instance
@@ -8708,6 +8743,8 @@ static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
 	ufshcd_set_ufs_dev_active(hba);
 	ufshcd_force_reset_auto_bkops(hba);
 
+	ufshcd_set_timestamp_attr(hba);
+
 	/* Gear up to HS gear if supported */
 	if (hba->max_pwr_info.is_valid) {
 		/*
@@ -9749,6 +9786,7 @@ static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 		ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
 		if (ret)
 			goto set_old_link_state;
+		ufshcd_set_timestamp_attr(hba);
 	}
 
 	if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
...
@@ -72,6 +72,7 @@ config SCSI_UFS_QCOM
 config SCSI_UFS_MEDIATEK
 	tristate "Mediatek specific hooks to UFS controller platform driver"
 	depends on SCSI_UFSHCD_PLATFORM && ARCH_MEDIATEK
+	depends on RESET_CONTROLLER
 	select PHY_MTK_UFS
 	select RESET_TI_SYSCON
 	help
...
@@ -70,6 +70,31 @@ struct utp_upiu_query {
 	__be32 reserved[2];
 };
 
+/**
+ * struct utp_upiu_query_v4_0 - upiu request buffer structure for
+ * query request >= UFS 4.0 spec.
+ * @opcode: command to perform B-0
+ * @idn: a value that indicates the particular type of data B-1
+ * @index: Index to further identify data B-2
+ * @selector: Index to further identify data B-3
+ * @osf4: spec field B-5
+ * @osf5: spec field B 6,7
+ * @osf6: spec field DW 8,9
+ * @osf7: spec field DW 10,11
+ */
+struct utp_upiu_query_v4_0 {
+	__u8 opcode;
+	__u8 idn;
+	__u8 index;
+	__u8 selector;
+	__u8 osf3;
+	__u8 osf4;
+	__be16 osf5;
+	__be32 osf6;
+	__be32 osf7;
+	__be32 reserved;
+};
+
 /**
  * struct utp_upiu_cmd - Command UPIU structure
  * @data_transfer_len: Data Transfer Length DW-3
...
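For orientation, the ufshcd_set_timestamp_attr() hunk earlier stores the 64-bit big-endian nanosecond timestamp starting at osf3, so it spans osf3, osf4, osf5 and osf6 (1 + 1 + 2 + 4 = 8 bytes) of this new structure. A minimal sketch of that packing, assuming the kernel's unaligned accessors (the helper name is illustrative):

```c
#include <asm/unaligned.h>
#include <linux/ktime.h>

/*
 * Write the UFS 4.0 qTimestamp payload: put_unaligned_be64() lays the
 * current wall-clock time in nanoseconds across the 8 bytes beginning
 * at osf3 (covering osf3, osf4, osf5 and osf6).
 */
static void pack_timestamp(struct utp_upiu_query_v4_0 *upiu_data)
{
	put_unaligned_be64(ktime_get_real_ns(), &upiu_data->osf3);
}
```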
@@ -170,6 +170,7 @@ enum attr_idn {
 	QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST	= 0x1E,
 	QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE	= 0x1F,
 	QUERY_ATTR_IDN_EXT_IID_EN		= 0x2A,
+	QUERY_ATTR_IDN_TIMESTAMP		= 0x30
 };
 
 /* Descriptor idn for Query requests */
...