Commit 23caa33d authored by Avri Altman, committed by Martin K. Petersen
parent 2ef23e4b
@@ -2501,7 +2501,7 @@ static void ufshcd_sgl_to_prdt(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, int
 		 * 11b to indicate Dword granularity. A value of '3'
 		 * indicates 4 bytes, '7' indicates 8 bytes, etc."
 		 */
-		WARN_ONCE(len > 256 * 1024, "len = %#x\n", len);
+		WARN_ONCE(len > SZ_256K, "len = %#x\n", len);
 		prd->size = cpu_to_le32(len - 1);
 		prd->addr = cpu_to_le64(sg->dma_address);
 		prd->reserved = 0;
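The comment in this hunk describes how the PRDT byte count is encoded, and the replacement macro has to keep the same ceiling. A minimal standalone sketch of both facts, assuming the SZ_256K value from include/linux/sizes.h; prd_size_field() is a hypothetical helper, not driver code:

```c
/*
 * Sketch of the PRDT byte-count encoding described above. The SZ_*
 * macros in include/linux/sizes.h are plain hex constants, so
 * SZ_256K == 0x00040000 == 256 * 1024.
 */
#include <assert.h>
#include <stdint.h>

#define SZ_256K 0x00040000

/* len must be a multiple of 4; the field stores len - 1, so its low
 * two bits are always 11b, signalling Dword granularity. */
static uint32_t prd_size_field(uint32_t len)
{
	assert(len && len % 4 == 0 && len <= SZ_256K);
	return len - 1;	/* '3' => 4 bytes, '7' => 8 bytes, ... */
}

int main(void)
{
	assert(prd_size_field(4) == 3);
	assert(prd_size_field(8) == 7);
	assert((prd_size_field(SZ_256K) & 0x3) == 0x3);
	return 0;
}
```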
@@ -3733,7 +3733,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
 	/*
 	 * Allocate memory for UTP Transfer descriptors
-	 * UFSHCI requires 1024 byte alignment of UTRD
+	 * UFSHCI requires 1KB alignment of UTRD
 	 */
 	utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
 	hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
@@ -3741,7 +3741,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
 						   &hba->utrdl_dma_addr,
 						   GFP_KERNEL);
 	if (!hba->utrdl_base_addr ||
-	    WARN_ON(hba->utrdl_dma_addr & (1024 - 1))) {
+	    WARN_ON(hba->utrdl_dma_addr & (SZ_1K - 1))) {
 		dev_err(hba->dev,
 			"Transfer Descriptor Memory allocation failed\n");
 		goto out;
@@ -3757,7 +3757,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
 		goto skip_utmrdl;
 	/*
 	 * Allocate memory for UTP Task Management descriptors
-	 * UFSHCI requires 1024 byte alignment of UTMRD
+	 * UFSHCI requires 1KB alignment of UTMRD
	 */
 	utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
 	hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
@@ -3765,7 +3765,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
 						    &hba->utmrdl_dma_addr,
 						    GFP_KERNEL);
 	if (!hba->utmrdl_base_addr ||
-	    WARN_ON(hba->utmrdl_dma_addr & (1024 - 1))) {
+	    WARN_ON(hba->utmrdl_dma_addr & (SZ_1K - 1))) {
 		dev_err(hba->dev,
 			"Task Management Descriptor Memory allocation failed\n");
 		goto out;
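Both WARN_ON checks in the two hunks above rely on the same bit trick: for a power-of-two boundary, masking with (boundary - 1) keeps exactly the misaligned low bits. A small sketch, with SZ_1K spelled out as its sizes.h value; is_1k_aligned() is a hypothetical helper:

```c
/*
 * For a power-of-two boundary N, (addr & (N - 1)) is non-zero exactly
 * when addr is not N-byte aligned. SZ_1K is 0x400 in sizes.h.
 */
#include <assert.h>
#include <stdint.h>

#define SZ_1K 0x00000400

static int is_1k_aligned(uint64_t dma_addr)
{
	return (dma_addr & (SZ_1K - 1)) == 0;
}

int main(void)
{
	assert(is_1k_aligned(0x1000));	/* 4096: aligned */
	assert(!is_1k_aligned(0x1004));	/* 4100: low bits set */
	return 0;
}
```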
@@ -5102,7 +5102,7 @@ static int ufshcd_slave_configure(struct scsi_device *sdev)
 	blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
 	if (hba->quirks & UFSHCD_QUIRK_4KB_DMA_ALIGNMENT)
-		blk_queue_update_dma_alignment(q, 4096 - 1);
+		blk_queue_update_dma_alignment(q, SZ_4K - 1);
 	/*
 	 * Block runtime-pm until all consumers are added.
 	 * Refer ufshcd_setup_links().
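Both block-layer calls here take a mask rather than a size, which is why the new code passes SZ_4K - 1 instead of SZ_4K. A one-line check of the mask form, with SZ_4K written out as its sizes.h value:

```c
#define SZ_4K 0x00001000

/* A 4 KiB boundary expressed as a mask: the low 12 bits. */
_Static_assert((SZ_4K - 1) == 0xfff, "SZ_4K - 1 is the 4 KiB alignment mask");
```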
@@ -8728,7 +8728,7 @@ static const struct scsi_host_template ufshcd_driver_template = {
 	.cmd_per_lun		= UFSHCD_CMD_PER_LUN,
 	.can_queue		= UFSHCD_CAN_QUEUE,
 	.max_segment_size	= PRDT_DATA_BYTE_COUNT_MAX,
-	.max_sectors		= (1 << 20) / SECTOR_SIZE, /* 1 MiB */
+	.max_sectors		= SZ_1M / SECTOR_SIZE,
 	.max_host_blocked	= 1,
 	.track_queue_depth	= 1,
 	.skip_settle_delay	= 1,
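The new .max_sectors initializer is value-preserving, which is why the old /* 1 MiB */ comment could be dropped: SZ_1M is the same bit pattern as (1 << 20). A quick compile-time check, assuming the usual 512-byte SECTOR_SIZE from include/linux/blkdev.h:

```c
#define SZ_1M		0x00100000
#define SECTOR_SIZE	512

_Static_assert(SZ_1M == (1 << 20), "SZ_1M matches the old literal");
_Static_assert(SZ_1M / SECTOR_SIZE == 2048, "1 MiB cap is 2048 sectors");
```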
......
@@ -30,7 +30,7 @@ static struct kmem_cache *ufshpb_mctx_cache;
 static mempool_t *ufshpb_mctx_pool;
 static mempool_t *ufshpb_page_pool;
 /* A cache size of 2MB can cache ppn in the 1GB range. */
-static unsigned int ufshpb_host_map_kbytes = 2048;
+static unsigned int ufshpb_host_map_kbytes = SZ_2K;
 static int tot_active_srgn_pages;
 static struct workqueue_struct *ufshpb_wq;
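One wrinkle worth noting: ufshpb_host_map_kbytes counts kilobytes, so SZ_2K is used here purely as the number 2048, not as a byte size. The arithmetic behind the "2MB caches ppn in the 1GB range" comment can be checked against the HPB constants defined later in this diff:

```c
/*
 * 2 MiB of 8-byte HPB entries is 262144 entries; each entry maps one
 * 4 KiB block, so the cache covers 1 GiB of LBA space.
 */
#define SZ_1K			0x00000400
#define SZ_2K			0x00000800
#define HPB_ENTRY_BLOCK_SIZE	0x00001000	/* SZ_4K */
#define HPB_ENTRY_SIZE		0x8

_Static_assert((SZ_2K * SZ_1K / HPB_ENTRY_SIZE) * HPB_ENTRY_BLOCK_SIZE ==
	       1024UL * 1024 * 1024, "2MB of 8-byte entries maps 1GB");
```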
@@ -2461,7 +2461,7 @@ static void ufshpb_hpb_lu_prepared(struct ufs_hba *hba)
 	init_success = !ufshpb_check_hpb_reset_query(hba);
-	pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE;
+	pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * SZ_1K) / PAGE_SIZE;
 	if (pool_size > tot_active_srgn_pages) {
 		mempool_resize(ufshpb_mctx_pool, tot_active_srgn_pages);
 		mempool_resize(ufshpb_page_pool, tot_active_srgn_pages);
@@ -2527,7 +2527,7 @@ static int ufshpb_init_mem_wq(struct ufs_hba *hba)
 		return -ENOMEM;
 	}
-	pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE;
+	pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * SZ_1K) / PAGE_SIZE;
 	dev_info(hba->dev, "%s:%d ufshpb_host_map_kbytes %u pool_size %u\n",
 		 __func__, __LINE__, ufshpb_host_map_kbytes, pool_size);
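The pool_size expression appears in both hunks above and is the same computation each time. A runnable sketch of it, assuming 4 KiB pages and a local re-definition of the kernel's PAGE_ALIGN:

```c
/* PAGE_ALIGN rounds up to the next page boundary, as in the kernel. */
#include <assert.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define SZ_1K		1024UL

int main(void)
{
	unsigned int kbytes = 2048;	/* default ufshpb_host_map_kbytes */
	unsigned long pool_size = PAGE_ALIGN(kbytes * SZ_1K) / PAGE_SIZE;

	assert(pool_size == 512);	/* 2 MiB backed by 4 KiB pages */
	return 0;
}
```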
......
@@ -25,7 +25,7 @@
 /* hpb map & entries macro */
 #define HPB_RGN_SIZE_UNIT		512
-#define HPB_ENTRY_BLOCK_SIZE		4096
+#define HPB_ENTRY_BLOCK_SIZE		SZ_4K
 #define HPB_ENTRY_SIZE			0x8
 #define PINNED_NOT_SET			U32_MAX
......
@@ -1306,7 +1306,7 @@ static int exynos_ufs_hce_enable_notify(struct ufs_hba *hba,
 	 * (ufshcd_async_scan()). Note: this callback may also be called
 	 * from other functions than ufshcd_init().
 	 */
-	hba->host->max_segment_size = 4096;
+	hba->host->max_segment_size = SZ_4K;
 	if (ufs->drv_data->pre_hce_enable) {
 		ret = ufs->drv_data->pre_hce_enable(ufs);
......
@@ -335,29 +335,29 @@ static void ufs_hisi_pwr_change_pre_change(struct ufs_hba *hba)
 	/* PA_TxSkip */
 	ufshcd_dme_set(hba, UIC_ARG_MIB(0x155c), 0x0);
 	/*PA_PWRModeUserData0 = 8191, default is 0*/
-	ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b0), 8191);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b0), SZ_8K - 1);
 	/*PA_PWRModeUserData1 = 65535, default is 0*/
-	ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b1), 65535);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b1), SZ_64K - 1);
 	/*PA_PWRModeUserData2 = 32767, default is 0*/
-	ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b2), 32767);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b2), SZ_32K - 1);
 	/*DME_FC0ProtectionTimeOutVal = 8191, default is 0*/
-	ufshcd_dme_set(hba, UIC_ARG_MIB(0xd041), 8191);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(0xd041), SZ_8K - 1);
 	/*DME_TC0ReplayTimeOutVal = 65535, default is 0*/
-	ufshcd_dme_set(hba, UIC_ARG_MIB(0xd042), 65535);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(0xd042), SZ_64K - 1);
 	/*DME_AFC0ReqTimeOutVal = 32767, default is 0*/
-	ufshcd_dme_set(hba, UIC_ARG_MIB(0xd043), 32767);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(0xd043), SZ_32K - 1);
 	/*PA_PWRModeUserData3 = 8191, default is 0*/
-	ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b3), 8191);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b3), SZ_8K - 1);
 	/*PA_PWRModeUserData4 = 65535, default is 0*/
-	ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b4), 65535);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b4), SZ_64K - 1);
 	/*PA_PWRModeUserData5 = 32767, default is 0*/
-	ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b5), 32767);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b5), SZ_32K - 1);
 	/*DME_FC1ProtectionTimeOutVal = 8191, default is 0*/
-	ufshcd_dme_set(hba, UIC_ARG_MIB(0xd044), 8191);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(0xd044), SZ_8K - 1);
 	/*DME_TC1ReplayTimeOutVal = 65535, default is 0*/
-	ufshcd_dme_set(hba, UIC_ARG_MIB(0xd045), 65535);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(0xd045), SZ_64K - 1);
 	/*DME_AFC1ReqTimeOutVal = 32767, default is 0*/
-	ufshcd_dme_set(hba, UIC_ARG_MIB(0xd046), 32767);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(0xd046), SZ_32K - 1);
 }
 
 static int ufs_hisi_pwr_change_notify(struct ufs_hba *hba,
......
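The ufs-hisi hunk above is the riskiest-looking part of the change, since it rewrites a block of magic DME/PA attribute values; it stays value-preserving because every SZ_* constant minus one reproduces the old literal. A compile-time check, with the sizes.h values written out:

```c
#define SZ_8K	0x00002000
#define SZ_32K	0x00008000
#define SZ_64K	0x00010000

_Static_assert(SZ_8K - 1 == 8191, "PA/DME 8191 values unchanged");
_Static_assert(SZ_32K - 1 == 32767, "PA/DME 32767 values unchanged");
_Static_assert(SZ_64K - 1 == 65535, "PA/DME 65535 values unchanged");
```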
@@ -453,7 +453,7 @@ enum {
 };
 /* The maximum length of the data byte count field in the PRDT is 256KB */
-#define PRDT_DATA_BYTE_COUNT_MAX	(256 * 1024)
+#define PRDT_DATA_BYTE_COUNT_MAX	SZ_256K
 /* The granularity of the data byte count field in the PRDT is 32-bit */
 #define PRDT_DATA_BYTE_COUNT_PAD	4
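These two defines are the other half of the first hunk: PRDT_DATA_BYTE_COUNT_MAX is the 256 KiB ceiling that the WARN_ONCE in ufshcd_sgl_to_prdt() enforces, and PAD is 4 because the byte-count field is Dword-granular. A minimal consistency check:

```c
#define SZ_256K 0x00040000
#define PRDT_DATA_BYTE_COUNT_MAX	SZ_256K
#define PRDT_DATA_BYTE_COUNT_PAD	4

/* Every legal transfer length is a whole number of Dwords. */
_Static_assert(PRDT_DATA_BYTE_COUNT_MAX % PRDT_DATA_BYTE_COUNT_PAD == 0,
	       "max PRDT byte count is Dword-granular");
```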
......