Commit 3a2c1f68 authored by Avri Altman's avatar Avri Altman Committed by Martin K. Petersen

scsi: ufs: ufshpb: Add host control mode support to rsp_upiu

In device control mode, the device may recommend the host to either
activate or inactivate a region, and the host should follow. Meaning those
are not actually recommendations, but more of instructions.

Conversely, in host control mode, the recommendation protocol is slightly
changed:

 a) The device may only recommend the host to update a subregion of an
    already-active region. And,

 b) The device may *not* recommend to inactivate a region.

Furthermore, in host control mode, the host may choose not to follow any of
the device's recommendations. However, in the case of a recommendation to
update an active and clean subregion, it is better to follow that
recommendation, because otherwise the host has no other way of knowing that
some internal relocation took place.

Link: https://lore.kernel.org/r/20210712095039.8093-3-avri.altman@wdc.com
Reviewed-by: Daejun Park <daejun7.park@samsung.com>
Signed-off-by: Avri Altman <avri.altman@wdc.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 119ee38c
...@@ -167,6 +167,8 @@ static void ufshpb_set_ppn_dirty(struct ufshpb_lu *hpb, int rgn_idx, ...@@ -167,6 +167,8 @@ static void ufshpb_set_ppn_dirty(struct ufshpb_lu *hpb, int rgn_idx,
else else
set_bit_len = cnt; set_bit_len = cnt;
set_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
if (rgn->rgn_state != HPB_RGN_INACTIVE && if (rgn->rgn_state != HPB_RGN_INACTIVE &&
srgn->srgn_state == HPB_SRGN_VALID) srgn->srgn_state == HPB_SRGN_VALID)
bitmap_set(srgn->mctx->ppn_dirty, srgn_offset, set_bit_len); bitmap_set(srgn->mctx->ppn_dirty, srgn_offset, set_bit_len);
...@@ -236,6 +238,11 @@ static bool ufshpb_test_ppn_dirty(struct ufshpb_lu *hpb, int rgn_idx, ...@@ -236,6 +238,11 @@ static bool ufshpb_test_ppn_dirty(struct ufshpb_lu *hpb, int rgn_idx,
return false; return false;
} }
/*
 * is_rgn_dirty - check whether an HPB region has been marked dirty
 * @rgn: region to query
 *
 * RGN_FLAG_DIRTY is set in @rgn->rgn_flags by ufshpb_set_ppn_dirty() and
 * cleared again in ufshpb_clear_dirty_bitmap().  In host control mode the
 * response-UPIU handler uses this predicate to skip the device's subregion
 * activation recommendations for dirty regions, since the host makes its
 * own decisions about those.  test_bit() performs an atomic read of the
 * flag bit.
 */
static inline bool is_rgn_dirty(struct ufshpb_region *rgn)
{
	return test_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
}
static int ufshpb_fill_ppn_from_page(struct ufshpb_lu *hpb, static int ufshpb_fill_ppn_from_page(struct ufshpb_lu *hpb,
struct ufshpb_map_ctx *mctx, int pos, struct ufshpb_map_ctx *mctx, int pos,
int len, __be64 *ppn_buf) int len, __be64 *ppn_buf)
...@@ -713,6 +720,7 @@ static void ufshpb_put_map_req(struct ufshpb_lu *hpb, ...@@ -713,6 +720,7 @@ static void ufshpb_put_map_req(struct ufshpb_lu *hpb,
static int ufshpb_clear_dirty_bitmap(struct ufshpb_lu *hpb, static int ufshpb_clear_dirty_bitmap(struct ufshpb_lu *hpb,
struct ufshpb_subregion *srgn) struct ufshpb_subregion *srgn)
{ {
struct ufshpb_region *rgn;
u32 num_entries = hpb->entries_per_srgn; u32 num_entries = hpb->entries_per_srgn;
if (!srgn->mctx) { if (!srgn->mctx) {
...@@ -726,6 +734,10 @@ static int ufshpb_clear_dirty_bitmap(struct ufshpb_lu *hpb, ...@@ -726,6 +734,10 @@ static int ufshpb_clear_dirty_bitmap(struct ufshpb_lu *hpb,
num_entries = hpb->last_srgn_entries; num_entries = hpb->last_srgn_entries;
bitmap_zero(srgn->mctx->ppn_dirty, num_entries); bitmap_zero(srgn->mctx->ppn_dirty, num_entries);
rgn = hpb->rgn_tbl + srgn->rgn_idx;
clear_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
return 0; return 0;
} }
...@@ -1238,6 +1250,18 @@ static void ufshpb_rsp_req_region_update(struct ufshpb_lu *hpb, ...@@ -1238,6 +1250,18 @@ static void ufshpb_rsp_req_region_update(struct ufshpb_lu *hpb,
srgn_i = srgn_i =
be16_to_cpu(rsp_field->hpb_active_field[i].active_srgn); be16_to_cpu(rsp_field->hpb_active_field[i].active_srgn);
rgn = hpb->rgn_tbl + rgn_i;
if (hpb->is_hcm &&
(rgn->rgn_state != HPB_RGN_ACTIVE || is_rgn_dirty(rgn))) {
/*
* in host control mode, subregion activation
* recommendations are only allowed to active regions.
* Also, ignore recommendations for dirty regions - the
* host will make decisions concerning those by himself
*/
continue;
}
dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
"activate(%d) region %d - %d\n", i, rgn_i, srgn_i); "activate(%d) region %d - %d\n", i, rgn_i, srgn_i);
...@@ -1245,7 +1269,6 @@ static void ufshpb_rsp_req_region_update(struct ufshpb_lu *hpb, ...@@ -1245,7 +1269,6 @@ static void ufshpb_rsp_req_region_update(struct ufshpb_lu *hpb,
ufshpb_update_active_info(hpb, rgn_i, srgn_i); ufshpb_update_active_info(hpb, rgn_i, srgn_i);
spin_unlock(&hpb->rsp_list_lock); spin_unlock(&hpb->rsp_list_lock);
rgn = hpb->rgn_tbl + rgn_i;
srgn = rgn->srgn_tbl + srgn_i; srgn = rgn->srgn_tbl + srgn_i;
/* blocking HPB_READ */ /* blocking HPB_READ */
...@@ -1256,6 +1279,14 @@ static void ufshpb_rsp_req_region_update(struct ufshpb_lu *hpb, ...@@ -1256,6 +1279,14 @@ static void ufshpb_rsp_req_region_update(struct ufshpb_lu *hpb,
hpb->stats.rb_active_cnt++; hpb->stats.rb_active_cnt++;
} }
if (hpb->is_hcm) {
/*
* in host control mode the device is not allowed to inactivate
* regions
*/
goto out;
}
for (i = 0; i < rsp_field->inactive_rgn_cnt; i++) { for (i = 0; i < rsp_field->inactive_rgn_cnt; i++) {
rgn_i = be16_to_cpu(rsp_field->hpb_inactive_field[i]); rgn_i = be16_to_cpu(rsp_field->hpb_inactive_field[i]);
dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
...@@ -1280,6 +1311,7 @@ static void ufshpb_rsp_req_region_update(struct ufshpb_lu *hpb, ...@@ -1280,6 +1311,7 @@ static void ufshpb_rsp_req_region_update(struct ufshpb_lu *hpb,
hpb->stats.rb_inactive_cnt++; hpb->stats.rb_inactive_cnt++;
} }
out:
dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "Noti: #ACT %u #INACT %u\n", dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "Noti: #ACT %u #INACT %u\n",
rsp_field->active_rgn_cnt, rsp_field->inactive_rgn_cnt); rsp_field->active_rgn_cnt, rsp_field->inactive_rgn_cnt);
......
...@@ -121,6 +121,8 @@ struct ufshpb_region { ...@@ -121,6 +121,8 @@ struct ufshpb_region {
/* below information is used by lru */ /* below information is used by lru */
struct list_head list_lru_rgn; struct list_head list_lru_rgn;
unsigned long rgn_flags;
#define RGN_FLAG_DIRTY 0
}; };
#define for_each_sub_region(rgn, i, srgn) \ #define for_each_sub_region(rgn, i, srgn) \
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment