Commit c00a93cb authored by Roger Pau Monne, committed by Jiri Slaby

xen-blkfront: restore the non-persistent data path

commit bfe11d6d upstream.

When persistent grants were added, they were always used, even if the
backend didn't have this feature (there's no harm in always using the
same set of pages). This restores the old data path when the backend
doesn't have persistent grants, removing the burden of doing a memcpy
when it is not actually needed.
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Reported-by: Felipe Franciosi <felipe.franciosi@citrix.com>
Cc: Felipe Franciosi <felipe.franciosi@citrix.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: David Vrabel <david.vrabel@citrix.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
[v2: Fix up whitespace issues]
Signed-off-by: Jiri Slaby <jslaby@suse.cz>
parent 30ea6725
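The point of the change is the decision the frontend makes per write segment: with persistent grants it copies the bio data into a pre-granted bounce page (the memcpy mentioned above), while without them it grants the caller's data page to the backend directly. The userspace sketch below only models that decision; the type and function names (grant_page, queue_write_segment) and the plain-bool feature_persistent flag are illustrative assumptions, not the driver's actual API.

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    #define PAGE_SIZE 4096

    /* Hypothetical model of one granted page: either a pool-owned
     * persistent bounce page or a direct reference to the caller's page. */
    struct grant_page {
            unsigned char *page;    /* page actually exposed to the backend */
            bool persistent;        /* true: pool-owned bounce page */
    };

    /*
     * Model of queuing one write segment.  With persistent grants the
     * payload is copied into the bounce page; without them the caller's
     * page is exposed directly and no memcpy is needed.
     */
    static void queue_write_segment(struct grant_page *gnt,
                                    unsigned char *data, size_t len,
                                    bool feature_persistent)
    {
            if (feature_persistent) {
                    memcpy(gnt->page, data, len);   /* extra copy on every write */
                    gnt->persistent = true;
            } else {
                    gnt->page = data;               /* zero-copy: expose the page itself */
                    gnt->persistent = false;
            }
    }

    int main(void)
    {
            static unsigned char bounce[PAGE_SIZE]; /* stands in for a pre-granted page */
            unsigned char payload[PAGE_SIZE] = { 0xab };
            struct grant_page gnt = { .page = bounce };

            /* Backend without persistent grants: the old, copy-free data path. */
            queue_write_segment(&gnt, payload, sizeof(payload), false);
            printf("zero-copy path used: %s\n", gnt.page == payload ? "yes" : "no");
            return 0;
    }

Running it with feature_persistent set to false exercises the zero-copy path that this patch restores; the actual driver makes the same choice per segment based on the backend's advertised feature, as the diff below shows.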
drivers/block/xen-blkfront.c

@@ -121,7 +121,8 @@ struct blkfront_info
 	struct work_struct work;
 	struct gnttab_free_callback callback;
 	struct blk_shadow shadow[BLK_RING_SIZE];
-	struct list_head persistent_gnts;
+	struct list_head grants;
+	struct list_head indirect_pages;
 	unsigned int persistent_gnts_c;
 	unsigned long shadow_free;
 	unsigned int feature_flush;
@@ -200,15 +201,17 @@ static int fill_grant_buffer(struct blkfront_info *info, int num)
 		if (!gnt_list_entry)
 			goto out_of_memory;
 
-		granted_page = alloc_page(GFP_NOIO);
-		if (!granted_page) {
-			kfree(gnt_list_entry);
-			goto out_of_memory;
+		if (info->feature_persistent) {
+			granted_page = alloc_page(GFP_NOIO);
+			if (!granted_page) {
+				kfree(gnt_list_entry);
+				goto out_of_memory;
+			}
+			gnt_list_entry->pfn = page_to_pfn(granted_page);
 		}
 
-		gnt_list_entry->pfn = page_to_pfn(granted_page);
 		gnt_list_entry->gref = GRANT_INVALID_REF;
-		list_add(&gnt_list_entry->node, &info->persistent_gnts);
+		list_add(&gnt_list_entry->node, &info->grants);
 		i++;
 	}
 
@@ -216,9 +219,10 @@ static int fill_grant_buffer(struct blkfront_info *info, int num)
 
 out_of_memory:
 	list_for_each_entry_safe(gnt_list_entry, n,
-	                         &info->persistent_gnts, node) {
+	                         &info->grants, node) {
 		list_del(&gnt_list_entry->node);
-		__free_page(pfn_to_page(gnt_list_entry->pfn));
+		if (info->feature_persistent)
+			__free_page(pfn_to_page(gnt_list_entry->pfn));
 		kfree(gnt_list_entry);
 		i--;
 	}
@@ -227,13 +231,14 @@ static int fill_grant_buffer(struct blkfront_info *info, int num)
 }
 
 static struct grant *get_grant(grant_ref_t *gref_head,
+                               unsigned long pfn,
                                struct blkfront_info *info)
 {
 	struct grant *gnt_list_entry;
 	unsigned long buffer_mfn;
 
-	BUG_ON(list_empty(&info->persistent_gnts));
-	gnt_list_entry = list_first_entry(&info->persistent_gnts, struct grant,
+	BUG_ON(list_empty(&info->grants));
+	gnt_list_entry = list_first_entry(&info->grants, struct grant,
 	                                  node);
 	list_del(&gnt_list_entry->node);
 
@@ -245,6 +250,10 @@ static struct grant *get_grant(grant_ref_t *gref_head,
 	/* Assign a gref to this page */
 	gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
 	BUG_ON(gnt_list_entry->gref == -ENOSPC);
+	if (!info->feature_persistent) {
+		BUG_ON(!pfn);
+		gnt_list_entry->pfn = pfn;
+	}
 	buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn);
 	gnttab_grant_foreign_access_ref(gnt_list_entry->gref,
 	                                info->xbdev->otherend_id,
@@ -477,22 +486,34 @@ static int blkif_queue_request(struct request *req)
 
 			if ((ring_req->operation == BLKIF_OP_INDIRECT) &&
 			    (i % SEGS_PER_INDIRECT_FRAME == 0)) {
+				unsigned long pfn;
+
 				if (segments)
 					kunmap_atomic(segments);
 
 				n = i / SEGS_PER_INDIRECT_FRAME;
-				gnt_list_entry = get_grant(&gref_head, info);
+				if (!info->feature_persistent) {
+					struct page *indirect_page;
+
+					/* Fetch a pre-allocated page to use for indirect grefs */
+					BUG_ON(list_empty(&info->indirect_pages));
+					indirect_page = list_first_entry(&info->indirect_pages,
+					                                 struct page, lru);
+					list_del(&indirect_page->lru);
+					pfn = page_to_pfn(indirect_page);
+				}
+				gnt_list_entry = get_grant(&gref_head, pfn, info);
 				info->shadow[id].indirect_grants[n] = gnt_list_entry;
 				segments = kmap_atomic(pfn_to_page(gnt_list_entry->pfn));
 				ring_req->u.indirect.indirect_grefs[n] = gnt_list_entry->gref;
 			}
 
-			gnt_list_entry = get_grant(&gref_head, info);
+			gnt_list_entry = get_grant(&gref_head, page_to_pfn(sg_page(sg)), info);
 			ref = gnt_list_entry->gref;
 
 			info->shadow[id].grants_used[i] = gnt_list_entry;
 
-			if (rq_data_dir(req)) {
+			if (rq_data_dir(req) && info->feature_persistent) {
 				char *bvec_data;
 				void *shared_data;
@@ -904,21 +925,36 @@ static void blkif_free(struct blkfront_info *info, int suspend)
 	blk_stop_queue(info->rq);
 
 	/* Remove all persistent grants */
-	if (!list_empty(&info->persistent_gnts)) {
+	if (!list_empty(&info->grants)) {
 		list_for_each_entry_safe(persistent_gnt, n,
-		                         &info->persistent_gnts, node) {
+		                         &info->grants, node) {
 			list_del(&persistent_gnt->node);
 			if (persistent_gnt->gref != GRANT_INVALID_REF) {
 				gnttab_end_foreign_access(persistent_gnt->gref,
 				                          0, 0UL);
 				info->persistent_gnts_c--;
 			}
-			__free_page(pfn_to_page(persistent_gnt->pfn));
+			if (info->feature_persistent)
+				__free_page(pfn_to_page(persistent_gnt->pfn));
 			kfree(persistent_gnt);
 		}
 	}
 	BUG_ON(info->persistent_gnts_c != 0);
 
+	/*
+	 * Remove indirect pages, this only happens when using indirect
+	 * descriptors but not persistent grants
+	 */
+	if (!list_empty(&info->indirect_pages)) {
+		struct page *indirect_page, *n;
+
+		BUG_ON(info->feature_persistent);
+		list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) {
+			list_del(&indirect_page->lru);
+			__free_page(indirect_page);
+		}
+	}
+
 	for (i = 0; i < BLK_RING_SIZE; i++) {
 		/*
 		 * Clear persistent grants present in requests already
@@ -933,7 +969,8 @@ static void blkif_free(struct blkfront_info *info, int suspend)
 		for (j = 0; j < segs; j++) {
 			persistent_gnt = info->shadow[i].grants_used[j];
 			gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
-			__free_page(pfn_to_page(persistent_gnt->pfn));
+			if (info->feature_persistent)
+				__free_page(pfn_to_page(persistent_gnt->pfn));
 			kfree(persistent_gnt);
 		}
 
@@ -992,7 +1029,7 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
 	nseg = s->req.operation == BLKIF_OP_INDIRECT ?
 		s->req.u.indirect.nr_segments : s->req.u.rw.nr_segments;
 
-	if (bret->operation == BLKIF_OP_READ) {
+	if (bret->operation == BLKIF_OP_READ && info->feature_persistent) {
 		/*
 		 * Copy the data received from the backend into the bvec.
 		 * Since bv_offset can be different than 0, and bv_len different
@@ -1020,7 +1057,10 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
 			 * we add it at the head of the list, so it will be
 			 * reused first.
 			 */
-			list_add(&s->grants_used[i]->node, &info->persistent_gnts);
+			if (!info->feature_persistent)
+				pr_alert_ratelimited("backed has not unmapped grant: %u\n",
+				                     s->grants_used[i]->gref);
+			list_add(&s->grants_used[i]->node, &info->grants);
 			info->persistent_gnts_c++;
 		} else {
 			/*
@@ -1031,19 +1071,29 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
 			 */
 			gnttab_end_foreign_access(s->grants_used[i]->gref, 0, 0UL);
 			s->grants_used[i]->gref = GRANT_INVALID_REF;
-			list_add_tail(&s->grants_used[i]->node, &info->persistent_gnts);
+			list_add_tail(&s->grants_used[i]->node, &info->grants);
 		}
 	}
 	if (s->req.operation == BLKIF_OP_INDIRECT) {
 		for (i = 0; i < INDIRECT_GREFS(nseg); i++) {
 			if (gnttab_query_foreign_access(s->indirect_grants[i]->gref)) {
-				list_add(&s->indirect_grants[i]->node, &info->persistent_gnts);
+				if (!info->feature_persistent)
+					pr_alert_ratelimited("backed has not unmapped grant: %u\n",
+					                     s->indirect_grants[i]->gref);
+				list_add(&s->indirect_grants[i]->node, &info->grants);
 				info->persistent_gnts_c++;
 			} else {
+				struct page *indirect_page;
+
 				gnttab_end_foreign_access(s->indirect_grants[i]->gref, 0, 0UL);
+				/*
+				 * Add the used indirect page back to the list of
+				 * available pages for indirect grefs.
+				 */
+				indirect_page = pfn_to_page(s->indirect_grants[i]->pfn);
+				list_add(&indirect_page->lru, &info->indirect_pages);
 				s->indirect_grants[i]->gref = GRANT_INVALID_REF;
-				list_add_tail(&s->indirect_grants[i]->node,
-				              &info->persistent_gnts);
+				list_add_tail(&s->indirect_grants[i]->node, &info->grants);
 			}
 		}
 	}
@@ -1338,7 +1388,8 @@ static int blkfront_probe(struct xenbus_device *dev,
 	spin_lock_init(&info->io_lock);
 	info->xbdev = dev;
 	info->vdevice = vdevice;
-	INIT_LIST_HEAD(&info->persistent_gnts);
+	INIT_LIST_HEAD(&info->grants);
+	INIT_LIST_HEAD(&info->indirect_pages);
 	info->persistent_gnts_c = 0;
 	info->connected = BLKIF_STATE_DISCONNECTED;
 	INIT_WORK(&info->work, blkif_restart_queue);
@@ -1685,6 +1736,23 @@ static int blkfront_setup_indirect(struct blkfront_info *info)
 	if (err)
 		goto out_of_memory;
 
+	if (!info->feature_persistent && info->max_indirect_segments) {
+		/*
+		 * We are using indirect descriptors but not persistent
+		 * grants, we need to allocate a set of pages that can be
+		 * used for mapping indirect grefs
+		 */
+		int num = INDIRECT_GREFS(segs) * BLK_RING_SIZE;
+
+		BUG_ON(!list_empty(&info->indirect_pages));
+		for (i = 0; i < num; i++) {
+			struct page *indirect_page = alloc_page(GFP_NOIO);
+			if (!indirect_page)
+				goto out_of_memory;
+			list_add(&indirect_page->lru, &info->indirect_pages);
+		}
+	}
+
 	for (i = 0; i < BLK_RING_SIZE; i++) {
 		info->shadow[i].grants_used = kzalloc(
 			sizeof(info->shadow[i].grants_used[0]) * segs,
@@ -1715,6 +1783,13 @@ static int blkfront_setup_indirect(struct blkfront_info *info)
 		kfree(info->shadow[i].indirect_grants);
 		info->shadow[i].indirect_grants = NULL;
 	}
+	if (!list_empty(&info->indirect_pages)) {
+		struct page *indirect_page, *n;
+		list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) {
+			list_del(&indirect_page->lru);
+			__free_page(indirect_page);
+		}
+	}
 	return -ENOMEM;
 }