Commit 361aba56 authored by Boaz Harrosh

ore: fix BUG_ON, too few sgs when reading

When reading RAID5 files, in rare cases, we calculated too
few sg segments. There should be two extra for the beginning
and end partial units.
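
For illustration only, here is a minimal standalone sketch (the helper
name and parameters are hypothetical, not the ORE function itself) of
how the per-device sg count is sized once the split first/last segments
are accounted for, matching the one-line change in the first hunk below:

#include <linux/kernel.h>
#include <linux/math64.h>

/*
 * Hypothetical helper, for illustration only: size each device's
 * scatter-gather array for a RAID read whose first and last segments
 * are split.  Mirrors the "+ 2" added in ore_get_rw_state() below.
 */
static unsigned int read_sgs_per_dev(u64 num_raid_units,
				     unsigned int group_width,
				     unsigned int data_devs)
{
	/* first/last seg is split: count the extra partial units */
	num_raid_units += group_width;

	/*
	 * The even split alone can leave a device up to two entries
	 * short (one for the partial unit at each end), hence + 2.
	 */
	return div_u64(num_raid_units, data_devs) + 2;
}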

Also "too few sg segments" should not be a BUG_ON there is
all the mechanics in place to handle it, as a short read.
So just return -ENOMEM and the rest of the code will gracefully
split the IO.
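
As a sketch of that pattern (the wrapper below is hypothetical and
assumes ORE's internal declarations, ORE_DBGMSG and _ore_add_sg_seg,
are in scope, as they are at the site of the actual fix), the bounds
check simply reports the shortage instead of crashing:

/*
 * Hypothetical wrapper, for illustration only: the same check the fix
 * below adds to _ore_add_parity_unit().  On overflow it returns
 * -ENOMEM; per the description above, the rest of the ORE code treats
 * that as a short read and splits the IO.
 */
static int add_read_sg_checked(struct ore_io_state *ios,
			       struct ore_per_dev_state *per_dev,
			       unsigned cur_len)
{
	if (per_dev->cur_sg >= ios->sgs_per_dev) {
		ORE_DBGMSG("cur_sg(%d) >= sgs_per_dev(%d)\n",
			   per_dev->cur_sg, ios->sgs_per_dev);
		return -ENOMEM;	/* not a bug: caller splits the IO */
	}
	_ore_add_sg_seg(per_dev, cur_len, true);
	return 0;
}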

[Bug in 3.2.0 Kernel]
CC: Stable Tree <stable@kernel.org>
Signed-off-by: Boaz Harrosh <bharrosh@panasas.com>
parent ffefb8ea
@@ -266,7 +266,7 @@ int ore_get_rw_state(struct ore_layout *layout, struct ore_components *oc,
 			/* first/last seg is split */
 			num_raid_units += layout->group_width;
-			sgs_per_dev = div_u64(num_raid_units, data_devs);
+			sgs_per_dev = div_u64(num_raid_units, data_devs) + 2;
 		} else {
 			/* For Writes add parity pages array. */
 			max_par_pages = num_raid_units * pages_in_unit *
@@ -551,7 +551,11 @@ int _ore_add_parity_unit(struct ore_io_state *ios,
 			  unsigned cur_len)
 {
 	if (ios->reading) {
-		BUG_ON(per_dev->cur_sg >= ios->sgs_per_dev);
+		if (per_dev->cur_sg >= ios->sgs_per_dev) {
+			ORE_DBGMSG("cur_sg(%d) >= sgs_per_dev(%d)\n" ,
+				   per_dev->cur_sg, ios->sgs_per_dev);
+			return -ENOMEM;
+		}
 		_ore_add_sg_seg(per_dev, cur_len, true);
 	} else {
 		struct __stripe_pages_2d *sp2d = ios->sp2d;