Commit 22391ac3 authored by Jens Axboe

Merge branch 'md-next' of https://github.com/liu-song-6/linux into for-5.2/block

Pull MD changes from Song.

* 'md-next' of https://github.com/liu-song-6/linux:
  md: add __acquires/__releases annotations to handle_active_stripes
  md: add __acquires/__releases annotations to (un)lock_two_stripes
  md: mark md_cluster_mod static
  md: use correct type in super_1_sync
  md: use correct type in super_1_load
  md: use correct types in md_bitmap_print_sb
  md: add a missing endianness conversion in check_sb_changes
  md: add mddev->pers to avoid potential NULL pointer dereference
parents 0d413829 efcd487c
@@ -490,10 +490,10 @@ void md_bitmap_print_sb(struct bitmap *bitmap)
 	pr_debug("         magic: %08x\n", le32_to_cpu(sb->magic));
 	pr_debug("       version: %d\n", le32_to_cpu(sb->version));
 	pr_debug("          uuid: %08x.%08x.%08x.%08x\n",
-		 le32_to_cpu(*(__u32 *)(sb->uuid+0)),
-		 le32_to_cpu(*(__u32 *)(sb->uuid+4)),
-		 le32_to_cpu(*(__u32 *)(sb->uuid+8)),
-		 le32_to_cpu(*(__u32 *)(sb->uuid+12)));
+		 le32_to_cpu(*(__le32 *)(sb->uuid+0)),
+		 le32_to_cpu(*(__le32 *)(sb->uuid+4)),
+		 le32_to_cpu(*(__le32 *)(sb->uuid+8)),
+		 le32_to_cpu(*(__le32 *)(sb->uuid+12)));
 	pr_debug("        events: %llu\n",
 		 (unsigned long long) le64_to_cpu(sb->events));
 	pr_debug("events cleared: %llu\n",
...
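A note on the __u32 -> __le32 casts above: the bitmap superblock stores these fields little-endian on disk, and sparse's __bitwise type checking only fires when the pointer is typed __le32 before being handed to le32_to_cpu(), so the change is a no-op at runtime. Below is a minimal, self-contained sketch of how the annotated type catches the mismatch; the definitions are stand-ins, not the kernel's real ones from include/uapi/linux/types.h:

    #include <stdint.h>
    #include <stdio.h>

    #ifdef __CHECKER__                      /* defined when sparse runs */
    #define __bitwise __attribute__((bitwise))
    #define __force   __attribute__((force))
    #else                                   /* plain compilers see nothing */
    #define __bitwise
    #define __force
    #endif

    typedef uint32_t __bitwise le32;        /* value is little-endian on disk */

    static uint32_t sketch_le32_to_cpu(le32 v)
    {
    #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
            return __builtin_bswap32((__force uint32_t)v);
    #else
            return (__force uint32_t)v;     /* no-op on little-endian hosts */
    #endif
    }

    int main(void)
    {
            unsigned char uuid[16] = { 0xde, 0xad, 0xbe, 0xef };

            /* Casting through the annotated type, as the patch now does,
             * lets sparse verify the bytes really are little-endian: */
            printf("%08x\n", sketch_le32_to_cpu(*(le32 *)(uuid + 0)));

            /* sketch_le32_to_cpu(*(uint32_t *)(uuid + 0)) would instead
             * draw a sparse warning about mixing plain and __bitwise types. */
            return 0;
    }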
@@ -88,8 +88,7 @@ static struct kobj_type md_ktype;
 
 struct md_cluster_operations *md_cluster_ops;
 EXPORT_SYMBOL(md_cluster_ops);
-struct module *md_cluster_mod;
-EXPORT_SYMBOL(md_cluster_mod);
+static struct module *md_cluster_mod;
 
 static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
 static struct workqueue_struct *md_wq;
@@ -1548,7 +1547,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
 		 */
 		s32 offset;
 		sector_t bb_sector;
-		u64 *bbp;
+		__le64 *bbp;
 		int i;
 		int sectors = le16_to_cpu(sb->bblog_size);
 		if (sectors > (PAGE_SIZE / 512))
@@ -1560,7 +1559,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
 		if (!sync_page_io(rdev, bb_sector, sectors << 9,
 				  rdev->bb_page, REQ_OP_READ, 0, true))
 			return -EIO;
-		bbp = (u64 *)page_address(rdev->bb_page);
+		bbp = (__le64 *)page_address(rdev->bb_page);
 		rdev->badblocks.shift = sb->bblog_shift;
 		for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
 			u64 bb = le64_to_cpu(*bbp);
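The on-disk bad-block log that bbp walks is an array of little-endian 64-bit words, so typing the pointer __le64 * matches what le64_to_cpu() expects; again a sparse-only fix. Going by the decode in super_1_load (the le64_to_cpu() line above plus the mask and shift that follow it in the full source), each word packs the start sector in its upper 54 bits above a 10-bit length. A rough standalone sketch of that layout:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical entry, already converted with le64_to_cpu():
             * 8 bad sectors starting at sector 4096. */
            uint64_t bb = (4096ULL << 10) | 8;

            printf("sector %llu, count %llu\n",
                   (unsigned long long)(bb >> 10),    /* upper 54 bits */
                   (unsigned long long)(bb & 0x3ff)); /* low 10 bits   */
            return 0;
    }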
@@ -1872,7 +1871,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
 			md_error(mddev, rdev);
 		else {
 			struct badblocks *bb = &rdev->badblocks;
-			u64 *bbp = (u64 *)page_address(rdev->bb_page);
+			__le64 *bbp = (__le64 *)page_address(rdev->bb_page);
 			u64 *p = bb->page;
 			sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
 			if (bb->changed) {
@@ -2850,7 +2849,9 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
 			err = 0;
 		}
 	} else if (cmd_match(buf, "re-add")) {
-		if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) &&
+		if (!rdev->mddev->pers)
+			err = -EINVAL;
+		else if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) &&
 			rdev->saved_raid_disk >= 0) {
 			/* clear_bit is performed _after_ all the devices
 			 * have their local Faulty bit cleared. If any writes
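On the re-add change: mddev->pers is only set once the array has been started, so writing "re-add" to the sysfs state file of a device in a not-yet-running array could walk into code that dereferences mddev->pers. The new branch fails fast with -EINVAL instead. A minimal sketch of the guard pattern, with hypothetical names rather than md's:

    #include <errno.h>
    #include <stdio.h>

    struct personality { const char *name; };
    struct array { struct personality *pers; /* NULL until array is run */ };

    static int re_add(struct array *a)
    {
            if (!a->pers)           /* array not running yet: refuse early */
                    return -EINVAL;
            printf("re-adding under %s\n", a->pers->name);  /* safe deref */
            return 0;
    }

    int main(void)
    {
            struct array stopped = { .pers = NULL };
            printf("re_add -> %d\n", re_add(&stopped)); /* -EINVAL, no oops */
            return 0;
    }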
@@ -9225,7 +9226,7 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
 		 * reshape is happening in the remote node, we need to
 		 * update reshape_position and call start_reshape.
 		 */
-		mddev->reshape_position = sb->reshape_position;
+		mddev->reshape_position = le64_to_cpu(sb->reshape_position);
 		if (mddev->pers->update_reshape_pos)
 			mddev->pers->update_reshape_pos(mddev);
 		if (mddev->pers->start_reshape)
...
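Unlike the type-only changes above, the check_sb_changes fix alters behavior: sb->reshape_position is a little-endian on-disk field, so assigning it without le64_to_cpu() stores a byte-swapped value on big-endian machines, while on little-endian hosts the bug is invisible, which is presumably why it went unnoticed. A small standalone demonstration:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            /* sector 4096 stored little-endian on disk */
            unsigned char disk[8] = { 0x00, 0x10, 0, 0, 0, 0, 0, 0 };
            uint64_t raw;

            memcpy(&raw, disk, sizeof(raw)); /* like "= sb->reshape_position" */
    #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
            uint64_t fixed = __builtin_bswap64(raw);  /* le64_to_cpu() */
    #else
            uint64_t fixed = raw;                     /* no-op on LE hosts */
    #endif
            printf("raw=%llu fixed=%llu\n",
                   (unsigned long long)raw, (unsigned long long)fixed);
            /* On big-endian, raw prints 4503599627370496, not 4096. */
            return 0;
    }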
@@ -711,6 +711,8 @@ static bool is_full_stripe_write(struct stripe_head *sh)
 }
 
 static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
+	__acquires(&sh1->stripe_lock)
+	__acquires(&sh2->stripe_lock)
 {
 	if (sh1 > sh2) {
 		spin_lock_irq(&sh2->stripe_lock);
@@ -722,6 +724,8 @@ static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
 }
 
 static void unlock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
+	__releases(&sh1->stripe_lock)
+	__releases(&sh2->stripe_lock)
 {
 	spin_unlock(&sh1->stripe_lock);
 	spin_unlock_irq(&sh2->stripe_lock);
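The __acquires()/__releases() annotations are sparse-only: under __CHECKER__ they expand to a context attribute telling sparse that the function exits holding (or having released) the named lock, which silences the "context imbalance" warning for functions that intentionally leave locks held for their caller. Roughly how the kernel defines them (see include/linux/compiler_types.h for the real versions), plus a small pthread-based usage sketch:

    #include <pthread.h>

    #ifdef __CHECKER__                      /* sparse sees the attribute */
    # define __acquires(x)  __attribute__((context(x, 0, 1)))
    # define __releases(x)  __attribute__((context(x, 1, 0)))
    #else                                   /* gcc/clang see nothing */
    # define __acquires(x)
    # define __releases(x)
    #endif

    static pthread_mutex_t a = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t b = PTHREAD_MUTEX_INITIALIZER;

    /* Exits holding both locks; without the annotations, sparse reports
     * "context imbalance: wrong count at exit" here. */
    static void lock_both(void)
            __acquires(&a)
            __acquires(&b)
    {
            pthread_mutex_lock(&a);
            pthread_mutex_lock(&b);
    }

    static void unlock_both(void)
            __releases(&a)
            __releases(&b)
    {
            pthread_mutex_unlock(&b);
            pthread_mutex_unlock(&a);
    }

    int main(void)
    {
            lock_both();
            unlock_both();
            return 0;
    }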
@@ -6155,6 +6159,8 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio,
 static int handle_active_stripes(struct r5conf *conf, int group,
 				 struct r5worker *worker,
 				 struct list_head *temp_inactive_list)
+	__releases(&conf->device_lock)
+	__acquires(&conf->device_lock)
 {
 	struct stripe_head *batch[MAX_STRIPE_BATCH], *sh;
 	int i, batch_size = 0, hash;
...
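handle_active_stripes gets the inverted pair, __releases() before __acquires() of the same lock: it is called with conf->device_lock held, drops the lock while it processes a batch of stripes, and re-takes it before returning, so the caller's locking state is unchanged while sparse is told about the imbalance inside. A sketch of that drop-and-relock shape, using pthread stand-ins rather than the md code:

    #include <pthread.h>
    #include <stdio.h>

    #ifdef __CHECKER__
    # define __releases(x)  __attribute__((context(x, 1, 0)))
    # define __acquires(x)  __attribute__((context(x, 0, 1)))
    #else
    # define __releases(x)
    # define __acquires(x)
    #endif

    static pthread_mutex_t device_lock = PTHREAD_MUTEX_INITIALIZER;

    static void do_slow_work(void)
    {
            puts("working without the lock held");
    }

    /* called with device_lock held; returns with it held again */
    static int handle_batch(void)
            __releases(&device_lock)
            __acquires(&device_lock)
    {
            pthread_mutex_unlock(&device_lock); /* drop across the slow part */
            do_slow_work();
            pthread_mutex_lock(&device_lock);   /* restore caller's state */
            return 1;
    }

    int main(void)
    {
            pthread_mutex_lock(&device_lock);
            handle_batch();
            pthread_mutex_unlock(&device_lock);
            return 0;
    }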