Commit 7b28afe0 authored by Jens Axboe

Merge branch 'for-3.0-important' of git://git.drbd.org/linux-2.6-drbd into for-linus

parents 726e99ab 86e1e98e
@@ -79,7 +79,7 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
 	md_io.error = 0;
 	if ((rw & WRITE) && !test_bit(MD_NO_FUA, &mdev->flags))
-		rw |= REQ_FUA;
+		rw |= REQ_FUA | REQ_FLUSH;
 	rw |= REQ_SYNC;
 	bio = bio_alloc(GFP_NOIO, 1);
...
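For context on the one-line change above: REQ_FLUSH asks the block layer to flush the device's volatile write cache before this request is started, while REQ_FUA forces the request's own payload through to stable storage; meta-data writes get both so that no earlier write is still sitting in a volatile cache when the meta data that refers to it lands. A rough userspace analogue of that durability guarantee, as a standalone C sketch (the file name is made up; this is not DRBD code):

/* Userspace analogue of a REQ_FLUSH | REQ_FUA write: make sure the
 * payload is on stable storage before proceeding. Hypothetical sketch. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char buf[512] = "meta data block";
	int fd = open("md.bin", O_WRONLY | O_CREAT, 0600);

	if (fd < 0 || write(fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf))
		return 1;
	/* fdatasync() drains the write cache for this file's data,
	 * loosely comparable to a flush + FUA on the bio. */
	if (fdatasync(fd) != 0)
		return 1;
	close(fd);
	puts("durably written");
	return 0;
}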
@@ -112,9 +112,6 @@ struct drbd_bitmap {
 	struct task_struct *bm_task;
 };
-static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
-		unsigned long e, int val, const enum km_type km);
 #define bm_print_lock_info(m) __bm_print_lock_info(m, __func__)
 static void __bm_print_lock_info(struct drbd_conf *mdev, const char *func)
 {
@@ -994,6 +991,9 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must
 		bio_endio(bio, -EIO);
 	} else {
 		submit_bio(rw, bio);
+		/* this should not count as user activity and cause the
+		 * resync to throttle -- see drbd_rs_should_slow_down(). */
+		atomic_add(len >> 9, &mdev->rs_sect_ev);
 	}
 }
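A side note on the added accounting line: the block layer counts in 512-byte sectors, so len >> 9 converts the bitmap page's byte length into the sector count charged to rs_sect_ev (and queue_max_hw_sectors(...) << 9, used further down in w_make_resync_request, is the same conversion in the other direction). A trivial standalone check (not DRBD code):

/* len >> 9 is len / 512: bytes to 512-byte sectors, as used for
 * the rs_sect_ev accounting above. Standalone sketch. */
#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int len = 4096;         /* one page worth of bitmap, in bytes */
	unsigned int sectors = len >> 9; /* 4096 / 512 = 8 */

	assert(sectors == len / 512);
	printf("%u bytes -> %u sectors\n", len, sectors);
	return 0;
}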
@@ -1256,7 +1256,7 @@ unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_f
  * expected to be called for only a few bits (e - s about BITS_PER_LONG).
  * Must hold bitmap lock already. */
 static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
-	unsigned long e, int val, const enum km_type km)
+	unsigned long e, int val)
 {
 	struct drbd_bitmap *b = mdev->bitmap;
 	unsigned long *p_addr = NULL;
@@ -1274,14 +1274,14 @@ static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
 		unsigned int page_nr = bm_bit_to_page_idx(b, bitnr);
 		if (page_nr != last_page_nr) {
 			if (p_addr)
-				__bm_unmap(p_addr, km);
+				__bm_unmap(p_addr, KM_IRQ1);
 			if (c < 0)
 				bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
 			else if (c > 0)
 				bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
 			changed_total += c;
 			c = 0;
-			p_addr = __bm_map_pidx(b, page_nr, km);
+			p_addr = __bm_map_pidx(b, page_nr, KM_IRQ1);
 			last_page_nr = page_nr;
 		}
 		if (val)
@@ -1290,7 +1290,7 @@ static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
 			c -= (0 != __test_and_clear_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr));
 	}
 	if (p_addr)
-		__bm_unmap(p_addr, km);
+		__bm_unmap(p_addr, KM_IRQ1);
 	if (c < 0)
 		bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
 	else if (c > 0)
@@ -1318,7 +1318,7 @@ static int bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
 	if ((val ? BM_DONT_SET : BM_DONT_CLEAR) & b->bm_flags)
 		bm_print_lock_info(mdev);
-	c = __bm_change_bits_to(mdev, s, e, val, KM_IRQ1);
+	c = __bm_change_bits_to(mdev, s, e, val);
 	spin_unlock_irqrestore(&b->bm_lock, flags);
 	return c;
@@ -1343,16 +1343,17 @@ static inline void bm_set_full_words_within_one_page(struct drbd_bitmap *b,
 {
 	int i;
 	int bits;
-	unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr], KM_USER0);
+	unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr], KM_IRQ1);
 	for (i = first_word; i < last_word; i++) {
 		bits = hweight_long(paddr[i]);
 		paddr[i] = ~0UL;
 		b->bm_set += BITS_PER_LONG - bits;
 	}
-	kunmap_atomic(paddr, KM_USER0);
+	kunmap_atomic(paddr, KM_IRQ1);
 }
-/* Same thing as drbd_bm_set_bits, but without taking the spin_lock_irqsave.
+/* Same thing as drbd_bm_set_bits,
+ * but more efficient for a large bit range.
  * You must first drbd_bm_lock().
  * Can be called to set the whole bitmap in one go.
  * Sets bits from s to e _inclusive_. */
@@ -1366,6 +1367,7 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
 	 * Do not use memset, because we must account for changes,
 	 * so we need to loop over the words with hweight() anyways.
 	 */
+	struct drbd_bitmap *b = mdev->bitmap;
 	unsigned long sl = ALIGN(s,BITS_PER_LONG);
 	unsigned long el = (e+1) & ~((unsigned long)BITS_PER_LONG-1);
 	int first_page;
@@ -1376,15 +1378,19 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
 	if (e - s <= 3*BITS_PER_LONG) {
 		/* don't bother; el and sl may even be wrong. */
-		__bm_change_bits_to(mdev, s, e, 1, KM_USER0);
+		spin_lock_irq(&b->bm_lock);
+		__bm_change_bits_to(mdev, s, e, 1);
+		spin_unlock_irq(&b->bm_lock);
 		return;
 	}
 	/* difference is large enough that we can trust sl and el */
+	spin_lock_irq(&b->bm_lock);
 	/* bits filling the current long */
 	if (sl)
-		__bm_change_bits_to(mdev, s, sl-1, 1, KM_USER0);
+		__bm_change_bits_to(mdev, s, sl-1, 1);
 	first_page = sl >> (3 + PAGE_SHIFT);
 	last_page = el >> (3 + PAGE_SHIFT);
@@ -1397,8 +1403,10 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
 	/* first and full pages, unless first page == last page */
 	for (page_nr = first_page; page_nr < last_page; page_nr++) {
 		bm_set_full_words_within_one_page(mdev->bitmap, page_nr, first_word, last_word);
+		spin_unlock_irq(&b->bm_lock);
 		cond_resched();
 		first_word = 0;
+		spin_lock_irq(&b->bm_lock);
 	}
 	/* last page (respectively only page, for first page == last page) */
@@ -1411,7 +1419,8 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
 	 * it would trigger an assert in __bm_change_bits_to()
 	 */
 	if (el <= e)
-		__bm_change_bits_to(mdev, el, e, 1, KM_USER0);
+		__bm_change_bits_to(mdev, el, e, 1);
+	spin_unlock_irq(&b->bm_lock);
 }
 /* returns bit state
...
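Two things are going on in the bitmap hunks above: the km_type plumbing is dropped in favor of a fixed KM_IRQ1 slot, and _drbd_bm_set_bits now takes bm_lock itself, dropping it across cond_resched() so that setting a large range does not hold a spinlock for too long. The head/middle/tail split that makes the word-at-a-time path possible rests on the sl/el arithmetic: sl = ALIGN(s, BITS_PER_LONG) rounds s up to a word boundary, and el = (e+1) & ~(BITS_PER_LONG-1) rounds one-past-e down. A standalone sketch of that arithmetic, assuming 64-bit longs (not DRBD code):

/* Sketch of the sl/el word alignment used by _drbd_bm_set_bits.
 * Bits s..sl-1 and el..e are handled bit by bit; sl..el-1 is set
 * a full word at a time. */
#include <stdio.h>

#define BITS_PER_LONG 64
#define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned long s = 100, e = 1000;
	unsigned long sl = ALIGN(s, BITS_PER_LONG);                       /* 128 */
	unsigned long el = (e + 1) & ~((unsigned long)BITS_PER_LONG - 1); /* 960 */

	printf("head bits:  %lu..%lu\n", s, sl - 1);   /* 100..127 */
	printf("full words: %lu..%lu\n", sl, el - 1);  /* 128..959 */
	printf("tail bits:  %lu..%lu\n", el, e);       /* 960..1000 */
	return 0;
}

This is also why the short-range branch (e - s <= 3*BITS_PER_LONG) bypasses the split entirely: for such small ranges sl may land past el, so the values cannot be trusted.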
@@ -4602,6 +4602,11 @@ int drbd_asender(struct drbd_thread *thi)
 			dev_err(DEV, "meta connection shut down by peer.\n");
 			goto reconnect;
 		} else if (rv == -EAGAIN) {
+			/* If the data socket received something meanwhile,
+			 * that is good enough: peer is still alive. */
+			if (time_after(mdev->last_received,
+				jiffies - mdev->meta.socket->sk->sk_rcvtimeo))
+				continue;
 			if (ping_timeout_active) {
 				dev_err(DEV, "PingAck did not arrive in time.\n");
 				goto reconnect;
@@ -4637,6 +4642,7 @@ int drbd_asender(struct drbd_thread *thi)
 				goto reconnect;
 			}
 			if (received == expect) {
+				mdev->last_received = jiffies;
 				D_ASSERT(cmd != NULL);
 				if (!cmd->process(mdev, h))
 					goto reconnect;
...
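The -EAGAIN branch above leans on the kernel's time_after() macro, which is safe across jiffies wraparound: it treats the counters as a circle and asks whether last_received lies within the last sk_rcvtimeo ticks. A standalone sketch of the comparison, using the macro's classic definition (not DRBD code):

/* Sketch: wraparound-safe "a is after b" for jiffies-like counters,
 * modeled on the kernel's time_after() macro. */
#include <stdio.h>

#define time_after(a, b) ((long)((b) - (a)) < 0)

int main(void)
{
	unsigned long last_received = (unsigned long)-5; /* just before wrap */
	unsigned long jiffies = 10;                      /* just after wrap */
	unsigned long rcvtimeo = 100;

	/* Prints 1: last_received falls within the last rcvtimeo ticks,
	 * even though the counter wrapped in between. A naive ">" compare
	 * would get this wrong. */
	printf("%d\n", time_after(last_received, jiffies - rcvtimeo));
	return 0;
}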
@@ -536,12 +536,7 @@ static int w_make_resync_request(struct drbd_conf *mdev,
 		return 1;
 	}
-	/* starting with drbd 8.3.8, we can handle multi-bio EEs,
-	 * if it should be necessary */
-	max_bio_size =
-		mdev->agreed_pro_version < 94 ? queue_max_hw_sectors(mdev->rq_queue) << 9 :
-		mdev->agreed_pro_version < 95 ? DRBD_MAX_SIZE_H80_PACKET : DRBD_MAX_BIO_SIZE;
+	max_bio_size = queue_max_hw_sectors(mdev->rq_queue) << 9;
 	number = drbd_rs_number_requests(mdev);
 	if (number == 0)
 		goto requeue;
...
@@ -117,10 +117,10 @@
 /* drbdsetup XY resize -d Z
  * you are free to reduce the device size to nothing, if you want to.
  * the upper limit with 64bit kernel, enough ram and flexible meta data
- * is 16 TB, currently. */
+ * is 1 PiB, currently. */
 /* DRBD_MAX_SECTORS */
 #define DRBD_DISK_SIZE_SECT_MIN 0
-#define DRBD_DISK_SIZE_SECT_MAX (16 * (2LLU << 30))
+#define DRBD_DISK_SIZE_SECT_MAX (1 * (2LLU << 40))
 #define DRBD_DISK_SIZE_SECT_DEF 0 /* = disabled = no user size... */
 #define DRBD_ON_IO_ERROR_DEF EP_PASS_ON
...
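The arithmetic behind the new limit: DRBD_DISK_SIZE_SECT_MAX counts 512-byte sectors, so the new value (2LLU << 40) = 2^41 sectors is 2^41 * 2^9 = 2^50 bytes = 1 PiB, and the old value 16 * (2LLU << 30) = 2^35 sectors was 2^44 bytes = 16 TiB, matching both versions of the comment. A quick standalone check (not DRBD code):

/* Verify the sector-count limits: sectors * 512 bytes. */
#include <stdio.h>

int main(void)
{
	unsigned long long old_max = 16 * (2LLU << 30); /* 2^35 sectors */
	unsigned long long new_max = 1 * (2LLU << 40);  /* 2^41 sectors */

	printf("old: %llu TiB\n", old_max * 512 >> 40); /* 16 TiB */
	printf("new: %llu PiB\n", new_max * 512 >> 50); /* 1 PiB */
	return 0;
}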