Commit d7252d0d authored by Linus Torvalds

Merge tag 'for-linus-20190104' of git://git.kernel.dk/linux-block

Pull block updates and fixes from Jens Axboe:

 - Pulled in MD changes that Shaohua had queued up for 4.21.

   Unfortunately we lost Shaohua in late 2018; I'm sending these in on
   his behalf.

 - In conjunction with the above, I added a CREDITS entry for Shaohua.

 - sunvdc queue restart fix (Ming)

* tag 'for-linus-20190104' of git://git.kernel.dk/linux-block:
  Add CREDITS entry for Shaohua Li
  block: sunvdc: don't run hw queue synchronously from irq context
  md: fix raid10 hang issue caused by barrier
  raid10: refactor common wait code from regular read/write request
  md: remove redundant condition check
  lib/raid6: add option to skip algo benchmarking
  lib/raid6: sort algos in rough performance order
  lib/raid6: check for assembler SSSE3 support
  lib/raid6: avoid __attribute_const__ redefinition
  lib/raid6: add missing include for raid6test
  md: remove set but not used variable 'bi_rdev'
parents 0fe4e2d5 59f75fd0
--- a/CREDITS
+++ b/CREDITS
@@ -2208,6 +2208,12 @@ N: Christopher Li
 E: sparse@chrisli.org
 D: Sparse maintainer 2009 - 2018
 
+N: Shaohua Li
+D: Worked on many parts of the kernel, from core x86, ACPI, PCI, KVM, MM,
+D: and much more. He was the maintainer of MD from 2016 to 2018. Shaohua
+D: passed away late 2018, he will be greatly missed.
+W: https://www.spinics.net/lists/raid/msg61993.html
+
 N: Stephan Linz
 E: linz@mazet.de
 E: Stephan.Linz@gmx.de
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -181,7 +181,7 @@ static void vdc_blk_queue_start(struct vdc_port *port)
 	 * allocated a disk.
 	 */
 	if (port->disk && vdc_tx_dring_avail(dr) * 100 / VDC_TX_RING_SIZE >= 50)
-		blk_mq_start_hw_queues(port->disk->queue);
+		blk_mq_start_stopped_hw_queues(port->disk->queue, true);
 }
 
 static void vdc_finish(struct vio_driver_state *vio, int err, int waiting_for)
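Why the one-line swap fixes the bug: vdc_blk_queue_start() can be reached from interrupt context, and blk_mq_start_hw_queues() runs the hardware queues synchronously in the caller's context. blk_mq_start_stopped_hw_queues() takes an async flag that defers the actual dispatch to the kblockd workqueue. A minimal sketch of the idea, assuming only the two block-layer helpers named in the hunk (the wrapper function here is hypothetical, for illustration):

#include <linux/blk-mq.h>

/*
 * Sketch: restarting a stopped queue from (soft)irq context.
 * blk_mq_start_hw_queues(q) would clear the stopped state and then run
 * ->queue_rq() synchronously in this context, which is not safe from an
 * interrupt handler.  With async == true the restart is deferred to the
 * kblockd workqueue instead.
 */
static void restart_queue_from_irq(struct request_queue *q)
{
	blk_mq_start_stopped_hw_queues(q, true);	/* async dispatch */
}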
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2147,14 +2147,12 @@ EXPORT_SYMBOL(md_integrity_register);
  */
 int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
 {
-	struct blk_integrity *bi_rdev;
 	struct blk_integrity *bi_mddev;
 	char name[BDEVNAME_SIZE];
 
 	if (!mddev->gendisk)
 		return 0;
 
-	bi_rdev = bdev_get_integrity(rdev->bdev);
 	bi_mddev = blk_get_integrity(mddev->gendisk);
 
 	if (!bi_mddev) /* nothing to do */
@@ -5693,14 +5691,10 @@ int md_run(struct mddev *mddev)
 	return 0;
 
 abort:
-	if (mddev->flush_bio_pool) {
-		mempool_destroy(mddev->flush_bio_pool);
-		mddev->flush_bio_pool = NULL;
-	}
-	if (mddev->flush_pool){
-		mempool_destroy(mddev->flush_pool);
-		mddev->flush_pool = NULL;
-	}
+	mempool_destroy(mddev->flush_bio_pool);
+	mddev->flush_bio_pool = NULL;
+	mempool_destroy(mddev->flush_pool);
+	mddev->flush_pool = NULL;
 	return err;
 }
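The md_run() cleanup leans on the fact that mempool_destroy(), like kfree(), is a no-op when passed NULL, so the surrounding NULL checks were redundant. A minimal sketch of the pattern, assuming only that documented behavior (the helper name is hypothetical):

#include <linux/mempool.h>

/* Destroy a pool and clear the pointer; safe to call repeatedly. */
static void put_pool(mempool_t **pool)
{
	mempool_destroy(*pool);	/* no-op when *pool is NULL */
	*pool = NULL;		/* make a second cleanup pass harmless */
}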
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1124,6 +1124,29 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
 	kfree(plug);
 }
 
+/*
+ * 1. Register the new request and wait if the reconstruction thread has put
+ * up a bar for new requests. Continue immediately if no resync is active
+ * currently.
+ * 2. If IO spans the reshape position. Need to wait for reshape to pass.
+ */
+static void regular_request_wait(struct mddev *mddev, struct r10conf *conf,
+				 struct bio *bio, sector_t sectors)
+{
+	wait_barrier(conf);
+	while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
+	    bio->bi_iter.bi_sector < conf->reshape_progress &&
+	    bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
+		raid10_log(conf->mddev, "wait reshape");
+		allow_barrier(conf);
+		wait_event(conf->wait_barrier,
+			   conf->reshape_progress <= bio->bi_iter.bi_sector ||
+			   conf->reshape_progress >= bio->bi_iter.bi_sector +
+			   sectors);
+		wait_barrier(conf);
+	}
+}
+
 static void raid10_read_request(struct mddev *mddev, struct bio *bio,
 				struct r10bio *r10_bio)
 {
@@ -1132,7 +1155,6 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
 	const int op = bio_op(bio);
 	const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
 	int max_sectors;
-	sector_t sectors;
 	struct md_rdev *rdev;
 	char b[BDEVNAME_SIZE];
 	int slot = r10_bio->read_slot;
@@ -1166,30 +1188,8 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
 		}
 		rcu_read_unlock();
 	}
 
-	/*
-	 * Register the new request and wait if the reconstruction
-	 * thread has put up a bar for new requests.
-	 * Continue immediately if no resync is active currently.
-	 */
-	wait_barrier(conf);
-	sectors = r10_bio->sectors;
-	while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
-	    bio->bi_iter.bi_sector < conf->reshape_progress &&
-	    bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
-		/*
-		 * IO spans the reshape position. Need to wait for reshape to
-		 * pass
-		 */
-		raid10_log(conf->mddev, "wait reshape");
-		allow_barrier(conf);
-		wait_event(conf->wait_barrier,
-			   conf->reshape_progress <= bio->bi_iter.bi_sector ||
-			   conf->reshape_progress >= bio->bi_iter.bi_sector +
-			   sectors);
-		wait_barrier(conf);
-	}
-
+	regular_request_wait(mddev, conf, bio, r10_bio->sectors);
 	rdev = read_balance(conf, r10_bio, &max_sectors);
 	if (!rdev) {
 		if (err_rdev) {
@@ -1209,7 +1209,9 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
 		struct bio *split = bio_split(bio, max_sectors,
 					      gfp, &conf->bio_split);
 		bio_chain(split, bio);
+		allow_barrier(conf);
 		generic_make_request(bio);
+		wait_barrier(conf);
 		bio = split;
 		r10_bio->master_bio = bio;
 		r10_bio->sectors = max_sectors;
@@ -1332,30 +1334,8 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
 		finish_wait(&conf->wait_barrier, &w);
 	}
 
-	/*
-	 * Register the new request and wait if the reconstruction
-	 * thread has put up a bar for new requests.
-	 * Continue immediately if no resync is active currently.
-	 */
-	wait_barrier(conf);
 	sectors = r10_bio->sectors;
-	while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
-	    bio->bi_iter.bi_sector < conf->reshape_progress &&
-	    bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
-		/*
-		 * IO spans the reshape position. Need to wait for reshape to
-		 * pass
-		 */
-		raid10_log(conf->mddev, "wait reshape");
-		allow_barrier(conf);
-		wait_event(conf->wait_barrier,
-			   conf->reshape_progress <= bio->bi_iter.bi_sector ||
-			   conf->reshape_progress >= bio->bi_iter.bi_sector +
-			   sectors);
-		wait_barrier(conf);
-	}
-
+	regular_request_wait(mddev, conf, bio, sectors);
 	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
 	    (mddev->reshape_backwards
 	     ? (bio->bi_iter.bi_sector < conf->reshape_safe &&
@@ -1514,7 +1494,9 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
 		struct bio *split = bio_split(bio, r10_bio->sectors,
 					      GFP_NOIO, &conf->bio_split);
 		bio_chain(split, bio);
+		allow_barrier(conf);
 		generic_make_request(bio);
+		wait_barrier(conf);
 		bio = split;
 		r10_bio->master_bio = bio;
 	}
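The two allow_barrier()/wait_barrier() pairs are the actual hang fix. generic_make_request() does not recurse when called from inside a make_request function; it parks the remainder bio on current->bio_list, so that bio makes no progress until the current submission returns. If the caller still holds its barrier reference (nr_pending) and the resync thread raises the barrier in the meantime, nr_pending never drains and both sides wait forever. A minimal sketch of the corrected shape, using the raid10.c names from the hunks above (the wrapper function is hypothetical):

/*
 * Sketch: resubmit the remainder of a split bio without holding the
 * raid10 barrier across the deferred submission.
 */
static void resubmit_remainder(struct r10conf *conf, struct bio *bio)
{
	allow_barrier(conf);		/* drop our nr_pending reference */
	generic_make_request(bio);	/* remainder parked on current->bio_list */
	wait_barrier(conf);		/* re-enter the barrier before touching conf */
}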
--- a/include/linux/raid/pq.h
+++ b/include/linux/raid/pq.h
@@ -35,6 +35,7 @@ extern const char raid6_empty_zero_page[PAGE_SIZE];
 #include <limits.h>
 #include <stddef.h>
 #include <sys/mman.h>
+#include <sys/time.h>
 #include <sys/types.h>
 
 /* Not standard, but glibc defines it */
@@ -52,7 +53,9 @@ extern const char raid6_empty_zero_page[PAGE_SIZE];
 #define __init
 #define __exit
-#define __attribute_const__ __attribute__((const))
+#ifndef __attribute_const__
+# define __attribute_const__ __attribute__((const))
+#endif
 #define noinline __attribute__((noinline))
 
 #define preempt_enable()
 #define preempt_disable()
@@ -67,6 +70,9 @@ extern const char raid6_empty_zero_page[PAGE_SIZE];
 #define MODULE_DESCRIPTION(desc)
 #define subsys_initcall(x)
 #define module_exit(x)
+
+#define IS_ENABLED(x) (x)
+#define CONFIG_RAID6_PQ_BENCHMARK 1
 #endif /* __KERNEL__ */
 
 /* Routine choices */
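These stubs exist because raid6test compiles the kernel's RAID6 sources as a plain userspace program. The kernel's real IS_ENABLED() inspects generated autoconf macros; this shim only works because the config symbol is hardwired to 1 on the line above it. A minimal standalone illustration of what the preprocessor sees:

#define IS_ENABLED(x) (x)
#define CONFIG_RAID6_PQ_BENCHMARK 1

int main(void)
{
	/* Expands to (1): the benchmark branch is always taken in the
	 * standalone test, matching the in-kernel "default y". */
	return IS_ENABLED(CONFIG_RAID6_PQ_BENCHMARK) ? 0 : 1;
}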
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -10,6 +10,14 @@ menu "Library routines"
 config RAID6_PQ
 	tristate
 
+config RAID6_PQ_BENCHMARK
+	bool "Automatically choose fastest RAID6 PQ functions"
+	depends on RAID6_PQ
+	default y
+	help
+	  Benchmark all available RAID6 PQ functions on init and choose the
+	  fastest one.
+
 config BITREVERSE
 	tristate
--- a/lib/raid6/algos.c
+++ b/lib/raid6/algos.c
@@ -34,64 +34,64 @@ struct raid6_calls raid6_call;
 EXPORT_SYMBOL_GPL(raid6_call);
 
 const struct raid6_calls * const raid6_algos[] = {
-#if defined(__ia64__)
-	&raid6_intx16,
-	&raid6_intx32,
-#endif
 #if defined(__i386__) && !defined(__arch_um__)
-	&raid6_mmxx1,
-	&raid6_mmxx2,
-	&raid6_sse1x1,
-	&raid6_sse1x2,
-	&raid6_sse2x1,
-	&raid6_sse2x2,
-#ifdef CONFIG_AS_AVX2
-	&raid6_avx2x1,
-	&raid6_avx2x2,
-#endif
 #ifdef CONFIG_AS_AVX512
-	&raid6_avx512x1,
 	&raid6_avx512x2,
+	&raid6_avx512x1,
 #endif
-#endif
-#if defined(__x86_64__) && !defined(__arch_um__)
-	&raid6_sse2x1,
-	&raid6_sse2x2,
-	&raid6_sse2x4,
 #ifdef CONFIG_AS_AVX2
-	&raid6_avx2x1,
 	&raid6_avx2x2,
-	&raid6_avx2x4,
+	&raid6_avx2x1,
+#endif
+	&raid6_sse2x2,
+	&raid6_sse2x1,
+	&raid6_sse1x2,
+	&raid6_sse1x1,
+	&raid6_mmxx2,
+	&raid6_mmxx1,
 #endif
+#if defined(__x86_64__) && !defined(__arch_um__)
 #ifdef CONFIG_AS_AVX512
-	&raid6_avx512x1,
-	&raid6_avx512x2,
 	&raid6_avx512x4,
+	&raid6_avx512x2,
+	&raid6_avx512x1,
 #endif
+#ifdef CONFIG_AS_AVX2
+	&raid6_avx2x4,
+	&raid6_avx2x2,
+	&raid6_avx2x1,
+#endif
+	&raid6_sse2x4,
+	&raid6_sse2x2,
+	&raid6_sse2x1,
 #endif
 #ifdef CONFIG_ALTIVEC
-	&raid6_altivec1,
-	&raid6_altivec2,
-	&raid6_altivec4,
-	&raid6_altivec8,
-	&raid6_vpermxor1,
-	&raid6_vpermxor2,
-	&raid6_vpermxor4,
 	&raid6_vpermxor8,
+	&raid6_vpermxor4,
+	&raid6_vpermxor2,
+	&raid6_vpermxor1,
+	&raid6_altivec8,
+	&raid6_altivec4,
+	&raid6_altivec2,
+	&raid6_altivec1,
 #endif
 #if defined(CONFIG_S390)
 	&raid6_s390vx8,
 #endif
-	&raid6_intx1,
-	&raid6_intx2,
-	&raid6_intx4,
-	&raid6_intx8,
 #ifdef CONFIG_KERNEL_MODE_NEON
-	&raid6_neonx1,
-	&raid6_neonx2,
-	&raid6_neonx4,
 	&raid6_neonx8,
+	&raid6_neonx4,
+	&raid6_neonx2,
+	&raid6_neonx1,
 #endif
+#if defined(__ia64__)
+	&raid6_intx32,
+	&raid6_intx16,
+#endif
+	&raid6_intx8,
+	&raid6_intx4,
+	&raid6_intx2,
+	&raid6_intx1,
 	NULL
 };
@@ -163,6 +163,11 @@ static inline const struct raid6_calls *raid6_choose_gen(
 			if ((*algo)->valid && !(*algo)->valid())
 				continue;
 
+			if (!IS_ENABLED(CONFIG_RAID6_PQ_BENCHMARK)) {
+				best = *algo;
+				break;
+			}
+
 			perf = 0;
 
 			preempt_disable();
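Sorting the table fastest-first is what makes the new early exit meaningful: with CONFIG_RAID6_PQ_BENCHMARK=n, raid6_choose_gen() takes the first entry whose ->valid() check passes and skips the per-algorithm timing loop entirely. A condensed sketch of that selection logic, simplified from the hunk above (the benchmarking branch is elided):

const struct raid6_calls *best = NULL;
const struct raid6_calls *const *algo;

for (algo = raid6_algos; *algo; algo++) {
	/* Skip implementations the current CPU cannot execute. */
	if ((*algo)->valid && !(*algo)->valid())
		continue;

	if (!IS_ENABLED(CONFIG_RAID6_PQ_BENCHMARK)) {
		best = *algo;	/* first valid entry is presumed fastest */
		break;
	}
	/* ... otherwise benchmark *algo and keep the fastest ... */
}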
--- a/lib/raid6/test/Makefile
+++ b/lib/raid6/test/Makefile
@@ -34,6 +34,9 @@ endif
 
 ifeq ($(IS_X86),yes)
         OBJS   += mmx.o sse1.o sse2.o avx2.o recov_ssse3.o recov_avx2.o avx512.o recov_avx512.o
+        CFLAGS += $(shell echo "pshufb %xmm0, %xmm0" | \
+                    gcc -c -x assembler - >&/dev/null && \
+                    rm ./-.o && echo -DCONFIG_AS_SSSE3=1)
         CFLAGS += $(shell echo "vpbroadcastb %xmm0, %ymm1" | \
                     gcc -c -x assembler - >&/dev/null && \
                     rm ./-.o && echo -DCONFIG_AS_AVX2=1)