Commit d429a363 authored by Linus Torvalds

Merge branch 'for-3.17/drivers' of git://git.kernel.dk/linux-block

Pull block driver changes from Jens Axboe:
 "Nothing out of the ordinary here, this pull request contains:

   - A big round of fixes for bcache from Kent Overstreet, Slava Pestov,
     and Surbhi Palande.  No new features, just a lot of fixes.

   - The usual round of drbd updates from Andreas Gruenbacher, Lars
     Ellenberg, and Philipp Reisner.

   - virtio_blk was converted to blk-mq back in 3.13, but now Ming Lei
     has taken it one step further and added support for actually using
     more than one queue.

   - Addition of an explicit SG_FLAG_Q_AT_HEAD for block/bsg, to
     complement the default behavior of adding to the tail of the
     queue.  From Douglas Gilbert"

* 'for-3.17/drivers' of git://git.kernel.dk/linux-block: (86 commits)
  bcache: Drop unneeded blk_sync_queue() calls
  bcache: add mutex lock for bch_is_open
  bcache: Correct printing of btree_gc_max_duration_ms
  bcache: try to set b->parent properly
  bcache: fix memory corruption in init error path
  bcache: fix crash with incomplete cache set
  bcache: Fix more early shutdown bugs
  bcache: fix use-after-free in btree_gc_coalesce()
  bcache: Fix an infinite loop in journal replay
  bcache: fix crash in bcache_btree_node_alloc_fail tracepoint
  bcache: bcache_write tracepoint was crashing
  bcache: fix typo in bch_bkey_equal_header
  bcache: Allocate bounce buffers with GFP_NOWAIT
  bcache: Make sure to pass GFP_WAIT to mempool_alloc()
  bcache: fix uninterruptible sleep in writeback thread
  bcache: wait for buckets when allocating new btree root
  bcache: fix crash on shutdown in passthrough mode
  bcache: fix lockdep warnings on shutdown
  bcache allocator: send discards with correct size
  bcache: Fix to remove the rcu_sched stalls.
  ...
parents 4a319a49 99d54001
@@ -290,6 +290,7 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
         unsigned long start_time;
         ssize_t ret = 0;
         int writing = 0;
+        int at_head = 0;
         struct request *rq;
         char sense[SCSI_SENSE_BUFFERSIZE];
         struct bio *bio;
@@ -313,6 +314,8 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
         case SG_DXFER_FROM_DEV:
                 break;
         }
+        if (hdr->flags & SG_FLAG_Q_AT_HEAD)
+                at_head = 1;

         rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
         if (!rq)
@@ -369,7 +372,7 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
          * (if he doesn't check that is his problem).
          * N.B. a non-zero SCSI status is _not_ necessarily an error.
          */
-        blk_execute_rq(q, bd_disk, rq, 0);
+        blk_execute_rq(q, bd_disk, rq, at_head);

         hdr->duration = jiffies_to_msecs(jiffies - start_time);
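The kernel side above only reads one new bit out of hdr->flags; a caller opts in from userspace by setting that bit in sg_io_hdr.flags before issuing the SG_IO ioctl. A minimal sketch of such a caller follows; the device path, the INQUIRY CDB and the buffer sizes are illustrative and not taken from the patch, and SG_FLAG_Q_AT_HEAD is only available in headers that include this series.

/* Illustrative userspace caller: INQUIRY via SG_IO, queued at head. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

int main(void)
{
        unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };   /* INQUIRY, 96 bytes */
        unsigned char buf[96], sense[32];
        struct sg_io_hdr hdr;
        int fd = open("/dev/sda", O_RDWR);   /* any SG_IO-capable block device */

        if (fd < 0)
                return 1;

        memset(&hdr, 0, sizeof(hdr));
        hdr.interface_id = 'S';
        hdr.cmdp = cdb;
        hdr.cmd_len = sizeof(cdb);
        hdr.dxfer_direction = SG_DXFER_FROM_DEV;
        hdr.dxferp = buf;
        hdr.dxfer_len = sizeof(buf);
        hdr.sbp = sense;
        hdr.mx_sb_len = sizeof(sense);
        hdr.timeout = 20000;                 /* milliseconds */
        hdr.flags = SG_FLAG_Q_AT_HEAD;       /* head of queue instead of the default tail */

        if (ioctl(fd, SG_IO, &hdr) < 0)
                perror("SG_IO");
        return 0;
}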
@@ -3,5 +3,6 @@ drbd-y += drbd_worker.o drbd_receiver.o drbd_req.o drbd_actlog.o
 drbd-y += drbd_main.o drbd_strings.o drbd_nl.o
 drbd-y += drbd_interval.o drbd_state.o
 drbd-y += drbd_nla.o
+drbd-$(CONFIG_DEBUG_FS) += drbd_debugfs.o

 obj-$(CONFIG_BLK_DEV_DRBD) += drbd.o
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include "drbd_int.h"
#ifdef CONFIG_DEBUG_FS
int __init drbd_debugfs_init(void);
void drbd_debugfs_cleanup(void);
void drbd_debugfs_resource_add(struct drbd_resource *resource);
void drbd_debugfs_resource_cleanup(struct drbd_resource *resource);
void drbd_debugfs_connection_add(struct drbd_connection *connection);
void drbd_debugfs_connection_cleanup(struct drbd_connection *connection);
void drbd_debugfs_device_add(struct drbd_device *device);
void drbd_debugfs_device_cleanup(struct drbd_device *device);
void drbd_debugfs_peer_device_add(struct drbd_peer_device *peer_device);
void drbd_debugfs_peer_device_cleanup(struct drbd_peer_device *peer_device);
#else
static inline int __init drbd_debugfs_init(void) { return -ENODEV; }
static inline void drbd_debugfs_cleanup(void) { }
static inline void drbd_debugfs_resource_add(struct drbd_resource *resource) { }
static inline void drbd_debugfs_resource_cleanup(struct drbd_resource *resource) { }
static inline void drbd_debugfs_connection_add(struct drbd_connection *connection) { }
static inline void drbd_debugfs_connection_cleanup(struct drbd_connection *connection) { }
static inline void drbd_debugfs_device_add(struct drbd_device *device) { }
static inline void drbd_debugfs_device_cleanup(struct drbd_device *device) { }
static inline void drbd_debugfs_peer_device_add(struct drbd_peer_device *peer_device) { }
static inline void drbd_debugfs_peer_device_cleanup(struct drbd_peer_device *peer_device) { }
#endif
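The !CONFIG_DEBUG_FS stubs above make these hooks safe to call unconditionally: drbd_debugfs_init() simply reports -ENODEV when debugfs support is compiled out. A minimal sketch of how a module init/exit pair might wire this up; the function names and the pr_notice() handling here are illustrative, not copied from drbd_main.c.

#include <linux/module.h>
#include <linux/printk.h>
#include "drbd_debugfs.h"

static int __init debugfs_wiring_sketch_init(void)
{
        /* With CONFIG_DEBUG_FS=n the stub returns -ENODEV; treat that as
         * "no debugfs", not as a reason to fail module load. */
        if (drbd_debugfs_init())
                pr_notice("drbd: debugfs not available\n");
        return 0;
}

static void __exit debugfs_wiring_sketch_exit(void)
{
        drbd_debugfs_cleanup();
}

module_init(debugfs_wiring_sketch_init);
module_exit(debugfs_wiring_sketch_exit);
MODULE_LICENSE("GPL");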
@@ -10,7 +10,9 @@ struct drbd_interval {
         unsigned int size;      /* size in bytes */
         sector_t end;           /* highest interval end in subtree */
         int local:1             /* local or remote request? */;
-        int waiting:1;
+        int waiting:1;          /* someone is waiting for this to complete */
+        int completed:1;        /* this has been completed already;
+                                 * ignore for conflict detection */
 };

 static inline void drbd_clear_interval(struct drbd_interval *i)
@@ -60,20 +60,65 @@ static void seq_printf_with_thousands_grouping(struct seq_file *seq, long v)
                 seq_printf(seq, "%ld", v);
 }

+static void drbd_get_syncer_progress(struct drbd_device *device,
+                union drbd_dev_state state, unsigned long *rs_total,
+                unsigned long *bits_left, unsigned int *per_mil_done)
+{
+        /* this is to break it at compile time when we change that, in case we
+         * want to support more than (1<<32) bits on a 32bit arch. */
+        typecheck(unsigned long, device->rs_total);
+        *rs_total = device->rs_total;
+
+        /* note: both rs_total and rs_left are in bits, i.e. in
+         * units of BM_BLOCK_SIZE.
+         * for the percentage, we don't care. */
+
+        if (state.conn == C_VERIFY_S || state.conn == C_VERIFY_T)
+                *bits_left = device->ov_left;
+        else
+                *bits_left = drbd_bm_total_weight(device) - device->rs_failed;
+        /* >> 10 to prevent overflow,
+         * +1 to prevent division by zero */
+        if (*bits_left > *rs_total) {
+                /* D'oh. Maybe a logic bug somewhere. More likely just a race
+                 * between state change and reset of rs_total.
+                 */
+                *bits_left = *rs_total;
+                *per_mil_done = *rs_total ? 0 : 1000;
+        } else {
+                /* Make sure the division happens in long context.
+                 * We allow up to one petabyte storage right now,
+                 * at a granularity of 4k per bit that is 2**38 bits.
+                 * After shift right and multiplication by 1000,
+                 * this should still fit easily into a 32bit long,
+                 * so we don't need a 64bit division on 32bit arch.
+                 * Note: currently we don't support such large bitmaps on 32bit
+                 * arch anyways, but no harm done to be prepared for it here.
+                 */
+                unsigned int shift = *rs_total > UINT_MAX ? 16 : 10;
+                unsigned long left = *bits_left >> shift;
+                unsigned long total = 1UL + (*rs_total >> shift);
+                unsigned long tmp = 1000UL - left * 1000UL/total;
+                *per_mil_done = tmp;
+        }
+}
+
 /*lge
  * progress bars shamelessly adapted from driver/md/md.c
  * output looks like
  * [=====>..............] 33.5% (23456/123456)
  *      finish: 2:20:20 speed: 6,345 (6,456) K/sec
  */
-static void drbd_syncer_progress(struct drbd_device *device, struct seq_file *seq)
+static void drbd_syncer_progress(struct drbd_device *device, struct seq_file *seq,
+                union drbd_dev_state state)
 {
-        unsigned long db, dt, dbdt, rt, rs_left;
+        unsigned long db, dt, dbdt, rt, rs_total, rs_left;
         unsigned int res;
         int i, x, y;
         int stalled = 0;

-        drbd_get_syncer_progress(device, &rs_left, &res);
+        drbd_get_syncer_progress(device, state, &rs_total, &rs_left, &res);

         x = res/50;
         y = 20-x;
@@ -85,21 +130,21 @@ static void drbd_syncer_progress(struct drbd_device *device, struct seq_file *seq)
                 seq_printf(seq, ".");
         seq_printf(seq, "] ");

-        if (device->state.conn == C_VERIFY_S || device->state.conn == C_VERIFY_T)
+        if (state.conn == C_VERIFY_S || state.conn == C_VERIFY_T)
                 seq_printf(seq, "verified:");
         else
                 seq_printf(seq, "sync'ed:");
         seq_printf(seq, "%3u.%u%% ", res / 10, res % 10);

         /* if more than a few GB, display in MB */
-        if (device->rs_total > (4UL << (30 - BM_BLOCK_SHIFT)))
+        if (rs_total > (4UL << (30 - BM_BLOCK_SHIFT)))
                 seq_printf(seq, "(%lu/%lu)M",
                             (unsigned long) Bit2KB(rs_left >> 10),
-                            (unsigned long) Bit2KB(device->rs_total >> 10));
+                            (unsigned long) Bit2KB(rs_total >> 10));
         else
                 seq_printf(seq, "(%lu/%lu)K\n\t",
                             (unsigned long) Bit2KB(rs_left),
-                            (unsigned long) Bit2KB(device->rs_total));
+                            (unsigned long) Bit2KB(rs_total));

         /* see drivers/md/md.c
          * We do not want to overflow, so the order of operands and
@@ -150,13 +195,13 @@ static void drbd_syncer_progress(struct drbd_device *device, struct seq_file *seq)
         dt = (jiffies - device->rs_start - device->rs_paused) / HZ;
         if (dt == 0)
                 dt = 1;
-        db = device->rs_total - rs_left;
+        db = rs_total - rs_left;
         dbdt = Bit2KB(db/dt);
         seq_printf_with_thousands_grouping(seq, dbdt);
         seq_printf(seq, ")");

-        if (device->state.conn == C_SYNC_TARGET ||
-            device->state.conn == C_VERIFY_S) {
+        if (state.conn == C_SYNC_TARGET ||
+            state.conn == C_VERIFY_S) {
                 seq_printf(seq, " want: ");
                 seq_printf_with_thousands_grouping(seq, device->c_sync_rate);
         }
@@ -168,8 +213,8 @@ static void drbd_syncer_progress(struct drbd_device *device, struct seq_file *seq)
                 unsigned long bm_bits = drbd_bm_bits(device);
                 unsigned long bit_pos;
                 unsigned long long stop_sector = 0;
-                if (device->state.conn == C_VERIFY_S ||
-                    device->state.conn == C_VERIFY_T) {
+                if (state.conn == C_VERIFY_S ||
+                    state.conn == C_VERIFY_T) {
                         bit_pos = bm_bits - device->ov_left;
                         if (verify_can_do_stop_sector(device))
                                 stop_sector = device->ov_stop_sector;
@@ -188,22 +233,13 @@ static void drbd_syncer_progress(struct drbd_device *device, struct seq_file *seq)
         }
 }

-static void resync_dump_detail(struct seq_file *seq, struct lc_element *e)
-{
-        struct bm_extent *bme = lc_entry(e, struct bm_extent, lce);
-
-        seq_printf(seq, "%5d %s %s\n", bme->rs_left,
-                   bme->flags & BME_NO_WRITES ? "NO_WRITES" : "---------",
-                   bme->flags & BME_LOCKED ? "LOCKED" : "------"
-                   );
-}
-
 static int drbd_seq_show(struct seq_file *seq, void *v)
 {
         int i, prev_i = -1;
         const char *sn;
         struct drbd_device *device;
         struct net_conf *nc;
+        union drbd_dev_state state;
         char wp;

         static char write_ordering_chars[] = {
@@ -241,11 +277,12 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
                         seq_printf(seq, "\n");
                 prev_i = i;

-                sn = drbd_conn_str(device->state.conn);
+                state = device->state;
+                sn = drbd_conn_str(state.conn);

-                if (device->state.conn == C_STANDALONE &&
-                    device->state.disk == D_DISKLESS &&
-                    device->state.role == R_SECONDARY) {
+                if (state.conn == C_STANDALONE &&
+                    state.disk == D_DISKLESS &&
+                    state.role == R_SECONDARY) {
                         seq_printf(seq, "%2d: cs:Unconfigured\n", i);
                 } else {
                         /* reset device->congestion_reason */
@@ -258,15 +295,15 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
                            " ns:%u nr:%u dw:%u dr:%u al:%u bm:%u "
                            "lo:%d pe:%d ua:%d ap:%d ep:%d wo:%c",
                            i, sn,
-                           drbd_role_str(device->state.role),
-                           drbd_role_str(device->state.peer),
-                           drbd_disk_str(device->state.disk),
-                           drbd_disk_str(device->state.pdsk),
+                           drbd_role_str(state.role),
+                           drbd_role_str(state.peer),
+                           drbd_disk_str(state.disk),
+                           drbd_disk_str(state.pdsk),
                            wp,
                            drbd_suspended(device) ? 's' : 'r',
-                           device->state.aftr_isp ? 'a' : '-',
-                           device->state.peer_isp ? 'p' : '-',
-                           device->state.user_isp ? 'u' : '-',
+                           state.aftr_isp ? 'a' : '-',
+                           state.peer_isp ? 'p' : '-',
+                           state.user_isp ? 'u' : '-',
                            device->congestion_reason ?: '-',
                            test_bit(AL_SUSPENDED, &device->flags) ? 's' : '-',
                            device->send_cnt/2,
@@ -281,17 +318,17 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
                            atomic_read(&device->unacked_cnt),
                            atomic_read(&device->ap_bio_cnt),
                            first_peer_device(device)->connection->epochs,
-                           write_ordering_chars[first_peer_device(device)->connection->write_ordering]
+                           write_ordering_chars[device->resource->write_ordering]
                         );
                         seq_printf(seq, " oos:%llu\n",
                                    Bit2KB((unsigned long long)
                                           drbd_bm_total_weight(device)));
                 }
-                if (device->state.conn == C_SYNC_SOURCE ||
-                    device->state.conn == C_SYNC_TARGET ||
-                    device->state.conn == C_VERIFY_S ||
-                    device->state.conn == C_VERIFY_T)
-                        drbd_syncer_progress(device, seq);
+                if (state.conn == C_SYNC_SOURCE ||
+                    state.conn == C_SYNC_TARGET ||
+                    state.conn == C_VERIFY_S ||
+                    state.conn == C_VERIFY_T)
+                        drbd_syncer_progress(device, seq, state);

                 if (proc_details >= 1 && get_ldev_if_state(device, D_FAILED)) {
                         lc_seq_printf_stats(seq, device->resync);
@@ -299,12 +336,8 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
                         put_ldev(device);
                 }

-                if (proc_details >= 2) {
-                        if (device->resync) {
-                                lc_seq_dump_details(seq, device->resync, "rs_left",
-                                                    resync_dump_detail);
-                        }
-                }
+                if (proc_details >= 2)
+                        seq_printf(seq, "\tblocked on activity log: %d\n", atomic_read(&device->ap_actlog_cnt));
         }
         rcu_read_unlock();
@@ -316,7 +349,7 @@ static int drbd_proc_open(struct inode *inode, struct file *file)
         int err;

         if (try_module_get(THIS_MODULE)) {
-                err = single_open(file, drbd_seq_show, PDE_DATA(inode));
+                err = single_open(file, drbd_seq_show, NULL);
                 if (err)
                         module_put(THIS_MODULE);
                 return err;
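The overflow reasoning in drbd_get_syncer_progress() above is easy to check with concrete numbers outside the kernel: at one petabyte and 4 KiB per bit, rs_total is 2^38 bits, and after the >> 16 the intermediate product stays near 2*10^9. A standalone userspace copy of the same shift-then-scale arithmetic follows; the bits_left > rs_total clamp is omitted, and the 2^38 example value assumes a 64-bit unsigned long.

#include <limits.h>
#include <stdio.h>

/* Userspace copy of the per-mil calculation from drbd_get_syncer_progress(). */
static unsigned int per_mil_done(unsigned long rs_total, unsigned long bits_left)
{
        /* >> 16 for huge bitmaps, >> 10 otherwise; the +1 avoids division by zero */
        unsigned int shift = rs_total > UINT_MAX ? 16 : 10;
        unsigned long left = bits_left >> shift;
        unsigned long total = 1UL + (rs_total >> shift);

        return 1000UL - left * 1000UL / total;
}

int main(void)
{
        unsigned long rs_total = 1UL << 38;      /* 1 PiB at 4 KiB per bit */
        unsigned long bits_left = rs_total / 2;  /* half still out of sync */
        unsigned int pm = per_mil_done(rs_total, bits_left);

        /* left is 2^21 here, so left * 1000 is about 2.1e9: no 64-bit division needed */
        printf("%u.%u%% done\n", pm / 10, pm % 10);
        return 0;
}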
@@ -288,6 +288,7 @@ extern void complete_master_bio(struct drbd_device *device,
 extern void request_timer_fn(unsigned long data);
 extern void tl_restart(struct drbd_connection *connection, enum drbd_req_event what);
 extern void _tl_restart(struct drbd_connection *connection, enum drbd_req_event what);
+extern void tl_abort_disk_io(struct drbd_device *device);

 /* this is in drbd_main.c */
 extern void drbd_restart_request(struct drbd_request *req);
@@ -331,7 +331,7 @@ static int bch_allocator_thread(void *arg)
                         mutex_unlock(&ca->set->bucket_lock);
                         blkdev_issue_discard(ca->bdev,
                                 bucket_to_sector(ca->set, bucket),
-                                ca->sb.block_size, GFP_KERNEL, 0);
+                                ca->sb.bucket_size, GFP_KERNEL, 0);
                         mutex_lock(&ca->set->bucket_lock);
                 }
@@ -477,9 +477,13 @@ struct gc_stat {
  * CACHE_SET_STOPPING always gets set first when we're closing down a cache set;
  * we'll continue to run normally for awhile with CACHE_SET_STOPPING set (i.e.
  * flushing dirty data).
+ *
+ * CACHE_SET_RUNNING means all cache devices have been registered and journal
+ * replay is complete.
  */
 #define CACHE_SET_UNREGISTERING         0
 #define CACHE_SET_STOPPING              1
+#define CACHE_SET_RUNNING               2

 struct cache_set {
         struct closure          cl;
@@ -1182,7 +1182,7 @@ static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
 {
         uint64_t start_time;
         bool used_mempool = false;
-        struct bset *out = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOIO,
+        struct bset *out = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOWAIT,
                                                      order);
         if (!out) {
                 struct page *outp;
@@ -453,7 +453,7 @@ static inline bool bch_bkey_equal_header(const struct bkey *l,
 {
         return (KEY_DIRTY(l) == KEY_DIRTY(r) &&
                 KEY_PTRS(l) == KEY_PTRS(r) &&
-                KEY_CSUM(l) == KEY_CSUM(l));
+                KEY_CSUM(l) == KEY_CSUM(r));
 }

 /* Keylists */
@@ -242,9 +242,10 @@ void __bch_btree_node_write(struct btree *, struct closure *);
 void bch_btree_node_write(struct btree *, struct closure *);

 void bch_btree_set_root(struct btree *);
-struct btree *bch_btree_node_alloc(struct cache_set *, struct btree_op *, int);
+struct btree *__bch_btree_node_alloc(struct cache_set *, struct btree_op *,
+                                     int, bool, struct btree *);
 struct btree *bch_btree_node_get(struct cache_set *, struct btree_op *,
-                                struct bkey *, int, bool);
+                                struct bkey *, int, bool, struct btree *);

 int bch_btree_insert_check_key(struct btree *, struct btree_op *,
                                struct bkey *);
@@ -9,5 +9,6 @@ struct cache_set;
 void bch_extent_to_text(char *, size_t, const struct bkey *);
 bool __bch_btree_ptr_invalid(struct cache_set *, const struct bkey *);
+bool __bch_extent_invalid(struct cache_set *, const struct bkey *);

 #endif /* _BCACHE_EXTENTS_H */
@@ -311,7 +311,8 @@ void bch_data_insert(struct closure *cl)
 {
         struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

-        trace_bcache_write(op->bio, op->writeback, op->bypass);
+        trace_bcache_write(op->c, op->inode, op->bio,
+                           op->writeback, op->bypass);

         bch_keylist_init(&op->insert_keys);
         bio_get(op->bio);