Commit 0d167518 authored by Linus Torvalds

Merge branch 'for-3.5/core' of git://git.kernel.dk/linux-block

Merge block/IO core bits from Jens Axboe:
 "This is a bit bigger on the core side than usual, but that is purely
  because we decided to hold off on parts of Tejun's submission on 3.4
  to give it a bit more time to simmer.  As a consequence, it's seen a
  long cycle in for-next.

  It contains:

   - Bug fix from Dan, wrong locking type.
   - Relax splice gifting restriction from Eric.
   - A ton of updates from Tejun, primarily for blkcg.  This improves
     the code a lot, making the API nicer and cleaner, and also includes
     fixes for how we handle and tie policies and re-activate on
     switches.  The changes also include generic bug fixes.
   - A simple fix from Vivek, along with a fix for doing proper delayed
     allocation of the blkcg stats."

Fix up annoying conflict just due to different merge resolution in
Documentation/feature-removal-schedule.txt

* 'for-3.5/core' of git://git.kernel.dk/linux-block: (92 commits)
  blkcg: tg_stats_alloc_lock is an irq lock
  vmsplice: relax alignement requirements for SPLICE_F_GIFT
  blkcg: use radix tree to index blkgs from blkcg
  blkcg: fix blkcg->css ref leak in __blkg_lookup_create()
  block: fix elvpriv allocation failure handling
  block: collapse blk_alloc_request() into get_request()
  blkcg: collapse blkcg_policy_ops into blkcg_policy
  blkcg: embed struct blkg_policy_data in policy specific data
  blkcg: mass rename of blkcg API
  blkcg: style cleanups for blk-cgroup.h
  blkcg: remove blkio_group->path[]
  blkcg: blkg_rwstat_read() was missing inline
  blkcg: shoot down blkgs if all policies are deactivated
  blkcg: drop stuff unused after per-queue policy activation update
  blkcg: implement per-queue policy activation
  blkcg: add request_queue->root_blkg
  blkcg: make request_queue bypassing on allocation
  blkcg: make sure blkg_lookup() returns %NULL if @q is bypassing
  blkcg: make blkg_conf_prep() take @pol and return with queue lock held
  blkcg: remove static policy ID enums
  ...
parents 2f83766d ff26eaad
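
The bulk of Tejun's blkcg rework appears to sit in the diffs that are collapsed further down this page, so only its shape can be shown here. Going by the shortlog entries above ("blkcg: implement per-queue policy activation", "blkcg: collapse blkcg_policy_ops into blkcg_policy"), an iosched now ties its policy to a specific queue with an activate/deactivate pair roughly like the following sketch; blkcg_policy_foo and the foo_* helpers are stand-ins, not code from the series.

#include "blk-cgroup.h"

/* Sketch only: enable/disable a blkcg policy for one request_queue.
 * blkcg_activate_policy() is expected to drain @q in bypass mode,
 * allocate per-blkg policy data, and undo everything on failure. */
static int foo_activate_policy(struct request_queue *q)
{
	return blkcg_activate_policy(q, &blkcg_policy_foo);
}

static void foo_deactivate_policy(struct request_queue *q)
{
	blkcg_deactivate_policy(q, &blkcg_policy_foo);
}
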
@@ -23,8 +23,6 @@ config IOSCHED_DEADLINE
config IOSCHED_CFQ
tristate "CFQ I/O scheduler"
# If BLK_CGROUP is a module, CFQ has to be built as module.
depends on (BLK_CGROUP=m && m) || !BLK_CGROUP || BLK_CGROUP=y
default y
---help---
The CFQ I/O scheduler tries to distribute bandwidth equally
@@ -34,8 +32,6 @@ config IOSCHED_CFQ
This is the default I/O scheduler.
Note: If BLK_CGROUP=m, then CFQ can be built only as module.
config CFQ_GROUP_IOSCHED
bool "CFQ Group Scheduling support"
depends on IOSCHED_CFQ && BLK_CGROUP
......
This diff is collapsed. (3 files)
@@ -155,20 +155,20 @@ void put_io_context(struct io_context *ioc)
}
EXPORT_SYMBOL(put_io_context);
/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
/**
* put_io_context_active - put active reference on ioc
* @ioc: ioc of interest
*
* Undo get_io_context_active(). If active reference reaches zero after
* put, @ioc can never issue further IOs and ioscheds are notified.
*/
void put_io_context_active(struct io_context *ioc)
{
struct io_context *ioc;
struct io_cq *icq;
struct hlist_node *n;
unsigned long flags;
struct io_cq *icq;
task_lock(task);
ioc = task->io_context;
task->io_context = NULL;
task_unlock(task);
if (!atomic_dec_and_test(&ioc->nr_tasks)) {
if (!atomic_dec_and_test(&ioc->active_ref)) {
put_io_context(ioc);
return;
}
@@ -197,6 +197,20 @@ void exit_io_context(struct task_struct *task)
put_io_context(ioc);
}
/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
struct io_context *ioc;
task_lock(task);
ioc = task->io_context;
task->io_context = NULL;
task_unlock(task);
atomic_dec(&ioc->nr_tasks);
put_io_context_active(ioc);
}
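
The two hunks above split the old single exit path: an io_context now keeps a separate active_ref next to its plain refcount, exit_io_context() becomes a thin wrapper that drops the task count and then the active reference, and ioscheds are only told when the last active reference goes away. A minimal sketch of the intended pairing, assuming the get_io_context_active() helper added alongside put_io_context_active() bumps both counts:

#include <linux/iocontext.h>

/* Not from the patch: keep @ioc "active" while issuing IO on its
 * behalf, then release it again. */
static void foo_issue_as(struct io_context *ioc)
{
	get_io_context_active(ioc);	/* plain ref + active_ref */

	/* ... submit bios attributed to @ioc ... */

	put_io_context_active(ioc);	/* drops active_ref and the plain ref;
					 * ioscheds are notified once
					 * active_ref reaches zero */
}
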
/**
* ioc_clear_queue - break any ioc association with the specified queue
* @q: request_queue being cleared
@@ -218,19 +232,18 @@ void ioc_clear_queue(struct request_queue *q)
}
}
void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
int node)
int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
{
struct io_context *ioc;
ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
node);
if (unlikely(!ioc))
return;
return -ENOMEM;
/* initialize */
atomic_long_set(&ioc->refcount, 1);
atomic_set(&ioc->nr_tasks, 1);
atomic_set(&ioc->active_ref, 1);
spin_lock_init(&ioc->lock);
INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
INIT_HLIST_HEAD(&ioc->icq_list);
@@ -250,6 +263,8 @@ void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
else
kmem_cache_free(iocontext_cachep, ioc);
task_unlock(task);
return 0;
}
/**
@@ -281,7 +296,7 @@ struct io_context *get_task_io_context(struct task_struct *task,
return ioc;
}
task_unlock(task);
} while (create_io_context(task, gfp_flags, node));
} while (!create_task_io_context(task, gfp_flags, node));
return NULL;
}
@@ -325,26 +340,23 @@ EXPORT_SYMBOL(ioc_lookup_icq);
/**
* ioc_create_icq - create and link io_cq
* @ioc: io_context of interest
* @q: request_queue of interest
* @gfp_mask: allocation mask
*
* Make sure io_cq linking %current->io_context and @q exists. If either
* io_context and/or icq don't exist, they will be created using @gfp_mask.
* Make sure io_cq linking @ioc and @q exists. If icq doesn't exist, they
* will be created using @gfp_mask.
*
* The caller is responsible for ensuring @ioc won't go away and @q is
* alive and will stay alive until this function returns.
*/
struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask)
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
gfp_t gfp_mask)
{
struct elevator_type *et = q->elevator->type;
struct io_context *ioc;
struct io_cq *icq;
/* allocate stuff */
ioc = create_io_context(current, gfp_mask, q->node);
if (!ioc)
return NULL;
icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
q->node);
if (!icq)
@@ -382,74 +394,6 @@ struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask)
return icq;
}
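
After this change ioc_create_icq() no longer conjures up the io_context itself: the caller resolves @ioc first and guarantees that both @ioc and @q stay alive across the call, as the updated comment above spells out. A sketch of such a caller, loosely modeled on the request allocation path (foo_get_icq() is illustrative, not code from the series):

/* Sketch: find or create the icq linking @ioc and @q. Caller holds a
 * reference on @ioc and keeps @q alive. */
static struct io_cq *foo_get_icq(struct io_context *ioc,
				 struct request_queue *q, gfp_t gfp_mask)
{
	struct io_cq *icq;

	spin_lock_irq(q->queue_lock);
	icq = ioc_lookup_icq(ioc, q);		/* needs queue_lock */
	spin_unlock_irq(q->queue_lock);

	if (!icq)
		icq = ioc_create_icq(ioc, q, gfp_mask);	/* may sleep, may fail */
	return icq;
}
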
void ioc_set_icq_flags(struct io_context *ioc, unsigned int flags)
{
struct io_cq *icq;
struct hlist_node *n;
hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node)
icq->flags |= flags;
}
/**
* ioc_ioprio_changed - notify ioprio change
* @ioc: io_context of interest
* @ioprio: new ioprio
*
* @ioc's ioprio has changed to @ioprio. Set %ICQ_IOPRIO_CHANGED for all
* icq's. iosched is responsible for checking the bit and applying it on
* request issue path.
*/
void ioc_ioprio_changed(struct io_context *ioc, int ioprio)
{
unsigned long flags;
spin_lock_irqsave(&ioc->lock, flags);
ioc->ioprio = ioprio;
ioc_set_icq_flags(ioc, ICQ_IOPRIO_CHANGED);
spin_unlock_irqrestore(&ioc->lock, flags);
}
/**
* ioc_cgroup_changed - notify cgroup change
* @ioc: io_context of interest
*
* @ioc's cgroup has changed. Set %ICQ_CGROUP_CHANGED for all icq's.
* iosched is responsible for checking the bit and applying it on request
* issue path.
*/
void ioc_cgroup_changed(struct io_context *ioc)
{
unsigned long flags;
spin_lock_irqsave(&ioc->lock, flags);
ioc_set_icq_flags(ioc, ICQ_CGROUP_CHANGED);
spin_unlock_irqrestore(&ioc->lock, flags);
}
EXPORT_SYMBOL(ioc_cgroup_changed);
/**
* icq_get_changed - fetch and clear icq changed mask
* @icq: icq of interest
*
* Fetch and clear ICQ_*_CHANGED bits from @icq. Grabs and releases
* @icq->ioc->lock.
*/
unsigned icq_get_changed(struct io_cq *icq)
{
unsigned int changed = 0;
unsigned long flags;
if (unlikely(icq->flags & ICQ_CHANGED_MASK)) {
spin_lock_irqsave(&icq->ioc->lock, flags);
changed = icq->flags & ICQ_CHANGED_MASK;
icq->flags &= ~ICQ_CHANGED_MASK;
spin_unlock_irqrestore(&icq->ioc->lock, flags);
}
return changed;
}
EXPORT_SYMBOL(icq_get_changed);
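
All three notification helpers above, together with their ICQ_*_CHANGED flags, are removed by this series: ioprio and cgroup changes are no longer pushed to every icq, and an iosched is expected to notice them itself on the request issue path. A sketch of that lazy-check pattern, with foo_icq as a hypothetical icq wrapper (cfq ends up doing essentially this after the series):

/* Hypothetical iosched-private icq; the embedded io_cq goes first,
 * following cfq's convention. */
struct foo_icq {
	struct io_cq icq;
	int cached_ioprio;	/* ioprio this iosched last acted on */
};

/* Called while setting up each request instead of reacting to
 * ICQ_IOPRIO_CHANGED. */
static void foo_check_ioprio(struct foo_icq *fic)
{
	int ioprio = fic->icq.ioc->ioprio;

	if (unlikely(fic->cached_ioprio != ioprio)) {
		/* re-derive per-priority state here */
		fic->cached_ioprio = ioprio;
	}
}
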
static int __init blk_ioc_init(void)
{
iocontext_cachep = kmem_cache_create("blkdev_ioc",
......
@@ -9,6 +9,7 @@
#include <linux/blktrace_api.h>
#include "blk.h"
#include "blk-cgroup.h"
struct queue_sysfs_entry {
struct attribute attr;
@@ -479,6 +480,8 @@ static void blk_release_queue(struct kobject *kobj)
blk_sync_queue(q);
blkcg_exit_queue(q);
if (q->elevator) {
spin_lock_irq(q->queue_lock);
ioc_clear_queue(q);
@@ -486,15 +489,12 @@ static void blk_release_queue(struct kobject *kobj)
elevator_exit(q->elevator);
}
blk_throtl_exit(q);
if (rl->rq_pool)
mempool_destroy(rl->rq_pool);
if (q->queue_tags)
__blk_queue_free_tags(q);
blk_throtl_release(q);
blk_trace_shutdown(q);
bdi_destroy(&q->backing_dev_info);
......
This diff is collapsed.
@@ -23,7 +23,8 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
struct bio *bio);
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
struct bio *bio);
void blk_drain_queue(struct request_queue *q, bool drain_all);
void blk_queue_bypass_start(struct request_queue *q);
void blk_queue_bypass_end(struct request_queue *q);
void blk_dequeue_request(struct request *rq);
void __blk_queue_free_tags(struct request_queue *q);
bool __blk_end_bidi_request(struct request *rq, int error,
@@ -144,9 +145,6 @@ void blk_queue_congestion_threshold(struct request_queue *q);
int blk_dev_init(void);
void elv_quiesce_start(struct request_queue *q);
void elv_quiesce_end(struct request_queue *q);
/*
* Return the threshold (number of used requests) at which the queue is
@@ -186,32 +184,30 @@ static inline int blk_do_io_stat(struct request *rq)
*/
void get_io_context(struct io_context *ioc);
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask);
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
gfp_t gfp_mask);
void ioc_clear_queue(struct request_queue *q);
void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_mask,
int node);
int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);
/**
* create_io_context - try to create task->io_context
* @task: target task
* @gfp_mask: allocation mask
* @node: allocation node
*
* If @task->io_context is %NULL, allocate a new io_context and install it.
* Returns the current @task->io_context which may be %NULL if allocation
* failed.
* If %current->io_context is %NULL, allocate a new io_context and install
* it. Returns the current %current->io_context which may be %NULL if
* allocation failed.
*
* Note that this function can't be called with IRQ disabled because
* task_lock which protects @task->io_context is IRQ-unsafe.
* task_lock which protects %current->io_context is IRQ-unsafe.
*/
static inline struct io_context *create_io_context(struct task_struct *task,
gfp_t gfp_mask, int node)
static inline struct io_context *create_io_context(gfp_t gfp_mask, int node)
{
WARN_ON_ONCE(irqs_disabled());
if (unlikely(!task->io_context))
create_io_context_slowpath(task, gfp_mask, node);
return task->io_context;
if (unlikely(!current->io_context))
create_task_io_context(current, gfp_mask, node);
return current->io_context;
}
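
create_io_context() is now hard-wired to %current and the slow path reports failure through its return value. A small sketch of a submission-path caller under the new contract (foo_prepare_issue() is illustrative, not part of the series):

/* Sketch: make sure %current has an io_context before attributing IO
 * to it. Must not be called with IRQs disabled, since the slow path
 * takes task_lock(). */
static int foo_prepare_issue(struct request_queue *q, gfp_t gfp_mask)
{
	if (!create_io_context(gfp_mask, q->node))
		return -ENOMEM;		/* create_task_io_context() failed */
	return 0;
}

Callers that act on another task, such as get_task_io_context() in the hunks above, call create_task_io_context() directly and use its error code to break out of their retry loop.
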
/*
@@ -222,7 +218,6 @@ extern bool blk_throtl_bio(struct request_queue *q, struct bio *bio);
extern void blk_throtl_drain(struct request_queue *q);
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
extern void blk_throtl_release(struct request_queue *q);
#else /* CONFIG_BLK_DEV_THROTTLING */
static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
{
@@ -231,7 +226,6 @@ static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
static inline void blk_throtl_drain(struct request_queue *q) { }
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
static inline void blk_throtl_release(struct request_queue *q) { }
#endif /* CONFIG_BLK_DEV_THROTTLING */
#endif /* BLK_INTERNAL_H */
This diff is collapsed.
#ifndef _CFQ_H
#define _CFQ_H
#include "blk-cgroup.h"
#ifdef CONFIG_CFQ_GROUP_IOSCHED
static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg,
struct blkio_group *curr_blkg, bool direction, bool sync)
{
blkiocg_update_io_add_stats(blkg, curr_blkg, direction, sync);
}
static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
unsigned long dequeue)
{
blkiocg_update_dequeue_stats(blkg, dequeue);
}
static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
unsigned long time, unsigned long unaccounted_time)
{
blkiocg_update_timeslice_used(blkg, time, unaccounted_time);
}
static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg)
{
blkiocg_set_start_empty_time(blkg);
}
static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg,
bool direction, bool sync)
{
blkiocg_update_io_remove_stats(blkg, direction, sync);
}
static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg,
bool direction, bool sync)
{
blkiocg_update_io_merged_stats(blkg, direction, sync);
}
static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg)
{
blkiocg_update_idle_time_stats(blkg);
}
static inline void
cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
{
blkiocg_update_avg_queue_size_stats(blkg);
}
static inline void
cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
{
blkiocg_update_set_idle_time_stats(blkg);
}
static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
uint64_t bytes, bool direction, bool sync)
{
blkiocg_update_dispatch_stats(blkg, bytes, direction, sync);
}
static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg, uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
{
blkiocg_update_completion_stats(blkg, start_time, io_start_time,
direction, sync);
}
static inline void cfq_blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
struct blkio_group *blkg, void *key, dev_t dev) {
blkiocg_add_blkio_group(blkcg, blkg, key, dev, BLKIO_POLICY_PROP);
}
static inline int cfq_blkiocg_del_blkio_group(struct blkio_group *blkg)
{
return blkiocg_del_blkio_group(blkg);
}
#else /* CFQ_GROUP_IOSCHED */
static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg,
struct blkio_group *curr_blkg, bool direction, bool sync) {}
static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
unsigned long dequeue) {}
static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
unsigned long time, unsigned long unaccounted_time) {}
static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg) {}
static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg,
bool direction, bool sync) {}
static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg,
bool direction, bool sync) {}
static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg)
{
}
static inline void
cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg) {}
static inline void
cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg) {}
static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
uint64_t bytes, bool direction, bool sync) {}
static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg, uint64_t start_time, uint64_t io_start_time, bool direction, bool sync) {}
static inline void cfq_blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
struct blkio_group *blkg, void *key, dev_t dev) {}
static inline int cfq_blkiocg_del_blkio_group(struct blkio_group *blkg)
{
return 0;
}
#endif /* CFQ_GROUP_IOSCHED */
#endif
@@ -337,13 +337,13 @@ static void deadline_exit_queue(struct elevator_queue *e)
/*
* initialize elevator private data (deadline_data).
*/
static void *deadline_init_queue(struct request_queue *q)
static int deadline_init_queue(struct request_queue *q)
{
struct deadline_data *dd;
dd = kmalloc_node(sizeof(*dd), GFP_KERNEL | __GFP_ZERO, q->node);
if (!dd)
return NULL;
return -ENOMEM;
INIT_LIST_HEAD(&dd->fifo_list[READ]);
INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
@@ -354,7 +354,9 @@ static void *deadline_init_queue(struct request_queue *q)
dd->writes_starved = writes_starved;
dd->front_merges = 1;
dd->fifo_batch = fifo_batch;
return dd;
q->elevator->elevator_data = dd;
return 0;
}
/*
......
This diff is collapsed.
@@ -59,15 +59,17 @@ noop_latter_request(struct request_queue *q, struct request *rq)
return list_entry(rq->queuelist.next, struct request, queuelist);
}
static void *noop_init_queue(struct request_queue *q)
static int noop_init_queue(struct request_queue *q)
{
struct noop_data *nd;
nd = kmalloc_node(sizeof(*nd), GFP_KERNEL, q->node);
if (!nd)
return NULL;
return -ENOMEM;
INIT_LIST_HEAD(&nd->queue);
return nd;
q->elevator->elevator_data = nd;
return 0;
}
static void noop_exit_queue(struct elevator_queue *e)
......
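
Both of the scheduler hunks above show the new elevator_init_fn contract: allocate the private data, attach it to q->elevator->elevator_data yourself, and return 0 or a negative errno instead of handing back the pointer. The skeleton for a hypothetical iosched therefore looks like this (foo_data and foo_init_queue are placeholders):

struct foo_data {
	struct list_head queue;
};

static int foo_init_queue(struct request_queue *q)
{
	struct foo_data *fd;

	fd = kmalloc_node(sizeof(*fd), GFP_KERNEL, q->node);
	if (!fd)
		return -ENOMEM;		/* error now propagates to the core */

	INIT_LIST_HEAD(&fd->queue);
	q->elevator->elevator_data = fd;	/* used to be "return fd" */
	return 0;
}
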
@@ -19,12 +19,14 @@
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/iocontext.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>
#include <scsi/sg.h> /* for struct sg_iovec */
#include <trace/events/block.h>
@@ -418,6 +420,7 @@ void bio_put(struct bio *bio)
* last put frees it
*/
if (atomic_dec_and_test(&bio->bi_cnt)) {
bio_disassociate_task(bio);
bio->bi_next = NULL;
bio->bi_destructor(bio);
}
@@ -1646,6 +1649,64 @@ struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
}
EXPORT_SYMBOL(bioset_create);
#ifdef CONFIG_BLK_CGROUP
/**
* bio_associate_current - associate a bio with %current
* @bio: target bio
*
* Associate @bio with %current if it hasn't been associated yet. Block
* layer will treat @bio as if it were issued by %current no matter which
* task actually issues it.
*
* This function takes an extra reference of @task's io_context and blkcg
* which will be put when @bio is released. The caller must own @bio,
* ensure %current->io_context exists, and is responsible for synchronizing
* calls to this function.
*/
int bio_associate_current(struct bio *bio)
{
struct io_context *ioc;
struct cgroup_subsys_state *css;
if (bio->bi_ioc)
return -EBUSY;
ioc = current->io_context;
if (!ioc)
return -ENOENT;
/* acquire active ref on @ioc and associate */
get_io_context_active(ioc);
bio->bi_ioc = ioc;
/* associate blkcg if exists */
rcu_read_lock();
css = task_subsys_state(current, blkio_subsys_id);
if (css && css_tryget(css))
bio->bi_css = css;
rcu_read_unlock();
return 0;
}
/**
* bio_disassociate_task - undo bio_associate_current()
* @bio: target bio
*/
void bio_disassociate_task(struct bio *bio)
{
if (bio->bi_ioc) {
put_io_context(bio->bi_ioc);
bio->bi_ioc = NULL;
}
if (bio->bi_css) {
css_put(bio->bi_css);
bio->bi_css = NULL;
}
}
#endif /* CONFIG_BLK_CGROUP */
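
bio_associate_current() gives submitters a way to tag a bio with the context of the task it is really being issued for; the hunk in bio_put() above then drops those references automatically on the final put. A sketch of a caller (foo_tag_and_submit() is illustrative and assumes %current->io_context already exists, as the comment above requires):

/* Sketch: charge @bio to %current's ioc/blkcg even if a helper thread
 * ends up issuing it later. */
static int foo_tag_and_submit(struct bio *bio)
{
	int ret;

	ret = bio_associate_current(bio);
	if (ret && ret != -EBUSY)	/* -EBUSY: already associated */
		return ret;

	submit_bio(bio->bi_rw, bio);
	/* no explicit bio_disassociate_task(): the final bio_put()
	 * drops the ioc and css references */
	return 0;
}
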
static void __init biovec_init_slabs(void)
{
int i;
......
@@ -50,7 +50,7 @@ int set_task_ioprio(struct task_struct *task, int ioprio)
ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
if (ioc) {
ioc_ioprio_changed(ioc, ioprio);
ioc->ioprio = ioprio;
put_io_context(ioc);
}
......
@@ -1388,7 +1388,7 @@ static long do_splice(struct file *in, loff_t __user *off_in,
*/
static int get_iovec_page_array(const struct iovec __user *iov,
unsigned int nr_vecs, struct page **pages,
struct partial_page *partial, int aligned,
struct partial_page *partial, bool aligned,
unsigned int pipe_buffers)
{
int buffers = 0, error = 0;
@@ -1626,7 +1626,7 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
return -ENOMEM;
spd.nr_pages = get_iovec_page_array(iov, nr_segs, spd.pages,
spd.partial, flags & SPLICE_F_GIFT,
spd.partial, false,
pipe->buffers);
if (spd.nr_pages <= 0)
ret = spd.nr_pages;
......
@@ -269,6 +269,14 @@ extern struct bio_vec *bvec_alloc_bs(gfp_t, int, unsigned long *, struct bio_set
extern void bvec_free_bs(struct bio_set *, struct bio_vec *, unsigned int);
extern unsigned int bvec_nr_vecs(unsigned short idx);
#ifdef CONFIG_BLK_CGROUP
int bio_associate_current(struct bio *bio);
void bio_disassociate_task(struct bio *bio);
#else /* CONFIG_BLK_CGROUP */
static inline int bio_associate_current(struct bio *bio) { return -ENOENT; }
static inline void bio_disassociate_task(struct bio *bio) { }
#endif /* CONFIG_BLK_CGROUP */
/*
* bio_set is used to allow other portions of the IO system to
* allocate their own private memory pools for bio and iovec structures.
......
@@ -14,6 +14,8 @@ struct bio;
struct bio_integrity_payload;
struct page;
struct block_device;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *, int);
typedef void (bio_destructor_t) (struct bio *);
@@ -66,6 +68,14 @@ struct bio {
bio_end_io_t *bi_end_io;
void *bi_private;
#ifdef CONFIG_BLK_CGROUP
/*
* Optional ioc and css associated with this bio. Put on bio
* release. Read comment on top of bio_associate_current().
*/
struct io_context *bi_ioc;
struct cgroup_subsys_state *bi_css;
#endif
#if defined(CONFIG_BLK_DEV_INTEGRITY)
struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
......
This diff is collapsed. (4 files)
@@ -803,7 +803,7 @@ config RT_GROUP_SCHED
endif #CGROUP_SCHED
config BLK_CGROUP
tristate "Block IO controller"
bool "Block IO controller"
depends on BLOCK
default n
---help---
......
This diff is collapsed.