Commit dddec01e authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block

* 'for-linus' of git://git.kernel.dk/linux-2.6-block: (37 commits)
  splice: fix generic_file_splice_read() race with page invalidation
  ramfs: enable splice write
  drivers/block/pktcdvd.c: avoid useless memset
  cdrom: revert commit 22a9189f (cdrom: use kmalloced buffers instead of buffers on stack)
  scsi: sr avoids useless buffer allocation
  block: blk_rq_map_kern uses the bounce buffers for stack buffers
  block: add blk_queue_update_dma_pad
  DAC960: push down BKL
  pktcdvd: push BKL down into driver
  paride: push ioctl down into driver
  block: use get_unaligned_* helpers
  block: extend queue_flag bitops
  block: request_module(): use format string
  Add bvec_merge_data to handle stacked devices and ->merge_bvec()
  block: integrity flags can't use bit ops on unsigned short
  cmdfilter: extend default read filter
  sg: fix odd style (extra parenthesis) introduced by cmd filter patch
  block: add bounce support to blk_rq_map_user_iov
  cfq-iosched: get rid of enable_idle being unused warning
  allow userspace to modify scsi command filter on per device basis
  ...
parents 7daf705f 32502b84
......@@ -26,3 +26,37 @@ Description:
I/O statistics of partition <part>. The format is the
same as the above-written /sys/block/<disk>/stat
format.
What: /sys/block/<disk>/integrity/format
Date: June 2008
Contact: Martin K. Petersen <martin.petersen@oracle.com>
Description:
Metadata format for integrity capable block device.
E.g. T10-DIF-TYPE1-CRC.
What: /sys/block/<disk>/integrity/read_verify
Date: June 2008
Contact: Martin K. Petersen <martin.petersen@oracle.com>
Description:
Indicates whether the block layer should verify the
integrity of read requests serviced by devices that
support sending integrity metadata.
What: /sys/block/<disk>/integrity/tag_size
Date: June 2008
Contact: Martin K. Petersen <martin.petersen@oracle.com>
Description:
Number of bytes of integrity tag space available per
512 bytes of data.
What: /sys/block/<disk>/integrity/write_generate
Date: June 2008
Contact: Martin K. Petersen <martin.petersen@oracle.com>
Description:
Indicates whether the block layer should automatically
generate checksums for write requests bound for
devices that support receiving integrity metadata.
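These four attributes are plain sysfs text files, so they can be inspected without any tooling. A minimal user-space sketch that dumps them for one disk (the name sda and the presence of an integrity-capable device are assumptions):

#include <stdio.h>

int main(void)
{
        const char *attrs[] = { "format", "read_verify",
                                "tag_size", "write_generate" };
        char path[96], line[64];
        int i;

        for (i = 0; i < 4; i++) {
                FILE *f;

                snprintf(path, sizeof(path),
                         "/sys/block/sda/integrity/%s", attrs[i]);
                f = fopen(path, "r");
                if (f && fgets(line, sizeof(line), f))
                        printf("%-14s %s", attrs[i], line);
                if (f)
                        fclose(f);
        }
        return 0;
}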
......@@ -81,6 +81,18 @@ config BLK_DEV_BSG
If unsure, say N.
config BLK_DEV_INTEGRITY
bool "Block layer data integrity support"
---help---
Some storage devices allow extra information to be
stored/retrieved to help protect the data. The block layer
data integrity option provides hooks which can be used by
filesystems to ensure better data integrity.
Say yes here if you have a storage device that provides the
T10/SCSI Data Integrity Field or the T13/ATA External Path
Protection. If in doubt, say N.
endif # BLOCK
config BLOCK_COMPAT
......
......@@ -4,7 +4,8 @@
obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o \
blk-barrier.o blk-settings.o blk-ioc.o blk-map.o \
blk-exec.o blk-merge.o ioctl.o genhd.o scsi_ioctl.o
blk-exec.o blk-merge.o ioctl.o genhd.o scsi_ioctl.o \
cmd-filter.o
obj-$(CONFIG_BLK_DEV_BSG) += bsg.o
obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o
......@@ -14,3 +15,4 @@ obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o
obj-$(CONFIG_BLK_DEV_IO_TRACE) += blktrace.o
obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o
obj-$(CONFIG_BLK_DEV_INTEGRITY) += blk-integrity.o
......@@ -151,6 +151,7 @@ enum arq_state {
static DEFINE_PER_CPU(unsigned long, ioc_count);
static struct completion *ioc_gone;
static DEFINE_SPINLOCK(ioc_gone_lock);
static void as_move_to_dispatch(struct as_data *ad, struct request *rq);
static void as_antic_stop(struct as_data *ad);
......@@ -164,8 +165,19 @@ static void free_as_io_context(struct as_io_context *aic)
{
kfree(aic);
elv_ioc_count_dec(ioc_count);
if (ioc_gone && !elv_ioc_count_read(ioc_count))
complete(ioc_gone);
if (ioc_gone) {
/*
* AS scheduler is exiting, grab exit lock and check
* the pending io context count. If it hits zero,
* complete ioc_gone and set it back to NULL.
*/
spin_lock(&ioc_gone_lock);
if (ioc_gone && !elv_ioc_count_read(ioc_count)) {
complete(ioc_gone);
ioc_gone = NULL;
}
spin_unlock(&ioc_gone_lock);
}
}
static void as_trim(struct io_context *ioc)
......@@ -1493,7 +1505,7 @@ static void __exit as_exit(void)
/* ioc_gone's update must be visible before reading ioc_count */
smp_wmb();
if (elv_ioc_count_read(ioc_count))
wait_for_completion(ioc_gone);
wait_for_completion(&all_gone);
synchronize_rcu();
}
......
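Both hunks above implement one handshake: as_exit() publishes a stack completion through ioc_gone and waits for outstanding io contexts to drain, while free_as_io_context() runs from RCU callbacks and can race with itself. Without ioc_gone_lock, two concurrent frees could each see the count hit zero and call complete() twice, the second time after as_exit()'s stack frame (and the completion living on it) is gone. Condensed into a sketch, with the wait side inferred from the wait_for_completion(&all_gone) hunk:

static struct completion *ioc_gone;
static DEFINE_SPINLOCK(ioc_gone_lock);

static void free_side(void)             /* free_as_io_context(), condensed */
{
        elv_ioc_count_dec(ioc_count);
        if (ioc_gone) {
                spin_lock(&ioc_gone_lock);
                /* re-check under the lock: only one freer may complete */
                if (ioc_gone && !elv_ioc_count_read(ioc_count)) {
                        complete(ioc_gone);
                        ioc_gone = NULL;        /* fire exactly once */
                }
                spin_unlock(&ioc_gone_lock);
        }
}

static void exit_side(void)             /* as_exit(), condensed */
{
        DECLARE_COMPLETION_ONSTACK(all_gone);

        ioc_gone = &all_gone;
        /* ioc_gone's update must be visible before reading ioc_count */
        smp_wmb();
        if (elv_ioc_count_read(ioc_count))
                wait_for_completion(&all_gone);
        synchronize_rcu();
}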
......@@ -143,6 +143,10 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
bio->bi_size -= nbytes;
bio->bi_sector += (nbytes >> 9);
if (bio_integrity(bio))
bio_integrity_advance(bio, nbytes);
if (bio->bi_size == 0)
bio_endio(bio, error);
} else {
......@@ -201,8 +205,7 @@ void blk_plug_device(struct request_queue *q)
if (blk_queue_stopped(q))
return;
if (!test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
__set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags);
if (!queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q)) {
mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
}
......@@ -217,10 +220,9 @@ int blk_remove_plug(struct request_queue *q)
{
WARN_ON(!irqs_disabled());
if (!test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
if (!queue_flag_test_and_clear(QUEUE_FLAG_PLUGGED, q))
return 0;
queue_flag_clear(QUEUE_FLAG_PLUGGED, q);
del_timer(&q->unplug_timer);
return 1;
}
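The plug/unplug hunks above and the two REENTER hunks below all make the same substitution: an open-coded test_bit followed by a separate set or clear becomes a single test-and-modify helper. Queue flags are serialized by the queue lock, so the helpers can plausibly stay non-atomic and merely assert locking; a sketch of the shape (the exact body added to blkdev.h is an assumption):

static inline int queue_flag_test_and_set(unsigned int flag,
                                          struct request_queue *q)
{
        WARN_ON_ONCE(!queue_is_locked(q));

        if (test_bit(flag, &q->queue_flags))
                return 1;

        __set_bit(flag, &q->queue_flags);
        return 0;
}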
......@@ -324,8 +326,7 @@ void blk_start_queue(struct request_queue *q)
* one level of recursion is ok and is much faster than kicking
* the unplug handling
*/
if (!test_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
queue_flag_set(QUEUE_FLAG_REENTER, q);
if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
q->request_fn(q);
queue_flag_clear(QUEUE_FLAG_REENTER, q);
} else {
......@@ -390,8 +391,7 @@ void __blk_run_queue(struct request_queue *q)
* handling reinvoke the handler shortly if we already got there.
*/
if (!elv_queue_empty(q)) {
if (!test_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
queue_flag_set(QUEUE_FLAG_REENTER, q);
if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
q->request_fn(q);
queue_flag_clear(QUEUE_FLAG_REENTER, q);
} else {
......@@ -1381,6 +1381,9 @@ static inline void __generic_make_request(struct bio *bio)
*/
blk_partition_remap(bio);
if (bio_integrity_enabled(bio) && bio_integrity_prep(bio))
goto end_io;
if (old_sector != -1)
blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
old_sector);
......
......@@ -210,6 +210,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
if (!bio_flagged(bio, BIO_USER_MAPPED))
rq->cmd_flags |= REQ_COPY_USER;
blk_queue_bounce(q, &bio);
bio_get(bio);
blk_rq_bio_prep(q, rq, bio);
rq->buffer = rq->data = NULL;
......@@ -268,6 +269,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
int reading = rq_data_dir(rq) == READ;
int do_copy = 0;
struct bio *bio;
unsigned long stack_mask = ~(THREAD_SIZE - 1);
if (len > (q->max_hw_sectors << 9))
return -EINVAL;
......@@ -278,6 +280,10 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
alignment = queue_dma_alignment(q) | q->dma_pad_mask;
do_copy = ((kaddr & alignment) || (len & alignment));
if (!((kaddr & stack_mask) ^
((unsigned long)current->stack & stack_mask)))
do_copy = 1;
if (do_copy)
bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
else
......
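The new check in blk_rq_map_kern() relies on kernel stacks being THREAD_SIZE-sized and THREAD_SIZE-aligned: kbuf lives on the current task's stack exactly when it shares a THREAD_SIZE-aligned base with current->stack, and such buffers must be bounced through bio_copy_kern() rather than DMA-mapped (the sr_probe change later in this merge puts its READ_CAPACITY buffer back on the stack, relying on exactly this bounce). A user-space analogue of the mask comparison:

#include <stdio.h>

#define THREAD_SIZE 8192UL      /* illustrative; arch-specific in the kernel */

/* two addresses sit in the same THREAD_SIZE-aligned region iff
 * masking both with ~(THREAD_SIZE - 1) yields the same base */
static int on_same_stack(unsigned long a, unsigned long b)
{
        unsigned long stack_mask = ~(THREAD_SIZE - 1);

        return !((a & stack_mask) ^ (b & stack_mask));
}

int main(void)
{
        char local;
        unsigned long sp = (unsigned long)&local;

        printf("%d %d\n", on_same_stack(sp, sp + 8),
               on_same_stack(sp, sp + 4 * THREAD_SIZE));
        return 0;
}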
......@@ -441,6 +441,9 @@ static int attempt_merge(struct request_queue *q, struct request *req,
|| next->special)
return 0;
if (blk_integrity_rq(req) != blk_integrity_rq(next))
return 0;
/*
* If we are allowed to merge, then append bio list
* from next to rq and release next. merge_requests_fn
......
......@@ -302,11 +302,10 @@ EXPORT_SYMBOL(blk_queue_stack_limits);
* @q: the request queue for the device
* @mask: pad mask
*
* Set pad mask. Direct IO requests are padded to the mask specified.
* Set dma pad mask.
*
* Appending pad buffer to a request modifies ->data_len such that it
* includes the pad buffer. The original requested data length can be
* obtained using blk_rq_raw_data_len().
* Appending pad buffer to a request modifies the last entry of a
* scatter list such that it includes the pad buffer.
**/
void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
{
......@@ -314,6 +313,23 @@ void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
}
EXPORT_SYMBOL(blk_queue_dma_pad);
/**
* blk_queue_update_dma_pad - update pad mask
* @q: the request queue for the device
* @mask: pad mask
*
* Update dma pad mask.
*
* Appending pad buffer to a request modifies the last entry of a
* scatter list such that it includes the pad buffer.
**/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
if (mask > q->dma_pad_mask)
q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);
/**
* blk_queue_dma_drain - Set up a drain buffer for excess dma.
* @q: the request queue for the device
......
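The distinction between the two exports matters for stacked configuration: blk_queue_dma_pad() overwrites the mask, while the new blk_queue_update_dma_pad() only ever grows it, so a caller such as ata_scsi_dev_config() (converted later in this merge) can raise the padding requirement without clobbering a stricter mask already in place. A self-contained model of the difference:

#include <stdio.h>

struct request_queue { unsigned int dma_pad_mask; };    /* model only */

/* models blk_queue_dma_pad(): unconditional overwrite */
static void set_pad(struct request_queue *q, unsigned int mask)
{
        q->dma_pad_mask = mask;
}

/* models blk_queue_update_dma_pad(): grow-only, as in the hunk above */
static void update_pad(struct request_queue *q, unsigned int mask)
{
        if (mask > q->dma_pad_mask)
                q->dma_pad_mask = mask;
}

int main(void)
{
        struct request_queue q = { .dma_pad_mask = 3 };

        update_pad(&q, 1);                      /* 1 < 3: kept at 3 */
        printf("%u\n", q.dma_pad_mask);
        set_pad(&q, 1);                         /* overwritten to 1 */
        printf("%u\n", q.dma_pad_mask);
        return 0;
}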
......@@ -51,4 +51,12 @@ static inline int queue_congestion_off_threshold(struct request_queue *q)
return q->nr_congestion_off;
}
#if defined(CONFIG_BLK_DEV_INTEGRITY)
#define rq_for_each_integrity_segment(bvl, _rq, _iter) \
__rq_for_each_bio(_iter.bio, _rq) \
bip_for_each_vec(bvl, _iter.bio->bi_integrity, _iter.i)
#endif /* BLK_DEV_INTEGRITY */
#endif
......@@ -244,6 +244,7 @@ static struct dentry *blk_create_tree(const char *blk_name)
static void blk_trace_cleanup(struct blk_trace *bt)
{
relay_close(bt->rchan);
debugfs_remove(bt->msg_file);
debugfs_remove(bt->dropped_file);
blk_remove_tree(bt->dir);
free_percpu(bt->sequence);
......@@ -291,6 +292,44 @@ static const struct file_operations blk_dropped_fops = {
.read = blk_dropped_read,
};
static int blk_msg_open(struct inode *inode, struct file *filp)
{
filp->private_data = inode->i_private;
return 0;
}
static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
size_t count, loff_t *ppos)
{
char *msg;
struct blk_trace *bt;
if (count > BLK_TN_MAX_MSG)
return -EINVAL;
msg = kmalloc(count, GFP_KERNEL);
if (msg == NULL)
return -ENOMEM;
if (copy_from_user(msg, buffer, count)) {
kfree(msg);
return -EFAULT;
}
bt = filp->private_data;
__trace_note_message(bt, "%s", msg);
kfree(msg);
return count;
}
static const struct file_operations blk_msg_fops = {
.owner = THIS_MODULE,
.open = blk_msg_open,
.write = blk_msg_write,
};
/*
* Keep track of how many times we encountered a full subbuffer, to aid
* the user space app in telling how many lost events there were.
......@@ -380,6 +419,10 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
if (!bt->dropped_file)
goto err;
bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);
if (!bt->msg_file)
goto err;
bt->rchan = relay_open("trace", dir, buts->buf_size,
buts->buf_nr, &blk_relay_callbacks, bt);
if (!bt->rchan)
......@@ -409,6 +452,8 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
if (dir)
blk_remove_tree(dir);
if (bt) {
if (bt->msg_file)
debugfs_remove(bt->msg_file);
if (bt->dropped_file)
debugfs_remove(bt->dropped_file);
free_percpu(bt->sequence);
......
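With the msg file in place, user space can stamp its own markers into a running blktrace stream and they appear inline with the I/O events via __trace_note_message(). A minimal sketch (assumes debugfs mounted at /sys/kernel/debug, a trace already active on sda through BLKTRACESETUP, and a message shorter than BLK_TN_MAX_MSG; the file is write-only, mode 0222, per the hunk above):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        const char msg[] = "benchmark phase 2 start";
        int fd = open("/sys/kernel/debug/block/sda/msg", O_WRONLY);

        if (fd < 0)
                return 1;
        (void)write(fd, msg, strlen(msg));      /* lands in the trace */
        close(fd);
        return 0;
}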
......@@ -44,11 +44,12 @@ struct bsg_device {
char name[BUS_ID_SIZE];
int max_queue;
unsigned long flags;
struct blk_scsi_cmd_filter *cmd_filter;
mode_t *f_mode;
};
enum {
BSG_F_BLOCK = 1,
BSG_F_WRITE_PERM = 2,
};
#define BSG_DEFAULT_CMDS 64
......@@ -172,7 +173,7 @@ static int bsg_io_schedule(struct bsg_device *bd)
}
static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
struct sg_io_v4 *hdr, int has_write_perm)
struct sg_io_v4 *hdr, struct bsg_device *bd)
{
if (hdr->request_len > BLK_MAX_CDB) {
rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
......@@ -185,7 +186,8 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
return -EFAULT;
if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
if (blk_verify_command(rq->cmd, has_write_perm))
if (blk_cmd_filter_verify_command(bd->cmd_filter, rq->cmd,
bd->f_mode))
return -EPERM;
} else if (!capable(CAP_SYS_RAWIO))
return -EPERM;
......@@ -263,8 +265,7 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr)
rq = blk_get_request(q, rw, GFP_KERNEL);
if (!rq)
return ERR_PTR(-ENOMEM);
ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, test_bit(BSG_F_WRITE_PERM,
&bd->flags));
ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd);
if (ret)
goto out;
......@@ -566,12 +567,23 @@ static inline void bsg_set_block(struct bsg_device *bd, struct file *file)
set_bit(BSG_F_BLOCK, &bd->flags);
}
static inline void bsg_set_write_perm(struct bsg_device *bd, struct file *file)
static void bsg_set_cmd_filter(struct bsg_device *bd,
struct file *file)
{
if (file->f_mode & FMODE_WRITE)
set_bit(BSG_F_WRITE_PERM, &bd->flags);
else
clear_bit(BSG_F_WRITE_PERM, &bd->flags);
struct inode *inode;
struct gendisk *disk;
if (!file)
return;
inode = file->f_dentry->d_inode;
if (!inode)
return;
disk = inode->i_bdev->bd_disk;
bd->cmd_filter = &disk->cmd_filter;
bd->f_mode = &file->f_mode;
}
/*
......@@ -595,6 +607,8 @@ bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
dprintk("%s: read %Zd bytes\n", bd->name, count);
bsg_set_block(bd, file);
bsg_set_cmd_filter(bd, file);
bytes_read = 0;
ret = __bsg_read(buf, count, bd, NULL, &bytes_read);
*ppos = bytes_read;
......@@ -668,7 +682,7 @@ bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
dprintk("%s: write %Zd bytes\n", bd->name, count);
bsg_set_block(bd, file);
bsg_set_write_perm(bd, file);
bsg_set_cmd_filter(bd, file);
bytes_written = 0;
ret = __bsg_write(bd, buf, count, &bytes_written);
......@@ -772,7 +786,9 @@ static struct bsg_device *bsg_add_device(struct inode *inode,
}
bd->queue = rq;
bsg_set_block(bd, file);
bsg_set_cmd_filter(bd, file);
atomic_set(&bd->ref_count, 1);
mutex_lock(&bsg_mutex);
......
......@@ -11,6 +11,7 @@
#include <linux/elevator.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/blktrace_api.h>
/*
* tunables
......@@ -41,13 +42,14 @@ static int cfq_slice_idle = HZ / 125;
#define RQ_CIC(rq) \
((struct cfq_io_context *) (rq)->elevator_private)
#define RQ_CFQQ(rq) ((rq)->elevator_private2)
#define RQ_CFQQ(rq) (struct cfq_queue *) ((rq)->elevator_private2)
static struct kmem_cache *cfq_pool;
static struct kmem_cache *cfq_ioc_pool;
static DEFINE_PER_CPU(unsigned long, ioc_count);
static struct completion *ioc_gone;
static DEFINE_SPINLOCK(ioc_gone_lock);
#define CFQ_PRIO_LISTS IOPRIO_BE_NR
#define cfq_class_idle(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
......@@ -155,6 +157,7 @@ struct cfq_queue {
unsigned short ioprio, org_ioprio;
unsigned short ioprio_class, org_ioprio_class;
pid_t pid;
};
enum cfqq_state_flags {
......@@ -198,6 +201,11 @@ CFQ_CFQQ_FNS(slice_new);
CFQ_CFQQ_FNS(sync);
#undef CFQ_CFQQ_FNS
#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
#define cfq_log(cfqd, fmt, args...) \
blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
static void cfq_dispatch_insert(struct request_queue *, struct request *);
static struct cfq_queue *cfq_get_queue(struct cfq_data *, int,
struct io_context *, gfp_t);
......@@ -234,8 +242,10 @@ static inline int cfq_bio_sync(struct bio *bio)
*/
static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
{
if (cfqd->busy_queues)
if (cfqd->busy_queues) {
cfq_log(cfqd, "schedule dispatch");
kblockd_schedule_work(&cfqd->unplug_work);
}
}
static int cfq_queue_empty(struct request_queue *q)
......@@ -270,6 +280,7 @@ static inline void
cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
}
/*
......@@ -539,6 +550,7 @@ static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
*/
static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
BUG_ON(cfq_cfqq_on_rr(cfqq));
cfq_mark_cfqq_on_rr(cfqq);
cfqd->busy_queues++;
......@@ -552,6 +564,7 @@ static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
*/
static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
BUG_ON(!cfq_cfqq_on_rr(cfqq));
cfq_clear_cfqq_on_rr(cfqq);
......@@ -638,6 +651,8 @@ static void cfq_activate_request(struct request_queue *q, struct request *rq)
struct cfq_data *cfqd = q->elevator->elevator_data;
cfqd->rq_in_driver++;
cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
cfqd->rq_in_driver);
/*
* If the depth is larger 1, it really could be queueing. But lets
......@@ -657,6 +672,8 @@ static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
WARN_ON(!cfqd->rq_in_driver);
cfqd->rq_in_driver--;
cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
cfqd->rq_in_driver);
}
static void cfq_remove_request(struct request *rq)
......@@ -746,6 +763,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
struct cfq_queue *cfqq)
{
if (cfqq) {
cfq_log_cfqq(cfqd, cfqq, "set_active");
cfqq->slice_end = 0;
cfq_clear_cfqq_must_alloc_slice(cfqq);
cfq_clear_cfqq_fifo_expire(cfqq);
......@@ -763,6 +781,8 @@ static void
__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
int timed_out)
{
cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
if (cfq_cfqq_wait_request(cfqq))
del_timer(&cfqd->idle_slice_timer);
......@@ -772,8 +792,10 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
/*
* store what was left of this slice, if the queue idled/timed out
*/
if (timed_out && !cfq_cfqq_slice_new(cfqq))
if (timed_out && !cfq_cfqq_slice_new(cfqq)) {
cfqq->slice_resid = cfqq->slice_end - jiffies;
cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
}
cfq_resort_rr_list(cfqd, cfqq);
......@@ -865,6 +887,12 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
if (!cfqd->cfq_slice_idle || !cfq_cfqq_idle_window(cfqq))
return;
/*
* still requests with the driver, don't idle
*/
if (cfqd->rq_in_driver)
return;
/*
* task has exited, don't wait
*/
......@@ -892,6 +920,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
sl = min(sl, msecs_to_jiffies(CFQ_MIN_TT));
mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
cfq_log(cfqd, "arm_idle: %lu", sl);
}
/*
......@@ -902,6 +931,8 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
struct cfq_data *cfqd = q->elevator->elevator_data;
struct cfq_queue *cfqq = RQ_CFQQ(rq);
cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
cfq_remove_request(rq);
cfqq->dispatched++;
elv_dispatch_sort(q, rq);
......@@ -931,8 +962,9 @@ static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
rq = rq_entry_fifo(cfqq->fifo.next);
if (time_before(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo]))
return NULL;
rq = NULL;
cfq_log_cfqq(cfqd, cfqq, "fifo=%p", rq);
return rq;
}
......@@ -1072,6 +1104,7 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd)
BUG_ON(cfqd->busy_queues);
cfq_log(cfqd, "forced_dispatch=%d\n", dispatched);
return dispatched;
}
......@@ -1112,6 +1145,7 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
dispatched += __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
}
cfq_log(cfqd, "dispatched=%d", dispatched);
return dispatched;
}
......@@ -1130,6 +1164,7 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
if (!atomic_dec_and_test(&cfqq->ref))
return;
cfq_log_cfqq(cfqd, cfqq, "put_queue");
BUG_ON(rb_first(&cfqq->sort_list));
BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
BUG_ON(cfq_cfqq_on_rr(cfqq));
......@@ -1177,8 +1212,19 @@ static void cfq_cic_free_rcu(struct rcu_head *head)
kmem_cache_free(cfq_ioc_pool, cic);
elv_ioc_count_dec(ioc_count);
if (ioc_gone && !elv_ioc_count_read(ioc_count))
complete(ioc_gone);
if (ioc_gone) {
/*
* CFQ scheduler is exiting, grab exit lock and check
* the pending io context count. If it hits zero,
* complete ioc_gone and set it back to NULL
*/
spin_lock(&ioc_gone_lock);
if (ioc_gone && !elv_ioc_count_read(ioc_count)) {
complete(ioc_gone);
ioc_gone = NULL;
}
spin_unlock(&ioc_gone_lock);
}
}
static void cfq_cic_free(struct cfq_io_context *cic)
......@@ -1427,6 +1473,8 @@ cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync,
cfq_mark_cfqq_idle_window(cfqq);
cfq_mark_cfqq_sync(cfqq);
}
cfqq->pid = current->pid;
cfq_log_cfqq(cfqd, cfqq, "alloced");
}
if (new_cfqq)
......@@ -1675,7 +1723,7 @@ static void
cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
struct cfq_io_context *cic)
{
int enable_idle;
int old_idle, enable_idle;
/*
* Don't idle for async or idle io prio class
......@@ -1683,7 +1731,7 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
return;
enable_idle = cfq_cfqq_idle_window(cfqq);
enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
(cfqd->hw_tag && CIC_SEEKY(cic)))
......@@ -1695,10 +1743,13 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
enable_idle = 1;
}
if (enable_idle)
cfq_mark_cfqq_idle_window(cfqq);
else
cfq_clear_cfqq_idle_window(cfqq);
if (old_idle != enable_idle) {
cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
if (enable_idle)
cfq_mark_cfqq_idle_window(cfqq);
else
cfq_clear_cfqq_idle_window(cfqq);
}
}
/*
......@@ -1757,6 +1808,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
*/
static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
cfq_log_cfqq(cfqd, cfqq, "preempt");
cfq_slice_expired(cfqd, 1);
/*
......@@ -1818,6 +1870,7 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
struct cfq_data *cfqd = q->elevator->elevator_data;
struct cfq_queue *cfqq = RQ_CFQQ(rq);
cfq_log_cfqq(cfqd, cfqq, "insert_request");
cfq_init_prio_data(cfqq, RQ_CIC(rq)->ioc);
cfq_add_rq_rb(rq);
......@@ -1835,6 +1888,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
unsigned long now;
now = jiffies;
cfq_log_cfqq(cfqd, cfqq, "complete");
WARN_ON(!cfqd->rq_in_driver);
WARN_ON(!cfqq->dispatched);
......@@ -2004,6 +2058,7 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
cfq_schedule_dispatch(cfqd);
spin_unlock_irqrestore(q->queue_lock, flags);
cfq_log(cfqd, "set_request fail");
return 1;
}
......@@ -2029,6 +2084,8 @@ static void cfq_idle_slice_timer(unsigned long data)
unsigned long flags;
int timed_out = 1;
cfq_log(cfqd, "idle timer fired");
spin_lock_irqsave(cfqd->queue->queue_lock, flags);
cfqq = cfqd->active_queue;
......@@ -2317,7 +2374,7 @@ static void __exit cfq_exit(void)
* pending RCU callbacks
*/
if (elv_ioc_count_read(ioc_count))
wait_for_completion(ioc_gone);
wait_for_completion(&all_gone);
cfq_slab_kill();
}
......
/*
* Copyright 2004 Peter M. Jones <pjones@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
*
*/
#include <linux/list.h>
#include <linux/genhd.h>
#include <linux/spinlock.h>
#include <linux/parser.h>
#include <linux/capability.h>
#include <linux/bitops.h>
#include <scsi/scsi.h>
#include <linux/cdrom.h>
int blk_cmd_filter_verify_command(struct blk_scsi_cmd_filter *filter,
unsigned char *cmd, mode_t *f_mode)
{
/* root can do any command. */
if (capable(CAP_SYS_RAWIO))
return 0;
/* if there's no filter set, assume we're filtering everything out */
if (!filter)
return -EPERM;
/* Anybody who can open the device can do a read-safe command */
if (test_bit(cmd[0], filter->read_ok))
return 0;
/* Write-safe commands require a writable open */
if (test_bit(cmd[0], filter->write_ok) && (*f_mode & FMODE_WRITE))
return 0;
return -EPERM;
}
EXPORT_SYMBOL(blk_cmd_filter_verify_command);
int blk_verify_command(struct file *file, unsigned char *cmd)
{
struct gendisk *disk;
struct inode *inode;
if (!file)
return -EINVAL;
inode = file->f_dentry->d_inode;
if (!inode)
return -EINVAL;
disk = inode->i_bdev->bd_disk;
return blk_cmd_filter_verify_command(&disk->cmd_filter,
cmd, &file->f_mode);
}
EXPORT_SYMBOL(blk_verify_command);
/* and now, the sysfs stuff */
static ssize_t rcf_cmds_show(struct blk_scsi_cmd_filter *filter, char *page,
int rw)
{
char *npage = page;
unsigned long *okbits;
int i;
if (rw == READ)
okbits = filter->read_ok;
else
okbits = filter->write_ok;
for (i = 0; i < BLK_SCSI_MAX_CMDS; i++) {
if (test_bit(i, okbits)) {
sprintf(npage, "%02x", i);
npage += 2;
if (i < BLK_SCSI_MAX_CMDS - 1)
sprintf(npage++, " ");
}
}
if (npage != page)
npage += sprintf(npage, "\n");
return npage - page;
}
static ssize_t rcf_readcmds_show(struct blk_scsi_cmd_filter *filter, char *page)
{
return rcf_cmds_show(filter, page, READ);
}
static ssize_t rcf_writecmds_show(struct blk_scsi_cmd_filter *filter,
char *page)
{
return rcf_cmds_show(filter, page, WRITE);
}
static ssize_t rcf_cmds_store(struct blk_scsi_cmd_filter *filter,
const char *page, size_t count, int rw)
{
ssize_t ret = 0;
unsigned long okbits[BLK_SCSI_CMD_PER_LONG], *target_okbits;
int cmd, status, len;
substring_t ss;
memset(&okbits, 0, sizeof(okbits));
for (len = strlen(page); len > 0; len -= 3) {
if (len < 2)
break;
ss.from = (char *) page + ret;
ss.to = (char *) page + ret + 2;
ret += 3;
status = match_hex(&ss, &cmd);
/* either of these cases means invalid input, so do nothing. */
if (status || cmd >= BLK_SCSI_MAX_CMDS)
return -EINVAL;
__set_bit(cmd, okbits);
}
if (rw == READ)
target_okbits = filter->read_ok;
else
target_okbits = filter->write_ok;
memmove(target_okbits, okbits, sizeof(okbits));
return count;
}
static ssize_t rcf_readcmds_store(struct blk_scsi_cmd_filter *filter,
const char *page, size_t count)
{
return rcf_cmds_store(filter, page, count, READ);
}
static ssize_t rcf_writecmds_store(struct blk_scsi_cmd_filter *filter,
const char *page, size_t count)
{
return rcf_cmds_store(filter, page, count, WRITE);
}
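Together with blk_register_filter() below, these handlers give root a per-device knob under /sys/block/<disk>/cmd_filter/. The store side zeroes a scratch bitmap, parses space-separated two-digit hex opcodes into it, and then swaps in the whole table, so writes are wholesale rather than incremental. Installing a hypothetical three-opcode read whitelist on one disk might look like this (the disk name sda is an assumption; the writer needs CAP_SYS_RAWIO, per rcf_attr_store() below):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        /* hypothetical whitelist: INQUIRY, READ_CAPACITY, MODE SENSE */
        const char cmds[] = "12 25 1a";
        int fd = open("/sys/block/sda/cmd_filter/read_table", O_WRONLY);

        if (fd < 0)
                return 1;
        (void)write(fd, cmds, strlen(cmds));    /* replaces the read_ok table */
        close(fd);
        return 0;
}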
struct rcf_sysfs_entry {
struct attribute attr;
ssize_t (*show)(struct blk_scsi_cmd_filter *, char *);
ssize_t (*store)(struct blk_scsi_cmd_filter *, const char *, size_t);
};
static struct rcf_sysfs_entry rcf_readcmds_entry = {
.attr = { .name = "read_table", .mode = S_IRUGO | S_IWUSR },
.show = rcf_readcmds_show,
.store = rcf_readcmds_store,
};
static struct rcf_sysfs_entry rcf_writecmds_entry = {
.attr = {.name = "write_table", .mode = S_IRUGO | S_IWUSR },
.show = rcf_writecmds_show,
.store = rcf_writecmds_store,
};
static struct attribute *default_attrs[] = {
&rcf_readcmds_entry.attr,
&rcf_writecmds_entry.attr,
NULL,
};
#define to_rcf(atr) container_of((atr), struct rcf_sysfs_entry, attr)
static ssize_t
rcf_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
struct rcf_sysfs_entry *entry = to_rcf(attr);
struct blk_scsi_cmd_filter *filter;
filter = container_of(kobj, struct blk_scsi_cmd_filter, kobj);
if (entry->show)
return entry->show(filter, page);
return 0;
}
static ssize_t
rcf_attr_store(struct kobject *kobj, struct attribute *attr,
const char *page, size_t length)
{
struct rcf_sysfs_entry *entry = to_rcf(attr);
struct blk_scsi_cmd_filter *filter;
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
if (!entry->store)
return -EINVAL;
filter = container_of(kobj, struct blk_scsi_cmd_filter, kobj);
return entry->store(filter, page, length);
}
static struct sysfs_ops rcf_sysfs_ops = {
.show = rcf_attr_show,
.store = rcf_attr_store,
};
static struct kobj_type rcf_ktype = {
.sysfs_ops = &rcf_sysfs_ops,
.default_attrs = default_attrs,
};
#ifndef MAINTENANCE_IN_CMD
#define MAINTENANCE_IN_CMD 0xa3
#endif
static void rcf_set_defaults(struct blk_scsi_cmd_filter *filter)
{
/* Basic read-only commands */
__set_bit(TEST_UNIT_READY, filter->read_ok);
__set_bit(REQUEST_SENSE, filter->read_ok);
__set_bit(READ_6, filter->read_ok);
__set_bit(READ_10, filter->read_ok);
__set_bit(READ_12, filter->read_ok);
__set_bit(READ_16, filter->read_ok);
__set_bit(READ_BUFFER, filter->read_ok);
__set_bit(READ_DEFECT_DATA, filter->read_ok);
__set_bit(READ_CAPACITY, filter->read_ok);
__set_bit(READ_LONG, filter->read_ok);
__set_bit(INQUIRY, filter->read_ok);
__set_bit(MODE_SENSE, filter->read_ok);
__set_bit(MODE_SENSE_10, filter->read_ok);
__set_bit(LOG_SENSE, filter->read_ok);
__set_bit(START_STOP, filter->read_ok);
__set_bit(GPCMD_VERIFY_10, filter->read_ok);
__set_bit(VERIFY_16, filter->read_ok);
__set_bit(REPORT_LUNS, filter->read_ok);
__set_bit(SERVICE_ACTION_IN, filter->read_ok);
__set_bit(RECEIVE_DIAGNOSTIC, filter->read_ok);
__set_bit(MAINTENANCE_IN_CMD, filter->read_ok);
__set_bit(GPCMD_READ_BUFFER_CAPACITY, filter->read_ok);
/* Audio CD commands */
__set_bit(GPCMD_PLAY_CD, filter->read_ok);
__set_bit(GPCMD_PLAY_AUDIO_10, filter->read_ok);
__set_bit(GPCMD_PLAY_AUDIO_MSF, filter->read_ok);
__set_bit(GPCMD_PLAY_AUDIO_TI, filter->read_ok);
__set_bit(GPCMD_PAUSE_RESUME, filter->read_ok);
/* CD/DVD data reading */
__set_bit(GPCMD_READ_CD, filter->read_ok);
__set_bit(GPCMD_READ_CD_MSF, filter->read_ok);
__set_bit(GPCMD_READ_DISC_INFO, filter->read_ok);
__set_bit(GPCMD_READ_CDVD_CAPACITY, filter->read_ok);
__set_bit(GPCMD_READ_DVD_STRUCTURE, filter->read_ok);
__set_bit(GPCMD_READ_HEADER, filter->read_ok);
__set_bit(GPCMD_READ_TRACK_RZONE_INFO, filter->read_ok);
__set_bit(GPCMD_READ_SUBCHANNEL, filter->read_ok);
__set_bit(GPCMD_READ_TOC_PMA_ATIP, filter->read_ok);
__set_bit(GPCMD_REPORT_KEY, filter->read_ok);
__set_bit(GPCMD_SCAN, filter->read_ok);
__set_bit(GPCMD_GET_CONFIGURATION, filter->read_ok);
__set_bit(GPCMD_READ_FORMAT_CAPACITIES, filter->read_ok);
__set_bit(GPCMD_GET_EVENT_STATUS_NOTIFICATION, filter->read_ok);
__set_bit(GPCMD_GET_PERFORMANCE, filter->read_ok);
__set_bit(GPCMD_SEEK, filter->read_ok);
__set_bit(GPCMD_STOP_PLAY_SCAN, filter->read_ok);
/* Basic writing commands */
__set_bit(WRITE_6, filter->write_ok);
__set_bit(WRITE_10, filter->write_ok);
__set_bit(WRITE_VERIFY, filter->write_ok);
__set_bit(WRITE_12, filter->write_ok);
__set_bit(WRITE_VERIFY_12, filter->write_ok);
__set_bit(WRITE_16, filter->write_ok);
__set_bit(WRITE_LONG, filter->write_ok);
__set_bit(WRITE_LONG_2, filter->write_ok);
__set_bit(ERASE, filter->write_ok);
__set_bit(GPCMD_MODE_SELECT_10, filter->write_ok);
__set_bit(MODE_SELECT, filter->write_ok);
__set_bit(LOG_SELECT, filter->write_ok);
__set_bit(GPCMD_BLANK, filter->write_ok);
__set_bit(GPCMD_CLOSE_TRACK, filter->write_ok);
__set_bit(GPCMD_FLUSH_CACHE, filter->write_ok);
__set_bit(GPCMD_FORMAT_UNIT, filter->write_ok);
__set_bit(GPCMD_REPAIR_RZONE_TRACK, filter->write_ok);
__set_bit(GPCMD_RESERVE_RZONE_TRACK, filter->write_ok);
__set_bit(GPCMD_SEND_DVD_STRUCTURE, filter->write_ok);
__set_bit(GPCMD_SEND_EVENT, filter->write_ok);
__set_bit(GPCMD_SEND_KEY, filter->write_ok);
__set_bit(GPCMD_SEND_OPC, filter->write_ok);
__set_bit(GPCMD_SEND_CUE_SHEET, filter->write_ok);
__set_bit(GPCMD_SET_SPEED, filter->write_ok);
__set_bit(GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL, filter->write_ok);
__set_bit(GPCMD_LOAD_UNLOAD, filter->write_ok);
__set_bit(GPCMD_SET_STREAMING, filter->write_ok);
}
int blk_register_filter(struct gendisk *disk)
{
int ret;
struct blk_scsi_cmd_filter *filter = &disk->cmd_filter;
struct kobject *parent = kobject_get(disk->holder_dir->parent);
if (!parent)
return -ENODEV;
ret = kobject_init_and_add(&filter->kobj, &rcf_ktype, parent,
"%s", "cmd_filter");
if (ret < 0)
return ret;
rcf_set_defaults(filter);
return 0;
}
void blk_unregister_filter(struct gendisk *disk)
{
struct blk_scsi_cmd_filter *filter = &disk->cmd_filter;
kobject_put(&filter->kobj);
kobject_put(disk->holder_dir->parent);
}
......@@ -86,6 +86,12 @@ int elv_rq_merge_ok(struct request *rq, struct bio *bio)
if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
return 0;
/*
* only merge integrity protected bio into ditto rq
*/
if (bio_integrity(bio) != blk_integrity_rq(rq))
return 0;
if (!elv_iosched_allow_merge(rq, bio))
return 0;
......@@ -144,7 +150,7 @@ static struct elevator_type *elevator_get(const char *name)
else
sprintf(elv, "%s-iosched", name);
request_module(elv);
request_module("%s", elv);
spin_lock(&elv_list_lock);
e = elevator_find(name);
}
......
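The one-character elevator_get() change above is format-string hardening: request_module() takes a printf-style format, and elv is built from a caller-supplied name, so a '%' in that name would otherwise be interpreted as a conversion specifier. The same hazard in user-space miniature:

#include <stdio.h>

int main(void)
{
        const char *name = "evil-%x-iosched";   /* hypothetical tainted input */

        printf("%s\n", name);   /* safe: name passed as data */
        /* printf(name); would consume a bogus vararg for the %x */
        return 0;
}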
......@@ -189,6 +189,7 @@ void add_disk(struct gendisk *disk)
disk->minors, NULL, exact_match, exact_lock, disk);
register_disk(disk);
blk_register_queue(disk);
blk_register_filter(disk);
bdi = &disk->queue->backing_dev_info;
bdi_register_dev(bdi, MKDEV(disk->major, disk->first_minor));
......@@ -200,6 +201,7 @@ EXPORT_SYMBOL(del_gendisk); /* in partitions/check.c */
void unlink_gendisk(struct gendisk *disk)
{
blk_unregister_filter(disk);
sysfs_remove_link(&disk->dev.kobj, "bdi");
bdi_unregister(&disk->queue->backing_dev_info);
blk_unregister_queue(disk);
......@@ -400,6 +402,14 @@ static ssize_t disk_removable_show(struct device *dev,
(disk->flags & GENHD_FL_REMOVABLE ? 1 : 0));
}
static ssize_t disk_ro_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct gendisk *disk = dev_to_disk(dev);
return sprintf(buf, "%d\n", disk->policy ? 1 : 0);
}
static ssize_t disk_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
......@@ -472,6 +482,7 @@ static ssize_t disk_fail_store(struct device *dev,
static DEVICE_ATTR(range, S_IRUGO, disk_range_show, NULL);
static DEVICE_ATTR(removable, S_IRUGO, disk_removable_show, NULL);
static DEVICE_ATTR(ro, S_IRUGO, disk_ro_show, NULL);
static DEVICE_ATTR(size, S_IRUGO, disk_size_show, NULL);
static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL);
static DEVICE_ATTR(stat, S_IRUGO, disk_stat_show, NULL);
......@@ -483,6 +494,7 @@ static struct device_attribute dev_attr_fail =
static struct attribute *disk_attrs[] = {
&dev_attr_range.attr,
&dev_attr_removable.attr,
&dev_attr_ro.attr,
&dev_attr_size.attr,
&dev_attr_capability.attr,
&dev_attr_stat.attr,
......
......@@ -105,120 +105,12 @@ static int sg_emulated_host(struct request_queue *q, int __user *p)
return put_user(1, p);
}
#define CMD_READ_SAFE 0x01
#define CMD_WRITE_SAFE 0x02
#define CMD_WARNED 0x04
#define safe_for_read(cmd) [cmd] = CMD_READ_SAFE
#define safe_for_write(cmd) [cmd] = CMD_WRITE_SAFE
int blk_verify_command(unsigned char *cmd, int has_write_perm)
{
static unsigned char cmd_type[256] = {
/* Basic read-only commands */
safe_for_read(TEST_UNIT_READY),
safe_for_read(REQUEST_SENSE),
safe_for_read(READ_6),
safe_for_read(READ_10),
safe_for_read(READ_12),
safe_for_read(READ_16),
safe_for_read(READ_BUFFER),
safe_for_read(READ_DEFECT_DATA),
safe_for_read(READ_LONG),
safe_for_read(INQUIRY),
safe_for_read(MODE_SENSE),
safe_for_read(MODE_SENSE_10),
safe_for_read(LOG_SENSE),
safe_for_read(START_STOP),
safe_for_read(GPCMD_VERIFY_10),
safe_for_read(VERIFY_16),
/* Audio CD commands */
safe_for_read(GPCMD_PLAY_CD),
safe_for_read(GPCMD_PLAY_AUDIO_10),
safe_for_read(GPCMD_PLAY_AUDIO_MSF),
safe_for_read(GPCMD_PLAY_AUDIO_TI),
safe_for_read(GPCMD_PAUSE_RESUME),
/* CD/DVD data reading */
safe_for_read(GPCMD_READ_BUFFER_CAPACITY),
safe_for_read(GPCMD_READ_CD),
safe_for_read(GPCMD_READ_CD_MSF),
safe_for_read(GPCMD_READ_DISC_INFO),
safe_for_read(GPCMD_READ_CDVD_CAPACITY),
safe_for_read(GPCMD_READ_DVD_STRUCTURE),
safe_for_read(GPCMD_READ_HEADER),
safe_for_read(GPCMD_READ_TRACK_RZONE_INFO),
safe_for_read(GPCMD_READ_SUBCHANNEL),
safe_for_read(GPCMD_READ_TOC_PMA_ATIP),
safe_for_read(GPCMD_REPORT_KEY),
safe_for_read(GPCMD_SCAN),
safe_for_read(GPCMD_GET_CONFIGURATION),
safe_for_read(GPCMD_READ_FORMAT_CAPACITIES),
safe_for_read(GPCMD_GET_EVENT_STATUS_NOTIFICATION),
safe_for_read(GPCMD_GET_PERFORMANCE),
safe_for_read(GPCMD_SEEK),
safe_for_read(GPCMD_STOP_PLAY_SCAN),
/* Basic writing commands */
safe_for_write(WRITE_6),
safe_for_write(WRITE_10),
safe_for_write(WRITE_VERIFY),
safe_for_write(WRITE_12),
safe_for_write(WRITE_VERIFY_12),
safe_for_write(WRITE_16),
safe_for_write(WRITE_LONG),
safe_for_write(WRITE_LONG_2),
safe_for_write(ERASE),
safe_for_write(GPCMD_MODE_SELECT_10),
safe_for_write(MODE_SELECT),
safe_for_write(LOG_SELECT),
safe_for_write(GPCMD_BLANK),
safe_for_write(GPCMD_CLOSE_TRACK),
safe_for_write(GPCMD_FLUSH_CACHE),
safe_for_write(GPCMD_FORMAT_UNIT),
safe_for_write(GPCMD_REPAIR_RZONE_TRACK),
safe_for_write(GPCMD_RESERVE_RZONE_TRACK),
safe_for_write(GPCMD_SEND_DVD_STRUCTURE),
safe_for_write(GPCMD_SEND_EVENT),
safe_for_write(GPCMD_SEND_KEY),
safe_for_write(GPCMD_SEND_OPC),
safe_for_write(GPCMD_SEND_CUE_SHEET),
safe_for_write(GPCMD_SET_SPEED),
safe_for_write(GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL),
safe_for_write(GPCMD_LOAD_UNLOAD),
safe_for_write(GPCMD_SET_STREAMING),
};
unsigned char type = cmd_type[cmd[0]];
/* Anybody who can open the device can do a read-safe command */
if (type & CMD_READ_SAFE)
return 0;
/* Write-safe commands just require a writable open.. */
if ((type & CMD_WRITE_SAFE) && has_write_perm)
return 0;
/* And root can do any command.. */
if (capable(CAP_SYS_RAWIO))
return 0;
if (!type) {
cmd_type[cmd[0]] = CMD_WARNED;
printk(KERN_WARNING "scsi: unknown opcode 0x%02x\n", cmd[0]);
}
/* Otherwise fail it with an "Operation not permitted" */
return -EPERM;
}
EXPORT_SYMBOL_GPL(blk_verify_command);
static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
struct sg_io_hdr *hdr, int has_write_perm)
struct sg_io_hdr *hdr, struct file *file)
{
if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
return -EFAULT;
if (blk_verify_command(rq->cmd, has_write_perm))
if (blk_verify_command(file, rq->cmd))
return -EPERM;
/*
......@@ -287,7 +179,7 @@ static int sg_io(struct file *file, struct request_queue *q,
struct gendisk *bd_disk, struct sg_io_hdr *hdr)
{
unsigned long start_time;
int writing = 0, ret = 0, has_write_perm = 0;
int writing = 0, ret = 0;
struct request *rq;
char sense[SCSI_SENSE_BUFFERSIZE];
struct bio *bio;
......@@ -316,10 +208,7 @@ static int sg_io(struct file *file, struct request_queue *q,
if (!rq)
return -ENOMEM;
if (file)
has_write_perm = file->f_mode & FMODE_WRITE;
if (blk_fill_sghdr_rq(q, rq, hdr, has_write_perm)) {
if (blk_fill_sghdr_rq(q, rq, hdr, file)) {
blk_put_request(rq);
return -EFAULT;
}
......@@ -451,7 +340,7 @@ int sg_scsi_ioctl(struct file *file, struct request_queue *q,
if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
goto error;
err = blk_verify_command(rq->cmd, file->f_mode & FMODE_WRITE);
err = blk_verify_command(file, rq->cmd);
if (err)
goto error;
......
......@@ -885,7 +885,8 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
/* set the min alignment and padding */
blk_queue_update_dma_alignment(sdev->request_queue,
ATA_DMA_PAD_SZ - 1);
blk_queue_dma_pad(sdev->request_queue, ATA_DMA_PAD_SZ - 1);
blk_queue_update_dma_pad(sdev->request_queue,
ATA_DMA_PAD_SZ - 1);
/* configure draining */
buf = kmalloc(ATAPI_MAX_DRAIN, q->bounce_gfp | GFP_KERNEL);
......
......@@ -1003,7 +1003,7 @@ aoecmd_cfg_rsp(struct sk_buff *skb)
* Enough people have their dip switches set backwards to
* warrant a loud message for this special case.
*/
aoemajor = be16_to_cpu(get_unaligned(&h->major));
aoemajor = get_unaligned_be16(&h->major);
if (aoemajor == 0xfff) {
printk(KERN_ERR "aoe: Warning: shelf address is all ones. "
"Check shelf dip switches.\n");
......
......@@ -146,6 +146,7 @@ static int (*drives[4])[6] = {&drive0, &drive1, &drive2, &drive3};
#include <linux/mtio.h>
#include <linux/device.h>
#include <linux/sched.h> /* current, TASK_*, schedule_timeout() */
#include <linux/smp_lock.h>
#include <asm/uaccess.h>
......@@ -189,8 +190,7 @@ module_param_array(drive3, int, NULL, 0);
#define ATAPI_LOG_SENSE 0x4d
static int pt_open(struct inode *inode, struct file *file);
static int pt_ioctl(struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg);
static long pt_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
static int pt_release(struct inode *inode, struct file *file);
static ssize_t pt_read(struct file *filp, char __user *buf,
size_t count, loff_t * ppos);
......@@ -236,7 +236,7 @@ static const struct file_operations pt_fops = {
.owner = THIS_MODULE,
.read = pt_read,
.write = pt_write,
.ioctl = pt_ioctl,
.unlocked_ioctl = pt_ioctl,
.open = pt_open,
.release = pt_release,
};
......@@ -685,8 +685,7 @@ static int pt_open(struct inode *inode, struct file *file)
return err;
}
static int pt_ioctl(struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg)
static long pt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct pt_unit *tape = file->private_data;
struct mtop __user *p = (void __user *)arg;
......@@ -700,23 +699,26 @@ static int pt_ioctl(struct inode *inode, struct file *file,
switch (mtop.mt_op) {
case MTREW:
lock_kernel();
pt_rewind(tape);
unlock_kernel();
return 0;
case MTWEOF:
lock_kernel();
pt_write_fm(tape);
unlock_kernel();
return 0;
default:
printk("%s: Unimplemented mt_op %d\n", tape->name,
/* FIXME: rate limit ?? */
printk(KERN_DEBUG "%s: Unimplemented mt_op %d\n", tape->name,
mtop.mt_op);
return -EINVAL;
}
default:
printk("%s: Unimplemented ioctl 0x%x\n", tape->name, cmd);
return -EINVAL;
return -ENOTTY;
}
}
......
......@@ -49,6 +49,7 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/smp_lock.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/file.h>
......@@ -2079,7 +2080,6 @@ static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd,
unsigned char buf[64];
int ret;
memset(buf, 0, sizeof(buf));
init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
cgc.sense = &sense;
cgc.buflen = pd->mode_offset + 12;
......@@ -2126,7 +2126,6 @@ static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd,
unsigned char *cap_buf;
int ret, offset;
memset(buf, 0, sizeof(buf));
cap_buf = &buf[sizeof(struct mode_page_header) + pd->mode_offset];
init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_UNKNOWN);
cgc.sense = &sense;
......@@ -2633,11 +2632,12 @@ static int pkt_make_request(struct request_queue *q, struct bio *bio)
static int pkt_merge_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *bvec)
static int pkt_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
struct bio_vec *bvec)
{
struct pktcdvd_device *pd = q->queuedata;
sector_t zone = ZONE(bio->bi_sector, pd);
int used = ((bio->bi_sector - zone) << 9) + bio->bi_size;
sector_t zone = ZONE(bmd->bi_sector, pd);
int used = ((bmd->bi_sector - zone) << 9) + bmd->bi_size;
int remaining = (pd->settings.size << 9) - used;
int remaining2;
......@@ -2645,7 +2645,7 @@ static int pkt_merge_bvec(struct request_queue *q, struct bio *bio, struct bio_v
* A bio <= PAGE_SIZE must be allowed. If it crosses a packet
* boundary, pkt_make_request() will split the bio.
*/
remaining2 = PAGE_SIZE - bio->bi_size;
remaining2 = PAGE_SIZE - bmd->bi_size;
remaining = max(remaining, remaining2);
BUG_ON(remaining < 0);
......@@ -2796,9 +2796,14 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
return ret;
}
static int pkt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
static long pkt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct pktcdvd_device *pd = inode->i_bdev->bd_disk->private_data;
struct inode *inode = file->f_path.dentry->d_inode;
struct pktcdvd_device *pd;
long ret;
lock_kernel();
pd = inode->i_bdev->bd_disk->private_data;
VPRINTK("pkt_ioctl: cmd %x, dev %d:%d\n", cmd, imajor(inode), iminor(inode));
......@@ -2811,7 +2816,8 @@ static int pkt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, u
case CDROM_LAST_WRITTEN:
case CDROM_SEND_PACKET:
case SCSI_IOCTL_SEND_COMMAND:
return blkdev_ioctl(pd->bdev->bd_inode, file, cmd, arg);
ret = blkdev_ioctl(pd->bdev->bd_inode, file, cmd, arg);
break;
case CDROMEJECT:
/*
......@@ -2820,14 +2826,15 @@ static int pkt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, u
*/
if (pd->refcnt == 1)
pkt_lock_door(pd, 0);
return blkdev_ioctl(pd->bdev->bd_inode, file, cmd, arg);
ret = blkdev_ioctl(pd->bdev->bd_inode, file, cmd, arg);
break;
default:
VPRINTK(DRIVER_NAME": Unknown ioctl for %s (%x)\n", pd->name, cmd);
return -ENOTTY;
ret = -ENOTTY;
}
return 0;
unlock_kernel();
return ret;
}
static int pkt_media_changed(struct gendisk *disk)
......@@ -2849,7 +2856,7 @@ static struct block_device_operations pktcdvd_ops = {
.owner = THIS_MODULE,
.open = pkt_open,
.release = pkt_close,
.ioctl = pkt_ioctl,
.unlocked_ioctl = pkt_ioctl,
.media_changed = pkt_media_changed,
};
......@@ -3014,7 +3021,8 @@ static void pkt_get_status(struct pkt_ctrl_command *ctrl_cmd)
mutex_unlock(&ctl_mutex);
}
static int pkt_ctl_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
static long pkt_ctl_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
void __user *argp = (void __user *)arg;
struct pkt_ctrl_command ctrl_cmd;
......@@ -3031,16 +3039,22 @@ static int pkt_ctl_ioctl(struct inode *inode, struct file *file, unsigned int cm
case PKT_CTRL_CMD_SETUP:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
lock_kernel();
ret = pkt_setup_dev(new_decode_dev(ctrl_cmd.dev), &pkt_dev);
ctrl_cmd.pkt_dev = new_encode_dev(pkt_dev);
unlock_kernel();
break;
case PKT_CTRL_CMD_TEARDOWN:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
lock_kernel();
ret = pkt_remove_dev(new_decode_dev(ctrl_cmd.pkt_dev));
unlock_kernel();
break;
case PKT_CTRL_CMD_STATUS:
lock_kernel();
pkt_get_status(&ctrl_cmd);
unlock_kernel();
break;
default:
return -ENOTTY;
......@@ -3053,7 +3067,7 @@ static int pkt_ctl_ioctl(struct inode *inode, struct file *file, unsigned int cm
static const struct file_operations pkt_ctl_fops = {
.ioctl = pkt_ctl_ioctl,
.unlocked_ioctl = pkt_ctl_ioctl,
.owner = THIS_MODULE,
};
......
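The pt and pktcdvd conversions above follow the series' common BKL-pushdown recipe: switch .ioctl to .unlocked_ioctl, recover the inode from file->f_path now that it is no longer passed in, and wrap only the paths that still need the BKL in explicit lock_kernel()/unlock_kernel(). Distilled to a template (all foo_* names are placeholders, not code from this merge):

static long foo_unlocked_ioctl(struct file *file, unsigned int cmd,
                               unsigned long arg)
{
        struct inode *inode = file->f_path.dentry->d_inode;
        long ret;

        lock_kernel();          /* was taken implicitly for .ioctl */
        ret = foo_do_ioctl(inode, file, cmd, arg);
        unlock_kernel();
        return ret;
}

static const struct file_operations foo_fops = {
        .owner          = THIS_MODULE,
        .unlocked_ioctl = foo_unlocked_ioctl,
};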
......@@ -38,6 +38,7 @@
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <xen/xenbus.h>
......@@ -153,6 +154,40 @@ static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
return 0;
}
int blkif_ioctl(struct inode *inode, struct file *filep,
unsigned command, unsigned long argument)
{
struct blkfront_info *info =
inode->i_bdev->bd_disk->private_data;
int i;
dev_dbg(&info->xbdev->dev, "command: 0x%x, argument: 0x%lx\n",
command, (long)argument);
switch (command) {
case CDROMMULTISESSION:
dev_dbg(&info->xbdev->dev, "FIXME: support multisession CDs later\n");
for (i = 0; i < sizeof(struct cdrom_multisession); i++)
if (put_user(0, (char __user *)(argument + i)))
return -EFAULT;
return 0;
case CDROM_GET_CAPABILITY: {
struct gendisk *gd = info->gd;
if (gd->flags & GENHD_FL_CD)
return 0;
return -EINVAL;
}
default:
/*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n",
command);*/
return -EINVAL; /* same return as native Linux */
}
return 0;
}
/*
* blkif_queue_request
*
......@@ -324,6 +359,9 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
/* Make sure buffer addresses are sector-aligned. */
blk_queue_dma_alignment(rq, 511);
/* Make sure we don't use bounce buffers. */
blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);
gd->queue = rq;
return 0;
......@@ -546,7 +584,7 @@ static int setup_blkring(struct xenbus_device *dev,
info->ring_ref = GRANT_INVALID_REF;
sring = (struct blkif_sring *)__get_free_page(GFP_KERNEL);
sring = (struct blkif_sring *)__get_free_page(GFP_NOIO | __GFP_HIGH);
if (!sring) {
xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
return -ENOMEM;
......@@ -703,7 +741,8 @@ static int blkif_recover(struct blkfront_info *info)
int j;
/* Stage 1: Make a safe copy of the shadow state. */
copy = kmalloc(sizeof(info->shadow), GFP_KERNEL);
copy = kmalloc(sizeof(info->shadow),
GFP_NOIO | __GFP_REPEAT | __GFP_HIGH);
if (!copy)
return -ENOMEM;
memcpy(copy, info->shadow, sizeof(info->shadow));
......@@ -959,7 +998,7 @@ static int blkif_release(struct inode *inode, struct file *filep)
struct xenbus_device *dev = info->xbdev;
enum xenbus_state state = xenbus_read_driver_state(dev->otherend);
if (state == XenbusStateClosing)
if (state == XenbusStateClosing && info->is_ready)
blkfront_closing(dev);
}
return 0;
......@@ -971,6 +1010,7 @@ static struct block_device_operations xlvbd_block_fops =
.open = blkif_open,
.release = blkif_release,
.getgeo = blkif_getgeo,
.ioctl = blkif_ioctl,
};
......@@ -1006,7 +1046,7 @@ static int __init xlblk_init(void)
module_init(xlblk_init);
static void xlblk_exit(void)
static void __exit xlblk_exit(void)
{
return xenbus_unregister_driver(&blkfront);
}
......
......@@ -50,17 +50,19 @@ static inline dev_info_t *which_dev(mddev_t *mddev, sector_t sector)
/**
* linear_mergeable_bvec -- tell bio layer if two requests can be merged
* @q: request queue
* @bio: the buffer head that's been built up so far
* @bvm: properties of new bio
* @biovec: the request that could be merged to it.
*
* Return amount of bytes we can take at this offset
*/
static int linear_mergeable_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *biovec)
static int linear_mergeable_bvec(struct request_queue *q,
struct bvec_merge_data *bvm,
struct bio_vec *biovec)
{
mddev_t *mddev = q->queuedata;
dev_info_t *dev0;
unsigned long maxsectors, bio_sectors = bio->bi_size >> 9;
sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
unsigned long maxsectors, bio_sectors = bvm->bi_size >> 9;
sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
dev0 = which_dev(mddev, sector);
maxsectors = (dev0->size << 1) - (sector - (dev0->offset<<1));
......
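All the mergeable_bvec conversions in this merge (pkt_merge_bvec above, linear here, raid0/raid10/raid5 below) swap the full struct bio for a small descriptor, so stacked drivers can ask "would this bvec merge?" on behalf of a bio that does not exist yet. Recalled from the blkdev.h hunk (which is not shown here, so treat the exact layout as an assumption), the descriptor carries just the fields these callbacks read:

/* as introduced in include/linux/blkdev.h by this series (recalled) */
struct bvec_merge_data {
        struct block_device     *bi_bdev;
        sector_t                bi_sector;
        unsigned                bi_size;
        unsigned long           bi_rw;  /* raid5 tests bit 0 for WRITE */
};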
......@@ -241,18 +241,20 @@ static int create_strip_zones (mddev_t *mddev)
/**
* raid0_mergeable_bvec -- tell bio layer if two requests can be merged
* @q: request queue
* @bio: the buffer head that's been built up so far
* @bvm: properties of new bio
* @biovec: the request that could be merged to it.
*
* Return amount of bytes we can accept at this offset
*/
static int raid0_mergeable_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *biovec)
static int raid0_mergeable_bvec(struct request_queue *q,
struct bvec_merge_data *bvm,
struct bio_vec *biovec)
{
mddev_t *mddev = q->queuedata;
sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
int max;
unsigned int chunk_sectors = mddev->chunk_size >> 9;
unsigned int bio_sectors = bio->bi_size >> 9;
unsigned int bio_sectors = bvm->bi_size >> 9;
max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
if (max < 0) max = 0; /* bio_add cannot handle a negative return */
......
......@@ -439,26 +439,27 @@ static sector_t raid10_find_virt(conf_t *conf, sector_t sector, int dev)
/**
* raid10_mergeable_bvec -- tell bio layer if two requests can be merged
* @q: request queue
* @bio: the buffer head that's been built up so far
* @bvm: properties of new bio
* @biovec: the request that could be merged to it.
*
* Return amount of bytes we can accept at this offset
* If near_copies == raid_disk, there are no striping issues,
* but in that case, the function isn't called at all.
*/
static int raid10_mergeable_bvec(struct request_queue *q, struct bio *bio,
struct bio_vec *bio_vec)
static int raid10_mergeable_bvec(struct request_queue *q,
struct bvec_merge_data *bvm,
struct bio_vec *biovec)
{
mddev_t *mddev = q->queuedata;
sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
int max;
unsigned int chunk_sectors = mddev->chunk_size >> 9;
unsigned int bio_sectors = bio->bi_size >> 9;
unsigned int bio_sectors = bvm->bi_size >> 9;
max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
if (max < 0) max = 0; /* bio_add cannot handle a negative return */
if (max <= bio_vec->bv_len && bio_sectors == 0)
return bio_vec->bv_len;
if (max <= biovec->bv_len && bio_sectors == 0)
return biovec->bv_len;
else
return max;
}
......
......@@ -3314,15 +3314,17 @@ static int raid5_congested(void *data, int bits)
/* We want read requests to align with chunks where possible,
* but write requests don't need to.
*/
static int raid5_mergeable_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *biovec)
static int raid5_mergeable_bvec(struct request_queue *q,
struct bvec_merge_data *bvm,
struct bio_vec *biovec)
{
mddev_t *mddev = q->queuedata;
sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
int max;
unsigned int chunk_sectors = mddev->chunk_size >> 9;
unsigned int bio_sectors = bio->bi_size >> 9;
unsigned int bio_sectors = bvm->bi_size >> 9;
if (bio_data_dir(bio) == WRITE)
if ((bvm->bi_rw & 1) == WRITE)
return biovec->bv_len; /* always allow writes to be mergeable */
max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
......
......@@ -1324,7 +1324,7 @@ static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
goto fail;
}
txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_KERNEL);
txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
if (!txs) {
err = -ENOMEM;
xenbus_dev_fatal(dev, err, "allocating tx ring page");
......@@ -1340,7 +1340,7 @@ static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
}
info->tx_ring_ref = err;
rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_KERNEL);
rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
if (!rxs) {
err = -ENOMEM;
xenbus_dev_fatal(dev, err, "allocating rx ring page");
......
......@@ -182,8 +182,9 @@ static int sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp,
int tablesize);
static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count,
Sg_request * srp);
static ssize_t sg_new_write(Sg_fd * sfp, const char __user *buf, size_t count,
int blocking, int read_only, Sg_request ** o_srp);
static ssize_t sg_new_write(Sg_fd *sfp, struct file *file,
const char __user *buf, size_t count, int blocking,
int read_only, Sg_request **o_srp);
static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
unsigned char *cmnd, int timeout, int blocking);
static int sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind,
......@@ -204,7 +205,6 @@ static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
static Sg_request *sg_add_request(Sg_fd * sfp);
static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
static int sg_res_in_use(Sg_fd * sfp);
static int sg_allow_access(unsigned char opcode, char dev_type);
static int sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len);
static Sg_device *sg_get_dev(int dev);
#ifdef CONFIG_SCSI_PROC_FS
......@@ -544,7 +544,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
return -EFAULT;
blocking = !(filp->f_flags & O_NONBLOCK);
if (old_hdr.reply_len < 0)
return sg_new_write(sfp, buf, count, blocking, 0, NULL);
return sg_new_write(sfp, filp, buf, count, blocking, 0, NULL);
if (count < (SZ_SG_HEADER + 6))
return -EIO; /* The minimum scsi command length is 6 bytes. */
......@@ -621,8 +621,9 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
}
static ssize_t
sg_new_write(Sg_fd * sfp, const char __user *buf, size_t count,
int blocking, int read_only, Sg_request ** o_srp)
sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
size_t count, int blocking, int read_only,
Sg_request **o_srp)
{
int k;
Sg_request *srp;
......@@ -678,8 +679,7 @@ sg_new_write(Sg_fd * sfp, const char __user *buf, size_t count,
sg_remove_request(sfp, srp);
return -EFAULT;
}
if (read_only &&
(!sg_allow_access(cmnd[0], sfp->parentdp->device->type))) {
if (read_only && !blk_verify_command(file, cmnd)) {
sg_remove_request(sfp, srp);
return -EPERM;
}
......@@ -799,7 +799,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
if (!access_ok(VERIFY_WRITE, p, SZ_SG_IO_HDR))
return -EFAULT;
result =
sg_new_write(sfp, p, SZ_SG_IO_HDR,
sg_new_write(sfp, filp, p, SZ_SG_IO_HDR,
blocking, read_only, &srp);
if (result < 0)
return result;
......@@ -1048,7 +1048,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
if (copy_from_user(&opcode, siocp->data, 1))
return -EFAULT;
if (!sg_allow_access(opcode, sdp->device->type))
if (!blk_verify_command(filp, &opcode))
return -EPERM;
}
return sg_scsi_ioctl(filp, sdp->device->request_queue, NULL, p);
......@@ -2502,30 +2502,6 @@ sg_page_free(struct page *page, int size)
__free_pages(page, order);
}
#ifndef MAINTENANCE_IN_CMD
#define MAINTENANCE_IN_CMD 0xa3
#endif
static unsigned char allow_ops[] = { TEST_UNIT_READY, REQUEST_SENSE,
INQUIRY, READ_CAPACITY, READ_BUFFER, READ_6, READ_10, READ_12,
READ_16, MODE_SENSE, MODE_SENSE_10, LOG_SENSE, REPORT_LUNS,
SERVICE_ACTION_IN, RECEIVE_DIAGNOSTIC, READ_LONG, MAINTENANCE_IN_CMD
};
static int
sg_allow_access(unsigned char opcode, char dev_type)
{
int k;
if (TYPE_SCANNER == dev_type) /* TYPE_ROM maybe burner */
return 1;
for (k = 0; k < sizeof (allow_ops); ++k) {
if (opcode == allow_ops[k])
return 1;
}
return 0;
}
#ifdef CONFIG_SCSI_PROC_FS
static int
sg_idr_max_id(int id, void *p, void *data)
......
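With sg_allow_access() and its static opcode whitelist removed (see the deleted block above), sg defers opcode filtering to the block layer's per-disk command filter. A sketch of the caller-side convention, mirroring the hunks above; the wrapper function itself is invented:

	/*
	 * Invented wrapper: read-only opens may only issue commands the
	 * per-disk filter allows. blk_verify_command() is the block-layer
	 * entry point, used here with the truth convention shown in the
	 * sg_new_write() hunk above.
	 */
	static int example_check_opcode(struct file *file, unsigned char *cmnd,
					int read_only)
	{
		if (read_only && !blk_verify_command(file, cmnd))
			return -EPERM;
		return 0;
	}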
......@@ -673,24 +673,20 @@ static int sr_probe(struct device *dev)
static void get_sectorsize(struct scsi_cd *cd)
{
unsigned char cmd[10];
unsigned char *buffer;
unsigned char buffer[8];
int the_result, retries = 3;
int sector_size;
struct request_queue *queue;
buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
if (!buffer)
goto Enomem;
do {
cmd[0] = READ_CAPACITY;
memset((void *) &cmd[1], 0, 9);
memset(buffer, 0, 8);
memset(buffer, 0, sizeof(buffer));
/* Do the command and wait.. */
the_result = scsi_execute_req(cd->device, cmd, DMA_FROM_DEVICE,
buffer, 8, NULL, SR_TIMEOUT,
MAX_RETRIES);
buffer, sizeof(buffer), NULL,
SR_TIMEOUT, MAX_RETRIES);
retries--;
......@@ -745,14 +741,8 @@ static void get_sectorsize(struct scsi_cd *cd)
queue = cd->device->request_queue;
blk_queue_hardsect_size(queue, sector_size);
out:
kfree(buffer);
return;
Enomem:
cd->capacity = 0x1fffff;
cd->device->sector_size = 2048; /* A guess, just in case */
goto out;
return;
}
static void get_capabilities(struct scsi_cd *cd)
......
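The sr hunks replace a 512-byte kmalloc(GFP_KERNEL | GFP_DMA) buffer with an 8-byte on-stack buffer, which this merge makes safe: blk_rq_map_kern() now bounces stack buffers (per the "block: blk_rq_map_kern uses the bounce buffers for stack buffers" commit listed above). The resulting idiom, sketched with assumed locals (sdev, cmd, the_result, timeout, retries):

	/*
	 * Sketch: a small response buffer can now live on the stack; the
	 * block layer bounces it if the hardware cannot DMA there.
	 */
	unsigned char buffer[8];	/* READ CAPACITY returns 8 bytes */

	memset(buffer, 0, sizeof(buffer));
	the_result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE,
				      buffer, sizeof(buffer), NULL,
				      timeout, retries);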
......@@ -117,7 +117,7 @@ int xenbus_watch_pathfmt(struct xenbus_device *dev,
char *path;
va_start(ap, pathfmt);
path = kvasprintf(GFP_KERNEL, pathfmt, ap);
path = kvasprintf(GFP_NOIO | __GFP_HIGH, pathfmt, ap);
va_end(ap);
if (!path) {
......
......@@ -283,9 +283,9 @@ static char *join(const char *dir, const char *name)
char *buffer;
if (strlen(name) == 0)
buffer = kasprintf(GFP_KERNEL, "%s", dir);
buffer = kasprintf(GFP_NOIO | __GFP_HIGH, "%s", dir);
else
buffer = kasprintf(GFP_KERNEL, "%s/%s", dir, name);
buffer = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/%s", dir, name);
return (!buffer) ? ERR_PTR(-ENOMEM) : buffer;
}
......@@ -297,7 +297,7 @@ static char **split(char *strings, unsigned int len, unsigned int *num)
*num = count_strings(strings, len);
/* Transfer to one big alloc for easy freeing. */
ret = kmalloc(*num * sizeof(char *) + len, GFP_KERNEL);
ret = kmalloc(*num * sizeof(char *) + len, GFP_NOIO | __GFP_HIGH);
if (!ret) {
kfree(strings);
return ERR_PTR(-ENOMEM);
......@@ -751,7 +751,7 @@ static int process_msg(void)
}
msg = kmalloc(sizeof(*msg), GFP_KERNEL);
msg = kmalloc(sizeof(*msg), GFP_NOIO | __GFP_HIGH);
if (msg == NULL) {
err = -ENOMEM;
goto out;
......@@ -763,7 +763,7 @@ static int process_msg(void)
goto out;
}
body = kmalloc(msg->hdr.len + 1, GFP_KERNEL);
body = kmalloc(msg->hdr.len + 1, GFP_NOIO | __GFP_HIGH);
if (body == NULL) {
kfree(msg);
err = -ENOMEM;
......
......@@ -19,6 +19,7 @@ else
obj-y += no-block.o
endif
obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o
obj-$(CONFIG_INOTIFY) += inotify.o
obj-$(CONFIG_INOTIFY_USER) += inotify_user.o
obj-$(CONFIG_EPOLL) += eventpoll.o
......

......@@ -28,25 +28,10 @@
#include <linux/blktrace_api.h>
#include <scsi/sg.h> /* for struct sg_iovec */
#define BIO_POOL_SIZE 2
static struct kmem_cache *bio_slab __read_mostly;
#define BIOVEC_NR_POOLS 6
/*
* a small number of entries is fine, not going to be performance critical.
* basically we just need to survive
*/
#define BIO_SPLIT_ENTRIES 2
mempool_t *bio_split_pool __read_mostly;
struct biovec_slab {
int nr_vecs;
char *name;
struct kmem_cache *slab;
};
/*
* if you change this list, also change bvec_alloc or things will
* break badly! cannot be bigger than what you can fit into an
......@@ -59,24 +44,18 @@ static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
};
#undef BV
/*
* bio_set is used to allow other portions of the IO system to
* allocate their own private memory pools for bio and iovec structures.
* These memory pools in turn all allocate from the bio_slab
* and the bvec_slabs[].
*/
struct bio_set {
mempool_t *bio_pool;
mempool_t *bvec_pools[BIOVEC_NR_POOLS];
};
/*
* fs_bio_set is the bio_set containing bio and iovec memory pools used by
* IO code that does not need private memory pools.
*/
static struct bio_set *fs_bio_set;
struct bio_set *fs_bio_set;
unsigned int bvec_nr_vecs(unsigned short idx)
{
return bvec_slabs[idx].nr_vecs;
}
static inline struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx, struct bio_set *bs)
struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx, struct bio_set *bs)
{
struct bio_vec *bvl;
......@@ -117,6 +96,9 @@ void bio_free(struct bio *bio, struct bio_set *bio_set)
mempool_free(bio->bi_io_vec, bio_set->bvec_pools[pool_idx]);
}
if (bio_integrity(bio))
bio_integrity_free(bio, bio_set);
mempool_free(bio, bio_set->bio_pool);
}
......@@ -275,9 +257,19 @@ struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
{
struct bio *b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, fs_bio_set);
if (b) {
b->bi_destructor = bio_fs_destructor;
__bio_clone(b, bio);
if (!b)
return NULL;
b->bi_destructor = bio_fs_destructor;
__bio_clone(b, bio);
if (bio_integrity(bio)) {
int ret;
ret = bio_integrity_clone(b, bio, fs_bio_set);
if (ret < 0)
return NULL;
}
return b;
......@@ -333,10 +325,19 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
if (page == prev->bv_page &&
offset == prev->bv_offset + prev->bv_len) {
prev->bv_len += len;
if (q->merge_bvec_fn &&
q->merge_bvec_fn(q, bio, prev) < len) {
prev->bv_len -= len;
return 0;
if (q->merge_bvec_fn) {
struct bvec_merge_data bvm = {
.bi_bdev = bio->bi_bdev,
.bi_sector = bio->bi_sector,
.bi_size = bio->bi_size,
.bi_rw = bio->bi_rw,
};
if (q->merge_bvec_fn(q, &bvm, prev) < len) {
prev->bv_len -= len;
return 0;
}
}
goto done;
......@@ -377,11 +378,18 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
* queue to get further control
*/
if (q->merge_bvec_fn) {
struct bvec_merge_data bvm = {
.bi_bdev = bio->bi_bdev,
.bi_sector = bio->bi_sector,
.bi_size = bio->bi_size,
.bi_rw = bio->bi_rw,
};
/*
* merge_bvec_fn() returns number of bytes it can accept
* at this offset
*/
if (q->merge_bvec_fn(q, bio, bvec) < len) {
if (q->merge_bvec_fn(q, &bvm, bvec) < len) {
bvec->bv_page = NULL;
bvec->bv_len = 0;
bvec->bv_offset = 0;
......@@ -1249,6 +1257,9 @@ struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors)
bp->bio1.bi_private = bi;
bp->bio2.bi_private = pool;
if (bio_integrity(bi))
bio_integrity_split(bi, bp, first_sectors);
return bp;
}
......@@ -1290,6 +1301,7 @@ void bioset_free(struct bio_set *bs)
if (bs->bio_pool)
mempool_destroy(bs->bio_pool);
bioset_integrity_free(bs);
biovec_free_pools(bs);
kfree(bs);
......@@ -1306,6 +1318,9 @@ struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size)
if (!bs->bio_pool)
goto bad;
if (bioset_integrity_create(bs, bio_pool_size))
goto bad;
if (!biovec_create_pools(bs, bvec_pool_size))
return bs;
......@@ -1332,6 +1347,7 @@ static int __init init_bio(void)
{
bio_slab = KMEM_CACHE(bio, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
bio_integrity_init_slab();
biovec_init_slabs();
fs_bio_set = bioset_create(BIO_POOL_SIZE, 2);
......
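fs/bio.c now frees, clones and splits integrity metadata alongside the bio itself, so a submitter only has to attach it once before the bio enters the block layer. A hedged sketch of that step; the helper is invented, and the assumption that bio_integrity_prep() returns nonzero on failure follows the CONFIG_BLK_DEV_INTEGRITY=n stub "#define bio_integrity_prep(a) (0)" declared later in this merge:

	/*
	 * Invented submit helper: generate protection data for writes (or
	 * reserve room for it on reads) before submission. Assumes
	 * bio_integrity_prep() returns nonzero on failure.
	 */
	static void example_submit_bio(int rw, struct bio *bio)
	{
		if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
			bio_endio(bio, -EIO);
			return;
		}
		submit_bio(rw, bio);
	}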
......@@ -45,6 +45,7 @@ const struct file_operations ramfs_file_operations = {
.mmap = generic_file_mmap,
.fsync = simple_sync_file,
.splice_read = generic_file_splice_read,
.splice_write = generic_file_splice_write,
.llseek = generic_file_llseek,
};
......
......@@ -43,6 +43,7 @@ const struct file_operations ramfs_file_operations = {
.aio_write = generic_file_aio_write,
.fsync = simple_sync_file,
.splice_read = generic_file_splice_read,
.splice_write = generic_file_splice_write,
.llseek = generic_file_llseek,
};
......
......@@ -379,13 +379,22 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
lock_page(page);
/*
* page was truncated, stop here. if this isn't the
* first page, we'll just complete what we already
* added
* Page was truncated, or invalidated by the
* filesystem. Redo the find/create, but this time the
* page is kept locked, so there's no chance of another
* race with truncate/invalidate.
*/
if (!page->mapping) {
unlock_page(page);
break;
page = find_or_create_page(mapping, index,
mapping_gfp_mask(mapping));
if (!page) {
error = -ENOMEM;
break;
}
page_cache_release(pages[page_nr]);
pages[page_nr] = page;
}
/*
* page was already under io and is now done, great
......
......@@ -64,6 +64,7 @@ struct bio_vec {
struct bio_set;
struct bio;
struct bio_integrity_payload;
typedef void (bio_end_io_t) (struct bio *, int);
typedef void (bio_destructor_t) (struct bio *);
......@@ -112,6 +113,9 @@ struct bio {
atomic_t bi_cnt; /* pin count */
void *bi_private;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
bio_destructor_t *bi_destructor; /* destructor */
};
......@@ -271,6 +275,29 @@ static inline void *bio_data(struct bio *bio)
*/
#define bio_get(bio) atomic_inc(&(bio)->bi_cnt)
#if defined(CONFIG_BLK_DEV_INTEGRITY)
/*
* bio integrity payload
*/
struct bio_integrity_payload {
struct bio *bip_bio; /* parent bio */
struct bio_vec *bip_vec; /* integrity data vector */
sector_t bip_sector; /* virtual start sector */
void *bip_buf; /* generated integrity data */
bio_end_io_t *bip_end_io; /* saved I/O completion fn */
int bip_error; /* saved I/O error */
unsigned int bip_size;
unsigned short bip_pool; /* pool the ivec came from */
unsigned short bip_vcnt; /* # of integrity bio_vecs */
unsigned short bip_idx; /* current bip_vec index */
struct work_struct bip_work; /* I/O completion */
};
#endif /* CONFIG_BLK_DEV_INTEGRITY */
/*
* A bio_pair is used when we need to split a bio.
......@@ -283,10 +310,14 @@ static inline void *bio_data(struct bio *bio)
* in bio2.bi_private
*/
struct bio_pair {
struct bio bio1, bio2;
struct bio_vec bv1, bv2;
atomic_t cnt;
int error;
struct bio bio1, bio2;
struct bio_vec bv1, bv2;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
struct bio_integrity_payload bip1, bip2;
struct bio_vec iv1, iv2;
#endif
atomic_t cnt;
int error;
};
extern struct bio_pair *bio_split(struct bio *bi, mempool_t *pool,
int first_sectors);
......@@ -333,6 +364,39 @@ extern struct bio *bio_copy_user_iov(struct request_queue *, struct sg_iovec *,
int, int);
extern int bio_uncopy_user(struct bio *);
void zero_fill_bio(struct bio *bio);
extern struct bio_vec *bvec_alloc_bs(gfp_t, int, unsigned long *, struct bio_set *);
extern unsigned int bvec_nr_vecs(unsigned short idx);
/*
* bio_set is used to allow other portions of the IO system to
* allocate their own private memory pools for bio and iovec structures.
* These memory pools in turn all allocate from the bio_slab
* and the bvec_slabs[].
*/
#define BIO_POOL_SIZE 2
#define BIOVEC_NR_POOLS 6
struct bio_set {
mempool_t *bio_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
mempool_t *bio_integrity_pool;
#endif
mempool_t *bvec_pools[BIOVEC_NR_POOLS];
};
struct biovec_slab {
int nr_vecs;
char *name;
struct kmem_cache *slab;
};
extern struct bio_set *fs_bio_set;
/*
* a small number of entries is fine, not going to be performance critical.
* basically we just need to survive
*/
#define BIO_SPLIT_ENTRIES 2
#ifdef CONFIG_HIGHMEM
/*
......@@ -381,5 +445,63 @@ static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
__bio_kmap_irq((bio), (bio)->bi_idx, (flags))
#define bio_kunmap_irq(buf,flags) __bio_kunmap_irq(buf, flags)
#if defined(CONFIG_BLK_DEV_INTEGRITY)
#define bip_vec_idx(bip, idx) (&(bip->bip_vec[(idx)]))
#define bip_vec(bip) bip_vec_idx(bip, 0)
#define __bip_for_each_vec(bvl, bip, i, start_idx) \
for (bvl = bip_vec_idx((bip), (start_idx)), i = (start_idx); \
i < (bip)->bip_vcnt; \
bvl++, i++)
#define bip_for_each_vec(bvl, bip, i) \
__bip_for_each_vec(bvl, bip, i, (bip)->bip_idx)
static inline int bio_integrity(struct bio *bio)
{
#if defined(CONFIG_BLK_DEV_INTEGRITY)
return bio->bi_integrity != NULL;
#else
return 0;
#endif
}
extern struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *, gfp_t, unsigned int, struct bio_set *);
extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern void bio_integrity_free(struct bio *, struct bio_set *);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_integrity_enabled(struct bio *bio);
extern int bio_integrity_set_tag(struct bio *, void *, unsigned int);
extern int bio_integrity_get_tag(struct bio *, void *, unsigned int);
extern int bio_integrity_prep(struct bio *);
extern void bio_integrity_endio(struct bio *, int);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
extern void bio_integrity_split(struct bio *, struct bio_pair *, int);
extern int bio_integrity_clone(struct bio *, struct bio *, struct bio_set *);
extern int bioset_integrity_create(struct bio_set *, int);
extern void bioset_integrity_free(struct bio_set *);
extern void bio_integrity_init_slab(void);
#else /* CONFIG_BLK_DEV_INTEGRITY */
#define bio_integrity(a) (0)
#define bioset_integrity_create(a, b) (0)
#define bio_integrity_prep(a) (0)
#define bio_integrity_enabled(a) (0)
#define bio_integrity_clone(a, b, c) (0)
#define bioset_integrity_free(a) do { } while (0)
#define bio_integrity_free(a, b) do { } while (0)
#define bio_integrity_endio(a, b) do { } while (0)
#define bio_integrity_advance(a, b) do { } while (0)
#define bio_integrity_trim(a, b, c) do { } while (0)
#define bio_integrity_split(a, b, c) do { } while (0)
#define bio_integrity_set_tag(a, b, c) do { } while (0)
#define bio_integrity_get_tag(a, b, c) do { } while (0)
#define bio_integrity_init_slab(a) do { } while (0)
#endif /* CONFIG_BLK_DEV_INTEGRITY */
#endif /* CONFIG_BLOCK */
#endif /* __LINUX_BIO_H */
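The bip_vec_idx()/bip_for_each_vec() helpers above iterate the not-yet-completed part of a bio's integrity vector, starting at bip_idx. A small usage sketch (the function is invented, and CONFIG_BLK_DEV_INTEGRITY is assumed enabled so that bi_integrity exists):

	/*
	 * Invented helper: count the integrity bytes still pending on a
	 * bio by walking from the current bip_idx.
	 */
	static unsigned int example_bip_bytes_pending(struct bio *bio)
	{
		struct bio_integrity_payload *bip = bio->bi_integrity;
		struct bio_vec *iv;
		unsigned int bytes = 0;
		int i;

		bip_for_each_vec(iv, bip, i)
			bytes += iv->bv_len;

		return bytes;
	}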
......@@ -23,7 +23,6 @@
struct scsi_ioctl_command;
struct request_queue;
typedef struct request_queue request_queue_t __deprecated;
struct elevator_queue;
typedef struct elevator_queue elevator_t;
struct request_pm_state;
......@@ -34,12 +33,6 @@ struct sg_io_hdr;
#define BLKDEV_MIN_RQ 4
#define BLKDEV_MAX_RQ 128 /* Default maximum */
int put_io_context(struct io_context *ioc);
void exit_io_context(void);
struct io_context *get_io_context(gfp_t gfp_flags, int node);
struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
void copy_io_context(struct io_context **pdst, struct io_context **psrc);
struct request;
typedef void (rq_end_io_fn)(struct request *, int);
......@@ -113,6 +106,7 @@ enum rq_flag_bits {
__REQ_ALLOCED, /* request came from our alloc pool */
__REQ_RW_META, /* metadata io request */
__REQ_COPY_USER, /* contains copies of user pages */
__REQ_INTEGRITY, /* integrity metadata has been remapped */
__REQ_NR_BITS, /* stops here */
};
......@@ -135,6 +129,7 @@ enum rq_flag_bits {
#define REQ_ALLOCED (1 << __REQ_ALLOCED)
#define REQ_RW_META (1 << __REQ_RW_META)
#define REQ_COPY_USER (1 << __REQ_COPY_USER)
#define REQ_INTEGRITY (1 << __REQ_INTEGRITY)
#define BLK_MAX_CDB 16
......@@ -259,7 +254,14 @@ typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unplug_fn) (struct request_queue *);
struct bio_vec;
typedef int (merge_bvec_fn) (struct request_queue *, struct bio *, struct bio_vec *);
struct bvec_merge_data {
struct block_device *bi_bdev;
sector_t bi_sector;
unsigned bi_size;
unsigned long bi_rw;
};
typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
struct bio_vec *);
typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
......@@ -426,6 +428,32 @@ static inline void queue_flag_set_unlocked(unsigned int flag,
__set_bit(flag, &q->queue_flags);
}
static inline int queue_flag_test_and_clear(unsigned int flag,
struct request_queue *q)
{
WARN_ON_ONCE(!queue_is_locked(q));
if (test_bit(flag, &q->queue_flags)) {
__clear_bit(flag, &q->queue_flags);
return 1;
}
return 0;
}
static inline int queue_flag_test_and_set(unsigned int flag,
struct request_queue *q)
{
WARN_ON_ONCE(!queue_is_locked(q));
if (!test_bit(flag, &q->queue_flags)) {
__set_bit(flag, &q->queue_flags);
return 0;
}
return 1;
}
static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
WARN_ON_ONCE(!queue_is_locked(q));
......@@ -676,7 +704,6 @@ extern int blk_execute_rq(struct request_queue *, struct gendisk *,
struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
struct request *, int, rq_end_io_fn *);
extern int blk_verify_command(unsigned char *, int);
extern void blk_unplug(struct request_queue *q);
static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
......@@ -749,6 +776,7 @@ extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern int blk_queue_dma_drain(struct request_queue *q,
dma_drain_needed_fn *dma_drain_needed,
void *buf, unsigned int size);
......@@ -802,6 +830,15 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
extern int blkdev_issue_flush(struct block_device *, sector_t *);
/*
* command filter functions
*/
extern int blk_verify_command(struct file *file, unsigned char *cmd);
extern int blk_cmd_filter_verify_command(struct blk_scsi_cmd_filter *filter,
unsigned char *cmd, mode_t *f_mode);
extern int blk_register_filter(struct gendisk *disk);
extern void blk_unregister_filter(struct gendisk *disk);
#define MAX_PHYS_SEGMENTS 128
#define MAX_HW_SEGMENTS 128
#define SAFE_MAX_SECTORS 255
......@@ -865,28 +902,116 @@ void kblockd_flush_work(struct work_struct *work);
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
MODULE_ALIAS("block-major-" __stringify(major) "-*")
#if defined(CONFIG_BLK_DEV_INTEGRITY)
#else /* CONFIG_BLOCK */
/*
* stubs for when the block layer is configured out
*/
#define buffer_heads_over_limit 0
#define INTEGRITY_FLAG_READ 2 /* verify data integrity on read */
#define INTEGRITY_FLAG_WRITE 4 /* generate data integrity on write */
static inline long nr_blockdev_pages(void)
struct blk_integrity_exchg {
void *prot_buf;
void *data_buf;
sector_t sector;
unsigned int data_size;
unsigned short sector_size;
const char *disk_name;
};
typedef void (integrity_gen_fn) (struct blk_integrity_exchg *);
typedef int (integrity_vrfy_fn) (struct blk_integrity_exchg *);
typedef void (integrity_set_tag_fn) (void *, void *, unsigned int);
typedef void (integrity_get_tag_fn) (void *, void *, unsigned int);
struct blk_integrity {
integrity_gen_fn *generate_fn;
integrity_vrfy_fn *verify_fn;
integrity_set_tag_fn *set_tag_fn;
integrity_get_tag_fn *get_tag_fn;
unsigned short flags;
unsigned short tuple_size;
unsigned short sector_size;
unsigned short tag_size;
const char *name;
struct kobject kobj;
};
extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
extern void blk_integrity_unregister(struct gendisk *);
extern int blk_integrity_compare(struct block_device *, struct block_device *);
extern int blk_rq_map_integrity_sg(struct request *, struct scatterlist *);
extern int blk_rq_count_integrity_sg(struct request *);
static inline unsigned short blk_integrity_tuple_size(struct blk_integrity *bi)
{
if (bi)
return bi->tuple_size;
return 0;
}
static inline void exit_io_context(void)
static inline struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
{
return bdev->bd_disk->integrity;
}
struct io_context;
static inline int put_io_context(struct io_context *ioc)
static inline unsigned int bdev_get_tag_size(struct block_device *bdev)
{
return 1;
struct blk_integrity *bi = bdev_get_integrity(bdev);
if (bi)
return bi->tag_size;
return 0;
}
static inline int bdev_integrity_enabled(struct block_device *bdev, int rw)
{
struct blk_integrity *bi = bdev_get_integrity(bdev);
if (bi == NULL)
return 0;
if (rw == READ && bi->verify_fn != NULL &&
(bi->flags & INTEGRITY_FLAG_READ))
return 1;
if (rw == WRITE && bi->generate_fn != NULL &&
(bi->flags & INTEGRITY_FLAG_WRITE))
return 1;
return 0;
}
static inline int blk_integrity_rq(struct request *rq)
{
return bio_integrity(rq->bio);
}
#else /* CONFIG_BLK_DEV_INTEGRITY */
#define blk_integrity_rq(rq) (0)
#define blk_rq_count_integrity_sg(a) (0)
#define blk_rq_map_integrity_sg(a, b) (0)
#define bdev_get_integrity(a) (0)
#define bdev_get_tag_size(a) (0)
#define blk_integrity_compare(a, b) (0)
#define blk_integrity_register(a, b) (0)
#define blk_integrity_unregister(a) do { } while (0);
#endif /* CONFIG_BLK_DEV_INTEGRITY */
#else /* CONFIG_BLOCK */
/*
* stubs for when the block layer is configured out
*/
#define buffer_heads_over_limit 0
static inline long nr_blockdev_pages(void)
{
return 0;
}
#endif /* CONFIG_BLOCK */
......
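Putting the new hooks together: a low-level driver advertises protection-information support by filling in a struct blk_integrity and registering it against its gendisk. The sketch below is invented, not a real DIF profile; it assumes crc_t10dif() from <linux/crc-t10dif.h> is available (CONFIG_CRC_T10DIF), and only wires up write-side generation:

	/* Invented 8-byte tuple, loosely shaped like T10 DIF. */
	struct example_tuple {
		__be16 guard;
		__be16 app;
		__be32 ref;
	};

	/* Fill one tuple per hardware sector of the data buffer. */
	static void example_generate_fn(struct blk_integrity_exchg *bix)
	{
		struct example_tuple *t = bix->prot_buf;
		void *data = bix->data_buf;
		sector_t sector = bix->sector;
		unsigned int i;

		for (i = 0; i < bix->data_size; i += bix->sector_size) {
			t->guard = cpu_to_be16(crc_t10dif(data + i,
							  bix->sector_size));
			t->app = 0;
			t->ref = cpu_to_be32(sector & 0xffffffff);
			sector++;
			t++;
		}
	}

	static struct blk_integrity example_integrity = {
		.name		= "EXAMPLE-DIF-TYPE1-CRC",
		.generate_fn	= example_generate_fn,
		.flags		= INTEGRITY_FLAG_WRITE,
		.tuple_size	= sizeof(struct example_tuple),
		.sector_size	= 512,
		.tag_size	= 0,
	};

	/* At probe time, with 'disk' being the driver's gendisk: */
	blk_integrity_register(disk, &example_integrity);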
......@@ -129,6 +129,7 @@ struct blk_trace {
u32 dev;
struct dentry *dir;
struct dentry *dropped_file;
struct dentry *msg_file;
atomic_t dropped;
};
......
......@@ -110,6 +110,14 @@ struct hd_struct {
#define GENHD_FL_SUPPRESS_PARTITION_INFO 32
#define GENHD_FL_FAIL 64
#define BLK_SCSI_MAX_CMDS (256)
#define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
struct blk_scsi_cmd_filter {
unsigned long read_ok[BLK_SCSI_CMD_PER_LONG];
unsigned long write_ok[BLK_SCSI_CMD_PER_LONG];
struct kobject kobj;
};
struct gendisk {
int major; /* major number of driver */
......@@ -120,6 +128,7 @@ struct gendisk {
struct hd_struct **part; /* [indexed by minor] */
struct block_device_operations *fops;
struct request_queue *queue;
struct blk_scsi_cmd_filter cmd_filter;
void *private_data;
sector_t capacity;
......@@ -141,6 +150,9 @@ struct gendisk {
struct disk_stats dkstats;
#endif
struct work_struct async_notify;
#ifdef CONFIG_BLK_DEV_INTEGRITY
struct blk_integrity *integrity;
#endif
};
/*
......
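The new blk_scsi_cmd_filter embeds two 256-bit opcode bitmaps in every gendisk: read_ok for commands any opener may issue and write_ok for commands requiring write access, and this merge exposes them to userspace via sysfs. The test it implies, sketched here with an invented helper (the in-tree implementation lives in block/cmd-filter.c):

	/*
	 * Invented sketch: one bit per SCSI opcode, checked against the
	 * opener's effective access mode.
	 */
	static int example_filter_allows(struct blk_scsi_cmd_filter *filter,
					 unsigned char opcode,
					 int has_write_perm)
	{
		if (test_bit(opcode, filter->read_ok))
			return 1;
		if (has_write_perm && test_bit(opcode, filter->write_ok))
			return 1;
		return 0;
	}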
......@@ -99,4 +99,22 @@ static inline struct io_context *ioc_task_link(struct io_context *ioc)
return NULL;
}
#ifdef CONFIG_BLOCK
int put_io_context(struct io_context *ioc);
void exit_io_context(void);
struct io_context *get_io_context(gfp_t gfp_flags, int node);
struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
void copy_io_context(struct io_context **pdst, struct io_context **psrc);
#else
static inline void exit_io_context(void)
{
}
struct io_context;
static inline int put_io_context(struct io_context *ioc)
{
return 1;
}
#endif
#endif
......@@ -13,6 +13,7 @@
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/mnt_namespace.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/security.h>
#include <linux/cpu.h>
......
......@@ -23,6 +23,7 @@
#include <linux/sem.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
......