Commit cec60620 authored by Linus Torvalds

Merge branch 'blktrace' of git://brick.kernel.dk/data/git/linux-2.6-block

* 'blktrace' of git://brick.kernel.dk/data/git/linux-2.6-block:
  [PATCH] Block queue IO tracing support (blktrace) as of 2006-03-23
  [PATCH] relay: consolidate sendfile() and read() code
  [PATCH] relay: add sendfile() support
  [PATCH] relay: migrate from relayfs to a generic relay API
parents 88f07ffb 2056a782
@@ -11,4 +11,16 @@ config LBD
your machine, or if you want to have a raid or loopback device
bigger than 2TB. Otherwise say N.
config BLK_DEV_IO_TRACE
bool "Support for tracing block io actions"
select RELAY
select DEBUG_FS
help
Say Y here if you want to be able to trace the block layer actions
on a given queue. Tracing allows you to see any traffic happening
on a block device queue. For more information (and the user space
support tools needed), fetch the blktrace app from:
git://brick.kernel.dk/data/git/blktrace.git
source block/Kconfig.iosched
@@ -8,3 +8,5 @@ obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o
obj-$(CONFIG_IOSCHED_AS) += as-iosched.o
obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o
obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o
obj-$(CONFIG_BLK_DEV_IO_TRACE) += blktrace.o
This diff is collapsed.
@@ -33,6 +33,7 @@
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/blktrace_api.h>
#include <asm/uaccess.h>
@@ -333,6 +334,8 @@ void elv_insert(request_queue_t *q, struct request *rq, int where)
struct list_head *pos;
unsigned ordseq;
blk_add_trace_rq(q, rq, BLK_TA_INSERT);
rq->q = q;
switch (where) {
@@ -499,6 +502,7 @@ struct request *elv_next_request(request_queue_t *q)
* not be passed by new incoming requests
*/
rq->flags |= REQ_STARTED;
blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
}
if (!q->boundary_rq || q->boundary_rq == rq) {
...
@@ -5,6 +5,7 @@
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/smp_lock.h>
#include <linux/blktrace_api.h>
#include <asm/uaccess.h>
static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user *arg)
@@ -189,6 +190,11 @@ static int blkdev_locked_ioctl(struct file *file, struct block_device *bdev,
return put_ulong(arg, bdev->bd_inode->i_size >> 9);
case BLKGETSIZE64:
return put_u64(arg, bdev->bd_inode->i_size);
case BLKTRACESTART:
case BLKTRACESTOP:
case BLKTRACESETUP:
case BLKTRACETEARDOWN:
return blk_trace_ioctl(bdev, cmd, (char __user *) arg);
}
return -ENOIOCTLCMD;
}
...
@@ -28,6 +28,7 @@
#include <linux/writeback.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/blktrace_api.h>
/*
* for max sense size * for max sense size
@@ -1556,8 +1557,10 @@ void blk_plug_device(request_queue_t *q)
if (test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags))
return;
if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
}
}
EXPORT_SYMBOL(blk_plug_device);
@@ -1621,14 +1624,21 @@ static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
/*
* devices don't necessarily have an ->unplug_fn defined
*/
if (q->unplug_fn) {
blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
q->rq.count[READ] + q->rq.count[WRITE]);
q->unplug_fn(q);
}
}
static void blk_unplug_work(void *data)
{
request_queue_t *q = data;
blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
q->rq.count[READ] + q->rq.count[WRITE]);
q->unplug_fn(q);
}
@@ -1636,6 +1646,9 @@ static void blk_unplug_timeout(unsigned long data)
{
request_queue_t *q = (request_queue_t *)data;
blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
q->rq.count[READ] + q->rq.count[WRITE]);
kblockd_schedule_work(&q->unplug_work);
}
@@ -1753,6 +1766,9 @@ static void blk_release_queue(struct kobject *kobj)
if (q->queue_tags)
__blk_queue_free_tags(q);
if (q->blk_trace)
blk_trace_shutdown(q);
kmem_cache_free(requestq_cachep, q);
}
@@ -2129,6 +2145,8 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
rq_init(q, rq);
rq->rl = rl;
blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
out:
return rq;
}
@@ -2157,6 +2175,8 @@ static struct request *get_request_wait(request_queue_t *q, int rw,
if (!rq) {
struct io_context *ioc;
blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
__generic_unplug_device(q);
spin_unlock_irq(q->queue_lock);
io_schedule();
@@ -2210,6 +2230,8 @@ EXPORT_SYMBOL(blk_get_request);
*/
void blk_requeue_request(request_queue_t *q, struct request *rq)
{
blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
if (blk_rq_tagged(rq))
blk_queue_end_tag(q, rq);
@@ -2844,6 +2866,8 @@ static int __make_request(request_queue_t *q, struct bio *bio)
if (!q->back_merge_fn(q, req, bio))
break;
blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
req->biotail->bi_next = bio;
req->biotail = bio;
req->nr_sectors = req->hard_nr_sectors += nr_sectors;
@@ -2859,6 +2883,8 @@ static int __make_request(request_queue_t *q, struct bio *bio)
if (!q->front_merge_fn(q, req, bio))
break;
blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
bio->bi_next = req->bio;
req->bio = bio;
@@ -2976,6 +3002,7 @@ void generic_make_request(struct bio *bio)
request_queue_t *q;
sector_t maxsector;
int ret, nr_sectors = bio_sectors(bio);
dev_t old_dev;
might_sleep();
/* Test device or partition size, when known. */
@@ -3002,6 +3029,8 @@ void generic_make_request(struct bio *bio)
* NOTE: we don't repeat the blk_size check for each new device.
* Stacking drivers are expected to know what they are doing.
*/
maxsector = -1;
old_dev = 0;
do {
char b[BDEVNAME_SIZE];
@@ -3034,6 +3063,15 @@
*/
blk_partition_remap(bio);
if (maxsector != -1)
blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
maxsector);
blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
maxsector = bio->bi_sector;
old_dev = bio->bi_bdev->bd_dev;
ret = q->make_request_fn(q, bio);
} while (ret);
}
@@ -3153,6 +3191,8 @@ static int __end_that_request_first(struct request *req, int uptodate,
int total_bytes, bio_nbytes, error, next_idx = 0;
struct bio *bio;
blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);
/*
* extend uptodate bool to allow < 0 value to be direct io error
*/
...
@@ -38,6 +38,7 @@
#include <linux/hdreg.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <asm/uaccess.h>
#include <asm/io.h>
@@ -2331,6 +2332,7 @@ static inline void complete_command( ctlr_info_t *h, CommandList_struct *cmd,
cmd->rq->completion_data = cmd;
cmd->rq->errors = status;
blk_add_trace_rq(cmd->rq->q, cmd->rq, BLK_TA_COMPLETE);
blk_complete_request(cmd->rq);
}
...
@@ -17,6 +17,7 @@
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/blktrace_api.h>
static const char *_name = DM_NAME;
@@ -334,6 +335,8 @@ static void dec_pending(struct dm_io *io, int error)
/* nudge anyone waiting on suspend queue */
wake_up(&io->md->wait);
blk_add_trace_bio(io->md->queue, io->bio, BLK_TA_COMPLETE);
bio_endio(io->bio, io->bio->bi_size, io->error);
free_io(io->md, io);
}
@@ -392,6 +395,7 @@ static void __map_bio(struct dm_target *ti, struct bio *clone,
struct target_io *tio)
{
int r;
sector_t sector;
/*
* Sanity checks.
@@ -407,10 +411,17 @@ static void __map_bio(struct dm_target *ti, struct bio *clone,
* this io.
*/
atomic_inc(&tio->io->io_count);
sector = clone->bi_sector;
r = ti->type->map(ti, clone, &tio->info);
if (r > 0) {
/* the bio has been remapped so dispatch it */
blk_add_trace_remap(bdev_get_queue(clone->bi_bdev), clone,
tio->io->bio->bi_bdev->bd_dev, sector,
clone->bi_sector);
generic_make_request(clone);
}
else if (r < 0) {
/* error the io and bail out */
...
@@ -859,18 +859,6 @@ config RAMFS
To compile this as a module, choose M here: the module will be called
ramfs.
config RELAYFS_FS
tristate "Relayfs file system support"
---help---
Relayfs is a high-speed data relay filesystem designed to provide
an efficient mechanism for tools and facilities to relay large
amounts of data from kernel space to user space.
To compile this code as a module, choose M here: the module will be
called relayfs.
If unsure, say N.
config CONFIGFS_FS
tristate "Userspace-driven configuration filesystem (EXPERIMENTAL)"
depends on EXPERIMENTAL
...
@@ -91,7 +91,6 @@ obj-$(CONFIG_AUTOFS4_FS) += autofs4/
obj-$(CONFIG_ADFS_FS) += adfs/
obj-$(CONFIG_FUSE_FS) += fuse/
obj-$(CONFIG_UDF_FS) += udf/
obj-$(CONFIG_RELAYFS_FS) += relayfs/
obj-$(CONFIG_SUN_OPENPROMFS) += openpromfs/
obj-$(CONFIG_JFS_FS) += jfs/
obj-$(CONFIG_XFS_FS) += xfs/
...
@@ -25,6 +25,7 @@
#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include <linux/blktrace_api.h>
#include <scsi/sg.h> /* for struct sg_iovec */
#define BIO_POOL_SIZE 256
@@ -1095,6 +1096,9 @@ struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors)
if (!bp)
return bp;
blk_add_trace_pdu_int(bdev_get_queue(bi->bi_bdev), BLK_TA_SPLIT, bi,
bi->bi_sector + first_sectors);
BUG_ON(bi->bi_vcnt != 1);
BUG_ON(bi->bi_idx != 0);
atomic_set(&bp->cnt, 3);
...
@@ -72,6 +72,7 @@
#include <linux/i2c-dev.h>
#include <linux/wireless.h>
#include <linux/atalk.h>
#include <linux/blktrace_api.h>
#include <net/sock.h> /* siocdevprivate_ioctl */
#include <net/bluetooth/bluetooth.h>
...
obj-$(CONFIG_RELAYFS_FS) += relayfs.o
relayfs-y := relay.o inode.o buffers.o
/*
* RelayFS buffer management code.
*
* Copyright (C) 2002-2005 - Tom Zanussi (zanussi@us.ibm.com), IBM Corp
* Copyright (C) 1999-2005 - Karim Yaghmour (karim@opersys.com)
*
* This file is released under the GPL.
*/
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/relayfs_fs.h>
#include "relay.h"
#include "buffers.h"
/*
* close() vm_op implementation for relayfs file mapping.
*/
static void relay_file_mmap_close(struct vm_area_struct *vma)
{
struct rchan_buf *buf = vma->vm_private_data;
buf->chan->cb->buf_unmapped(buf, vma->vm_file);
}
/*
* nopage() vm_op implementation for relayfs file mapping.
*/
static struct page *relay_buf_nopage(struct vm_area_struct *vma,
unsigned long address,
int *type)
{
struct page *page;
struct rchan_buf *buf = vma->vm_private_data;
unsigned long offset = address - vma->vm_start;
if (address > vma->vm_end)
return NOPAGE_SIGBUS; /* Disallow mremap */
if (!buf)
return NOPAGE_OOM;
page = vmalloc_to_page(buf->start + offset);
if (!page)
return NOPAGE_OOM;
get_page(page);
if (type)
*type = VM_FAULT_MINOR;
return page;
}
/*
* vm_ops for relay file mappings.
*/
static struct vm_operations_struct relay_file_mmap_ops = {
.nopage = relay_buf_nopage,
.close = relay_file_mmap_close,
};
/**
* relay_mmap_buf: - mmap channel buffer to process address space
* @buf: relay channel buffer
* @vma: vm_area_struct describing memory to be mapped
*
* Returns 0 if ok, negative on error
*
* Caller should already have grabbed mmap_sem.
*/
int relay_mmap_buf(struct rchan_buf *buf, struct vm_area_struct *vma)
{
unsigned long length = vma->vm_end - vma->vm_start;
struct file *filp = vma->vm_file;
if (!buf)
return -EBADF;
if (length != (unsigned long)buf->chan->alloc_size)
return -EINVAL;
vma->vm_ops = &relay_file_mmap_ops;
vma->vm_private_data = buf;
buf->chan->cb->buf_mapped(buf, filp);
return 0;
}
/**
* relay_alloc_buf - allocate a channel buffer
* @buf: the buffer struct
* @size: total size of the buffer
*
* Returns a pointer to the resulting buffer, NULL if unsuccessful
*/
static void *relay_alloc_buf(struct rchan_buf *buf, unsigned long size)
{
void *mem;
unsigned int i, j, n_pages;
size = PAGE_ALIGN(size);
n_pages = size >> PAGE_SHIFT;
buf->page_array = kcalloc(n_pages, sizeof(struct page *), GFP_KERNEL);
if (!buf->page_array)
return NULL;
for (i = 0; i < n_pages; i++) {
buf->page_array[i] = alloc_page(GFP_KERNEL);
if (unlikely(!buf->page_array[i]))
goto depopulate;
}
mem = vmap(buf->page_array, n_pages, VM_MAP, PAGE_KERNEL);
if (!mem)
goto depopulate;
memset(mem, 0, size);
buf->page_count = n_pages;
return mem;
depopulate:
for (j = 0; j < i; j++)
__free_page(buf->page_array[j]);
kfree(buf->page_array);
return NULL;
}
/**
* relay_create_buf - allocate and initialize a channel buffer
* @chan: the relay channel
*
* Returns channel buffer if successful, NULL otherwise
*/
struct rchan_buf *relay_create_buf(struct rchan *chan)
{
struct rchan_buf *buf = kcalloc(1, sizeof(struct rchan_buf), GFP_KERNEL);
if (!buf)
return NULL;
buf->padding = kmalloc(chan->n_subbufs * sizeof(size_t *), GFP_KERNEL);
if (!buf->padding)
goto free_buf;
buf->start = relay_alloc_buf(buf, chan->alloc_size);
if (!buf->start)
goto free_buf;
buf->chan = chan;
kref_get(&buf->chan->kref);
return buf;
free_buf:
kfree(buf->padding);
kfree(buf);
return NULL;
}
/**
* relay_destroy_buf - destroy an rchan_buf struct and associated buffer
* @buf: the buffer struct
*/
void relay_destroy_buf(struct rchan_buf *buf)
{
struct rchan *chan = buf->chan;
unsigned int i;
if (likely(buf->start)) {
vunmap(buf->start);
for (i = 0; i < buf->page_count; i++)
__free_page(buf->page_array[i]);
kfree(buf->page_array);
}
kfree(buf->padding);
kfree(buf);
kref_put(&chan->kref, relay_destroy_channel);
}
/**
* relay_remove_buf - remove a channel buffer
*
Removes the file from the relayfs filesystem, which also frees the
* rchan_buf_struct and the channel buffer. Should only be called from
* kref_put().
*/
void relay_remove_buf(struct kref *kref)
{
struct rchan_buf *buf = container_of(kref, struct rchan_buf, kref);
buf->chan->cb->remove_buf_file(buf->dentry);
relay_destroy_buf(buf);
}
#ifndef _BUFFERS_H
#define _BUFFERS_H
/* This inspired by rtai/shmem */
#define FIX_SIZE(x) ((((x) - 1) & PAGE_MASK) + PAGE_SIZE)
extern int relay_mmap_buf(struct rchan_buf *buf, struct vm_area_struct *vma);
extern struct rchan_buf *relay_create_buf(struct rchan *chan);
extern void relay_destroy_buf(struct rchan_buf *buf);
extern void relay_remove_buf(struct kref *kref);
#endif /* _BUFFERS_H */
This diff is collapsed.
#ifndef _RELAY_H
#define _RELAY_H
extern int relayfs_remove(struct dentry *dentry);
extern int relay_buf_empty(struct rchan_buf *buf);
extern void relay_destroy_channel(struct kref *kref);
#endif /* _RELAY_H */
@@ -22,6 +22,7 @@ typedef struct request_queue request_queue_t;
struct elevator_queue;
typedef struct elevator_queue elevator_t;
struct request_pm_state;
struct blk_trace;
#define BLKDEV_MIN_RQ 4
#define BLKDEV_MAX_RQ 128 /* Default maximum */
@@ -416,6 +417,8 @@ struct request_queue
unsigned int sg_reserved_size;
int node;
struct blk_trace *blk_trace;
/*
* reserved for flush operations
*/
...
#ifndef BLKTRACE_H
#define BLKTRACE_H
#include <linux/config.h>
#include <linux/blkdev.h>
#include <linux/relay.h>
/*
* Trace categories
*/
enum blktrace_cat {
BLK_TC_READ = 1 << 0, /* reads */
BLK_TC_WRITE = 1 << 1, /* writes */
BLK_TC_BARRIER = 1 << 2, /* barrier */
BLK_TC_SYNC = 1 << 3, /* sync */
BLK_TC_QUEUE = 1 << 4, /* queueing/merging */
BLK_TC_REQUEUE = 1 << 5, /* requeueing */
BLK_TC_ISSUE = 1 << 6, /* issue */
BLK_TC_COMPLETE = 1 << 7, /* completions */
BLK_TC_FS = 1 << 8, /* fs requests */
BLK_TC_PC = 1 << 9, /* pc requests */
BLK_TC_NOTIFY = 1 << 10, /* special message */
BLK_TC_END = 1 << 15, /* only 16-bits, reminder */
};
#define BLK_TC_SHIFT (16)
#define BLK_TC_ACT(act) ((act) << BLK_TC_SHIFT)
/*
* Basic trace actions
*/
enum blktrace_act {
__BLK_TA_QUEUE = 1, /* queued */
__BLK_TA_BACKMERGE, /* back merged to existing rq */
__BLK_TA_FRONTMERGE, /* front merge to existing rq */
__BLK_TA_GETRQ, /* allocated new request */
__BLK_TA_SLEEPRQ, /* sleeping on rq allocation */
__BLK_TA_REQUEUE, /* request requeued */
__BLK_TA_ISSUE, /* sent to driver */
__BLK_TA_COMPLETE, /* completed by driver */
__BLK_TA_PLUG, /* queue was plugged */
__BLK_TA_UNPLUG_IO, /* queue was unplugged by io */
__BLK_TA_UNPLUG_TIMER, /* queue was unplugged by timer */
__BLK_TA_INSERT, /* insert request */
__BLK_TA_SPLIT, /* bio was split */
__BLK_TA_BOUNCE, /* bio was bounced */
__BLK_TA_REMAP, /* bio was remapped */
};
/*
* Trace actions in full. Additionally, read or write is masked
*/
#define BLK_TA_QUEUE (__BLK_TA_QUEUE | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_BACKMERGE (__BLK_TA_BACKMERGE | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_FRONTMERGE (__BLK_TA_FRONTMERGE | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_GETRQ (__BLK_TA_GETRQ | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_SLEEPRQ (__BLK_TA_SLEEPRQ | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_REQUEUE (__BLK_TA_REQUEUE | BLK_TC_ACT(BLK_TC_REQUEUE))
#define BLK_TA_ISSUE (__BLK_TA_ISSUE | BLK_TC_ACT(BLK_TC_ISSUE))
#define BLK_TA_COMPLETE (__BLK_TA_COMPLETE| BLK_TC_ACT(BLK_TC_COMPLETE))
#define BLK_TA_PLUG (__BLK_TA_PLUG | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_UNPLUG_IO (__BLK_TA_UNPLUG_IO | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_UNPLUG_TIMER (__BLK_TA_UNPLUG_TIMER | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_INSERT (__BLK_TA_INSERT | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_SPLIT (__BLK_TA_SPLIT)
#define BLK_TA_BOUNCE (__BLK_TA_BOUNCE)
#define BLK_TA_REMAP (__BLK_TA_REMAP | BLK_TC_ACT(BLK_TC_QUEUE))
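For illustration, a consumer can split one of these action words back into its parts; the helper below is a hypothetical sketch, not part of this patch. The low BLK_TC_SHIFT bits carry the __BLK_TA_* action, the upper bits the BLK_TC_* category mask (including the read/write direction bits that the trace core folds in):

/* Hypothetical consumer-side helper, for illustration only */
static inline int trace_is_write_completion(u32 action)
{
	u32 act = action & ((1U << BLK_TC_SHIFT) - 1);	/* __BLK_TA_* value */
	u32 cat = action >> BLK_TC_SHIFT;		/* BLK_TC_* mask */

	return act == __BLK_TA_COMPLETE && (cat & BLK_TC_WRITE);
}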
#define BLK_IO_TRACE_MAGIC 0x65617400
#define BLK_IO_TRACE_VERSION 0x07
/*
* The trace itself
*/
struct blk_io_trace {
u32 magic; /* MAGIC << 8 | version */
u32 sequence; /* event number */
u64 time; /* in microseconds */
u64 sector; /* disk offset */
u32 bytes; /* transfer length */
u32 action; /* what happened */
u32 pid; /* who did it */
u32 device; /* device number */
u32 cpu; /* on what cpu did it happen */
u16 error; /* completion error */
u16 pdu_len; /* length of data after this trace */
};
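Every record emitted to the relay buffer starts with this header, followed by pdu_len bytes of payload. As a hedged reader-side sketch (not part of this patch): since the low byte of the magic field carries the format version and the upper bytes the magic constant, a raw record can be validated like so:

/* Hypothetical reader-side check, for illustration only */
static inline int blk_io_trace_valid(const struct blk_io_trace *t)
{
	if ((t->magic & 0xffffff00) != BLK_IO_TRACE_MAGIC)
		return 0;
	return (t->magic & 0xff) == BLK_IO_TRACE_VERSION;
}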
/*
* The remap event
*/
struct blk_io_trace_remap {
u32 device;
u32 __pad;
u64 sector;
};
enum {
Blktrace_setup = 1,
Blktrace_running,
Blktrace_stopped,
};
struct blk_trace {
int trace_state;
struct rchan *rchan;
unsigned long *sequence;
u16 act_mask;
u64 start_lba;
u64 end_lba;
u32 pid;
u32 dev;
struct dentry *dir;
struct dentry *dropped_file;
atomic_t dropped;
};
/*
* User setup structure passed with BLKTRACESETUP
*/
struct blk_user_trace_setup {
char name[BDEVNAME_SIZE]; /* output */
u16 act_mask; /* input */
u32 buf_size; /* input */
u32 buf_nr; /* input */
u64 start_lba;
u64 end_lba;
u32 pid;
};
#if defined(CONFIG_BLK_DEV_IO_TRACE)
extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
extern void blk_trace_shutdown(request_queue_t *);
extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, void *);
/**
* blk_add_trace_rq - Add a trace for a request oriented action
* @q: queue the io is for
* @rq: the source request
* @what: the action
*
* Description:
* Records an action against a request. Will log the request offset + size.
*
**/
static inline void blk_add_trace_rq(struct request_queue *q, struct request *rq,
u32 what)
{
struct blk_trace *bt = q->blk_trace;
int rw = rq->flags & 0x07;
if (likely(!bt))
return;
if (blk_pc_request(rq)) {
what |= BLK_TC_ACT(BLK_TC_PC);
__blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors, sizeof(rq->cmd), rq->cmd);
} else {
what |= BLK_TC_ACT(BLK_TC_FS);
__blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9, rw, what, rq->errors, 0, NULL);
}
}
/**
* blk_add_trace_bio - Add a trace for a bio oriented action
* @q: queue the io is for
* @bio: the source bio
* @what: the action
*
* Description:
* Records an action against a bio. Will log the bio offset + size.
*
**/
static inline void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
u32 what)
{
struct blk_trace *bt = q->blk_trace;
if (likely(!bt))
return;
__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), 0, NULL);
}
/**
* blk_add_trace_generic - Add a trace for a generic action
* @q: queue the io is for
* @bio: the source bio
* @rw: the data direction
* @what: the action
*
* Description:
* Records a simple trace
*
**/
static inline void blk_add_trace_generic(struct request_queue *q,
struct bio *bio, int rw, u32 what)
{
struct blk_trace *bt = q->blk_trace;
if (likely(!bt))
return;
if (bio)
blk_add_trace_bio(q, bio, what);
else
__blk_add_trace(bt, 0, 0, rw, what, 0, 0, NULL);
}
/**
* blk_add_trace_pdu_int - Add a trace for a bio with an integer payload
* @q: queue the io is for
* @what: the action
* @bio: the source bio
* @pdu: the integer payload
*
* Description:
* Adds a trace with some integer payload. This might be an unplug
* option given as the action, with the depth at unplug time given
* as the payload
*
**/
static inline void blk_add_trace_pdu_int(struct request_queue *q, u32 what,
struct bio *bio, unsigned int pdu)
{
struct blk_trace *bt = q->blk_trace;
u64 rpdu = cpu_to_be64(pdu);
if (likely(!bt))
return;
if (bio)
__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), sizeof(rpdu), &rpdu);
else
__blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
}
/**
* blk_add_trace_remap - Add a trace for a remap operation
* @q: queue the io is for
* @bio: the source bio
* @dev: target device
* @from: source sector
* @to: target sector
*
* Description:
* A device mapper or raid target sometimes needs to split a bio because
* it spans a stripe (or similar). Add a trace for that action.
*
**/
static inline void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
dev_t dev, sector_t from, sector_t to)
{
struct blk_trace *bt = q->blk_trace;
struct blk_io_trace_remap r;
if (likely(!bt))
return;
r.device = cpu_to_be32(dev);
r.sector = cpu_to_be64(to);
__blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
}
#else /* !CONFIG_BLK_DEV_IO_TRACE */
#define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY)
#define blk_trace_shutdown(q) do { } while (0)
#define blk_add_trace_rq(q, rq, what) do { } while (0)
#define blk_add_trace_bio(q, rq, what) do { } while (0)
#define blk_add_trace_generic(q, rq, rw, what) do { } while (0)
#define blk_add_trace_pdu_int(q, what, bio, pdu) do { } while (0)
#define blk_add_trace_remap(q, bio, dev, f, t) do {} while (0)
#endif /* CONFIG_BLK_DEV_IO_TRACE */
#endif
@@ -97,6 +97,10 @@ COMPATIBLE_IOCTL(BLKRRPART)
COMPATIBLE_IOCTL(BLKFLSBUF)
COMPATIBLE_IOCTL(BLKSECTSET)
COMPATIBLE_IOCTL(BLKSSZGET)
COMPATIBLE_IOCTL(BLKTRACESTART)
COMPATIBLE_IOCTL(BLKTRACESTOP)
COMPATIBLE_IOCTL(BLKTRACESETUP)
COMPATIBLE_IOCTL(BLKTRACETEARDOWN)
ULONG_IOCTL(BLKRASET)
ULONG_IOCTL(BLKFRASET)
/* RAID */
...
@@ -197,6 +197,10 @@ extern int dir_notify_enable;
#define BLKBSZGET _IOR(0x12,112,size_t)
#define BLKBSZSET _IOW(0x12,113,size_t)
#define BLKGETSIZE64 _IOR(0x12,114,size_t) /* return device size in bytes (u64 *arg) */
#define BLKTRACESETUP _IOWR(0x12,115,struct blk_user_trace_setup)
#define BLKTRACESTART _IO(0x12,116)
#define BLKTRACESTOP _IO(0x12,117)
#define BLKTRACETEARDOWN _IO(0x12,118)
#define BMAP_IOCTL 1 /* obsolete - kept for compatibility */
#define FIBMAP _IO(0x00,1) /* bmap access */
...
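To show how these ioctls fit together, here is a hedged user-space sketch (not part of this patch; error handling elided; the blk_user_trace_setup definition comes from blktrace_api.h above). fd is an open block device; after BLKTRACESETUP succeeds, buts.name holds the base name of the trace files the kernel created:

/* Hypothetical user-space sketch, for illustration only */
#include <string.h>
#include <sys/ioctl.h>

static int start_blktrace(int fd)
{
	struct blk_user_trace_setup buts;

	memset(&buts, 0, sizeof(buts));
	buts.act_mask = 0xffff;		/* trace all categories */
	buts.buf_size = 512 * 1024;	/* relay sub-buffer size */
	buts.buf_nr = 4;		/* number of sub-buffers */

	if (ioctl(fd, BLKTRACESETUP, &buts) < 0)
		return -1;

	return ioctl(fd, BLKTRACESTART);
}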
/*
* linux/include/linux/relay.h
*
* Copyright (C) 2002, 2003 - Tom Zanussi (zanussi@us.ibm.com), IBM Corp
* Copyright (C) 1999, 2000, 2001, 2002 - Karim Yaghmour (karim@opersys.com)
*
* CONFIG_RELAY definitions and declarations
*/
#ifndef _LINUX_RELAY_H
#define _LINUX_RELAY_H
#include <linux/config.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/kref.h>
/* Needs a _much_ better name... */
#define FIX_SIZE(x) ((((x) - 1) & PAGE_MASK) + PAGE_SIZE)
/*
* Tracks changes to rchan/rchan_buf structs
*/
#define RELAYFS_CHANNEL_VERSION 6
/*
* Per-cpu relay channel buffer
*/
struct rchan_buf
{
void *start; /* start of channel buffer */
void *data; /* start of current sub-buffer */
size_t offset; /* current offset into sub-buffer */
size_t subbufs_produced; /* count of sub-buffers produced */
size_t subbufs_consumed; /* count of sub-buffers consumed */
struct rchan *chan; /* associated channel */
wait_queue_head_t read_wait; /* reader wait queue */
struct work_struct wake_readers; /* reader wake-up work struct */
struct dentry *dentry; /* channel file dentry */
struct kref kref; /* channel buffer refcount */
struct page **page_array; /* array of current buffer pages */
unsigned int page_count; /* number of current buffer pages */
unsigned int finalized; /* buffer has been finalized */
size_t *padding; /* padding counts per sub-buffer */
size_t prev_padding; /* temporary variable */
size_t bytes_consumed; /* bytes consumed in cur read subbuf */
unsigned int cpu; /* this buf's cpu */
} ____cacheline_aligned;
/*
* Relay channel data structure
*/
struct rchan
{
u32 version; /* the version of this struct */
size_t subbuf_size; /* sub-buffer size */
size_t n_subbufs; /* number of sub-buffers per buffer */
size_t alloc_size; /* total buffer size allocated */
struct rchan_callbacks *cb; /* client callbacks */
struct kref kref; /* channel refcount */
void *private_data; /* for user-defined data */
size_t last_toobig; /* tried to log event > subbuf size */
struct rchan_buf *buf[NR_CPUS]; /* per-cpu channel buffers */
};
/*
* Relay channel client callbacks
*/
struct rchan_callbacks
{
/*
* subbuf_start - called on buffer-switch to a new sub-buffer
* @buf: the channel buffer containing the new sub-buffer
* @subbuf: the start of the new sub-buffer
* @prev_subbuf: the start of the previous sub-buffer
* @prev_padding: unused space at the end of previous sub-buffer
*
* The client should return 1 to continue logging, 0 to stop
* logging.
*
* NOTE: subbuf_start will also be invoked when the buffer is
* created, so that the first sub-buffer can be initialized
* if necessary. In this case, prev_subbuf will be NULL.
*
* NOTE: the client can reserve bytes at the beginning of the new
* sub-buffer by calling subbuf_start_reserve() in this callback.
*/
int (*subbuf_start) (struct rchan_buf *buf,
void *subbuf,
void *prev_subbuf,
size_t prev_padding);
/*
* buf_mapped - relay buffer mmap notification
* @buf: the channel buffer
* @filp: relay file pointer
*
* Called when a relay file is successfully mmapped
*/
void (*buf_mapped)(struct rchan_buf *buf,
struct file *filp);
/*
* buf_unmapped - relay buffer unmap notification
* @buf: the channel buffer
* @filp: relay file pointer
*
* Called when a relay file is successfully unmapped
*/
void (*buf_unmapped)(struct rchan_buf *buf,
struct file *filp);
/*
* create_buf_file - create file to represent a relay channel buffer
* @filename: the name of the file to create
* @parent: the parent of the file to create
* @mode: the mode of the file to create
* @buf: the channel buffer
* @is_global: outparam - set non-zero if the buffer should be global
*
* Called during relay_open(), once for each per-cpu buffer,
* to allow the client to create a file to be used to
* represent the corresponding channel buffer. If the file is
* created outside of relay, the parent must also exist in
* that filesystem.
*
* The callback should return the dentry of the file created
* to represent the relay buffer.
*
* Setting the is_global outparam to a non-zero value will
* cause relay_open() to create a single global buffer rather
* than the default set of per-cpu buffers.
*
* See Documentation/filesystems/relayfs.txt for more info.
*/
struct dentry *(*create_buf_file)(const char *filename,
struct dentry *parent,
int mode,
struct rchan_buf *buf,
int *is_global);
/*
* remove_buf_file - remove file representing a relay channel buffer
* @dentry: the dentry of the file to remove
*
* Called during relay_close(), once for each per-cpu buffer,
* to allow the client to remove a file used to represent a
* channel buffer.
*
* The callback should return 0 if successful, negative if not.
*/
int (*remove_buf_file)(struct dentry *dentry);
};
/*
* CONFIG_RELAY kernel API, kernel/relay.c
*/
struct rchan *relay_open(const char *base_filename,
struct dentry *parent,
size_t subbuf_size,
size_t n_subbufs,
struct rchan_callbacks *cb);
extern void relay_close(struct rchan *chan);
extern void relay_flush(struct rchan *chan);
extern void relay_subbufs_consumed(struct rchan *chan,
unsigned int cpu,
size_t consumed);
extern void relay_reset(struct rchan *chan);
extern int relay_buf_full(struct rchan_buf *buf);
extern size_t relay_switch_subbuf(struct rchan_buf *buf,
size_t length);
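To make the callback contract concrete, here is a minimal hypothetical client sketch (illustration only; it assumes debugfs, which is how blktrace uses relay, and all names are made up). Only create_buf_file and remove_buf_file are strictly needed, to give the per-cpu buffers files to live in:

/* Hypothetical relay client, for illustration only; assumes
 * CONFIG_DEBUG_FS and #include <linux/debugfs.h>. */
static struct dentry *my_create_buf_file(const char *filename,
					 struct dentry *parent, int mode,
					 struct rchan_buf *buf,
					 int *is_global)
{
	/* expose the buffer through the stock relay file operations */
	return debugfs_create_file(filename, mode, parent, buf,
				   &relay_file_operations);
}

static int my_remove_buf_file(struct dentry *dentry)
{
	debugfs_remove(dentry);
	return 0;
}

static struct rchan_callbacks my_relay_callbacks = {
	.create_buf_file	= my_create_buf_file,
	.remove_buf_file	= my_remove_buf_file,
};

/* four 256KiB sub-buffers per cpu, files in the debugfs root */
static struct rchan *my_open_channel(void)
{
	return relay_open("my-trace", NULL, 256 * 1024, 4,
			  &my_relay_callbacks);
}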
/**
* relay_write - write data into the channel
* @chan: relay channel
* @data: data to be written
* @length: number of bytes to write
*
* Writes data into the current cpu's channel buffer.
*
* Protects the buffer by disabling interrupts. Use this
* if you might be logging from interrupt context. Try
* __relay_write() if you know you won't be logging from
* interrupt context.
*/
static inline void relay_write(struct rchan *chan,
const void *data,
size_t length)
{
unsigned long flags;
struct rchan_buf *buf;
local_irq_save(flags);
buf = chan->buf[smp_processor_id()];
if (unlikely(buf->offset + length > chan->subbuf_size))
length = relay_switch_subbuf(buf, length);
memcpy(buf->data + buf->offset, data, length);
buf->offset += length;
local_irq_restore(flags);
}
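As a hedged usage sketch (hypothetical event type, illustration only), a client logs fixed-size records like this; relay_write() is usable from interrupt context because it disables interrupts around the copy:

/* Hypothetical caller, for illustration only */
struct my_event {
	u64 when;
	u32 what;
};

static void my_log_event(struct rchan *chan, u32 what)
{
	struct my_event e = {
		.when = sched_clock(),	/* any timestamp source */
		.what = what,
	};

	relay_write(chan, &e, sizeof(e));
}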
/**
* __relay_write - write data into the channel
* @chan: relay channel
* @data: data to be written
* @length: number of bytes to write
*
* Writes data into the current cpu's channel buffer.
*
* Protects the buffer by disabling preemption. Use
* relay_write() if you might be logging from interrupt
* context.
*/
static inline void __relay_write(struct rchan *chan,
const void *data,
size_t length)
{
struct rchan_buf *buf;
buf = chan->buf[get_cpu()];
if (unlikely(buf->offset + length > buf->chan->subbuf_size))
length = relay_switch_subbuf(buf, length);
memcpy(buf->data + buf->offset, data, length);
buf->offset += length;
put_cpu();
}
/**
* relay_reserve - reserve slot in channel buffer
* @chan: relay channel
* @length: number of bytes to reserve
*
* Returns pointer to reserved slot, NULL if full.
*
* Reserves a slot in the current cpu's channel buffer.
* Does not protect the buffer at all - caller must provide
* appropriate synchronization.
*/
static inline void *relay_reserve(struct rchan *chan, size_t length)
{
void *reserved;
struct rchan_buf *buf = chan->buf[smp_processor_id()];
if (unlikely(buf->offset + length > buf->chan->subbuf_size)) {
length = relay_switch_subbuf(buf, length);
if (!length)
return NULL;
}
reserved = buf->data + buf->offset;
buf->offset += length;
return reserved;
}
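Reserve-then-fill avoids the memcpy() in relay_write(), at the cost of the caller supplying its own protection. A hedged sketch, reusing the hypothetical my_event type from above and disabling interrupts around the reservation:

/* Hypothetical caller, for illustration only */
static void my_log_event_inplace(struct rchan *chan, u32 what)
{
	struct my_event *e;
	unsigned long flags;

	local_irq_save(flags);
	e = relay_reserve(chan, sizeof(*e));
	if (e) {
		e->when = sched_clock();
		e->what = what;
	}
	local_irq_restore(flags);
}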
/**
* subbuf_start_reserve - reserve bytes at the start of a sub-buffer
* @buf: relay channel buffer
* @length: number of bytes to reserve
*
* Helper function used to reserve bytes at the beginning of
* a sub-buffer in the subbuf_start() callback.
*/
static inline void subbuf_start_reserve(struct rchan_buf *buf,
size_t length)
{
BUG_ON(length >= buf->chan->subbuf_size - 1);
buf->offset = length;
}
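A typical subbuf_start callback pairs this helper with relay_buf_full(); the sketch below (hypothetical, illustration only) stops logging once the buffer fills, much as blktrace's callback does when it counts dropped events, and reserves room for a made-up per-sub-buffer header:

/* Hypothetical subbuf_start implementation, for illustration only */
static int my_subbuf_start(struct rchan_buf *buf, void *subbuf,
			   void *prev_subbuf, size_t prev_padding)
{
	if (relay_buf_full(buf))
		return 0;		/* buffer full: stop logging */

	subbuf_start_reserve(buf, sizeof(unsigned int));
	return 1;			/* keep logging */
}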
/*
* exported relay file operations, kernel/relay.c
*/
extern struct file_operations relay_file_operations;
#endif /* _LINUX_RELAY_H */
@@ -706,6 +706,7 @@ struct task_struct {
prio_array_t *array;
unsigned short ioprio;
unsigned int btrace_seq;
unsigned long sleep_avg;
unsigned long long timestamp, last_ran;
...
@@ -214,6 +214,17 @@ config CPUSETS
Say N if unsure.
config RELAY
bool "Kernel->user space relay support (formerly relayfs)"
help
This option enables support for the relay interface in certain
file systems (such as debugfs).
It is designed to provide an efficient mechanism for tools and
facilities to relay large amounts of data from kernel space to
user space.
If unsure, say N.
source "usr/Kconfig" source "usr/Kconfig"
config UID16 config UID16
......
@@ -34,6 +34,7 @@ obj-$(CONFIG_DETECT_SOFTLOCKUP) += softlockup.o
obj-$(CONFIG_GENERIC_HARDIRQS) += irq/
obj-$(CONFIG_SECCOMP) += seccomp.o
obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
obj-$(CONFIG_RELAY) += relay.o
ifneq ($(CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER),y)
# According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
...
@@ -181,6 +181,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
/* One for us, one for whoever does the "release_task()" (usually parent) */
atomic_set(&tsk->usage,2);
atomic_set(&tsk->fs_excl, 0);
tsk->btrace_seq = 0;
return tsk;
}
...
This diff is collapsed.
@@ -26,6 +26,7 @@
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <linux/blktrace_api.h>
#include <asm/tlbflush.h>
static mempool_t *page_pool, *isa_page_pool;
@@ -483,6 +484,8 @@ void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
pool = isa_page_pool;
}
blk_add_trace_bio(q, *bio_orig, BLK_TA_BOUNCE);
/*
* slow path
*/
...