Commit 5e115975 authored by Linus Torvalds

Merge bk://kernel.bkbits.net/davem/net-2.5

into home.transmeta.com:/home/torvalds/v2.5/linux
parents 84d58710 98e6f109
@@ -980,7 +980,7 @@ marvel_agp_configure(alpha_agp_info *agp)
 }
 
 static int
-marvel_agp_bind_memory(alpha_agp_info *agp, off_t pg_start, agp_memory *mem)
+marvel_agp_bind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem)
 {
 	struct marvel_agp_aperture *aper = agp->aperture.sysdata;
 	return iommu_bind(aper->arena, aper->pg_start + pg_start,
@@ -988,7 +988,7 @@ marvel_agp_bind_memory(alpha_agp_info *agp, off_t pg_start, agp_memory *mem)
 }
 
 static int
-marvel_agp_unbind_memory(alpha_agp_info *agp, off_t pg_start, agp_memory *mem)
+marvel_agp_unbind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem)
 {
 	struct marvel_agp_aperture *aper = agp->aperture.sysdata;
 	return iommu_unbind(aper->arena, aper->pg_start + pg_start,
......
@@ -679,7 +679,7 @@ titan_agp_configure(alpha_agp_info *agp)
 }
 
 static int
-titan_agp_bind_memory(alpha_agp_info *agp, off_t pg_start, agp_memory *mem)
+titan_agp_bind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem)
 {
 	struct titan_agp_aperture *aper = agp->aperture.sysdata;
 	return iommu_bind(aper->arena, aper->pg_start + pg_start,
@@ -687,7 +687,7 @@ titan_agp_bind_memory(alpha_agp_info *agp, off_t pg_start, agp_memory *mem)
 }
 
 static int
-titan_agp_unbind_memory(alpha_agp_info *agp, off_t pg_start, agp_memory *mem)
+titan_agp_unbind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem)
 {
 	struct titan_agp_aperture *aper = agp->aperture.sysdata;
 	return iommu_unbind(aper->arena, aper->pg_start + pg_start,
......
@@ -433,6 +433,15 @@ sys_call_table:
 	.quad sys_set_tid_address
 	.quad sys_restart_syscall
 	.quad sys_fadvise64
+	.quad sys_timer_create
+	.quad sys_timer_settime		/* 415 */
+	.quad sys_timer_gettime
+	.quad sys_timer_getoverrun
+	.quad sys_timer_delete
+	.quad sys_clock_settime
+	.quad sys_clock_gettime		/* 420 */
+	.quad sys_clock_getres
+	.quad sys_clock_nanosleep
 
 	.size sys_call_table, . - sys_call_table
 	.type sys_call_table, @object
......
@@ -1961,7 +1961,7 @@ static void do_cciss_request(request_queue_t *q)
 		goto queue;
 startio:
-	__blk_stop_queue(q);
+	blk_stop_queue(q);
 	start_io(h);
 }
@@ -2021,8 +2021,8 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
 	/*
 	 * See if we can queue up some more IO
 	 */
-	spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
 	blk_start_queue(&h->queue);
+	spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
 	return IRQ_HANDLED;
 }
 /*
......
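With __blk_stop_queue() folded into blk_stop_queue(), both blk_stop_queue() and blk_start_queue() now expect the caller to already hold the queue lock, which is why the cciss interrupt handler above restarts the queue before dropping CCISS_LOCK rather than after. A minimal sketch of the same pattern in a hypothetical driver (only the blk_start_queue() call comes from this patch; the mydev names are invented, and h->lock is assumed to be the lock the queue was initialized with):

	static irqreturn_t mydev_intr(int irq, void *dev_id, struct pt_regs *regs)
	{
		struct mydev *h = dev_id;
		unsigned long flags;

		spin_lock_irqsave(&h->lock, flags);
		mydev_finish_completed_commands(h);	/* hypothetical helper */
		/*
		 * Restart the queue while the protecting lock is still held;
		 * blk_start_queue() no longer takes the queue lock itself.
		 */
		blk_start_queue(h->queue);
		spin_unlock_irqrestore(&h->lock, flags);
		return IRQ_HANDLED;
	}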
@@ -391,12 +391,6 @@ void blk_queue_dma_alignment(request_queue_t *q, int mask)
 	q->dma_alignment = mask;
 }
 
-void blk_queue_assign_lock(request_queue_t *q, spinlock_t *lock)
-{
-	spin_lock_init(lock);
-	q->queue_lock = lock;
-}
-
 /**
  * blk_queue_find_tag - find a request by its tag and queue
  *
@@ -1076,30 +1070,12 @@ static void blk_unplug_timeout(unsigned long data)
  * blk_start_queue() will clear the stop flag on the queue, and call
  * the request_fn for the queue if it was in a stopped state when
  * entered. Also see blk_stop_queue(). Must not be called from driver
- * request function due to recursion issues.
+ * request function due to recursion issues. Queue lock must be held.
  **/
 void blk_start_queue(request_queue_t *q)
 {
-	if (test_and_clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags)) {
-		unsigned long flags;
-
-		spin_lock_irqsave(q->queue_lock, flags);
-		if (!elv_queue_empty(q))
-			q->request_fn(q);
-		spin_unlock_irqrestore(q->queue_lock, flags);
-	}
-}
-
-/**
- * __blk_stop_queue: see blk_stop_queue()
- *
- * Description:
- *   Like blk_stop_queue(), but queue_lock must be held
- **/
-void __blk_stop_queue(request_queue_t *q)
-{
-	blk_remove_plug(q);
-	set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
+	if (test_and_clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags))
+		schedule_work(&q->unplug_work);
 }
 
 /**
@@ -1114,15 +1090,12 @@ void __blk_stop_queue(request_queue_t *q)
  * or if it simply chooses not to queue more I/O at one point, it can
  * call this function to prevent the request_fn from being called until
  * the driver has signalled it's ready to go again. This happens by calling
- * blk_start_queue() to restart queue operations.
+ * blk_start_queue() to restart queue operations. Queue lock must be held.
  **/
 void blk_stop_queue(request_queue_t *q)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(q->queue_lock, flags);
-	__blk_stop_queue(q);
-	spin_unlock_irqrestore(q->queue_lock, flags);
+	blk_remove_plug(q);
+	set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
 }
 
 /**
@@ -2364,7 +2337,6 @@ EXPORT_SYMBOL(blk_rq_map_sg);
 EXPORT_SYMBOL(blk_nohighio);
 EXPORT_SYMBOL(blk_dump_rq_flags);
 EXPORT_SYMBOL(submit_bio);
-EXPORT_SYMBOL(blk_queue_assign_lock);
 EXPORT_SYMBOL(blk_phys_contig_segment);
 EXPORT_SYMBOL(blk_hw_contig_segment);
 EXPORT_SYMBOL(blk_get_request);
@@ -2383,7 +2355,6 @@ EXPORT_SYMBOL(blk_queue_invalidate_tags);
 EXPORT_SYMBOL(blk_start_queue);
 EXPORT_SYMBOL(blk_stop_queue);
-EXPORT_SYMBOL(__blk_stop_queue);
 EXPORT_SYMBOL(blk_run_queue);
 EXPORT_SYMBOL(blk_run_queues);
......
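Killing blk_queue_assign_lock() means a driver can no longer hand the block layer a lock after the queue is already live: the driver owns its spinlock and supplies it once, at queue-init time. A rough sketch of the replacement pattern, assuming the contemporaneous request_queue_t *blk_init_queue(request_fn_proc *, spinlock_t *) form (the mydev names are hypothetical):

	static spinlock_t mydev_lock = SPIN_LOCK_UNLOCKED;
	static request_queue_t *mydev_queue;

	static void mydev_request_fn(request_queue_t *q)
	{
		/* dequeue and dispatch requests; queue lock is held on entry */
	}

	static int __init mydev_init(void)
	{
		/*
		 * The driver's lock becomes q->queue_lock here and is never
		 * reassigned later, so blk_stop_queue()/blk_start_queue()
		 * callers know exactly which lock to hold.
		 */
		mydev_queue = blk_init_queue(mydev_request_fn, &mydev_lock);
		if (!mydev_queue)
			return -ENOMEM;
		return 0;
	}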
@@ -33,8 +33,8 @@ struct alpha_agp_ops {
 	int (*setup)(alpha_agp_info *);
 	void (*cleanup)(alpha_agp_info *);
 	int (*configure)(alpha_agp_info *);
-	int (*bind)(alpha_agp_info *, off_t, agp_memory *);
-	int (*unbind)(alpha_agp_info *, off_t, agp_memory *);
+	int (*bind)(alpha_agp_info *, off_t, struct agp_memory *);
+	int (*unbind)(alpha_agp_info *, off_t, struct agp_memory *);
 	unsigned long (*translate)(alpha_agp_info *, dma_addr_t);
 };
......
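This header hunk is what forces the marvel and titan changes above: the bind/unbind hooks now spell out struct agp_memory rather than the bare agp_memory typedef. A hypothetical backend conforming to the new signatures (the example_* names are invented; the iommu_bind() tail arguments follow the marvel/titan pattern, which this diff truncates):

	static int
	example_agp_bind(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem)
	{
		struct example_aperture *aper = agp->aperture.sysdata;
		/* translate the AGP page range into the backend's IOMMU arena */
		return iommu_bind(aper->arena, aper->pg_start + pg_start,
				  mem->page_count, mem->memory);
	}

	static struct alpha_agp_ops example_agp_ops = {
		.bind	= example_agp_bind,
		/* .setup, .cleanup, .configure, .unbind, .translate omitted */
	};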
@@ -349,7 +349,16 @@
 #define __NR_set_tid_address	411
 #define __NR_restart_syscall	412
 #define __NR_fadvise64		413
-#define NR_SYSCALLS		414
+#define __NR_timer_create	414
+#define __NR_timer_settime	415
+#define __NR_timer_gettime	416
+#define __NR_timer_getoverrun	417
+#define __NR_timer_delete	418
+#define __NR_clock_settime	419
+#define __NR_clock_gettime	420
+#define __NR_clock_getres	421
+#define __NR_clock_nanosleep	422
+#define NR_SYSCALLS		423
 
 #if defined(__GNUC__)
......
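These __NR_ values must stay in lockstep with the sys_call_table hunk earlier in this commit: slot 414 in the table is sys_timer_create and __NR_timer_create is 414, and so on through slot 422 for sys_clock_nanosleep. A quick userspace smoke test of one of the new entries, assuming headers generated from this tree are installed:

	#include <stdio.h>
	#include <time.h>
	#include <unistd.h>
	#include <sys/syscall.h>	/* __NR_clock_gettime = 420 on alpha per this patch */

	int main(void)
	{
		struct timespec ts;

		/* invoke the new table entry directly; CLOCK_REALTIME is 0 */
		if (syscall(__NR_clock_gettime, CLOCK_REALTIME, &ts) != 0) {
			perror("clock_gettime");
			return 1;
		}
		printf("%ld.%09ld\n", (long) ts.tv_sec, ts.tv_nsec);
		return 0;
	}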
@@ -431,7 +431,6 @@ extern void blk_queue_max_hw_segments(request_queue_t *, unsigned short);
 extern void blk_queue_max_segment_size(request_queue_t *, unsigned int);
 extern void blk_queue_hardsect_size(request_queue_t *, unsigned short);
 extern void blk_queue_segment_boundary(request_queue_t *, unsigned long);
-extern void blk_queue_assign_lock(request_queue_t *, spinlock_t *);
 extern void blk_queue_prep_rq(request_queue_t *, prep_rq_fn *pfn);
 extern void blk_queue_merge_bvec(request_queue_t *, merge_bvec_fn *);
 extern void blk_queue_dma_alignment(request_queue_t *, int);
......