Commit 6805de5d authored by Linus Torvalds

v2.4.0.8 -> v2.4.0.9

  - cpq array driver elevator fixes
  - merge radeon driver from X CVS tree
  - isapnp cleanups
  - emu10k1 unlock on error fixes
  - hpfs doesn't allow truncate to a larger size
parent 9910fd91
@@ -2353,10 +2353,11 @@ E: gt8134b@prism.gatech.edu
D: Dosemu
N: Hannu Savolainen
E: hannu@voxware.pp.fi
D: Kernel sound drivers
S: Hiekkalaiturintie 3 A 8
S: 00980 Helsinki
E: hannu@opensound.com
D: Maintainer of the sound drivers until 2.1.x days.
D: Original compressed boot image support.
S: Valurink. 4A11
S: 03600 Karkkila
S: Finland
N: Eric Schenk
......
VERSION = 2
PATCHLEVEL = 4
SUBLEVEL = 1
EXTRAVERSION =-pre8
EXTRAVERSION =-pre9
KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
......
@@ -511,11 +511,18 @@ CONFIG_PSMOUSE=y
# Ftape, the floppy tape device driver
#
# CONFIG_FTAPE is not set
# CONFIG_AGP is not set
CONFIG_AGP=y
CONFIG_AGP_INTEL=y
CONFIG_AGP_I810=y
CONFIG_AGP_VIA=y
CONFIG_AGP_AMD=y
CONFIG_AGP_SIS=y
CONFIG_AGP_ALI=y
CONFIG_DRM=y
CONFIG_DRM_TDFX=y
# CONFIG_DRM_GAMMA is not set
# CONFIG_DRM_R128 is not set
CONFIG_DRM_RADEON=y
# CONFIG_DRM_I810 is not set
# CONFIG_DRM_MGA is not set
CONFIG_PCMCIA_SERIAL=y
......
@@ -140,23 +140,7 @@ static int ida_release(struct inode *inode, struct file *filep);
static int ida_ioctl(struct inode *inode, struct file *filep, unsigned int cmd, unsigned long arg);
static int ida_ctlr_ioctl(int ctlr, int dsk, ida_ioctl_t *io);
static void do_ida_request(int i);
/*
* This is a hack. This driver eats a major number for each controller, and
* sets blkdev[xxx].request_fn to each one of these so the real request
* function knows what controller it's working with.
*/
#define DO_IDA_REQUEST(x) { do_ida_request(x); }
static void do_ida_request0(request_queue_t * q) DO_IDA_REQUEST(0);
static void do_ida_request1(request_queue_t * q) DO_IDA_REQUEST(1);
static void do_ida_request2(request_queue_t * q) DO_IDA_REQUEST(2);
static void do_ida_request3(request_queue_t * q) DO_IDA_REQUEST(3);
static void do_ida_request4(request_queue_t * q) DO_IDA_REQUEST(4);
static void do_ida_request5(request_queue_t * q) DO_IDA_REQUEST(5);
static void do_ida_request6(request_queue_t * q) DO_IDA_REQUEST(6);
static void do_ida_request7(request_queue_t * q) DO_IDA_REQUEST(7);
static void do_ida_request(request_queue_t *q);
static void start_io(ctlr_info_t *h);
static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c);
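The hunk above is the heart of the cpqarray elevator change: eight per-controller request stubs collapse into a single do_ida_request(), which recovers its controller from the queue instead of a baked-in index. A minimal sketch of the before/after shape that the following hunks implement, using this driver's names (illustrative fragments, not a compilable unit):

	/* Before: one stub per controller, so each request_fn encodes its index. */
	#define DO_IDA_REQUEST(x) { do_ida_request(x); }
	static void do_ida_request0(request_queue_t *q) DO_IDA_REQUEST(0);

	/* After: one request_fn for all controllers; the controller is attached
	 * to the queue once at init time and recovered on every request. */
	q->queuedata = hba[i];			/* in cpqarray_init() */
	blk_init_queue(q, do_ida_request);

	static void do_ida_request(request_queue_t *q)
	{
		ctlr_info_t *h = q->queuedata;
		/* ... */
	}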
@@ -362,6 +346,50 @@ void cleanup_module(void)
}
#endif /* MODULE */
static inline int cpq_new_segment(request_queue_t *q, struct request *rq,
int max_segments)
{
if (rq->nr_segments < SG_MAX) {
rq->nr_segments++;
return 1;
}
return 0;
}
static int cpq_back_merge_fn(request_queue_t *q, struct request *rq,
struct buffer_head *bh, int max_segments)
{
if (rq->bhtail->b_data + rq->bhtail->b_size == bh->b_data)
return 1;
return cpq_new_segment(q, rq, max_segments);
}
static int cpq_front_merge_fn(request_queue_t *q, struct request *rq,
struct buffer_head *bh, int max_segments)
{
if (bh->b_data + bh->b_size == rq->bh->b_data)
return 1;
return cpq_new_segment(q, rq, max_segments);
}
static int cpq_merge_requests_fn(request_queue_t *q, struct request *rq,
struct request *nxt, int max_segments)
{
int total_segments = rq->nr_segments + nxt->nr_segments;
int same_segment = 0;
if (rq->bhtail->b_data + rq->bhtail->b_size == nxt->bh->b_data) {
total_segments--;
same_segment = 1;
}
if (total_segments > SG_MAX)
return 0;
rq->nr_segments = total_segments;
return 1;
}
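These three hooks are what lets the driver control merging: the 2.4 elevator consults them before extending or coalescing a request, so nr_segments can never exceed SG_MAX by the time do_ida_request() dequeues the request. A simplified sketch of the caller side (modeled on this era's ll_rw_blk, with field names from the 2.4 buffer_head; details elided):

	/* Back-merge attempt: append bh to req only if the driver agrees. */
	if (req->rq_dev == bh->b_rdev &&
	    req->sector + req->nr_sectors == bh->b_rsector &&
	    q->back_merge_fn(q, req, bh, max_segments)) {
		req->bhtail->b_reqnext = bh;
		req->bhtail = bh;
		req->nr_sectors += bh->b_size >> 9;
	}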
/*
* This is it. Find all the controllers and register them. I really hate
* stealing all these major device numbers.
@@ -369,12 +397,7 @@ void cleanup_module(void)
*/
int __init cpqarray_init(void)
{
void (*request_fns[MAX_CTLR])(request_queue_t *) = {
do_ida_request0, do_ida_request1,
do_ida_request2, do_ida_request3,
do_ida_request4, do_ida_request5,
do_ida_request6, do_ida_request7,
};
request_queue_t *q;
int i,j;
int num_cntlrs_reg = 0;
@@ -495,16 +518,20 @@ int __init cpqarray_init(void)
hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
ida_procinit(i);
blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR + i),
request_fns[i]);
blk_queue_headactive(BLK_DEFAULT_QUEUE(MAJOR_NR + i), 0);
q = BLK_DEFAULT_QUEUE(MAJOR_NR + i);
q->queuedata = hba[i];
blk_init_queue(q, do_ida_request);
blk_queue_headactive(q, 0);
blksize_size[MAJOR_NR+i] = ida_blocksizes + (i*256);
hardsect_size[MAJOR_NR+i] = ida_hardsizes + (i*256);
read_ahead[MAJOR_NR+i] = READ_AHEAD;
q->back_merge_fn = cpq_back_merge_fn;
q->front_merge_fn = cpq_front_merge_fn;
q->merge_requests_fn = cpq_merge_requests_fn;
ida_gendisk[i].major = MAJOR_NR + i;
ida_gendisk[i].major_name = "ida";
ida_gendisk[i].minor_shift = NWD_SHIFT;
@@ -872,37 +899,34 @@ static inline cmdlist_t *removeQ(cmdlist_t **Qptr, cmdlist_t *c)
* are in here (either via the do_ida_request function or by being
* called from the interrupt handler)
*/
static void do_ida_request(int ctlr)
static void do_ida_request(request_queue_t *q)
{
ctlr_info_t *h = hba[ctlr];
ctlr_info_t *h = q->queuedata;
cmdlist_t *c;
int seg, sect;
char *lastdataend;
struct list_head * queue_head;
struct list_head * queue_head = &q->queue_head;
struct buffer_head *bh;
struct request *creq;
queue_head = &blk_dev[MAJOR_NR+ctlr].request_queue.queue_head;
if (!q)
BUG();
if (!h)
BUG();
if (list_empty(queue_head))
{
start_io(h);
if (q->plugged || list_empty(queue_head))
return;
}
creq = blkdev_entry_next_request(queue_head);
if (creq->rq_status == RQ_INACTIVE)
{
start_io(h);
return;
}
if (creq->rq_status != RQ_ACTIVE)
BUG();
if (creq->nr_segments > SG_MAX)
BUG();
if (ctlr != MAJOR(creq->rq_dev)-MAJOR_NR ||
ctlr > nr_ctlr || h == NULL)
if (h->ctlr != MAJOR(creq->rq_dev)-MAJOR_NR || h->ctlr > nr_ctlr)
{
printk(KERN_WARNING "doreq cmd for %d, %x at %p\n",
ctlr, creq->rq_dev, creq);
h->ctlr, creq->rq_dev, creq);
complete_buffers(creq->bh, 0);
start_io(h);
return;
@@ -916,12 +940,12 @@ static void do_ida_request(int ctlr)
bh = creq->bh;
c->ctlr = ctlr;
c->ctlr = h->ctlr;
c->hdr.unit = MINOR(creq->rq_dev) >> NWD_SHIFT;
c->hdr.size = sizeof(rblk_t) >> 2;
c->size += sizeof(rblk_t);
c->req.hdr.blk = ida[(ctlr<<CTLR_SHIFT) + MINOR(creq->rq_dev)].start_sect + creq->sector;
c->req.hdr.blk = ida[(h->ctlr<<CTLR_SHIFT) + MINOR(creq->rq_dev)].start_sect + creq->sector;
c->bh = bh;
DBGPX(
if (bh == NULL)
@@ -933,12 +957,6 @@ DBGPX(
sect = 0;
while(bh) {
sect += bh->b_size/512;
DBGPX(
if (bh->b_size % 512) {
printk("Oh damn. %d+%d, size = %d\n", creq->sector, sect, bh->b_size);
panic("b_size %% 512 != 0");
}
);
if (bh->b_data == lastdataend) {
c->req.sg[seg-1].size += bh->b_size;
lastdataend += bh->b_size;
@@ -955,30 +973,24 @@ DBGPX( printk("Submitting %d sectors in %d segments\n", sect, seg); );
c->req.hdr.sg_cnt = seg;
c->req.hdr.blk_cnt = sect;
creq->sector += sect;
creq->nr_sectors -= sect;
/* Ready the next request:
* Fix up creq if we still have more buffers in the buffer chain, or
* mark this request as done and ready the next one.
/*
* Since we control our own merging, we know that this request
* is now fully setup and there's nothing left.
*/
if (creq->nr_sectors) {
DBGPX(
if (bh==NULL) {
printk("sector=%d, nr_sectors=%d, sect=%d, seg=%d\n",
creq->sector, creq->nr_sectors, sect, seg);
panic("mother...");
}
);
creq->bh = bh->b_reqnext;
bh->b_reqnext = NULL;
DBGPX( printk("More to do on same request %p\n", creq); );
} else {
DBGPX( printk("Done with %p\n", creq); );
blkdev_dequeue_request(creq);
end_that_request_last(creq);
if (creq->nr_sectors != sect) {
printk("ida: %ld sectors remain\n", creq->nr_sectors);
BUG();
}
blkdev_dequeue_request(creq);
/*
* ehh, we can't really end the request here since it's not
* even started yet. for now it shouldn't hurt though
*/
DBGPX( printk("Done with %p\n", creq); );
end_that_request_last(creq);
c->req.hdr.cmd = (creq->cmd == READ) ? IDA_READ : IDA_WRITE;
c->type = CMD_RWREQ;
@@ -1072,7 +1084,6 @@ static void do_ida_intr(int irq, void *dev_id, struct pt_regs *regs)
unsigned long flags;
__u32 a,a1;
istat = h->access.intr_pending(h);
/* Is this interrupt for us? */
if (istat == 0)
@@ -1116,7 +1127,7 @@ static void do_ida_intr(int irq, void *dev_id, struct pt_regs *regs)
/*
* See if we can queue up some more IO
*/
do_ida_request(h->ctlr);
do_ida_request(BLK_DEFAULT_QUEUE(MAJOR_NR + h->ctlr));
spin_unlock_irqrestore(&io_request_lock, flags);
}
......
@@ -597,20 +597,9 @@ static inline void add_request(request_queue_t * q, struct request * req,
*/
list_add(&req->queue, insert_here);
/*
* FIXME(eric) I don't understand why there is a need for this
* special case code. It clearly doesn't fit any more with
* the new queueing architecture, and it got added in 2.3.10.
* I am leaving this in here until I hear back from the COMPAQ
* people.
*/
major = MAJOR(req->rq_dev);
if (major >= COMPAQ_SMART2_MAJOR+0 && major <= COMPAQ_SMART2_MAJOR+7)
(q->request_fn)(q);
if (major >= COMPAQ_CISS_MAJOR+0 && major <= COMPAQ_CISS_MAJOR+7)
(q->request_fn)(q);
if (major >= DAC960_MAJOR+0 && major <= DAC960_MAJOR+7)
(q->request_fn)(q);
q->request_fn(q);
}
void inline blk_refill_freelist(request_queue_t *q, int rw)
......
@@ -10,6 +10,7 @@ if [ "$CONFIG_DRM" != "n" ]; then
tristate ' 3dfx Banshee/Voodoo3+' CONFIG_DRM_TDFX
tristate ' 3dlabs GMX 2000' CONFIG_DRM_GAMMA
dep_tristate ' ATI Rage 128' CONFIG_DRM_R128 $CONFIG_AGP
dep_tristate ' ATI Radeon' CONFIG_DRM_RADEON $CONFIG_AGP
dep_tristate ' Intel I810' CONFIG_DRM_I810 $CONFIG_AGP
dep_tristate ' Matrox g200/g400' CONFIG_DRM_MGA $CONFIG_AGP
fi
@@ -42,16 +42,18 @@ else
endif
endif
gamma-objs := gamma_drv.o gamma_dma.o
tdfx-objs := tdfx_drv.o tdfx_context.o
r128-objs := r128_drv.o r128_cce.o r128_context.o r128_bufs.o r128_state.o
ffb-objs := ffb_drv.o ffb_context.o
mga-objs := mga_drv.o mga_dma.o mga_context.o mga_bufs.o mga_state.o
i810-objs := i810_drv.o i810_dma.o i810_context.o i810_bufs.o
gamma-objs := gamma_drv.o gamma_dma.o
tdfx-objs := tdfx_drv.o tdfx_context.o
r128-objs := r128_drv.o r128_cce.o r128_context.o r128_bufs.o r128_state.o
ffb-objs := ffb_drv.o ffb_context.o
mga-objs := mga_drv.o mga_dma.o mga_context.o mga_bufs.o mga_state.o
i810-objs := i810_drv.o i810_dma.o i810_context.o i810_bufs.o
radeon-objs := radeon_drv.o radeon_cp.o radeon_context.o radeon_bufs.o radeon_state.o
obj-$(CONFIG_DRM_GAMMA) += gamma.o
obj-$(CONFIG_DRM_TDFX) += tdfx.o
obj-$(CONFIG_DRM_R128) += r128.o
obj-$(CONFIG_DRM_RADEON)+= radeon.o
obj-$(CONFIG_DRM_FFB) += ffb.o
obj-$(CONFIG_DRM_MGA) += mga.o
obj-$(CONFIG_DRM_I810) += i810.o
@@ -96,5 +98,8 @@ i810.o: $(i810-objs) $(lib)
r128.o: $(r128-objs) $(lib)
$(LD) -r -o $@ $(r128-objs) $(lib)
radeon.o: $(radeon-objs) $(lib)
$(LD) -r -o $@ $(radeon-objs) $(lib)
ffb.o: $(ffb-objs) $(lib)
$(LD) -r -o $@ $(ffb-objs) $(lib)
......
/* radeon_bufs.c -- IOCTLs to manage buffers -*- linux-c -*-
*
* Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Fremont, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors: Kevin E. Martin <martin@valinux.com>
* Rickard E. (Rik) Faith <faith@valinux.com>
* Jeff Hartmann <jhartmann@valinux.com>
*
*/
#define __NO_VERSION__
#include <linux/config.h>
#include "drmP.h"
#include "radeon_drv.h"
#include "linux/un.h"
#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
int radeon_addbufs_agp(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_device_dma_t *dma = dev->dma;
drm_buf_desc_t request;
drm_buf_entry_t *entry;
drm_buf_t *buf;
unsigned long offset;
unsigned long agp_offset;
int count;
int order;
int size;
int alignment;
int page_order;
int total;
int byte_count;
int i;
if (!dma) return -EINVAL;
if (copy_from_user(&request, (drm_buf_desc_t *)arg, sizeof(request)))
return -EFAULT;
count = request.count;
order = drm_order(request.size);
size = 1 << order;
alignment = (request.flags & _DRM_PAGE_ALIGN) ? PAGE_ALIGN(size):size;
page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
total = PAGE_SIZE << page_order;
byte_count = 0;
agp_offset = dev->agp->base + request.agp_start;
DRM_DEBUG("count: %d\n", count);
DRM_DEBUG("order: %d\n", order);
DRM_DEBUG("size: %d\n", size);
DRM_DEBUG("agp_offset: %ld\n", agp_offset);
DRM_DEBUG("alignment: %d\n", alignment);
DRM_DEBUG("page_order: %d\n", page_order);
DRM_DEBUG("total: %d\n", total);
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL;
if (dev->queue_count) return -EBUSY; /* Not while in use */
spin_lock(&dev->count_lock);
if (dev->buf_use) {
spin_unlock(&dev->count_lock);
return -EBUSY;
}
atomic_inc(&dev->buf_alloc);
spin_unlock(&dev->count_lock);
down(&dev->struct_sem);
entry = &dma->bufs[order];
if (entry->buf_count) {
up(&dev->struct_sem);
atomic_dec(&dev->buf_alloc);
return -ENOMEM; /* May only call once for each order */
}
entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
DRM_MEM_BUFS);
if (!entry->buflist) {
up(&dev->struct_sem);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
memset(entry->buflist, 0, count * sizeof(*entry->buflist));
entry->buf_size = size;
entry->page_order = page_order;
offset = 0;
for (offset = 0;
entry->buf_count < count;
offset += alignment, ++entry->buf_count) {
buf = &entry->buflist[entry->buf_count];
buf->idx = dma->buf_count + entry->buf_count;
buf->total = alignment;
buf->order = order;
buf->used = 0;
buf->offset = (dma->byte_count + offset);
buf->address = (void *)(agp_offset + offset);
buf->next = NULL;
buf->waiting = 0;
buf->pending = 0;
init_waitqueue_head(&buf->dma_wait);
buf->pid = 0;
buf->dev_priv_size = sizeof(drm_radeon_buf_priv_t);
buf->dev_private = drm_alloc(sizeof(drm_radeon_buf_priv_t),
DRM_MEM_BUFS);
memset(buf->dev_private, 0, buf->dev_priv_size);
#if DRM_DMA_HISTOGRAM
buf->time_queued = 0;
buf->time_dispatched = 0;
buf->time_completed = 0;
buf->time_freed = 0;
#endif
byte_count += PAGE_SIZE << page_order;
DRM_DEBUG("buffer %d @ %p\n",
entry->buf_count, buf->address);
}
DRM_DEBUG("byte_count: %d\n", byte_count);
dma->buflist = drm_realloc(dma->buflist,
dma->buf_count * sizeof(*dma->buflist),
(dma->buf_count + entry->buf_count)
* sizeof(*dma->buflist),
DRM_MEM_BUFS);
for (i = dma->buf_count; i < dma->buf_count + entry->buf_count; i++)
dma->buflist[i] = &entry->buflist[i - dma->buf_count];
dma->buf_count += entry->buf_count;
dma->byte_count += byte_count;
drm_freelist_create(&entry->freelist, entry->buf_count);
for (i = 0; i < entry->buf_count; i++) {
drm_freelist_put(dev, &entry->freelist, &entry->buflist[i]);
}
up(&dev->struct_sem);
request.count = entry->buf_count;
request.size = size;
if (copy_to_user((drm_buf_desc_t *)arg, &request, sizeof(request)))
return -EFAULT;
dma->flags = _DRM_DMA_USE_AGP;
atomic_dec(&dev->buf_alloc);
return 0;
}
#endif
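For context, a sketch of how the client side (normally the X server) reaches this function; the ioctl is the generic DRM buffer call from this era's drm.h, and the count and AGP offset are placeholders:

	drm_buf_desc_t req;

	memset(&req, 0, sizeof(req));
	req.count     = 32;			/* placeholder */
	req.size      = RADEON_BUFFER_SIZE;	/* 16k vertex/indirect buffers */
	req.flags     = _DRM_AGP_BUFFER;	/* routes to radeon_addbufs_agp() */
	req.agp_start = 0;			/* placeholder aperture offset */
	if (ioctl(fd, DRM_IOCTL_ADD_BUFS, &req) < 0)
		perror("DRM_IOCTL_ADD_BUFS");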
int radeon_addbufs(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_buf_desc_t request;
if (!dev_priv || dev_priv->is_pci) return -EINVAL;
if (copy_from_user(&request, (drm_buf_desc_t *)arg, sizeof(request)))
return -EFAULT;
#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
if (request.flags & _DRM_AGP_BUFFER)
return radeon_addbufs_agp(inode, filp, cmd, arg);
else
#endif
return -EINVAL;
}
int radeon_mapbufs(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_device_dma_t *dma = dev->dma;
int retcode = 0;
const int zero = 0;
unsigned long virtual;
unsigned long address;
drm_buf_map_t request;
int i;
if (!dma || !dev_priv || dev_priv->is_pci) return -EINVAL;
DRM_DEBUG("\n");
spin_lock(&dev->count_lock);
if (atomic_read(&dev->buf_alloc)) {
spin_unlock(&dev->count_lock);
return -EBUSY;
}
++dev->buf_use; /* Can't allocate more after this call */
spin_unlock(&dev->count_lock);
if (copy_from_user(&request, (drm_buf_map_t *)arg, sizeof(request)))
return -EFAULT;
if (request.count >= dma->buf_count) {
if (dma->flags & _DRM_DMA_USE_AGP) {
drm_map_t *map;
map = dev_priv->buffers;
if (!map) {
retcode = -EINVAL;
goto done;
}
down(&current->mm->mmap_sem);
virtual = do_mmap(filp, 0, map->size,
PROT_READ|PROT_WRITE,
MAP_SHARED,
(unsigned long)map->offset);
up(&current->mm->mmap_sem);
} else {
down(&current->mm->mmap_sem);
virtual = do_mmap(filp, 0, dma->byte_count,
PROT_READ|PROT_WRITE, MAP_SHARED, 0);
up(&current->mm->mmap_sem);
}
if (virtual > -1024UL) {
/* Real error */
retcode = (signed long)virtual;
goto done;
}
request.virtual = (void *)virtual;
for (i = 0; i < dma->buf_count; i++) {
if (copy_to_user(&request.list[i].idx,
&dma->buflist[i]->idx,
sizeof(request.list[0].idx))) {
retcode = -EFAULT;
goto done;
}
if (copy_to_user(&request.list[i].total,
&dma->buflist[i]->total,
sizeof(request.list[0].total))) {
retcode = -EFAULT;
goto done;
}
if (copy_to_user(&request.list[i].used,
&zero,
sizeof(zero))) {
retcode = -EFAULT;
goto done;
}
address = virtual + dma->buflist[i]->offset;
if (copy_to_user(&request.list[i].address,
&address,
sizeof(address))) {
retcode = -EFAULT;
goto done;
}
}
}
done:
request.count = dma->buf_count;
DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode);
if (copy_to_user((drm_buf_map_t *)arg, &request, sizeof(request)))
return -EFAULT;
return retcode;
}
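And the matching map call from userspace, again a sketch (NR_BUFS is hypothetical); the idx/total/used/address fields mirror the copy_to_user() calls above:

	drm_buf_map_t map;
	drm_buf_pub_t list[NR_BUFS];

	map.count   = NR_BUFS;
	map.virtual = NULL;
	map.list    = list;
	if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &map) == 0) {
		/* list[i].address now points into the mmap()ed buffer area */
	}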
/* radeon_context.c -- IOCTLs for Radeon contexts -*- linux-c -*-
*
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Fremont, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Author: Kevin E. Martin <martin@valinux.com>
* Rickard E. (Rik) Faith <faith@valinux.com>
*
*/
#define __NO_VERSION__
#include "drmP.h"
#include "radeon_drv.h"
extern drm_ctx_t radeon_res_ctx;
static int radeon_alloc_queue(drm_device_t *dev)
{
return drm_ctxbitmap_next(dev);
}
int radeon_context_switch(drm_device_t *dev, int old, int new)
{
char buf[64];
atomic_inc(&dev->total_ctx);
if (test_and_set_bit(0, &dev->context_flag)) {
DRM_ERROR("Reentering -- FIXME\n");
return -EBUSY;
}
#if DRM_DMA_HISTOGRAM
dev->ctx_start = get_cycles();
#endif
DRM_DEBUG("Context switch from %d to %d\n", old, new);
if (new == dev->last_context) {
clear_bit(0, &dev->context_flag);
return 0;
}
if (drm_flags & DRM_FLAG_NOCTX) {
radeon_context_switch_complete(dev, new);
} else {
sprintf(buf, "C %d %d\n", old, new);
drm_write_string(dev, buf);
}
return 0;
}
int radeon_context_switch_complete(drm_device_t *dev, int new)
{
dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
dev->last_switch = jiffies;
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
DRM_ERROR("Lock isn't held after context switch\n");
}
/* If a context switch is ever initiated
when the kernel holds the lock, release
that lock here. */
#if DRM_DMA_HISTOGRAM
atomic_inc(&dev->histo.ctx[drm_histogram_slot(get_cycles()
- dev->ctx_start)]);
#endif
clear_bit(0, &dev->context_flag);
wake_up(&dev->context_wait);
return 0;
}
int radeon_resctx(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_ctx_res_t res;
drm_ctx_t ctx;
int i;
DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS);
if (copy_from_user(&res, (drm_ctx_res_t *)arg, sizeof(res)))
return -EFAULT;
if (res.count >= DRM_RESERVED_CONTEXTS) {
memset(&ctx, 0, sizeof(ctx));
for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
ctx.handle = i;
if (copy_to_user(&res.contexts[i], &i, sizeof(i)))
return -EFAULT;
}
}
res.count = DRM_RESERVED_CONTEXTS;
if (copy_to_user((drm_ctx_res_t *)arg, &res, sizeof(res)))
return -EFAULT;
return 0;
}
int radeon_addctx(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
if ((ctx.handle = radeon_alloc_queue(dev)) == DRM_KERNEL_CONTEXT) {
/* Skip kernel's context and get a new one. */
ctx.handle = radeon_alloc_queue(dev);
}
DRM_DEBUG("%d\n", ctx.handle);
if (ctx.handle == -1) {
DRM_DEBUG("Not enough free contexts.\n");
/* Should this return -EBUSY instead? */
return -ENOMEM;
}
if (copy_to_user((drm_ctx_t *)arg, &ctx, sizeof(ctx)))
return -EFAULT;
return 0;
}
int radeon_modctx(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_ctx_t ctx;
if (copy_from_user(&ctx, (drm_ctx_t*)arg, sizeof(ctx)))
return -EFAULT;
if (ctx.flags==_DRM_CONTEXT_PRESERVED)
radeon_res_ctx.handle=ctx.handle;
return 0;
}
int radeon_getctx(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_ctx_t ctx;
if (copy_from_user(&ctx, (drm_ctx_t*)arg, sizeof(ctx)))
return -EFAULT;
/* This is 0, because we don't handle any context flags */
ctx.flags = 0;
if (copy_to_user((drm_ctx_t*)arg, &ctx, sizeof(ctx)))
return -EFAULT;
return 0;
}
int radeon_switchctx(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
return radeon_context_switch(dev, dev->last_context, ctx.handle);
}
int radeon_newctx(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
radeon_context_switch_complete(dev, ctx.handle);
return 0;
}
int radeon_rmctx(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
drm_ctxbitmap_free(dev, ctx.handle);
return 0;
}
......
/* radeon_drm.h -- Public header for the radeon driver -*- linux-c -*-
*
* Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Fremont, California.
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Kevin E. Martin <martin@valinux.com>
* Gareth Hughes <gareth@valinux.com>
*
*/
#ifndef __RADEON_DRM_H__
#define __RADEON_DRM_H__
/* WARNING: If you change any of these defines, make sure to change the
* defines in the X server file (radeon_sarea.h)
*/
#ifndef __RADEON_SAREA_DEFINES__
#define __RADEON_SAREA_DEFINES__
/* What needs to be changed for the current vertex buffer?
*/
#define RADEON_UPLOAD_CONTEXT 0x00000001
#define RADEON_UPLOAD_VERTFMT 0x00000002
#define RADEON_UPLOAD_LINE 0x00000004
#define RADEON_UPLOAD_BUMPMAP 0x00000008
#define RADEON_UPLOAD_MASKS 0x00000010
#define RADEON_UPLOAD_VIEWPORT 0x00000020
#define RADEON_UPLOAD_SETUP 0x00000040
#define RADEON_UPLOAD_TCL 0x00000080
#define RADEON_UPLOAD_MISC 0x00000100
#define RADEON_UPLOAD_TEX0 0x00000200
#define RADEON_UPLOAD_TEX1 0x00000400
#define RADEON_UPLOAD_TEX2 0x00000800
#define RADEON_UPLOAD_TEX0IMAGES 0x00001000
#define RADEON_UPLOAD_TEX1IMAGES 0x00002000
#define RADEON_UPLOAD_TEX2IMAGES 0x00004000
#define RADEON_UPLOAD_CLIPRECTS 0x00008000 /* handled client-side */
#define RADEON_REQUIRE_QUIESCENCE 0x00010000
#define RADEON_UPLOAD_ALL 0x0001ffff
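These bits form a dirty mask that the client sets in the shared SAREA before firing a buffer, telling the kernel which state blocks to re-emit. A one-line sketch of typical client use (sarea_priv, a drm_radeon_sarea_t pointer, is an assumed name):

	/* Viewport and blend state changed since the last buffer: */
	sarea_priv->dirty |= RADEON_UPLOAD_CONTEXT | RADEON_UPLOAD_VIEWPORT;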
#define RADEON_FRONT 0x1
#define RADEON_BACK 0x2
#define RADEON_DEPTH 0x4
/* Primitive types
*/
#define RADEON_POINTS 0x1
#define RADEON_LINES 0x2
#define RADEON_LINE_STRIP 0x3
#define RADEON_TRIANGLES 0x4
#define RADEON_TRIANGLE_FAN 0x5
#define RADEON_TRIANGLE_STRIP 0x6
/* Vertex/indirect buffer size
*/
#define RADEON_BUFFER_SIZE 16384
/* Byte offsets for indirect buffer data
*/
#define RADEON_INDEX_PRIM_OFFSET 20
#define RADEON_HOSTDATA_BLIT_OFFSET 32
#define RADEON_SCRATCH_REG_OFFSET 32
/* Keep these small for testing
*/
#define RADEON_NR_SAREA_CLIPRECTS 12
/* There are 2 heaps (local/AGP). Each region within a heap is a
* minimum of 64k, and there are at most 64 of them per heap.
*/
#define RADEON_LOCAL_TEX_HEAP 0
#define RADEON_AGP_TEX_HEAP 1
#define RADEON_NR_TEX_HEAPS 2
#define RADEON_NR_TEX_REGIONS 64
#define RADEON_LOG_TEX_GRANULARITY 16
#define RADEON_MAX_TEXTURE_LEVELS 11
#define RADEON_MAX_TEXTURE_UNITS 3
#endif /* __RADEON_SAREA_DEFINES__ */
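Reading RADEON_LOG_TEX_GRANULARITY as a log2 shift, each of the 64 regions is 1 << 16 = 64k, which matches the comment above; finding the region a texture offset falls in is then a single shift. A hypothetical helper, not part of this header:

	static inline int radeon_tex_region(unsigned long offset)
	{
		return (int)(offset >> RADEON_LOG_TEX_GRANULARITY);	/* 64k granules */
	}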
typedef struct {
unsigned int red;
unsigned int green;
unsigned int blue;
unsigned int alpha;
} radeon_color_regs_t;
typedef struct {
/* Context state */
unsigned int pp_misc; /* 0x1c14 */
unsigned int pp_fog_color;
unsigned int re_solid_color;
unsigned int rb3d_blendcntl;
unsigned int rb3d_depthoffset;
unsigned int rb3d_depthpitch;
unsigned int rb3d_zstencilcntl;
unsigned int pp_cntl; /* 0x1c38 */
unsigned int rb3d_cntl;
unsigned int rb3d_coloroffset;
unsigned int re_width_height;
unsigned int rb3d_colorpitch;
unsigned int se_cntl;
/* Vertex format state */
unsigned int se_coord_fmt; /* 0x1c50 */
/* Line state */
unsigned int re_line_pattern; /* 0x1cd0 */
unsigned int re_line_state;
unsigned int se_line_width; /* 0x1db8 */
/* Bumpmap state */
unsigned int pp_lum_matrix; /* 0x1d00 */
unsigned int pp_rot_matrix_0; /* 0x1d58 */
unsigned int pp_rot_matrix_1;
/* Mask state */
unsigned int rb3d_stencilrefmask; /* 0x1d7c */
unsigned int rb3d_ropcntl;
unsigned int rb3d_planemask;
/* Viewport state */
unsigned int se_vport_xscale; /* 0x1d98 */
unsigned int se_vport_xoffset;
unsigned int se_vport_yscale;
unsigned int se_vport_yoffset;
unsigned int se_vport_zscale;
unsigned int se_vport_zoffset;
/* Setup state */
unsigned int se_cntl_status; /* 0x2140 */
#ifdef TCL_ENABLE
/* TCL state */
radeon_color_regs_t se_tcl_material_emmissive; /* 0x2210 */
radeon_color_regs_t se_tcl_material_ambient;
radeon_color_regs_t se_tcl_material_diffuse;
radeon_color_regs_t se_tcl_material_specular;
unsigned int se_tcl_shininess;
unsigned int se_tcl_output_vtx_fmt;
unsigned int se_tcl_output_vtx_sel;
unsigned int se_tcl_matrix_select_0;
unsigned int se_tcl_matrix_select_1;
unsigned int se_tcl_ucp_vert_blend_ctl;
unsigned int se_tcl_texture_proc_ctl;
unsigned int se_tcl_light_model_ctl;
unsigned int se_tcl_per_light_ctl[4];
#endif
/* Misc state */
unsigned int re_top_left; /* 0x26c0 */
unsigned int re_misc;
} drm_radeon_context_regs_t;
/* Setup registers for each texture unit
*/
typedef struct {
unsigned int pp_txfilter;
unsigned int pp_txformat;
unsigned int pp_txoffset;
unsigned int pp_txcblend;
unsigned int pp_txablend;
unsigned int pp_tfactor;
unsigned int pp_border_color;
#ifdef CUBIC_ENABLE
unsigned int pp_cubic_faces;
unsigned int pp_cubic_offset[5];
#endif
} drm_radeon_texture_regs_t;
typedef struct {
unsigned char next, prev;
unsigned char in_use;
int age;
} drm_radeon_tex_region_t;
typedef struct {
/* The channel for communication of state information to the kernel
* on firing a vertex buffer.
*/
drm_radeon_context_regs_t context_state;
drm_radeon_texture_regs_t tex_state[RADEON_MAX_TEXTURE_UNITS];
unsigned int dirty;
unsigned int vertsize;
unsigned int vc_format;
/* The current cliprects, or a subset thereof.
*/
drm_clip_rect_t boxes[RADEON_NR_SAREA_CLIPRECTS];
unsigned int nbox;
/* Counters for client-side throttling of rendering clients.
*/
unsigned int last_frame;
unsigned int last_dispatch;
unsigned int last_clear;
drm_radeon_tex_region_t tex_list[RADEON_NR_TEX_HEAPS][RADEON_NR_TEX_REGIONS+1];
int tex_age[RADEON_NR_TEX_HEAPS];
int ctx_owner;
} drm_radeon_sarea_t;
/* WARNING: If you change any of these defines, make sure to change the
* defines in the Xserver file (xf86drmRadeon.h)
*/
typedef struct drm_radeon_init {
enum {
RADEON_INIT_CP = 0x01,
RADEON_CLEANUP_CP = 0x02
} func;
int sarea_priv_offset;
int is_pci;
int cp_mode;
int agp_size;
int ring_size;
int usec_timeout;
unsigned int fb_bpp;
unsigned int front_offset, front_pitch;
unsigned int back_offset, back_pitch;
unsigned int depth_bpp;
unsigned int depth_offset, depth_pitch;
unsigned int fb_offset;
unsigned int mmio_offset;
unsigned int ring_offset;
unsigned int ring_rptr_offset;
unsigned int buffers_offset;
unsigned int agp_textures_offset;
} drm_radeon_init_t;
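A sketch of how the 2D server might fill this structure in to start the CP; every value is a placeholder, and the ioctl define (DRM_IOCTL_RADEON_CP_INIT) is assumed from the full radeon_drm.h, which this page truncates:

	drm_radeon_init_t init;

	memset(&init, 0, sizeof(init));
	init.func         = RADEON_INIT_CP;
	init.is_pci       = 0;			/* AGP path */
	init.ring_size    = 1024 * 1024;	/* placeholder */
	init.usec_timeout = 100000;		/* placeholder */
	/* front/back/depth/ring offsets come from the server's layout */
	if (ioctl(fd, DRM_IOCTL_RADEON_CP_INIT, &init) < 0)
		perror("DRM_IOCTL_RADEON_CP_INIT");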
typedef struct drm_radeon_cp_stop {
int flush;
int idle;
} drm_radeon_cp_stop_t;
typedef struct drm_radeon_fullscreen {
enum {
RADEON_INIT_FULLSCREEN = 0x01,
RADEON_CLEANUP_FULLSCREEN = 0x02
} func;
} drm_radeon_fullscreen_t;
#define CLEAR_X1 0
#define CLEAR_Y1 1
#define CLEAR_X2 2
#define CLEAR_Y2 3
#define CLEAR_DEPTH 4
typedef struct drm_radeon_clear {
unsigned int flags;
int x, y, w, h;
unsigned int clear_color;
unsigned int clear_depth;
union {
float f[5];
unsigned int ui[5];
} rect;
} drm_radeon_clear_t;
typedef struct drm_radeon_vertex {
int prim;
int idx; /* Index of vertex buffer */
int count; /* Number of vertices in buffer */
int discard; /* Client finished with buffer? */
} drm_radeon_vertex_t;
typedef struct drm_radeon_indices {
int prim;
int idx;
int start;
int end;
int discard; /* Client finished with buffer? */
} drm_radeon_indices_t;
typedef struct drm_radeon_blit {
int idx;
int pitch;
int offset;
int format;
unsigned short x, y;
unsigned short width, height;
} drm_radeon_blit_t;
typedef struct drm_radeon_stipple {
unsigned int *mask;
} drm_radeon_stipple_t;
typedef struct drm_radeon_indirect {
int idx;
int start;
int end;
int discard;
} drm_radeon_indirect_t;
#endif
......
@@ -783,7 +783,10 @@ static int isapnp_set_card(char *line)
unsigned int id;
char index[16], value[32];
isapnp_info_card = NULL;
if (isapnp_info_card) {
isapnp_cfg_end();
isapnp_info_card = NULL;
}
line = isapnp_get_str(index, line, sizeof(index));
isapnp_get_str(value, line, sizeof(value));
idx = idx1 = simple_strtoul(index, NULL, 0);
@@ -853,10 +856,7 @@ static int isapnp_set_device(char *line)
static int isapnp_autoconfigure(void)
{
if (isapnp_info_device == NULL) {
printk("isapnp: device is not set\n");
return 0;
}
isapnp_cfg_end();
if (isapnp_info_device->active)
isapnp_info_device->deactivate(isapnp_info_device);
if (isapnp_info_device->prepare(isapnp_info_device) < 0) {
@@ -867,6 +867,13 @@ static int isapnp_autoconfigure(void)
printk("isapnp: cannot activate device");
return 0;
}
if (isapnp_cfg_begin(isapnp_info_card->number, -1)<0) {
printk("isapnp: configuration start sequence for card %d failed\n", isapnp_info_card->number);
isapnp_info_card = NULL;
isapnp_info_device = NULL;
return 1;
}
isapnp_device(isapnp_info_device->devfn);
return 0;
}
......
@@ -375,8 +375,10 @@ static int emu10k1_audio_ioctl(struct inode *inode, struct file *file, unsigned
format = wiinst->format;
format.samplingrate = val;
if (emu10k1_wavein_setformat(wave_dev, &format) < 0)
if (emu10k1_wavein_setformat(wave_dev, &format) < 0) {
spin_unlock_irqrestore(&wiinst->lock, flags);
return -EINVAL;
}
val = wiinst->format.samplingrate;
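This hunk and the five that follow it fix the same leak: an early error return inside the spinlocked region left wiinst->lock (or woinst->lock) held. The corrected shape, common to all six sites:

	spin_lock_irqsave(&wiinst->lock, flags);
	/* ... */
	if (emu10k1_wavein_setformat(wave_dev, &format) < 0) {
		spin_unlock_irqrestore(&wiinst->lock, flags);	/* was missing */
		return -EINVAL;
	}
	/* ... */
	spin_unlock_irqrestore(&wiinst->lock, flags);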
@@ -393,8 +395,10 @@ static int emu10k1_audio_ioctl(struct inode *inode, struct file *file, unsigned
format = woinst->format;
format.samplingrate = val;
if (emu10k1_waveout_setformat(wave_dev, &format) < 0)
if (emu10k1_waveout_setformat(wave_dev, &format) < 0) {
spin_unlock_irqrestore(&woinst->lock, flags);
return -EINVAL;
}
val = woinst->format.samplingrate;
@@ -430,8 +434,10 @@ static int emu10k1_audio_ioctl(struct inode *inode, struct file *file, unsigned
format = wiinst->format;
format.channels = val ? 2 : 1;
if (emu10k1_wavein_setformat(wave_dev, &format) < 0)
if (emu10k1_wavein_setformat(wave_dev, &format) < 0) {
spin_unlock_irqrestore(&wiinst->lock, flags);
return -EINVAL;
}
val = wiinst->format.channels - 1;
@@ -447,8 +453,10 @@ static int emu10k1_audio_ioctl(struct inode *inode, struct file *file, unsigned
format = woinst->format;
format.channels = val ? 2 : 1;
if (emu10k1_waveout_setformat(wave_dev, &format) < 0)
if (emu10k1_waveout_setformat(wave_dev, &format) < 0) {
spin_unlock_irqrestore(&woinst->lock, flags);
return -EINVAL;
}
val = woinst->format.channels - 1;
@@ -478,8 +486,10 @@ static int emu10k1_audio_ioctl(struct inode *inode, struct file *file, unsigned
format = wiinst->format;
format.channels = val;
if (emu10k1_wavein_setformat(wave_dev, &format) < 0)
if (emu10k1_wavein_setformat(wave_dev, &format) < 0) {
spin_unlock_irqrestore(&wiinst->lock, flags);
return -EINVAL;
}
val = wiinst->format.channels;
@@ -495,8 +505,10 @@ static int emu10k1_audio_ioctl(struct inode *inode, struct file *file, unsigned
format = woinst->format;
format.channels = val;
if (emu10k1_waveout_setformat(wave_dev, &format) < 0)
if (emu10k1_waveout_setformat(wave_dev, &format) < 0) {
spin_unlock_irqrestore(&woinst->lock, flags);
return -EINVAL;
}
val = woinst->format.channels;
@@ -542,8 +554,10 @@ static int emu10k1_audio_ioctl(struct inode *inode, struct file *file, unsigned
format = wiinst->format;
format.bitsperchannel = val;
if (emu10k1_wavein_setformat(wave_dev, &format) < 0)
if (emu10k1_wavein_setformat(wave_dev, &format) < 0) {
spin_unlock_irqrestore(&wiinst->lock, flags);
return -EINVAL;
}
val = wiinst->format.bitsperchannel;
@@ -559,8 +573,10 @@ static int emu10k1_audio_ioctl(struct inode *inode, struct file *file, unsigned
format = woinst->format;
format.bitsperchannel = val;
if (emu10k1_waveout_setformat(wave_dev, &format) < 0)
if (emu10k1_waveout_setformat(wave_dev, &format) < 0) {
spin_unlock_irqrestore(&woinst->lock, flags);
return -EINVAL;
}
val = woinst->format.bitsperchannel;
@@ -968,6 +984,7 @@ static int emu10k1_audio_mmap(struct file *file, struct vm_area_struct *vma)
for (i = 0; i < woinst->buffer.pages; i++) {
if (remap_page_range(vma->vm_start + (i * PAGE_SIZE), virt_to_phys(woinst->buffer.addr[i]), PAGE_SIZE, vma->vm_page_prot)) {
spin_unlock_irqrestore(&woinst->lock, flags);
unlock_kernel();
return -EAGAIN;
}
}
......
@@ -461,7 +461,7 @@ static u_long get_line_length(int xres_virtual, int bpp)
{
u_long length;
length = (xres_virtual+bpp-1)/bpp;
length = xres_virtual*bpp;
length = (length+31)&-32;
length >>= 3;
return(length);
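A worked example of the fix, taking 640x480 at 16 bpp: the old expression computed (640+16-1)/16 = 40, which (length+31)&-32 rounds up to 64 and the final shift turns into a nonsensical 8 bytes per scanline; the corrected one computes 640*16 = 10240 bits, already a 32-bit multiple, giving the expected 1280 bytes.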
......
@@ -299,6 +299,7 @@ int hpfs_notify_change(struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = dentry->d_inode;
int error;
if (attr->ia_valid & ATTR_SIZE && attr->ia_size > inode->i_size) return -EPERM;
if (inode->i_sb->s_hpfs_root == inode->i_ino) return -EINVAL;
if ((error = inode_change_ok(inode, attr))) return error;
inode_setattr(inode, attr);
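The added first check is the changelog's hpfs entry: any ATTR_SIZE change that would grow the file is refused with -EPERM before inode_change_ok() even runs, presumably because HPFS cannot allocate blocks on this path. From userspace (a sketch; the path and oldsize are placeholders):

	/* On an HPFS mount, growing a file via truncate() now fails ... */
	if (truncate("/mnt/hpfs/file", oldsize + 4096) < 0)
		perror("truncate");	/* EPERM */
	/* ... while shrinking the file is unaffected. */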
......
@@ -318,7 +318,7 @@ static void floppy_off(unsigned int nr);
#define DEVICE_NAME "ida"
#define TIMEOUT_VALUE (25*HZ)
#define DEVICE_REQUEST do_ida_request0
#define DEVICE_REQUEST do_ida_request
#define DEVICE_NR(device) (MINOR(device) >> 4)
#endif /* MAJOR_NR == whatever */
......
@@ -216,8 +216,6 @@ static int shmem_writepage(struct page * page)
swp_entry_t *entry, swap;
info = &page->mapping->host->u.shmem_i;
if (info->locked)
return 1;
swap = __get_swap_page(2);
if (!swap.val)
return 1;
......