Commit 9c6f70be authored by Linus Torvalds's avatar Linus Torvalds

v2.4.4.6 -> v2.4.5

  - Alan Cox: camera conversion missed parts
  - Neil Brown: md graceful alloc failure
  - Andrea Arcangeli: more alpha fixups, bounce buffer deadlock avoidance
  - Adam Fritzler: tms380tr driver update
  - Al Viro: VFS layer cleanups
parent c9df1e20
VERSION = 2
PATCHLEVEL = 4
SUBLEVEL = 5
EXTRAVERSION =-pre6
EXTRAVERSION =
KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
......
......@@ -213,15 +213,8 @@ if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
fi
fi
# The machine must be able to support more than 8GB physical memory
# before large vmalloc might even pretend to be an issue.
if [ "$CONFIG_ALPHA_GENERIC" = "y" -o "$CONFIG_ALPHA_DP264" = "y" \
-o "$CONFIG_ALPHA_WILDFIRE" = "y" -o "$CONFIG_ALPHA_TITAN" = "y" ]
then
bool 'Large VMALLOC support' CONFIG_ALPHA_LARGE_VMALLOC
else
define_bool CONFIG_ALPHA_LARGE_VMALLOC n
fi
# LARGE_VMALLOC is racy, if you *really* need it then fix it first
define_bool CONFIG_ALPHA_LARGE_VMALLOC n
source drivers/pci/Config.in
......
......@@ -207,6 +207,7 @@ EXPORT_SYMBOL(debug_spin_trylock);
EXPORT_SYMBOL(write_lock);
EXPORT_SYMBOL(read_lock);
#endif
EXPORT_SYMBOL(cpu_present_mask);
#endif /* CONFIG_SMP */
EXPORT_SYMBOL(rtc_lock);
......@@ -230,5 +231,6 @@ EXPORT_SYMBOL_NOVERS(__remq);
EXPORT_SYMBOL_NOVERS(__remqu);
EXPORT_SYMBOL_NOVERS(memcpy);
EXPORT_SYMBOL_NOVERS(memset);
EXPORT_SYMBOL_NOVERS(memchr);
EXPORT_SYMBOL(get_wchan);
......@@ -135,6 +135,7 @@ struct pci_iommu_arena
{
spinlock_t lock;
struct pci_controller *hose;
#define IOMMU_INVALID_PTE 0x2 /* 32:63 bits MBZ */
unsigned long *ptes;
dma_addr_t dma_base;
unsigned int size;
......
......@@ -42,6 +42,25 @@ calc_npages(long bytes)
return (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
}
/*
 * iommu_arena_fixup - boot-time workaround for a Cypress chip quirk.
 *
 * The Cypress chip gets confused by DMA addresses above -1M (the top
 * megabyte of the 32-bit space), so reserve the page-table entries
 * that map PCI addresses above 0xfff00000 by marking them invalid.
 */
static void __init
iommu_arena_fixup(struct pci_iommu_arena * arena)
{
unsigned long base, size;
base = arena->dma_base;
size = arena->size;
/* Only arenas whose DMA window reaches into the top 1M need fixing. */
if (base + size > 0xfff00000) {
/* Index of the first pte covering DMA address 0xfff00000.
   NOTE(review): the loop bound (0x100000 >> PAGE_SHIFT) counts 1M of
   ptes from index 0, not from this start index, so the loop is a
   no-op unless base is very close to 0xfff00000 — confirm the
   intended reserved range. */
int i = (0xfff00000 - base) >> PAGE_SHIFT;
for (; i < (0x100000 >> PAGE_SHIFT); i++)
arena->ptes[i] = IOMMU_INVALID_PTE;
}
}
struct pci_iommu_arena *
iommu_arena_new(struct pci_controller *hose, dma_addr_t base,
unsigned long window_size, unsigned long align)
......@@ -71,6 +90,8 @@ iommu_arena_new(struct pci_controller *hose, dma_addr_t base,
unless there are chip bugs. */
arena->align_entry = 1;
iommu_arena_fixup(arena);
return arena;
}
......@@ -115,12 +136,12 @@ iommu_arena_alloc(struct pci_iommu_arena *arena, long n)
}
}
/* Success. Mark them all in use, ie not zero. Typically
bit zero is the valid bit, so write ~1 into everything.
/* Success. Mark them all in use, ie not zero and invalid
for the iommu tlb that could load them from under us.
The chip specific bits will fill this in with something
kosher when we return. */
for (i = 0; i < n; ++i)
ptes[p+i] = ~1UL;
ptes[p+i] = IOMMU_INVALID_PTE;
arena->next_entry = p + n;
spin_unlock_irqrestore(&arena->lock, flags);
......
......@@ -16,15 +16,18 @@
#include <linux/pci.h>
#include <linux/init.h>
#define __EXTERN_INLINE inline
#include <asm/io.h>
#include <asm/core_tsunami.h>
#undef __EXTERN_INLINE
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/bitops.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/core_tsunami.h>
#include <asm/hwrpb.h>
#include "proto.h"
......
......@@ -767,7 +767,7 @@ static inline void parse_mem_cmdline (char ** cmdline_p)
void __init setup_arch(char **cmdline_p)
{
unsigned long bootmap_size;
unsigned long bootmap_size, low_mem_size;
unsigned long start_pfn, max_pfn, max_low_pfn;
int i;
......@@ -1013,7 +1013,9 @@ void __init setup_arch(char **cmdline_p)
request_resource(&ioport_resource, standard_io_resources+i);
/* Tell the PCI layer not to allocate too close to the RAM area.. */
pci_mem_start = ((max_low_pfn << PAGE_SHIFT) + 0xfffff) & ~0xfffff;
low_mem_size = ((max_low_pfn << PAGE_SHIFT) + 0xfffff) & ~0xfffff;
if (low_mem_size > pci_mem_start)
pci_mem_start = low_mem_size;
#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
......
......@@ -270,7 +270,7 @@ static struct device_node* airport_dev;
static struct board_features_t {
char* compatible;
u32 features;
} board_features_datas[] __init =
} board_features_datas[] __initdata =
{
{ "AAPL,PowerMac G3", 0 }, /* Beige G3 */
{ "iMac,1", 0 }, /* First iMac (gossamer) */
......
......@@ -2780,12 +2780,12 @@ static
int ide_cdrom_open (struct inode *ip, struct file *fp, ide_drive_t *drive)
{
struct cdrom_info *info = drive->driver_data;
int rc;
int rc = -ENOMEM;
MOD_INC_USE_COUNT;
if (info->buffer == NULL)
info->buffer = (char *) kmalloc(SECTOR_BUFFER_SIZE, GFP_KERNEL);
if ((rc = cdrom_fops.open(ip, fp))) {
if ((info->buffer == NULL) || (rc = cdrom_fops.open(ip, fp))) {
drive->usage--;
MOD_DEC_USE_COUNT;
}
......
......@@ -3754,6 +3754,10 @@ void md__init md_setup_drive(void)
continue;
}
mddev = alloc_mddev(MKDEV(MD_MAJOR,minor));
if (mddev == NULL) {
printk("md: kmalloc failed - cannot start array %d\n", minor);
continue;
}
if (md_setup_args.pers[minor]) {
/* non-persistent */
mdu_array_info_t ainfo;
......
......@@ -137,8 +137,7 @@ static int __init abyss_attach(struct pci_dev *pdev, const struct pci_device_id
*/
dev->base_addr += 0x10;
ret = tmsdev_init(dev,0,pdev);
/* XXX: should be the max PCI32 DMA max */
ret = tmsdev_init(dev, PCI_MAX_ADDRESS, pdev);
if (ret) {
printk("%s: unable to get memory for dev->priv.\n",
dev->name);
......
......@@ -349,6 +349,7 @@ int __init madgemc_probe(void)
printk(":%2.2x", dev->dev_addr[i]);
printk("\n");
/* XXX is ISA_MAX_ADDRESS correct here? */
if (tmsdev_init(dev, ISA_MAX_ADDRESS, NULL)) {
printk("%s: unable to get memory for dev->priv.\n",
dev->name);
......
......@@ -461,7 +461,9 @@ typedef struct {
* fragments following.
*/
/* XXX is there some better way to do this? */
#define ISA_MAX_ADDRESS 0x00ffffff
#define PCI_MAX_ADDRESS 0xffffffff
#pragma pack(1)
typedef struct {
......
......@@ -142,8 +142,7 @@ static int __init tms_pci_attach(struct pci_dev *pdev, const struct pci_device_i
printk(":%2.2x", dev->dev_addr[i]);
printk("\n");
ret = tmsdev_init(dev,0, pdev);
/* XXX: should be the max PCI32 DMA max */
ret = tmsdev_init(dev, PCI_MAX_ADDRESS, pdev);
if (ret) {
printk("%s: unable to get memory for dev->priv.\n", dev->name);
goto err_out_irq;
......@@ -165,7 +164,7 @@ static int __init tms_pci_attach(struct pci_dev *pdev, const struct pci_device_i
dev->stop = tms380tr_close;
ret = register_trdev(dev);
if (!ret)
if (ret)
goto err_out_tmsdev;
pci_set_drvdata(pdev, dev);
......
......@@ -18,18 +18,11 @@ list-multi := usbcore.o
usbcore-objs := usb.o usb-debug.o hub.o
ifneq ($(CONFIG_USB_PWC),n)
# By default we use the C colour conversion functions unless we
# detect an Intel CPU, for which there is assembly available
ccvt-objs := ccvt_c.o vcvt_c.o
ifeq ($(CONFIG_X86),y)
ccvt-objs := ccvt_i386.o vcvt_i386.o
endif
export-objs += pwc-uncompress.o
list-multi += pwc.o
endif
pwc-objs := pwc-if.o pwc-misc.o pwc-ctrl.o pwc-uncompress.o $(ccvt-objs)
pwc-objs := pwc-if.o pwc-misc.o pwc-ctrl.o pwc-uncompress.o
# Optional parts of multipart objects.
......
/*
(C) 2000 Nemosoft Unv. nemosoft@smcc.demon.nl
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef CCVT_H
#define CCVT_H
#ifdef __cplusplus
extern "C" {
#endif
/* Colour ConVerT: going from one colour space to another
Format descriptions:
420i = "4:2:0 interlaced"
YYYY UU YYYY UU even lines
YYYY VV YYYY VV odd lines
U/V data is subsampled by 2 both in horizontal
and vertical directions, and intermixed with the Y values.
420p = "4:2:0 planar"
YYYYYYYY N lines
UUUU N/2 lines
VVVV N/2 lines
U/V is again subsampled, but all the Ys, Us and Vs are placed
together in separate buffers. The buffers may be placed in
one piece of contiguous memory though, with Y buffer first,
followed by U, followed by V.
yuyv = "4:2:2 interlaced"
YUYV YUYV YUYV ... N lines
The U/V data is subsampled by 2 in horizontal direction only.
bgr24 = 3 bytes per pixel, in the order Blue Green Red (whoever came up
with that idea...)
rgb24 = 3 bytes per pixel, in the order Red Green Blue (which is sensible)
rgb32 = 4 bytes per pixel, in the order Red Green Blue Alpha, with
Alpha really being a filler byte (0)
bgr32 = last but not least, 4 bytes per pixel, in the order Blue Green Red
Alpha, Alpha again a filler byte (0)
*/
/* Functions in ccvt_i386.S/ccvt_c.c */
/* 4:2:0 YUV interlaced to RGB/BGR */
void ccvt_420i_bgr24(int width, int height, void *src, void *dst);
void ccvt_420i_rgb24(int width, int height, void *src, void *dst);
void ccvt_420i_bgr32(int width, int height, void *src, void *dst);
void ccvt_420i_rgb32(int width, int height, void *src, void *dst);
/* 4:2:2 YUYV interlaced to RGB/BGR */
void ccvt_yuyv_rgb32(int width, int height, void *src, void *dst);
void ccvt_yuyv_bgr32(int width, int height, void *src, void *dst);
/* 4:2:0 YUV planar to RGB/BGR */
void ccvt_420p_rgb32(int width, int height, void *srcy, void *srcu, void *srcv, void *dst);
void ccvt_420p_bgr32(int width, int height, void *srcy, void *srcu, void *srcv, void *dst);
/* RGB/BGR to 4:2:0 YUV interlaced */
/* RGB/BGR to 4:2:0 YUV planar */
void ccvt_rgb24_420p(int width, int height, void *src, void *dsty, void *dstu, void *dstv);
void ccvt_bgr24_420p(int width, int height, void *src, void *dsty, void *dstu, void *dstv);
/* Go from 420i to other yuv formats */
void ccvt_420i_420p(int width, int height, void *src, void *dsty, void *dstu, void *dstv);
void ccvt_420i_yuyv(int width, int height, void *src, void *dst);
#ifdef __cplusplus
}
#endif
#endif
/*
Colour conversion routines (RGB <-> YUV) in plain C
(C) 2000-2001 Nemosoft Unv. nemosoft@smcc.demon.nl
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include "ccvt.h"
#include "vcvt.h"
/* We use the viewport routines, with a viewport width that is exactly
the same as the image width. The overhead for computing the view/image
offsets is very small anyway.
The assembly routines are still different, since they are quite optimized.
*/
/* 4:2:0 interlaced YUV -> RGB24: delegate to the viewport converter
   with viewport width equal to the image width (no offset). */
void ccvt_420i_rgb24(int width, int height, void *src, void *dst)
{
vcvt_420i_rgb24(width, height, width, src, dst);
}
/* 4:2:0 interlaced YUV -> BGR24: viewport converter with view width ==
   image width. */
void ccvt_420i_bgr24(int width, int height, void *src, void *dst)
{
vcvt_420i_bgr24(width, height, width, src, dst);
}
/* 4:2:0 interlaced YUV -> RGB32 (4 bytes/pixel): viewport converter
   with view width == image width. */
void ccvt_420i_rgb32(int width, int height, void *src, void *dst)
{
vcvt_420i_rgb32(width, height, width, src, dst);
}
/* 4:2:0 interlaced YUV -> BGR32 (4 bytes/pixel): viewport converter
   with view width == image width. */
void ccvt_420i_bgr32(int width, int height, void *src, void *dst)
{
vcvt_420i_bgr32(width, height, width, src, dst);
}
/* 4:2:0 interlaced -> 4:2:0 planar (separate Y, U, V destination
   buffers): viewport converter with view width == image width. */
void ccvt_420i_420p(int width, int height, void *src, void *dsty, void *dstu, void *dstv)
{
vcvt_420i_420p(width, height, width, src, dsty, dstu, dstv);
}
/* 4:2:0 interlaced -> 4:2:2 YUYV interlaced: viewport converter with
   view width == image width. */
void ccvt_420i_yuyv(int width, int height, void *src, void *dst)
{
vcvt_420i_yuyv(width, height, width, src, dst);
}
This diff is collapsed.
......@@ -107,7 +107,7 @@ struct Nala_table_entry {
static struct Nala_table_entry Nala_table[PSZ_MAX][8] =
{
#include "nala.inc"
#include "pwc_nala.h"
};
/* This tables contains entries for the 675/680/690 (Timon) camera, with
......@@ -134,7 +134,7 @@ struct Timon_table_entry
static struct Timon_table_entry Timon_table[PSZ_MAX][6][4] =
{
#include "timon.inc"
#include "pwc_timon.h"
};
/* Entries for the Kiara (730/740) camera */
......@@ -149,7 +149,7 @@ struct Kiara_table_entry
static struct Kiara_table_entry Kiara_table[PSZ_MAX][6][4] =
{
#include "kiara.inc"
#include "pwc_kiara.h"
};
......@@ -184,7 +184,6 @@ void pwc_hexdump(void *p, int len)
static inline int send_video_command(struct usb_device *udev, int index, void *buf, int buflen)
{
#ifdef __KERNEL__
return usb_control_msg(udev,
usb_sndctrlpipe(udev, 0),
SET_EP_STREAM_CTL,
......@@ -192,9 +191,6 @@ static inline int send_video_command(struct usb_device *udev, int index, void *b
VIDEO_OUTPUT_CONTROL_FORMATTER,
index,
buf, buflen, HZ);
#else
return 0;
#endif
}
......
......@@ -55,7 +55,6 @@
#endif
/* Function prototypes and driver templates */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
/* hotplug device table support */
static __devinitdata struct usb_device_id pwc_device_table [] = {
......@@ -83,20 +82,6 @@ static struct usb_driver pwc_driver =
disconnect: usb_pwc_disconnect, /* disconnect() */
};
#else
static void *usb_pwc_probe(struct usb_device *udev, unsigned int ifnum);
static void usb_pwc_disconnect(struct usb_device *udev, void *ptr);
static struct usb_driver pwc_driver =
{
name: "Philips webcam", /* name */
probe: usb_pwc_probe, /* probe() */
disconnect: usb_pwc_disconnect, /* disconnect() */
};
#endif
static int default_size = PSZ_QCIF;
static int default_fps = 10;
static int default_palette = VIDEO_PALETTE_RGB24; /* This is normal for webcams */
......@@ -122,16 +107,10 @@ static int pwc_video_ioctl(struct video_device *vdev, unsigned int cmd, void *a
static int pwc_video_mmap(struct video_device *dev, const char *adr, unsigned long size);
static struct video_device pwc_template = {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,3)
owner: NULL,
#endif
owner: THIS_MODULE,
name: "Philips Webcam", /* Filled in later */
type: VID_TYPE_CAPTURE,
#ifdef VID_HARDWARE_PWC
hardware: VID_HARDWARE_PWC, /* Let's pretend for now */
#else
hardware: 0, /* 2.2.14 backport (?) */
#endif
open: pwc_video_open,
close: pwc_video_close,
read: pwc_video_read,
......@@ -612,14 +591,8 @@ static inline void pwc_next_image(struct pwc_device *pdev)
static int pwc_set_palette(struct pwc_device *pdev, int pal)
{
if ( pal == VIDEO_PALETTE_RGB24
|| pal == VIDEO_PALETTE_RGB32
|| pal == (VIDEO_PALETTE_RGB24 | 0x80)
|| pal == (VIDEO_PALETTE_RGB32 | 0x80)
|| pal == VIDEO_PALETTE_YUYV
|| pal == VIDEO_PALETTE_YUV422
|| pal == VIDEO_PALETTE_YUV420
|| pal == VIDEO_PALETTE_YUV420P
if (
pal == VIDEO_PALETTE_YUV420
#if PWC_DEBUG
|| pal == VIDEO_PALETTE_RAW
#endif
......@@ -949,17 +922,12 @@ static int pwc_video_open(struct video_device *vdev, int mode)
Trace(TRACE_OPEN, "video_open called(0x%p, 0%o).\n", vdev, mode);
if (vdev == NULL) {
Err("video_open() called with NULL structure?\n");
return -EFAULT;
}
if (vdev == NULL)
BUG();
pdev = (struct pwc_device *)vdev->priv;
if (pdev == NULL) {
Err("video_open() called with NULL pwc_device.\n");
return -EFAULT;
}
if (pdev == NULL)
BUG();
MOD_INC_USE_COUNT;
down(&pdev->modlock);
if (!pdev->usb_init) {
Trace(TRACE_OPEN, "Doing first time initialization.\n");
......@@ -981,7 +949,6 @@ static int pwc_video_open(struct video_device *vdev, int mode)
i = pwc_allocate_buffers(pdev);
if (i < 0) {
Trace(TRACE_OPEN, "Failed to allocate memory.\n");
MOD_DEC_USE_COUNT;
up(&pdev->modlock);
return i;
}
......@@ -1026,14 +993,12 @@ static int pwc_video_open(struct video_device *vdev, int mode)
}
if (i) {
Trace(TRACE_OPEN, "Second attempt at set_video_mode failed.\n");
MOD_DEC_USE_COUNT;
up(&pdev->modlock);
return i;
}
i = usb_set_interface(pdev->udev, 0, pdev->valternate);
if (i) {
Trace(TRACE_OPEN, "Failed to set alternate interface = %d.\n", i);
MOD_DEC_USE_COUNT;
up(&pdev->modlock);
return -EINVAL;
}
......@@ -1064,15 +1029,8 @@ static void pwc_video_close(struct video_device *vdev)
int i;
Trace(TRACE_OPEN, "video_close called(0x%p).\n", vdev);
if (vdev == NULL) {
Err("video_close() called with NULL structure?\n");
return;
}
pdev = (struct pwc_device *)vdev->priv;
if (pdev == NULL) {
Err("video_close() called with NULL pwc_device.\n");
return;
}
if (pdev->vopen == 0)
Info("video_close() called on closed device?\n");
......@@ -1116,10 +1074,12 @@ static void pwc_video_close(struct video_device *vdev)
/* wake up _disconnect() routine */
if (pdev->unplugged)
wake_up(&pdev->remove_ok);
MOD_DEC_USE_COUNT;
}
/*
* FIXME: what about two parallel reads ????
*/
static long pwc_video_read(struct video_device *vdev, char *buf, unsigned long count, int noblock)
{
struct pwc_device *pdev;
......@@ -1309,6 +1269,9 @@ static int pwc_video_ioctl(struct video_device *vdev, unsigned int cmd, void *ar
if (copy_from_user(&p, arg, sizeof(p)))
return -EFAULT;
/*
* FIXME: Suppose we are mid read
*/
pwc_set_brightness(pdev, p.brightness);
pwc_set_contrast(pdev, p.contrast);
pwc_set_gamma(pdev, p.whiteness);
......@@ -1482,6 +1445,8 @@ static int pwc_video_ioctl(struct video_device *vdev, unsigned int cmd, void *ar
conflict with read(), but any programmer that uses
read() and mmap() simultaneously should be given
a job at Micro$oft. As janitor.
FIXME: needs auditing for safety.
*/
while (pdev->full_frames == NULL) {
interruptible_sleep_on(&pdev->frameq);
......@@ -1521,11 +1486,9 @@ static int pwc_video_mmap(struct video_device *vdev, const char *adr, unsigned l
unsigned long page, pos;
Trace(TRACE_READ, "mmap(0x%p, 0x%p, %lu) called.\n", vdev, adr, size);
if (vdev == NULL)
return -EFAULT;
pdev = vdev->priv;
if (pdev == NULL)
return -EFAULT;
/* FIXME - audit mmap during a read */
pos = (unsigned long)pdev->image_data;
while (size > 0) {
......@@ -1551,11 +1514,7 @@ static int pwc_video_mmap(struct video_device *vdev, const char *adr, unsigned l
* is loaded.
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
static void *usb_pwc_probe(struct usb_device *udev, unsigned int ifnum, const struct usb_device_id *id)
#else
static void *usb_pwc_probe(struct usb_device *udev, unsigned int ifnum)
#endif
{
struct pwc_device *pdev = NULL;
struct video_device *vdev;
......@@ -1579,8 +1538,6 @@ static void *usb_pwc_probe(struct usb_device *udev, unsigned int ifnum)
vendor_id = udev->descriptor.idVendor;
product_id = udev->descriptor.idProduct;
if (vendor_id != 0x0471 && vendor_id != 0x069A)
return NULL; /* Not Philips or Askey, for sure. */
if (vendor_id == 0x0471) {
switch (product_id) {
......@@ -1621,7 +1578,7 @@ static void *usb_pwc_probe(struct usb_device *udev, unsigned int ifnum)
break;
}
}
if (vendor_id == 0x069A) {
else if (vendor_id == 0x069A) {
switch(product_id) {
case 0x0001:
Info("Askey VC010 type 1 USB webcam detected.\n");
......@@ -1632,6 +1589,8 @@ static void *usb_pwc_probe(struct usb_device *udev, unsigned int ifnum)
break;
}
}
else return NULL; /* Not Philips or Askey, for sure. */
if (udev->descriptor.bNumConfigurations > 1)
Info("Warning: more than 1 configuration available.\n");
......@@ -1665,9 +1624,7 @@ static void *usb_pwc_probe(struct usb_device *udev, unsigned int ifnum)
}
memcpy(vdev, &pwc_template, sizeof(pwc_template));
sprintf(vdev->name, "Philips %d webcam", pdev->type);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,3)
SET_MODULE_OWNER(vdev);
#endif
pdev->vdev = vdev;
vdev->priv = pdev;
......@@ -1827,22 +1784,10 @@ static int __init usb_pwc_init(void)
}
if (palette) {
/* Determine default palette */
if (!strcmp(palette, "bgr24"))
default_palette = VIDEO_PALETTE_RGB24;
else if (!strcmp(palette, "rgb24"))
default_palette = VIDEO_PALETTE_RGB24 | 0x80;
else if (!strcmp(palette, "bgr32"))
default_palette = VIDEO_PALETTE_RGB32;
else if (!strcmp(palette, "rgb32"))
default_palette = VIDEO_PALETTE_RGB32 | 0x80;
else if (!strcmp(palette, "yuyv"))
default_palette = VIDEO_PALETTE_YUYV;
else if (!strcmp(palette, "yuv420"))
if (!strcmp(palette, "yuv420"))
default_palette = VIDEO_PALETTE_YUV420;
else if (!strcmp(palette, "yuv420p"))
default_palette = VIDEO_PALETTE_YUV420P;
else {
Err("Palette not recognized: try palette=[bgr24 | rgb24 | bgr32 | rgb32 | yuyv | yuv420 | yuv420p].\n");
Err("Palette not recognized: try palette=yuv420.\n");
return -EINVAL;
}
Info("Default palette set to %d.\n", default_palette);
......
......@@ -21,14 +21,12 @@
themselves. It also has a decompressor wrapper function.
*/
#include "ccvt.h"
#include "vcvt.h"
#include "pwc.h"
#include "pwc-uncompress.h"
/* This contains a list of all registered decompressors */
LIST_HEAD(pwc_decompressor_list);
static LIST_HEAD(pwc_decompressor_list);
/* Should the pwc_decompress structure ever change, we increase the
version number so that we don't get nasty surprises, or can
......@@ -126,50 +124,14 @@ int pwc_decompress(struct pwc_device *pdev)
if (pdev->image.x == pdev->view.x && pdev->image.y == pdev->view.y) {
/* Sizes matches; make it quick */
switch(pdev->vpalette) {
case VIDEO_PALETTE_RGB24 | 0x80:
ccvt_420i_rgb24(pdev->image.x, pdev->image.y, yuv, image);
break;
case VIDEO_PALETTE_RGB24:
ccvt_420i_bgr24(pdev->image.x, pdev->image.y, yuv, image);
break;
case VIDEO_PALETTE_RGB32 | 0x80:
ccvt_420i_rgb32(pdev->image.x, pdev->image.y, yuv, image);
break;
case VIDEO_PALETTE_RGB32:
ccvt_420i_bgr32(pdev->image.x, pdev->image.y, yuv, image);
break;
case VIDEO_PALETTE_YUYV:
case VIDEO_PALETTE_YUV422:
ccvt_420i_yuyv(pdev->image.x, pdev->image.y, yuv, image);
break;
case VIDEO_PALETTE_YUV420:
memcpy(image, yuv, pdev->image.size);
break;
case VIDEO_PALETTE_YUV420P:
n = pdev->image.x * pdev->image.y;
ccvt_420i_420p(pdev->image.x, pdev->image.y, yuv, image, image + n, image + n + (n / 4));
break;
}
}
else {
/* Size mismatch; use viewport conversion routines */
switch(pdev->vpalette) {
case VIDEO_PALETTE_RGB24 | 0x80:
vcvt_420i_rgb24(pdev->image.x, pdev->image.y, pdev->view.x, yuv, image + pdev->offset.size);
break;
case VIDEO_PALETTE_RGB24:
vcvt_420i_bgr24(pdev->image.x, pdev->image.y, pdev->view.x, yuv, image + pdev->offset.size);
break;
case VIDEO_PALETTE_RGB32 | 0x80:
vcvt_420i_rgb32(pdev->image.x, pdev->image.y, pdev->view.x, yuv, image + pdev->offset.size);
break;
case VIDEO_PALETTE_RGB32:
vcvt_420i_bgr32(pdev->image.x, pdev->image.y, pdev->view.x, yuv, image + pdev->offset.size);
break;
case VIDEO_PALETTE_YUYV:
case VIDEO_PALETTE_YUV422:
vcvt_420i_yuyv(pdev->image.x, pdev->image.y, pdev->view.x, yuv, image + pdev->offset.size);
break;
case VIDEO_PALETTE_YUV420:
dst = image + pdev->offset.size;
w = pdev->view.x * 6;
......@@ -180,41 +142,16 @@ int pwc_decompress(struct pwc_device *pdev)
yuv += c;
}
break;
case VIDEO_PALETTE_YUV420P:
n = pdev->view.x * pdev->view.y;
vcvt_420i_420p(pdev->image.x, pdev->image.y, pdev->view.x, yuv,
image + pdev->offset.size,
image + n + pdev->offset.size / 4,
image + n + n / 4 + pdev->offset.size / 4);
break;
}
}
return 0;
}
/* wrapper functions.
By using these wrapper functions and exporting them with no VERSIONING,
I can be sure the pwcx.o module will load on all systems.
*/
void *pwc_kmalloc(size_t size, int priority)
{
return kmalloc(size, priority);
}
void pwc_kfree(const void *pointer)
{
kfree(pointer);
}
/* Make sure these functions are available for the decompressor plugin
both when this code is compiled into the kernel or as as module.
We are using unversioned names!
*/
EXPORT_SYMBOL_NOVERS(pwc_decompressor_version);
EXPORT_SYMBOL_NOVERS(pwc_register_decompressor);
EXPORT_SYMBOL_NOVERS(pwc_unregister_decompressor);
EXPORT_SYMBOL_NOVERS(pwc_find_decompressor);
EXPORT_SYMBOL_NOVERS(pwc_kmalloc);
EXPORT_SYMBOL_NOVERS(pwc_kfree);
EXPORT_SYMBOL(pwc_register_decompressor);
EXPORT_SYMBOL(pwc_unregister_decompressor);
EXPORT_SYMBOL(pwc_find_decompressor);
......@@ -18,27 +18,16 @@
#ifndef PWC_H
#define PWC_H
#ifdef __KERNEL__
#include <linux/config.h>
#include <linux/module.h>
#include <asm/semaphore.h>
#include <asm/errno.h>
#include <linux/usb.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#else
#include <errno.h>
#include <sys/types.h>
#define printk printf
#define KERN_DEBUG "<7>"
#define KERN_INFO "<6>"
#define KERN_ERR "<3>"
#endif
#include <linux/config.h>
#include <linux/videodev.h>
#include <linux/wait.h>
#include <asm/semaphore.h>
#include <asm/errno.h>
/* Defines and structures for the Philips webcam */
/* Used for checking memory corruption/pointer validation */
#define PWC_MAGIC 0x89DC10ABUL
......@@ -56,10 +45,10 @@
#define TRACE_SIZE 0x0040
#define TRACE_SEQUENCE 0x1000
#define Trace(R, A...) if (pwc_trace & R) printk(KERN_DEBUG PWC_NAME " " ##A)
#define Debug(A...) printk(KERN_DEBUG PWC_NAME " " ##A)
#define Info(A...) printk(KERN_INFO PWC_NAME " " ##A)
#define Err(A...) printk(KERN_ERR PWC_NAME " " ##A)
#define Trace(R, A...) if (pwc_trace & R) printk(KERN_DEBUG PWC_NAME " " A)
#define Debug(A...) printk(KERN_DEBUG PWC_NAME " " A)
#define Info(A...) printk(KERN_INFO PWC_NAME " " A)
#define Err(A...) printk(KERN_ERR PWC_NAME " " A)
/* Defines for ToUCam cameras */
......@@ -122,9 +111,7 @@ struct pwc_iso_buf
void *data;
int length;
int read;
#ifdef __KERNEL__
purb_t urb;
#endif
};
/* intermediate buffers with raw data from the USB cam */
......@@ -218,7 +205,6 @@ struct pwc_device
int image_read_pos; /* In case we read data in pieces, keep track of were we are in the imagebuffer */
int image_used[MAX_IMAGES]; /* For MCAPTURE and SYNC */
#ifdef __KERNEL__
/* Kernel specific structures. These were once moved to the end
of the structure and padded with bytes after I found out
some of these have different sizes in different kernel versions.
......@@ -237,7 +223,6 @@ struct pwc_device
#if PWC_INT_PIPE
void *usb_int_handler; /* for the interrupt endpoint */
#endif
#endif
};
/* Enumeration of image sizes */
......
/*
*
* Hardware accelerated Matrox Millennium I, II, Mystique, G100, G200, G400 and G450.
*
* (c) 1998-2001 Petr Vandrovec <vandrove@vc.cvut.cz>
*
* Version: 1.52 2001/05/25
*
*/
#include "matroxfb_maven.h"
#include "matroxfb_crtc2.h"
#include "matroxfb_misc.h"
......@@ -649,6 +659,9 @@ static int matroxfb_dh_regit(CPMINFO struct matroxfb_dh_fb_info* m2info) {
void* oldcrtc2;
d = kmalloc(sizeof(*d), GFP_KERNEL);
if (!d) {
return -ENOMEM;
}
memset(d, 0, sizeof(*d));
......@@ -800,7 +813,7 @@ static void matroxfb_crtc2_exit(void) {
matroxfb_unregister_driver(&crtc2);
}
MODULE_AUTHOR("(c) 1999,2000 Petr Vandrovec <vandrove@vc.cvut.cz>");
MODULE_AUTHOR("(c) 1999-2001 Petr Vandrovec <vandrove@vc.cvut.cz>");
MODULE_DESCRIPTION("Matrox G400 CRTC2 driver");
module_init(matroxfb_crtc2_init);
module_exit(matroxfb_crtc2_exit);
......
......@@ -626,7 +626,7 @@ int inode_has_buffers(struct inode *inode)
to do in order to release the ramdisk memory is to destroy dirty buffers.
These are two special cases. Normal usage imply the device driver
to issue a sync on the device (without waiting I/O completation) and
to issue a sync on the device (without waiting I/O completion) and
then an invalidate_buffers call that doesn't trash dirty buffers. */
void __invalidate_buffers(kdev_t dev, int destroy_dirty_buffers)
{
......@@ -760,7 +760,12 @@ static void refill_freelist(int size)
balance_dirty(NODEV);
if (free_shortage())
page_launder(GFP_BUFFER, 0);
grow_buffers(size);
if (!grow_buffers(size)) {
wakeup_bdflush(1);
current->policy |= SCHED_YIELD;
__set_current_state(TASK_RUNNING);
schedule();
}
}
void init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
......@@ -1025,12 +1030,13 @@ struct buffer_head * getblk(kdev_t dev, int block, int size)
write_unlock(&hash_table_lock);
spin_unlock(&lru_list_lock);
refill_freelist(size);
/* FIXME: getblk should fail if there's no enough memory */
goto repeat;
}
/* -1 -> no need to flush
0 -> async flush
1 -> sync flush (wait for I/O completation) */
1 -> sync flush (wait for I/O completion) */
int balance_dirty_state(kdev_t dev)
{
unsigned long dirty, tot, hard_dirty_limit, soft_dirty_limit;
......@@ -1419,6 +1425,7 @@ static void create_empty_buffers(struct page *page, kdev_t dev, unsigned long bl
{
struct buffer_head *bh, *head, *tail;
/* FIXME: create_buffers should fail if there's no enough memory */
head = create_buffers(page, blocksize, 1);
if (page->buffers)
BUG();
......@@ -2341,11 +2348,9 @@ int try_to_free_buffers(struct page * page, int wait)
spin_lock(&free_list[index].lock);
tmp = bh;
do {
struct buffer_head *p = tmp;
tmp = tmp->b_this_page;
if (buffer_busy(p))
if (buffer_busy(tmp))
goto busy_buffer_page;
tmp = tmp->b_this_page;
} while (tmp != bh);
spin_lock(&unused_list_lock);
......
......@@ -405,133 +405,110 @@ static void proc_kill_inodes(struct proc_dir_entry *de)
file_list_unlock();
}
struct proc_dir_entry *proc_symlink(const char *name,
struct proc_dir_entry *parent, const char *dest)
static struct proc_dir_entry *proc_create(struct proc_dir_entry **parent,
const char *name,
mode_t mode,
nlink_t nlink)
{
struct proc_dir_entry *ent = NULL;
const char *fn = name;
int len;
if (!parent && xlate_proc_name(name, &parent, &fn) != 0)
/* make sure name is valid */
if (!name || !strlen(name)) goto out;
if (!(*parent) && xlate_proc_name(name, parent, &fn) != 0)
goto out;
len = strlen(fn);
ent = kmalloc(sizeof(struct proc_dir_entry) + len + 1, GFP_KERNEL);
if (!ent)
goto out;
if (!ent) goto out;
memset(ent, 0, sizeof(struct proc_dir_entry));
memcpy(((char *) ent) + sizeof(*ent), fn, len + 1);
memcpy(((char *) ent) + sizeof(struct proc_dir_entry), fn, len + 1);
ent->name = ((char *) ent) + sizeof(*ent);
ent->namelen = len;
ent->nlink = 1;
ent->mode = S_IFLNK|S_IRUGO|S_IWUGO|S_IXUGO;
ent->data = kmalloc((ent->size=strlen(dest))+1, GFP_KERNEL);
if (!ent->data) {
kfree(ent);
ent = NULL;
goto out;
}
strcpy((char*)ent->data,dest);
ent->mode = mode;
ent->nlink = nlink;
out:
return ent;
}
proc_register(parent, ent);
out:
/*
 * proc_symlink - create and register a symlink entry under /proc.
 *
 * Allocates the entry via proc_create(), then attaches a private copy
 * of the destination string as ->data before registering it.  On any
 * allocation failure the partially built entry is freed and NULL is
 * returned.
 */
struct proc_dir_entry *proc_symlink(const char *name,
			struct proc_dir_entry *parent, const char *dest)
{
	struct proc_dir_entry *ent;

	ent = proc_create(&parent, name,
			  (S_IFLNK | S_IRUGO | S_IWUGO | S_IXUGO), 1);
	if (!ent)
		return NULL;

	/* ->size is the link target length, excluding the NUL. */
	ent->size = strlen(dest);
	ent->data = kmalloc(ent->size + 1, GFP_KERNEL);
	if (!ent->data) {
		kfree(ent);
		return NULL;
	}
	strcpy((char *)ent->data, dest);
	proc_register(parent, ent);
	return ent;
}
struct proc_dir_entry *proc_mknod(const char *name, mode_t mode,
struct proc_dir_entry *parent, kdev_t rdev)
{
struct proc_dir_entry *ent = NULL;
const char *fn = name;
int len;
if (!parent && xlate_proc_name(name, &parent, &fn) != 0)
goto out;
len = strlen(fn);
ent = kmalloc(sizeof(struct proc_dir_entry) + len + 1, GFP_KERNEL);
if (!ent)
goto out;
memset(ent, 0, sizeof(struct proc_dir_entry));
memcpy(((char *) ent) + sizeof(*ent), fn, len + 1);
ent->name = ((char *) ent) + sizeof(*ent);
ent->namelen = len;
ent->nlink = 1;
ent->mode = mode;
ent->rdev = rdev;
struct proc_dir_entry *ent;
proc_register(parent, ent);
out:
ent = proc_create(&parent,name,mode,1);
if (ent) {
ent->rdev = rdev;
proc_register(parent, ent);
}
return ent;
}
struct proc_dir_entry *proc_mkdir(const char *name, struct proc_dir_entry *parent)
{
struct proc_dir_entry *ent = NULL;
const char *fn = name;
int len;
struct proc_dir_entry *ent;
if (!parent && xlate_proc_name(name, &parent, &fn) != 0)
goto out;
len = strlen(fn);
ent = kmalloc(sizeof(struct proc_dir_entry) + len + 1, GFP_KERNEL);
if (!ent)
goto out;
memset(ent, 0, sizeof(struct proc_dir_entry));
memcpy(((char *) ent) + sizeof(*ent), fn, len + 1);
ent->name = ((char *) ent) + sizeof(*ent);
ent->namelen = len;
ent->proc_fops = &proc_dir_operations;
ent->proc_iops = &proc_dir_inode_operations;
ent->nlink = 2;
ent->mode = S_IFDIR | S_IRUGO | S_IXUGO;
ent = proc_create(&parent,name,
(S_IFDIR | S_IRUGO | S_IXUGO),2);
if (ent) {
ent->proc_fops = &proc_dir_operations;
ent->proc_iops = &proc_dir_inode_operations;
proc_register(parent, ent);
out:
proc_register(parent, ent);
}
return ent;
}
/*
 * create_proc_entry - create a generic entry in the proc filesystem
 * @name:   name of the entry
 * @mode:   requested mode; missing type defaults to S_IFREG, missing
 *          permission bits default to S_IRUGO (plus S_IXUGO for dirs)
 * @parent: parent directory, or NULL to resolve @name from the proc root
 *
 * Directories get nlink == 2 and the shared proc directory operations;
 * everything else gets nlink == 1.
 *
 * NOTE(review): the original span was stripped-diff residue — 'ent' was
 * declared twice and dereferenced before any allocation, and both the old
 * kmalloc path and the new proc_create() path registered the entry.
 * Reconstructed as the post-patch form.
 *
 * Returns the registered entry, or NULL on allocation failure.
 */
struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode,
					 struct proc_dir_entry *parent)
{
	struct proc_dir_entry *ent;
	nlink_t nlink;

	if (S_ISDIR(mode)) {
		if ((mode & S_IALLUGO) == 0)
			mode |= S_IRUGO | S_IXUGO;
		nlink = 2;
	} else {
		if ((mode & S_IFMT) == 0)
			mode |= S_IFREG;
		if ((mode & S_IALLUGO) == 0)
			mode |= S_IRUGO;
		nlink = 1;
	}

	ent = proc_create(&parent, name, mode, nlink);
	if (ent) {
		if (S_ISDIR(mode)) {
			ent->proc_fops = &proc_dir_operations;
			ent->proc_iops = &proc_dir_inode_operations;
		}
		proc_register(parent, ent);
	}
	return ent;
}
......
......@@ -282,6 +282,21 @@ struct file_system_type *get_fs_type(const char *name)
static LIST_HEAD(vfsmntlist);
struct vfsmount *alloc_vfsmnt(void)
{
struct vfsmount *mnt = kmalloc(sizeof(struct vfsmount), GFP_KERNEL);
if (mnt) {
memset(mnt, 0, sizeof(struct vfsmount));
atomic_set(&mnt->mnt_count,1);
INIT_LIST_HEAD(&mnt->mnt_clash);
INIT_LIST_HEAD(&mnt->mnt_child);
INIT_LIST_HEAD(&mnt->mnt_mounts);
INIT_LIST_HEAD(&mnt->mnt_list);
mnt->mnt_owner = current->uid;
}
return mnt;
}
static void detach_mnt(struct vfsmount *mnt, struct nameidata *old_nd)
{
old_nd->dentry = mnt->mnt_mountpoint;
......@@ -314,13 +329,6 @@ static void attach_mnt(struct vfsmount *mnt, struct nameidata *nd)
* Potential reason for failure (aside of trivial lack of memory) is a
* deleted mountpoint. Caller must hold ->i_zombie on mountpoint
* dentry (if any).
*
* Node is marked as MNT_VISIBLE (visible in /proc/mounts) unless both
* @nd and @devname are %NULL. It works since we pass non-%NULL @devname
* when we are mounting root and kern_mount() filesystems are deviceless.
* If we will get a kern_mount() filesystem with nontrivial @devname we
* will have to pass the visibility flag explicitly, so if we will add
* support for such beasts we'll have to change prototype.
*/
static struct vfsmount *add_vfsmnt(struct nameidata *nd,
......@@ -331,13 +339,9 @@ static struct vfsmount *add_vfsmnt(struct nameidata *nd,
struct super_block *sb = root->d_inode->i_sb;
char *name;
mnt = kmalloc(sizeof(struct vfsmount), GFP_KERNEL);
mnt = alloc_vfsmnt();
if (!mnt)
goto out;
memset(mnt, 0, sizeof(struct vfsmount));
if (nd || dev_name)
mnt->mnt_flags = MNT_VISIBLE;
/* It may be NULL, but who cares? */
if (dev_name) {
......@@ -347,8 +351,6 @@ static struct vfsmount *add_vfsmnt(struct nameidata *nd,
mnt->mnt_devname = name;
}
}
mnt->mnt_owner = current->uid;
atomic_set(&mnt->mnt_count,1);
mnt->mnt_sb = sb;
spin_lock(&dcache_lock);
......@@ -361,13 +363,12 @@ static struct vfsmount *add_vfsmnt(struct nameidata *nd,
} else {
mnt->mnt_mountpoint = mnt->mnt_root;
mnt->mnt_parent = mnt;
INIT_LIST_HEAD(&mnt->mnt_child);
INIT_LIST_HEAD(&mnt->mnt_clash);
}
INIT_LIST_HEAD(&mnt->mnt_mounts);
list_add(&mnt->mnt_instances, &sb->s_mounts);
list_add(&mnt->mnt_list, vfsmntlist.prev);
spin_unlock(&dcache_lock);
if (sb->s_type->fs_flags & FS_SINGLE)
get_filesystem(sb->s_type);
out:
return mnt;
fail:
......@@ -500,8 +501,6 @@ int get_filesystem_info( char *buf )
for (p = vfsmntlist.next; p != &vfsmntlist; p = p->next) {
struct vfsmount *tmp = list_entry(p, struct vfsmount, mnt_list);
if (!(tmp->mnt_flags & MNT_VISIBLE))
continue;
path = d_path(tmp->mnt_root, tmp, buffer, PAGE_SIZE);
if (!path)
continue;
......@@ -855,7 +854,6 @@ static struct super_block *get_sb_single(struct file_system_type *fs_type,
sb = fs_type->kern_mnt->mnt_sb;
if (!sb)
BUG();
get_filesystem(fs_type);
do_remount_sb(sb, flags, data);
return sb;
}
......@@ -947,21 +945,31 @@ static int do_remount_sb(struct super_block *sb, int flags, char *data)
/*
 * kern_mount - mount a filesystem internally (not visible in any namespace)
 * @type: filesystem type to mount
 *
 * Allocates the vfsmount first so a failed allocation costs nothing,
 * then grabs an anonymous device and reads the superblock.  The mount
 * is wired up as its own parent/mountpoint (a self-rooted mount) and
 * recorded in type->kern_mnt.
 *
 * NOTE(review): the original span was stripped-diff residue interleaving
 * the old add_vfsmnt()-based version with the new alloc_vfsmnt() one
 * ('dev' declared and initialised twice, both setup paths present).
 * Reconstructed as the post-patch form.
 *
 * Returns the mount, or ERR_PTR(-ENOMEM / -EMFILE / -EINVAL) on failure;
 * each error path releases everything acquired before it (goto-style
 * unwinding is done inline here since only three resources exist).
 */
struct vfsmount *kern_mount(struct file_system_type *type)
{
	struct super_block *sb;
	struct vfsmount *mnt = alloc_vfsmnt();
	kdev_t dev;

	if (!mnt)
		return ERR_PTR(-ENOMEM);
	dev = get_unnamed_dev();
	if (!dev) {
		kfree(mnt);
		return ERR_PTR(-EMFILE);
	}
	sb = read_super(dev, NULL, type, 0, NULL, 0);
	if (!sb) {
		put_unnamed_dev(dev);
		kfree(mnt);
		return ERR_PTR(-EINVAL);
	}
	mnt->mnt_sb = sb;
	mnt->mnt_root = dget(sb->s_root);
	mnt->mnt_mountpoint = mnt->mnt_root;
	mnt->mnt_parent = mnt;		/* self-rooted: it is its own parent */
	spin_lock(&dcache_lock);
	list_add(&mnt->mnt_instances, &sb->s_mounts);
	spin_unlock(&dcache_lock);
	type->kern_mnt = mnt;
	return mnt;
}
......@@ -1158,8 +1166,6 @@ static int do_loopback(char *old_name, char *new_name)
goto out2;
err = -ENOMEM;
if (old_nd.mnt->mnt_sb->s_type->fs_flags & FS_SINGLE)
get_filesystem(old_nd.mnt->mnt_sb->s_type);
down(&mount_sem);
/* there we go */
......@@ -1170,8 +1176,6 @@ static int do_loopback(char *old_name, char *new_name)
err = 0;
up(&new_nd.dentry->d_inode->i_zombie);
up(&mount_sem);
if (err && old_nd.mnt->mnt_sb->s_type->fs_flags & FS_SINGLE)
put_filesystem(old_nd.mnt->mnt_sb->s_type);
out2:
path_release(&new_nd);
out1:
......@@ -1362,8 +1366,6 @@ long do_mount(char * dev_name, char * dir_name, char *type_page,
return retval;
fail:
if (fstype->fs_flags & FS_SINGLE)
put_filesystem(fstype);
kill_super(sb);
goto unlock_out;
}
......
......@@ -12,8 +12,6 @@
#define _LINUX_MOUNT_H
#ifdef __KERNEL__
#define MNT_VISIBLE 1
struct vfsmount
{
struct dentry *mnt_mountpoint; /* dentry of mountpoint */
......
......@@ -201,13 +201,21 @@
/* Power Management Registers */
#define PCI_PM_PMC 2 /* PM Capabilities Register */
#define PCI_PM_CAP_VER_MASK 0x0007 /* Version */
#define PCI_PM_CAP_PME_CLOCK 0x0008 /* PME clock required */
#define PCI_PM_CAP_AUX_POWER 0x0010 /* Auxilliary power support */
#define PCI_PM_CAP_RESERVED 0x0010 /* Reserved field */
#define PCI_PM_CAP_DSI 0x0020 /* Device specific initialization */
#define PCI_PM_CAP_AUX_POWER 0x01C0 /* Auxilliary power support mask */
#define PCI_PM_CAP_D1 0x0200 /* D1 power state support */
#define PCI_PM_CAP_D2 0x0400 /* D2 power state support */
#define PCI_PM_CAP_PME 0x0800 /* PME pin supported */
#define PCI_PM_CAP_PME_MASK 0xF800 /* PME Mask of all supported states */
#define PCI_PM_CAP_PME_D0 0x0800 /* PME# from D0 */
#define PCI_PM_CAP_PME_D1 0x1000 /* PME# from D1 */
#define PCI_PM_CAP_PME_D2 0x2000 /* PME# from D2 */
#define PCI_PM_CAP_PME_D3 0x4000 /* PME# from D3 (hot) */
#define PCI_PM_CAP_PME_D3cold 0x8000 /* PME# from D3 (cold) */
#define PCI_PM_CTRL 4 /* PM control and status register */
#define PCI_PM_CTRL_STATE_MASK 0x0003 /* Current power state (D0 to D3) */
#define PCI_PM_CTRL_PME_ENABLE 0x0100 /* PME pin enable */
......
......@@ -159,6 +159,19 @@ void kunmap_high(struct page *page)
spin_unlock(&kmap_lock);
}
#define POOL_SIZE 32
/*
* This lock gets no contention at all, normally.
*/
static spinlock_t emergency_lock = SPIN_LOCK_UNLOCKED;
int nr_emergency_pages;
static LIST_HEAD(emergency_pages);
int nr_emergency_bhs;
static LIST_HEAD(emergency_bhs);
/*
* Simple bounce buffer support for highmem pages.
* This will be moved to the block layer in 2.5.
......@@ -203,17 +216,72 @@ static inline void copy_to_high_bh_irq (struct buffer_head *to,
/*
 * bounce_end_io - complete a bounced buffer and recycle its resources
 * @bh:       the bounce buffer_head (its b_private points at the original)
 * @uptodate: I/O completion status, forwarded to the original b_end_io
 *
 * Instead of always freeing the bounce page and bh, top up the emergency
 * pools (up to POOL_SIZE each) so future bounce allocations cannot
 * deadlock under memory pressure.  page->list and bh->b_inode_buffers are
 * (ab)used as the pool linkage, as the comments below note.
 *
 * NOTE(review): the original span was stripped-diff residue — it still
 * contained the old unconditional __free_page(bh->b_page) (a double free
 * alongside the pool logic) and duplicated init_waitqueue_head /
 * kmem_cache_free lines.  Reconstructed as the post-patch form.
 */
static inline void bounce_end_io (struct buffer_head *bh, int uptodate)
{
	struct page *page;
	struct buffer_head *bh_orig = (struct buffer_head *)(bh->b_private);
	unsigned long flags;

	bh_orig->b_end_io(bh_orig, uptodate);

	page = bh->b_page;
	spin_lock_irqsave(&emergency_lock, flags);
	if (nr_emergency_pages >= POOL_SIZE)
		__free_page(page);
	else {
		/*
		 * We are abusing page->list to manage
		 * the highmem emergency pool:
		 */
		list_add(&page->list, &emergency_pages);
		nr_emergency_pages++;
	}

	if (nr_emergency_bhs >= POOL_SIZE) {
#ifdef HIGHMEM_DEBUG
		/* Don't clobber the constructed slab cache */
		init_waitqueue_head(&bh->b_wait);
#endif
		kmem_cache_free(bh_cachep, bh);
	} else {
		/*
		 * Ditto in the bh case, here we abuse b_inode_buffers:
		 */
		list_add(&bh->b_inode_buffers, &emergency_bhs);
		nr_emergency_bhs++;
	}
	spin_unlock_irqrestore(&emergency_lock, flags);
}
/*
 * init_emergency_pool - pre-fill the highmem bounce emergency pools
 *
 * Stocks up to POOL_SIZE reserve pages and POOL_SIZE reserve buffer
 * heads at boot so bounce I/O can always make forward progress.  A
 * failed GFP_ATOMIC allocation just stops the refill early (with a
 * printk); partial pools are still usable.  Always returns 0.
 */
static __init int init_emergency_pool(void)
{
	spin_lock_irq(&emergency_lock);

	for (; nr_emergency_pages < POOL_SIZE; nr_emergency_pages++) {
		struct page *reserve = alloc_page(GFP_ATOMIC);
		if (!reserve) {
			printk("couldn't refill highmem emergency pages");
			break;
		}
		/* page->list doubles as the pool linkage */
		list_add(&reserve->list, &emergency_pages);
	}

	for (; nr_emergency_bhs < POOL_SIZE; nr_emergency_bhs++) {
		struct buffer_head *reserve =
			kmem_cache_alloc(bh_cachep, SLAB_ATOMIC);
		if (!reserve) {
			printk("couldn't refill highmem emergency bhs");
			break;
		}
		/* b_inode_buffers doubles as the pool linkage */
		list_add(&reserve->b_inode_buffers, &emergency_bhs);
	}

	spin_unlock_irq(&emergency_lock);
	printk("allocated %d pages and %d bhs reserved for the highmem bounces\n",
	       nr_emergency_pages, nr_emergency_bhs);

	return 0;
}

__initcall(init_emergency_pool);
static void bounce_end_io_write (struct buffer_head *bh, int uptodate)
{
bounce_end_io(bh, uptodate);
......@@ -228,6 +296,82 @@ static void bounce_end_io_read (struct buffer_head *bh, int uptodate)
bounce_end_io(bh, uptodate);
}
struct page *alloc_bounce_page (void)
{
struct list_head *tmp;
struct page *page;
repeat_alloc:
page = alloc_page(GFP_BUFFER);
if (page)
return page;
/*
* No luck. First, kick the VM so it doesnt idle around while
* we are using up our emergency rations.
*/
wakeup_bdflush(0);
/*
* Try to allocate from the emergency pool.
*/
tmp = &emergency_pages;
spin_lock_irq(&emergency_lock);
if (!list_empty(tmp)) {
page = list_entry(tmp->next, struct page, list);
list_del(tmp->next);
nr_emergency_pages--;
}
spin_unlock_irq(&emergency_lock);
if (page)
return page;
/* we need to wait I/O completion */
run_task_queue(&tq_disk);
current->policy |= SCHED_YIELD;
__set_current_state(TASK_RUNNING);
schedule();
goto repeat_alloc;
}
struct buffer_head *alloc_bounce_bh (void)
{
struct list_head *tmp;
struct buffer_head *bh;
repeat_alloc:
bh = kmem_cache_alloc(bh_cachep, SLAB_BUFFER);
if (bh)
return bh;
/*
* No luck. First, kick the VM so it doesnt idle around while
* we are using up our emergency rations.
*/
wakeup_bdflush(0);
/*
* Try to allocate from the emergency pool.
*/
tmp = &emergency_bhs;
spin_lock_irq(&emergency_lock);
if (!list_empty(tmp)) {
bh = list_entry(tmp->next, struct buffer_head, b_inode_buffers);
list_del(tmp->next);
nr_emergency_bhs--;
}
spin_unlock_irq(&emergency_lock);
if (bh)
return bh;
/* we need to wait I/O completion */
run_task_queue(&tq_disk);
current->policy |= SCHED_YIELD;
__set_current_state(TASK_RUNNING);
schedule();
goto repeat_alloc;
}
struct buffer_head * create_bounce(int rw, struct buffer_head * bh_orig)
{
struct page *page;
......@@ -236,24 +380,15 @@ struct buffer_head * create_bounce(int rw, struct buffer_head * bh_orig)
if (!PageHighMem(bh_orig->b_page))
return bh_orig;
repeat_bh:
bh = kmem_cache_alloc(bh_cachep, SLAB_BUFFER);
if (!bh) {
wakeup_bdflush(1); /* Sets task->state to TASK_RUNNING */
goto repeat_bh;
}
bh = alloc_bounce_bh();
/*
* This is wasteful for 1k buffers, but this is a stopgap measure
* and we are being ineffective anyway. This approach simplifies
* things immensly. On boxes with more than 4GB RAM this should
* not be an issue anyway.
*/
repeat_page:
page = alloc_page(GFP_BUFFER);
if (!page) {
wakeup_bdflush(1); /* Sets task->state to TASK_RUNNING */
goto repeat_page;
}
page = alloc_bounce_page();
set_bh_page(bh, page, 0);
bh->b_next = NULL;
......
......@@ -251,10 +251,10 @@ static struct page * __alloc_pages_limit(zonelist_t *zonelist,
water_mark = z->pages_high;
}
if (z->free_pages + z->inactive_clean_pages > water_mark) {
if (z->free_pages + z->inactive_clean_pages >= water_mark) {
struct page *page = NULL;
/* If possible, reclaim a page directly. */
if (direct_reclaim && z->free_pages < z->pages_min + 8)
if (direct_reclaim)
page = reclaim_page(z);
/* If that fails, fall back to rmqueue. */
if (!page)
......@@ -299,21 +299,6 @@ struct page * __alloc_pages(zonelist_t *zonelist, unsigned long order)
if (order == 0 && (gfp_mask & __GFP_WAIT))
direct_reclaim = 1;
/*
* If we are about to get low on free pages and we also have
* an inactive page shortage, wake up kswapd.
*/
if (inactive_shortage() > inactive_target / 2 && free_shortage())
wakeup_kswapd();
/*
* If we are about to get low on free pages and cleaning
* the inactive_dirty pages would fix the situation,
* wake up bdflush.
*/
else if (free_shortage() && nr_inactive_dirty_pages > free_shortage()
&& nr_inactive_dirty_pages >= freepages.high)
wakeup_bdflush(0);
try_again:
/*
* First, see if we have any zones with lots of free memory.
......
......@@ -871,8 +871,11 @@ static int do_try_to_free_pages(unsigned int gfp_mask, int user)
* before we get around to moving them to the other
* list, so this is a relatively cheap operation.
*/
if (free_shortage())
if (free_shortage()) {
ret += page_launder(gfp_mask, user);
shrink_dcache_memory(DEF_PRIORITY, gfp_mask);
shrink_icache_memory(DEF_PRIORITY, gfp_mask);
}
/*
* If needed, we move pages from the active list
......@@ -882,23 +885,9 @@ static int do_try_to_free_pages(unsigned int gfp_mask, int user)
ret += refill_inactive(gfp_mask, user);
/*
* Delete pages from the inode and dentry caches and
* reclaim unused slab cache if memory is low.
* Reclaim unused slab cache if memory is low.
*/
if (free_shortage()) {
shrink_dcache_memory(DEF_PRIORITY, gfp_mask);
shrink_icache_memory(DEF_PRIORITY, gfp_mask);
} else {
/*
* Illogical, but true. At least for now.
*
* If we're _not_ under shortage any more, we
* reap the caches. Why? Because a noticeable
* part of the caches are the buffer-heads,
* which we'll want to keep if under shortage.
*/
kmem_cache_reap(gfp_mask);
}
kmem_cache_reap(gfp_mask);
return ret;
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment