Commit 9c6f70be authored by Linus Torvalds's avatar Linus Torvalds

v2.4.4.6 -> v2.4.5

  - Alan Cox: camera conversion missed parts
  - Neil Brown: md graceful alloc failure
  - Andrea Arcangeli: more alpha fixups, bounce buffer deadlock avoidance
  - Adam Fritzler: tms380tr driver update
  - Al Viro: VFS layer cleanups
parent c9df1e20
VERSION = 2
PATCHLEVEL = 4
SUBLEVEL = 5
EXTRAVERSION =-pre6
EXTRAVERSION =
KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
......
......@@ -213,15 +213,8 @@ if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
fi
fi
# The machine must be able to support more than 8GB physical memory
# before large vmalloc might even pretend to be an issue.
if [ "$CONFIG_ALPHA_GENERIC" = "y" -o "$CONFIG_ALPHA_DP264" = "y" \
-o "$CONFIG_ALPHA_WILDFIRE" = "y" -o "$CONFIG_ALPHA_TITAN" = "y" ]
then
bool 'Large VMALLOC support' CONFIG_ALPHA_LARGE_VMALLOC
else
define_bool CONFIG_ALPHA_LARGE_VMALLOC n
fi
# LARGE_VMALLOC is racy, if you *really* need it then fix it first
define_bool CONFIG_ALPHA_LARGE_VMALLOC n
source drivers/pci/Config.in
......
......@@ -207,6 +207,7 @@ EXPORT_SYMBOL(debug_spin_trylock);
EXPORT_SYMBOL(write_lock);
EXPORT_SYMBOL(read_lock);
#endif
EXPORT_SYMBOL(cpu_present_mask);
#endif /* CONFIG_SMP */
EXPORT_SYMBOL(rtc_lock);
......@@ -230,5 +231,6 @@ EXPORT_SYMBOL_NOVERS(__remq);
EXPORT_SYMBOL_NOVERS(__remqu);
EXPORT_SYMBOL_NOVERS(memcpy);
EXPORT_SYMBOL_NOVERS(memset);
EXPORT_SYMBOL_NOVERS(memchr);
EXPORT_SYMBOL(get_wchan);
......@@ -135,6 +135,7 @@ struct pci_iommu_arena
{
spinlock_t lock;
struct pci_controller *hose;
#define IOMMU_INVALID_PTE 0x2 /* 32:63 bits MBZ */
unsigned long *ptes;
dma_addr_t dma_base;
unsigned int size;
......
......@@ -42,6 +42,25 @@ calc_npages(long bytes)
return (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
}
/*
 * Mark the IOMMU page-table entries that map PCI bus addresses in the
 * top 1MB (0xfff00000..0xffffffff) as invalid, because the Cypress
 * chip mishandles DMA to that range.  Called once at init time, after
 * the arena's pte array has been allocated and zeroed.
 */
static void __init
iommu_arena_fixup(struct pci_iommu_arena * arena)
{
unsigned long base, size;
/*
 * The Cypress chip has a quirk, it get confused by addresses
 * above -1M so reserve the pagetables that maps pci addresses
 * above -1M.
 */
base = arena->dma_base;
size = arena->size;
/* Does this arena's DMA window reach into the top 1MB at all?
   NOTE(review): assumes base <= 0xfff00000 when it does — otherwise
   the index below would start negative; verify against callers. */
if (base + size > 0xfff00000) {
/* First pte index that maps at or above 0xfff00000 ... */
int i = (0xfff00000 - base) >> PAGE_SHIFT;
/* ... poison every pte up to the 1MB boundary (0x100000 bytes). */
for (; i < (0x100000 >> PAGE_SHIFT); i++)
arena->ptes[i] = IOMMU_INVALID_PTE;
}
}
struct pci_iommu_arena *
iommu_arena_new(struct pci_controller *hose, dma_addr_t base,
unsigned long window_size, unsigned long align)
......@@ -71,6 +90,8 @@ iommu_arena_new(struct pci_controller *hose, dma_addr_t base,
unless there are chip bugs. */
arena->align_entry = 1;
iommu_arena_fixup(arena);
return arena;
}
......@@ -115,12 +136,12 @@ iommu_arena_alloc(struct pci_iommu_arena *arena, long n)
}
}
/* Success. Mark them all in use, ie not zero. Typically
bit zero is the valid bit, so write ~1 into everything.
/* Success. Mark them all in use, ie not zero and invalid
for the iommu tlb that could load them from under us.
The chip specific bits will fill this in with something
kosher when we return. */
for (i = 0; i < n; ++i)
ptes[p+i] = ~1UL;
ptes[p+i] = IOMMU_INVALID_PTE;
arena->next_entry = p + n;
spin_unlock_irqrestore(&arena->lock, flags);
......
......@@ -16,15 +16,18 @@
#include <linux/pci.h>
#include <linux/init.h>
#define __EXTERN_INLINE inline
#include <asm/io.h>
#include <asm/core_tsunami.h>
#undef __EXTERN_INLINE
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/bitops.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/core_tsunami.h>
#include <asm/hwrpb.h>
#include "proto.h"
......
......@@ -767,7 +767,7 @@ static inline void parse_mem_cmdline (char ** cmdline_p)
void __init setup_arch(char **cmdline_p)
{
unsigned long bootmap_size;
unsigned long bootmap_size, low_mem_size;
unsigned long start_pfn, max_pfn, max_low_pfn;
int i;
......@@ -1013,7 +1013,9 @@ void __init setup_arch(char **cmdline_p)
request_resource(&ioport_resource, standard_io_resources+i);
/* Tell the PCI layer not to allocate too close to the RAM area.. */
pci_mem_start = ((max_low_pfn << PAGE_SHIFT) + 0xfffff) & ~0xfffff;
low_mem_size = ((max_low_pfn << PAGE_SHIFT) + 0xfffff) & ~0xfffff;
if (low_mem_size > pci_mem_start)
pci_mem_start = low_mem_size;
#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
......
......@@ -270,7 +270,7 @@ static struct device_node* airport_dev;
static struct board_features_t {
char* compatible;
u32 features;
} board_features_datas[] __init =
} board_features_datas[] __initdata =
{
{ "AAPL,PowerMac G3", 0 }, /* Beige G3 */
{ "iMac,1", 0 }, /* First iMac (gossamer) */
......
......@@ -2780,12 +2780,12 @@ static
int ide_cdrom_open (struct inode *ip, struct file *fp, ide_drive_t *drive)
{
struct cdrom_info *info = drive->driver_data;
int rc;
int rc = -ENOMEM;
MOD_INC_USE_COUNT;
if (info->buffer == NULL)
info->buffer = (char *) kmalloc(SECTOR_BUFFER_SIZE, GFP_KERNEL);
if ((rc = cdrom_fops.open(ip, fp))) {
if ((info->buffer == NULL) || (rc = cdrom_fops.open(ip, fp))) {
drive->usage--;
MOD_DEC_USE_COUNT;
}
......
......@@ -3754,6 +3754,10 @@ void md__init md_setup_drive(void)
continue;
}
mddev = alloc_mddev(MKDEV(MD_MAJOR,minor));
if (mddev == NULL) {
printk("md: kmalloc failed - cannot start array %d\n", minor);
continue;
}
if (md_setup_args.pers[minor]) {
/* non-persistent */
mdu_array_info_t ainfo;
......
......@@ -137,8 +137,7 @@ static int __init abyss_attach(struct pci_dev *pdev, const struct pci_device_id
*/
dev->base_addr += 0x10;
ret = tmsdev_init(dev,0,pdev);
/* XXX: should be the max PCI32 DMA max */
ret = tmsdev_init(dev, PCI_MAX_ADDRESS, pdev);
if (ret) {
printk("%s: unable to get memory for dev->priv.\n",
dev->name);
......
......@@ -349,6 +349,7 @@ int __init madgemc_probe(void)
printk(":%2.2x", dev->dev_addr[i]);
printk("\n");
/* XXX is ISA_MAX_ADDRESS correct here? */
if (tmsdev_init(dev, ISA_MAX_ADDRESS, NULL)) {
printk("%s: unable to get memory for dev->priv.\n",
dev->name);
......
......@@ -461,7 +461,9 @@ typedef struct {
* fragments following.
*/
/* XXX is there some better way to do this? */
#define ISA_MAX_ADDRESS 0x00ffffff
#define PCI_MAX_ADDRESS 0xffffffff
#pragma pack(1)
typedef struct {
......
......@@ -142,8 +142,7 @@ static int __init tms_pci_attach(struct pci_dev *pdev, const struct pci_device_i
printk(":%2.2x", dev->dev_addr[i]);
printk("\n");
ret = tmsdev_init(dev,0, pdev);
/* XXX: should be the max PCI32 DMA max */
ret = tmsdev_init(dev, PCI_MAX_ADDRESS, pdev);
if (ret) {
printk("%s: unable to get memory for dev->priv.\n", dev->name);
goto err_out_irq;
......@@ -165,7 +164,7 @@ static int __init tms_pci_attach(struct pci_dev *pdev, const struct pci_device_i
dev->stop = tms380tr_close;
ret = register_trdev(dev);
if (!ret)
if (ret)
goto err_out_tmsdev;
pci_set_drvdata(pdev, dev);
......
......@@ -18,18 +18,11 @@ list-multi := usbcore.o
usbcore-objs := usb.o usb-debug.o hub.o
ifneq ($(CONFIG_USB_PWC),n)
# By default we use the C colour conversion functions unless we
# detect an Intel CPU, for which there is assembly available
ccvt-objs := ccvt_c.o vcvt_c.o
ifeq ($(CONFIG_X86),y)
ccvt-objs := ccvt_i386.o vcvt_i386.o
endif
export-objs += pwc-uncompress.o
list-multi += pwc.o
endif
pwc-objs := pwc-if.o pwc-misc.o pwc-ctrl.o pwc-uncompress.o $(ccvt-objs)
pwc-objs := pwc-if.o pwc-misc.o pwc-ctrl.o pwc-uncompress.o
# Optional parts of multipart objects.
......
/*
 * ccvt.h - colour space conversion routines for the pwc webcam driver.
 *
 (C) 2000 Nemosoft Unv. nemosoft@smcc.demon.nl
 This program is free software; you can redistribute it and/or modify
 it under the terms of the GNU General Public License as published by
 the Free Software Foundation; either version 2 of the License, or
 (at your option) any later version.
 This program is distributed in the hope that it will be useful,
 but WITHOUT ANY WARRANTY; without even the implied warranty of
 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 GNU General Public License for more details.
 You should have received a copy of the GNU General Public License
 along with this program; if not, write to the Free Software
 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef CCVT_H
#define CCVT_H
#ifdef __cplusplus
extern "C" {
#endif
/* Colour ConVerT: going from one colour space to another.
   All functions take (width, height) in pixels followed by the
   source/destination buffer pointers; none of them allocate memory.
   Format descriptions:
   420i = "4:2:0 interlaced"
           YYYY UU YYYY UU   even lines
           YYYY VV YYYY VV   odd lines
           U/V data is subsampled by 2 both in horizontal
           and vertical directions, and intermixed with the Y values.
   420p = "4:2:0 planar"
           YYYYYYYY          N lines
           UUUU              N/2 lines
           VVVV              N/2 lines
           U/V is again subsampled, but all the Ys, Us and Vs are placed
           together in separate buffers. The buffers may be placed in
           one piece of contiguous memory though, with Y buffer first,
           followed by U, followed by V.
   yuyv = "4:2:2 interlaced"
           YUYV YUYV YUYV ...   N lines
           The U/V data is subsampled by 2 in horizontal direction only.
   bgr24 = 3 bytes per pixel, in the order Blue Green Red (whoever came up
           with that idea...)
   rgb24 = 3 bytes per pixel, in the order Red Green Blue (which is sensible)
   rgb32 = 4 bytes per pixel, in the order Red Green Blue Alpha, with
           Alpha really being a filler byte (0)
   bgr32 = last but not least, 4 bytes per pixel, in the order Blue Green Red
           Alpha, Alpha again a filler byte (0)
*/
/* Functions in ccvt_i386.S/ccvt_c.c */
/* 4:2:0 YUV interlaced to RGB/BGR */
void ccvt_420i_bgr24(int width, int height, void *src, void *dst);
void ccvt_420i_rgb24(int width, int height, void *src, void *dst);
void ccvt_420i_bgr32(int width, int height, void *src, void *dst);
void ccvt_420i_rgb32(int width, int height, void *src, void *dst);
/* 4:2:2 YUYV interlaced to RGB/BGR */
void ccvt_yuyv_rgb32(int width, int height, void *src, void *dst);
void ccvt_yuyv_bgr32(int width, int height, void *src, void *dst);
/* 4:2:0 YUV planar to RGB/BGR */
void ccvt_420p_rgb32(int width, int height, void *srcy, void *srcu, void *srcv, void *dst);
void ccvt_420p_bgr32(int width, int height, void *srcy, void *srcu, void *srcv, void *dst);
/* RGB/BGR to 4:2:0 YUV interlaced (none declared at present) */
/* RGB/BGR to 4:2:0 YUV planar */
void ccvt_rgb24_420p(int width, int height, void *src, void *dsty, void *dstu, void *dstv);
void ccvt_bgr24_420p(int width, int height, void *src, void *dsty, void *dstu, void *dstv);
/* Go from 420i to other yuv formats */
void ccvt_420i_420p(int width, int height, void *src, void *dsty, void *dstu, void *dstv);
void ccvt_420i_yuyv(int width, int height, void *src, void *dst);
#ifdef __cplusplus
}
#endif
#endif
/*
Colour conversion routines (RGB <-> YUV) in plain C
(C) 2000-2001 Nemosoft Unv. nemosoft@smcc.demon.nl
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include "ccvt.h"
#include "vcvt.h"
/* We use the viewport routines, with a viewport width that is exactly
the same as the image width. The overhead for computing the view/image
offsets is very small anyway.
The assembly routines are still different, since they are quite optimized.
*/
/* 4:2:0 interlaced -> RGB24: delegate to the viewport routine with
   viewport width equal to the image width (no cropping). */
void ccvt_420i_rgb24(int width, int height, void *src, void *dst)
{
vcvt_420i_rgb24(width, height, width, src, dst);
}
/* 4:2:0 interlaced -> BGR24: viewport width == image width. */
void ccvt_420i_bgr24(int width, int height, void *src, void *dst)
{
vcvt_420i_bgr24(width, height, width, src, dst);
}
/* 4:2:0 interlaced -> RGB32 (alpha filler byte): viewport width == image width. */
void ccvt_420i_rgb32(int width, int height, void *src, void *dst)
{
vcvt_420i_rgb32(width, height, width, src, dst);
}
/* 4:2:0 interlaced -> BGR32 (alpha filler byte): viewport width == image width. */
void ccvt_420i_bgr32(int width, int height, void *src, void *dst)
{
vcvt_420i_bgr32(width, height, width, src, dst);
}
/* 4:2:0 interlaced -> 4:2:0 planar (separate Y/U/V buffers). */
void ccvt_420i_420p(int width, int height, void *src, void *dsty, void *dstu, void *dstv)
{
vcvt_420i_420p(width, height, width, src, dsty, dstu, dstv);
}
/* 4:2:0 interlaced -> packed 4:2:2 YUYV. */
void ccvt_420i_yuyv(int width, int height, void *src, void *dst)
{
vcvt_420i_yuyv(width, height, width, src, dst);
}
/*
Colour conversion routines (RGB <-> YUV) in x86 assembly
(C) 2000 Nemosoft Unv. nemosoft@smcc.demon.nl
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
/* The ccvt_* functions always start with width and height, so these
parameters are in 8(%ebp) and 12(%ebp). The other parameters can be
2 to 4 pointers, in one of these combinations:
*src, *dst
*srcy, *srcu, *srcv, *dst
*src, *dsty, *dstu, *dstv
*/
#define __ASSEMBLY__
#include <linux/linkage.h>
/* Named %ebp-relative slots for the cdecl arguments; all entry points
   build a frame with `enter', so arguments start at 8(%ebp). */
#define Width 8(%ebp)
#define Height 12(%ebp)
/* 2 parameters, 1 in, 1 out */
#define Src2 16(%ebp)
#define Dst2 20(%ebp)
/* 4 parameters, 3 in, 1 out */
#define SrcY 16(%ebp)
#define SrcU 20(%ebp)
#define SrcV 24(%ebp)
#define Dst4 28(%ebp)
/* 4 parameters, 1 in, 3 out */
#define Src4 16(%ebp)
#define DstY 20(%ebp)
#define DstU 24(%ebp)
#define DstV 28(%ebp)
/* This buffer space used to be statically allocated, but this is going to
   give problems with multiple cams (though I have yet to see it).
   Therefore, we reserve at least 64 + 8 = 72 bytes on the stack with
   `enter'.  PixelBuffer holds 4 pixels x 4 dwords (R,G,B,alpha) of
   intermediate 8.8 fixed-point values; Uptr/Vptr are scratch pointers. */
#define PixelBuffer -64(%ebp)
#define Uptr -68(%ebp)
#define Vptr -72(%ebp)
.text
/* This function will load the src and destination pointers, including
Uptr/Vptr when necessary, and test the width/height parameters.
- %esi will be set to Src or SrcY
- %edi will be set to Dst or DstY
the carry flag will be set if any of these tests fail.
It assumes %ebp has been set.
All three entry labels fall through into test_width_height. */
/* 2 parameters, src & dst */
test_param_2:
mov Src2, %esi
mov Dst2, %edi
cmp $0, %esi # NULL pointers?
je param_fail
cmp $0, %edi
je param_fail
jmp test_width_height
/* 3 inputs, 1 output: also primes Uptr/Vptr from SrcU/SrcV */
test_param_31:
mov Dst4, %edi # NULL pointers
cmp $0, %edi
je param_fail
mov SrcV, %esi
cmp $0, %esi
je param_fail
mov %esi, Vptr
mov SrcU, %esi
cmp $0, %esi
je param_fail
mov %esi, Uptr
mov SrcY, %esi
cmp $0, %esi
je param_fail
jmp test_width_height
/* 1 input, 3 output: also primes Uptr/Vptr from DstU/DstV */
test_param_13:
mov Src4, %esi # NULL pointers
cmp $0, %esi
je param_fail
mov DstV, %edi
cmp $0, %edi
je param_fail
mov %edi, Vptr
mov DstU, %edi
cmp $0, %edi
je param_fail
mov %edi, Uptr
mov DstY, %edi
cmp $0, %edi
je param_fail
jmp test_width_height
nop
/* width must be a positive multiple of 4, height a positive even number */
test_width_height:
cmpl $0, Width
jbe param_fail
testl $3, Width # multiple of 4?
jnz param_fail # Nope...
cmp $0, Height # check illegal height
jbe param_fail
testl $1, Height # Odd no. of lines?
jnz param_fail # Aye
/* fall through */
/* exit points */
param_ok:
clc # Success: clear carry
ret
param_fail:
stc # Fail: set carry
ret
# This will fill PixelBuffer with 4 grey scale pixels (Y)
# In: %eax = Value (Y3Y2Y1Y0)
# Out: PixelBuffer holds Y<<8 in each of R/G/B slots of 4 pixels
# Modifies: %ecx (-4)
# Destroys: %edx, %edi
expand_4_y:
mov %eax, %edx # Keep in edx (we need eax)
lea PixelBuffer, %edi
0: # This code is executed 4 times
movzbl %dl, %eax # move, zero extending byte-to-long
shl $8, %eax # 8 digit precision
stosl # Expand into PixelBuffer
stosl
stosl
add $4, %edi # Skip alpha
shr $8, %edx # next Y
dec %ecx
test $3, %ecx
jnz 0b
ret # from expand_4_y
# This will add the color factors to the (grey) values in PixelBuffer.
# The multipliers are 8.8 fixed point (scale 256): 359~=1.402 (Vr),
# 183~=0.714 (Vg), 88~=0.344 (Ug), 454~=1.772 (Ub) -- BT.601-style.
# In: %ebx (U1U0V1V0)
# Out: PixelBuffer updated in place
# Modifies:
# Destroys: %edi, %ebx, %eax, %edx
expand_4_uv:
lea PixelBuffer, %edi # reset pointer
# V0 (applies to pixels 0 and 1, at offsets 0x00 and 0x10)
sub $128, %bl
movsbl %bl, %eax
mov $359, %edx # Vr
mul %edx
add %eax, 0x00(%edi)
add %eax, 0x10(%edi)
movsbl %bl, %eax
mov $183, %edx # Vg
mul %edx
sub %eax, 0x04(%edi)
sub %eax, 0x14(%edi)
# V1 (pixels 2 and 3, offsets 0x20 and 0x30)
sub $128, %bh
movsbl %bh, %eax
mov $359, %edx # Vr
mul %edx
add %eax, 0x20(%edi)
add %eax, 0x30(%edi)
movsbl %bh, %eax
mov $183, %edx # Vg
mul %edx
sub %eax, 0x24(%edi)
sub %eax, 0x34(%edi)
# U0
bswap %ebx # Get U values in lower half
sub $128, %bh
movsbl %bh, %eax
mov $88, %edx # Ug
mul %edx
sub %eax, 0x04(%edi)
sub %eax, 0x14(%edi)
movsbl %bh, %eax
mov $454, %edx # Ub
mul %edx
add %eax, 0x08(%edi)
add %eax, 0x18(%edi)
# U1
sub $128, %bl
movsbl %bl, %eax
mov $88, %edx # Ug
mul %edx
sub %eax, 0x24(%edi)
sub %eax, 0x34(%edi)
movsbl %bl, %eax
mov $454, %edx # Ub
mul %edx
add %eax, 0x28(%edi)
add %eax, 0x38(%edi)
ret # expand_4_uv
/* This function expands 4 420i pixels into PixelBuffer.
   In: %esi -> Y data; Out: %esi advanced; %ecx decremented by 4. */
do_four_yuvi:
push %edi
lodsl # 4 bytes at a time
call expand_4_y
# now do UV values. on even lines, Y is followed by U values; on
# odd lines V values follow. The U and V values are always pushed
# on the stack in this order:
# U V
# First, calculate offset per line (1.5 * width)
mov Width, %ebx # width
shl %ebx # 2 *
add Width, %ebx # 3 *
shr %ebx # 1.5 *
# even or odd lines (Height doubles as the running line counter)
testl $1, Height
jz 2f
# odd line; we are at V data, but do U data first
neg %ebx # make ebx offset negative
mov (%esi,%ebx),%ax # U
push %ax
lodsw # V
push %ax
jmp 3f
2: # even line
lodsw # U
push %ax
sub $2, %ebx
mov (%esi,%ebx), %ax # V
push %ax
3: # Okay, so we now have the U and V values... expand into PixelBuffer
pop %ebx
call expand_4_uv
pop %edi
ret # from do_four_yuvi
# Do four pixels, in planar format: Y from %esi, U/V via Uptr/Vptr
# (which are advanced by 2 bytes each call)
do_four_yuvp:
push %edi
# The first part is the same as for interlaced (4 bytes Y)
lodsl # 4 bytes at a time
call expand_4_y
# now gather U and V values...
mov Uptr, %ebx # Use Uptr/Vptr
mov (%ebx), %ax
push %ax
add $2, %ebx
mov %ebx, Uptr
mov Vptr, %ebx
mov (%ebx), %ax
push %ax
add $2, %ebx
mov %ebx, Vptr
pop %ebx
call expand_4_uv
pop %edi
ret
# Do four pixels, in yuyv interlaced format: shuffle two YUYV dwords
# into Y3Y2Y1Y0 (%eax) and U1U0V1V0 (%ebx) for the expand helpers
do_four_yuyv:
push %edi
lodsl # v0y1u0y0
mov %eax, %ebx
bswap %ebx # y0u0y1v0
mov %bh, %ah # v0y1y1y0
and $0x00ff00ff, %ebx # __u0__v0
push %ax # y1y0
lodsl # v1y3u1y2 # mix register instructions
mov %eax, %edx # so CPU pipeline doesnt stall
rol $16, %eax # u1y2v1y3
mov %dl, %dh # v1y3y2y2
and $0xff00ff00, %eax # u1__v1__
mov $0, %dl # v1y3y2__
or %eax, %ebx # u1u0v1v0
shl $8, %edx # y3y2____
pop %dx # y3y2y1y0
mov %edx, %eax
call expand_4_y
call expand_4_uv
pop %edi
ret
# Clamp all 16 dwords in PixelBuffer to the range [0, 0xff00]
# (8.8 fixed point, so 0xff00 is the maximum channel value).
# Preserves %esi, %edi, %ecx.
limit_pixels:
# Limit all values in PixelBuffer
push %esi
push %edi
push %ecx
lea PixelBuffer, %esi
mov %esi, %edi
mov $16, %ecx
0: lodsl
cmp $0, %eax # this would have been a perfect spot for CMOVxx instructions...
jl 2f # except they only work on Pentium Pro processors,
cmp $0xff00, %eax # and not even all of them
jg 3f
add $4, %edi # no use for stosl here
loop 0b
jmp 9f
2: mov $0, %eax # underflow: clamp to 0
stosl
loop 0b
jmp 9f
3: mov $0xff00, %eax # overflow: clamp to max
stosl
loop 0b
jmp 9f
9: pop %ecx
pop %edi
pop %esi
ret # from limit_pixels
/* Copy RGB values from PixelBuffer into the destination buffer (%edi),
   dropping the 8.8 fraction (shr $8).  One helper per output format;
   each emits the 4 pixels held in PixelBuffer and advances %edi.
*/
/* Emit 4 pixels as 12 bytes, R G B order */
push_rgb24:
push %ecx
push %esi
lea PixelBuffer, %esi
mov $4, %ecx
0: lodsl
shr $8, %eax
mov %al, (%edi) # Red
lodsl
shr $8, %eax
mov %al, 1(%edi) # Green
lodsl
shr $8, %eax
mov %al, 2(%edi) # Blue
add $3, %edi
lodsl # dummy
loop 0b
pop %esi
pop %ecx
ret
/* Emit 4 pixels as 12 bytes, B G R order */
push_bgr24:
push %ecx
push %esi
lea PixelBuffer, %esi
mov $4, %ecx
0: lodsl
shr $8, %eax
mov %al, 2(%edi) # Red
lodsl
shr $8, %eax
mov %al, 1(%edi) # Green
lodsl
shr $8, %eax
mov %al, (%edi) # Blue
add $3, %edi
lodsl # dummy
loop 0b
pop %esi
pop %ecx
ret
/* The simplest format: push 4 pixels x 4 bytes, RGBa */
push_rgb32:
push %ecx
push %esi
mov $16, %ecx # 16 dwords in, 16 bytes out
lea PixelBuffer, %esi
0: lodsl # red
shr $8, %eax # 8 bit precision
stosb
loop 0b
pop %esi
pop %ecx
ret
/* Gosh. Would you believe it. They even made this format... (Qt 2.*) */
push_bgr32:
# copy all 4 values to output buffer
push %ecx
push %esi
mov $4, %ecx
lea PixelBuffer, %esi
0: lodsl # red
shr $8, %eax # 8 bit precision
mov %al, 2(%edi)
lodsl # green
shr $8, %eax
mov %al, 1(%edi)
lodsl # blue
shr $8, %eax
mov %al, (%edi)
add $4, %edi
lodsl # dummy
loop 0b
pop %esi
pop %ecx
ret
/*************************************/
/* Functions to go from YUV interlaced formats to RGB */
/* Go from interlaced to RGB, red first */
ENTRY(ccvt_420i_rgb24)
enter $72, $0 # no extra space, no stackframes
push %ebx
push %esi
push %edi
call test_param_2 # validates args; carry set on failure
jc 9f
0: mov Width, %ecx # width; ecx counts pixels left on this line
1: call do_four_yuvi # expand_4_y inside decrements ecx by 4
call limit_pixels
call push_rgb24
cmp $0, %ecx
jnz 1b # end of line?
decl Height # yes; decrement line counter
jnz 0b
9: pop %edi
pop %esi
pop %ebx
leave
ret
/* Go from interlaced to BGR, blue first.
   Identical to ccvt_420i_rgb24 except for the output helper. */
ENTRY(ccvt_420i_bgr24)
enter $72, $0 # no extra space, no stackframes
push %ebx
push %esi
push %edi
call test_param_2
jc 9f
0: mov Width, %ecx # width
1: call do_four_yuvi
call limit_pixels
call push_bgr24
cmp $0, %ecx
jnz 1b # end of line?
decl Height # yes; decrement line counter
jnz 0b
9: pop %edi
pop %esi
pop %ebx
leave
ret
/* From interlaced to RGBa (4 bytes/pixel, alpha filler). */
ENTRY(ccvt_420i_rgb32)
enter $72, $0 # no extra space, no stackframes
push %ebx
push %esi
push %edi
call test_param_2
jc 9f
0: mov Width, %ecx # width
1: call do_four_yuvi
call limit_pixels
call push_rgb32
cmp $0, %ecx # end of line?
jnz 1b
decl Height # yes; decrement line counter
jnz 0b
9: pop %edi
pop %esi
pop %ebx
leave
ret
/* Guess what? Go from interlaced to BGRa */
ENTRY(ccvt_420i_bgr32)
enter $72, $0 # no extra space, no stackframes
push %ebx
push %esi
push %edi
call test_param_2
jc 9f
0: mov Width, %ecx # width
1: call do_four_yuvi
call limit_pixels
call push_bgr32
cmp $0, %ecx # end of line?
jnz 1b
decl Height # yes; decrement line counter
jnz 0b
9: pop %edi
pop %esi
pop %ebx
leave
ret
/* From YUYV (4:2:2 packed) to RGBa. */
ENTRY(ccvt_yuyv_rgb32)
enter $72, $0 # no extra space, no stackframes
push %ebx
push %esi
push %edi
call test_param_2
jc 9f
0: mov Width, %ecx # width
1: call do_four_yuyv # consumes 2 YUYV dwords = 4 pixels
call limit_pixels
call push_rgb32
cmp $0, %ecx # end of line?
jnz 1b
8: decl Height # yes; decrement line counter
jnz 0b
9: pop %edi
pop %esi
pop %ebx
leave
ret
/* From YUYV (4:2:2 packed) to BGRa. */
ENTRY(ccvt_yuyv_bgr32)
enter $72, $0 # no extra space, no stackframes
push %ebx
push %esi
push %edi
call test_param_2
jc 9f
# YUYV -> RGBa RGBa
0: mov Width, %ecx # width
1: call do_four_yuyv
call limit_pixels
call push_bgr32
cmp $0, %ecx # end of line?
jnz 1b
8: decl Height # yes; decrement line counter
jnz 0b
9: pop %edi
pop %esi
pop %ebx
leave
ret
/* Planar (separate Y/U/V buffers) to RGBa.  U/V advance half a line per
   output line, so they are rewound after every even line to reuse the
   same chroma row for the odd line (vertical 2x subsampling). */
ENTRY(ccvt_420p_rgb32)
enter $72, $0
push %ebx
push %esi
push %edi
call test_param_31
jc 9f
mov Width, %eax # width
mull Height # * height
# NOTE(review): the product computed above is never used afterwards --
# looks like dead code; confirm before removing.
mov SrcU, %eax # Copy U/V pointers
mov %eax, Uptr
mov SrcV, %eax
mov %eax, Vptr
0: mov Width, %ecx # width
1: call do_four_yuvp
call limit_pixels
call push_rgb32
cmp $0, %ecx # end of line?
jnz 1b
testl $1, Height # odd/even line
jnz 8f
mov Width, %eax # Even: rewind U/V pointers
shr %eax
sub %eax, Uptr
sub %eax, Vptr
8: decl Height # yes; decrement line counter
jnz 0b
9: pop %edi
pop %esi
pop %ebx
leave
ret
/* Okay... eventually, you end up with a very complete set of conversion
   routines. I just wished things were a bit simpler.
   Planar to BGRa; same structure as ccvt_420p_rgb32 above. */
ENTRY(ccvt_420p_bgr32)
enter $72, $0
push %ebx
push %esi
push %edi
call test_param_31
jc 9f
mov Width, %eax # width
mull Height # * height
# NOTE(review): product unused -- apparent dead code, see rgb32 variant.
mov SrcU, %eax # Copy U/V pointers
mov %eax, Uptr
mov SrcV, %eax
mov %eax, Vptr
0: mov Width, %ecx # width
1: call do_four_yuvp
call limit_pixels
call push_bgr32
cmp $0, %ecx # end of line?
jnz 1b
testl $1, Height # odd/even line
jnz 8f
mov Width, %eax # Even: rewind U/V pointers
shr %eax
sub %eax, Uptr
sub %eax, Vptr
8: decl Height # yes; decrement line counter
jnz 0b
9: pop %edi
pop %esi
pop %ebx
leave
ret
/* Go from RGB (red first) to 4:2:0 planar.
 * Note: this requires decimation of the U/V space by 2 in both directions
 * Also, a matrix multiply would be QUITE convenient...
 This is the matrix (coefficients scaled by 256):
 (Y ) ( 77 150 29) (R)
 (Cb) = (-43 -85 128) * (G)
 (Cr) (128 -107 -21) (B)
 Three passes over the source: Y (full resolution), then U, then V
 (each averaged over 2x2 pixel blocks). */
ENTRY(ccvt_rgb24_420p)
enter $96, $0 # 24 bytes extra stack, no stackframes
push %ebx # -76: line width in bytes
push %esi # -80: height (copy)
push %edi # -84: width (copy)
# -88: red factor
# -92: green factor
# -96: blue factor
call test_param_13
jc 9f
mov Width, %eax
shl %eax
add Width, %eax # 3 * width = line increment
mov %eax, -76(%ebp)
mov Height, %eax
mov %eax, -80(%ebp) # copy height into stackframe
/*
This is a bit complicated... since U/V decimation is taking
place both in horizontal and vertical direction, we have to
process 2 lines in parallel. Also, 2 adjacent pixels are
considered. We average the U/V values over these 4 pixels
(of course, we could have just taken the U/V value of the first
pixel and be done with it, but that's not how we do things around
here)
*/
# 1st pass: Y values. Set factors
movl $77 , -88(%ebp) # 0.299
movl $150, -92(%ebp) # 0.587
movl $29 , -96(%ebp) # 0.114
0: mov Width, %ecx # width
1: xor %ebx, %ebx # 0
call rgb_multiply
shr $8, %ebx # divide by 256 (no need for limitor, since 77 + 150 + 29 = 256)
mov %bl, %al
stosb # store it into Y buffer
dec %ecx # end of line?
jnz 1b
decl -80(%ebp) # end of image?
jnz 0b
# Okay, now the U/V pointers...
# The following code is passed twice, with different factors
# Note that the %esi pointer jumps around quite a bit
# factors for U
movl $-43, -88(%ebp) # -0.1687
movl $-85, -92(%ebp) # -0.3313
movl $128, -96(%ebp) # 0.5
mov DstU, %edi # Set %edi register now
7: mov Src4, %esi # Rewind source pointer
mov Height, %eax # height
shr %eax # / 2
mov %eax, -80(%ebp) # copy
2: mov Width, %eax # width
shr %eax # / 2
mov %eax, -84(%ebp) # copy
3: xor %ebx, %ebx # 0
mov $4, %ecx # average over 4 pixels
4: call rgb_multiply
dec %ecx
jz 5f # done?
cmp $2, %ecx # 3rd pixel.. move %esi to next line, with offset
jne 4b
sub $6, %esi # backup to where we started
add -76(%ebp), %esi # add line increment
jmp 4b
5: # okay, 4 pixels done...
sub -76(%ebp), %esi # Get %esi back to its proper place
add $0x20000, %ebx # add 0.5 factor
shr $10, %ebx # Divide by 4 * 256
mov %bl, %al
stosb # store it!
decl -84(%ebp) # end of line?
jnz 3b
add -76(%ebp), %esi # %esi to next line (actually, 2 lines further)
decl -80(%ebp) # end of image?
jnz 2b
# check if 3rd pass has been done (after the V pass, -88 holds 128)
cmpl $128, -88(%ebp)
je 9f # Done!
# Set factors for V pass
movl $128 , -88(%ebp) # 0.5
movl $-107, -92(%ebp) # -0.4187
movl $-21 , -96(%ebp) # -0.0813
mov DstV, %edi # %edi to V buffer
jmp 7b # "Do it to me one more time..."
9: pop %edi
pop %esi
pop %ebx
leave
ret
/* Go from BGR (blue first) to 4:2:0 planar.
 * Same three-pass structure as ccvt_rgb24_420p above, with the R and B
 * coefficients swapped because the source bytes arrive in B,G,R order.
 * BUGFIX: the U-pass blue factor was $123; the Cb blue coefficient is
 * 0.5 * 256 = 128 (see the comment, and the mirrored value used by
 * ccvt_rgb24_420p), so it is now $128. */
ENTRY(ccvt_bgr24_420p)
enter $96, $0 # 24 bytes extra stack, no stackframes
push %ebx # -76: line width in bytes
push %esi # -80: height (copy)
push %edi # -84: width (copy)
# -88: blue-position factor
# -92: green factor
# -96: red-position factor
call test_param_13
jc 9f
/* No surprise, this code looks just like rgb24_420p, but with swapped factors */
mov Width, %eax
shl %eax
add Width, %eax # 3 * width = line increment
mov %eax, -76(%ebp)
mov Height, %eax
mov %eax, -80(%ebp) # copy height into stackframe
# 1st pass: Y values. Set factors
movl $29 , -88(%ebp) # 0.114
movl $150, -92(%ebp) # 0.587
movl $77 , -96(%ebp) # 0.299
0: mov Width, %ecx # width
1: xor %ebx, %ebx # 0
call rgb_multiply
shr $8, %ebx # divide by 256 (no need for limitor, since 77 + 150 + 29 = 256)
mov %bl, %al
stosb # store it into Y buffer
dec %ecx # end of line?
jnz 1b
decl -80(%ebp) # end of image?
jnz 0b
# Okay, now the U/V pointers...
# The following code is passed twice, with different factors
# Note that the %esi pointer jumps around quite a bit
# factors for U
movl $128, -88(%ebp) # 0.5
movl $-85, -92(%ebp) # -0.3313
movl $-43, -96(%ebp) # -0.1687
mov DstU, %edi # Set %edi register now
7: mov Src4, %esi # Rewind source pointer
mov Height, %eax # height
shr %eax # / 2
mov %eax, -80(%ebp) # copy
2: mov Width, %eax # width
shr %eax # / 2
mov %eax, -84(%ebp) # copy
3: xor %ebx, %ebx # 0
mov $4, %ecx # average over 4 pixels
4: call rgb_multiply
dec %ecx
jz 5f # done?
cmp $2, %ecx # 3rd pixel.. move %esi to next line, with offset
jne 4b
sub $6, %esi # backup to where we started
add -76(%ebp), %esi # add line increment
jmp 4b
5: # okay, 4 pixels done...
sub -76(%ebp), %esi # Get %esi back to its proper place
add $0x20000, %ebx # add 0.5 factor
shr $10, %ebx # Divide by 4 * 256
mov %bl, %al
stosb # store it!
decl -84(%ebp) # end of line?
jnz 3b
add -76(%ebp), %esi # %esi to next line (actually, 2 lines further)
decl -80(%ebp) # end of image?
jnz 2b
# check if 3rd pass has been done (after the V pass, -88 holds -21)
cmpl $-21, -88(%ebp)
je 9f # Done!
# Set factors for V pass
movl $-21 , -88(%ebp) # -0.0813
movl $-107, -92(%ebp) # -0.4187
movl $128 , -96(%ebp) # 0.5
mov DstV, %edi # %edi to V buffer
jmp 7b # "Do it to me one more time..."
9: pop %edi
pop %esi
pop %ebx
leave
ret
/* RGB-to-YUV helper functions */
# Multiply one 3-byte pixel at (%esi) by the factor vector stored at
# -88/-92/-96(%ebp), accumulating into %ebx.  Advances %esi by 3.
# Destroys %eax, %edx.
rgb_multiply:
# do one RGB vector multiplication; its assumed the RGB factors
# are set on the stack. The data is accumulated in ebx.
lodsb # red byte
and $0xff, %eax # zero-extend the byte (lodsb only writes %al)
mov -88(%ebp), %edx # red factor
mul %edx
add %eax, %ebx
lodsb # green byte
and $0xff, %eax
mov -92(%ebp), %edx # green factor
mul %edx
add %eax, %ebx
lodsb # blue byte
and $0xff, %eax
mov -96(%ebp), %edx # blue factor
mul %edx
add %eax, %ebx # ebx now contains sum
ret
/**************************************************************************/
/* Go from 'interlaced' (YYYY UU/VV) format to planar.
   Three sequential copy passes: Y (skipping chroma), then U (even
   lines only), then V (odd lines only). */
ENTRY(ccvt_420i_420p)
enter $76, $0 # 4 bytes extra space, no stackframes
push %ebx # -4: width / 4
push %esi
push %edi
call test_param_13
jc 9f
# Okay, this is fairly easy... we first grab the Y values (4 bytes
# at a time), then rewind and do the U values, and repeat for V.
# This leaves us with a nice planar format
mov Width, %eax
shr %eax
shr %eax # width / 4
mov %eax, -76(%ebp) # Store
# Y
mov Height, %edx # line counter
0: mov -76(%ebp), %ecx
1: lodsl # get 4 bytes...
stosl # ...push 4 bytes
add $2, %esi # Skip U or V
loop 1b
dec %edx
jnz 0b
# U
mov Src4, %esi # rewind source pointer
mov DstU, %edi
add $4, %esi # set to U
mov Height, %edx
shr %edx # height / 2
mov Width, %ebx
shl %ebx
add Width, %ebx
shr %ebx # Width * 1.5 (line offset)
2: mov -76(%ebp), %ecx # width / 4
3: lodsw # 2 bytes at a time
stosw
add $4, %esi # skip Y
loop 3b
add %ebx, %esi # Skip line (U is on even lines)
dec %edx
jnz 2b
# V
mov Src4, %esi # rewind, set to V in first odd line
add $4, %esi
add %ebx, %esi # register re-use; no compiler can beat that :)
mov DstV, %edi # V ptr
mov Height, %edx
shr %edx # height / 2
4: mov -76(%ebp), %ecx # Get width/4
5: lodsw
stosw
add $4, %esi # Skip Y
loop 5b
add %ebx, %esi # Skip line (V is on odd lines)
dec %edx
jnz 4b
/* That's it! */
9: pop %edi
pop %esi
pop %ebx
leave
ret
/* Go from 4:2:0 interlaced to 'normal' YUYV.  Chroma for the current
   line is fetched from the paired line (next line when even, previous
   when odd), then interleaved byte-by-byte with the Y values. */
ENTRY(ccvt_420i_yuyv)
enter $80, $0 # 8 bytes extra space, no stackframes
push %ebx
push %esi
push %edi
call test_param_2
jc 9f
mov Width, %ecx # -76: width / 4 = no. loops per line
shr %ecx
shr %ecx
mov %ecx, -76(%ebp)
mov Width, %ebx # -80: width * 1.5 = line offset
shl %ebx
add Width, %ebx
shr %ebx
mov %ebx, -80(%ebp)
# Okay, this requires a bit of byte shuffling... we go from
# YYYY UU
# YYYY VV
# to
# YUYV YUYV
# YUYV YUYV
# which indeed takes up more space
#
0: mov -76(%ebp), %ecx
1: lodsl # 4 Y in eax
testl $1, Height # even or odd line?
jnz 2f
# Even: U follows here, V is one 420i line (1.5*width) ahead
mov -80(%ebp), %ebx
mov (%ebx, %esi), %dx # 16 bits V
shl $16, %edx # store in high word
mov (%esi), %dx # 16 bits U
add $2, %esi
jmp 3f
2: # Odd: V follows here, U is one 420i line behind
mov -80(%ebp), %ebx
neg %ebx # negative offset
mov (%esi), %dx # 16 bits V
shl $16, %edx # store in high word
mov (%ebx, %esi), %dx # 16 bits U
add $2, %esi
3: # eax = Y3Y2Y1Y0, edx = V1V0U1U0, ebx is free
push %eax
movzbl %al, %ebx # ______y0
and $0xFF00, %eax # ____y1__
shl $8, %eax # __y1____
or %ebx, %eax # __y1__y0
mov %edx, %ebx # v1v0u1u0
shl $8, %ebx # v0u1u0__
and $0xff00ff00, %ebx # v0__u0__
or %ebx, %eax # v0y1u0y0
stosl
pop %eax # y3y2y1y0
# Second half
shr $8, %eax # __y3y2y1
shr $8, %ax # __y3__y2
and $0xff00ff00, %edx # v1__u1__
or %edx, %eax # v1y3u1y2
stosl
loop 1b
decl Height # height--
jnz 0b
# Done
9: pop %edi
pop %esi
pop %ebx
leave
ret
......@@ -107,7 +107,7 @@ struct Nala_table_entry {
static struct Nala_table_entry Nala_table[PSZ_MAX][8] =
{
#include "nala.inc"
#include "pwc_nala.h"
};
/* This tables contains entries for the 675/680/690 (Timon) camera, with
......@@ -134,7 +134,7 @@ struct Timon_table_entry
static struct Timon_table_entry Timon_table[PSZ_MAX][6][4] =
{
#include "timon.inc"
#include "pwc_timon.h"
};
/* Entries for the Kiara (730/740) camera */
......@@ -149,7 +149,7 @@ struct Kiara_table_entry
static struct Kiara_table_entry Kiara_table[PSZ_MAX][6][4] =
{
#include "kiara.inc"
#include "pwc_kiara.h"
};
......@@ -184,7 +184,6 @@ void pwc_hexdump(void *p, int len)
static inline int send_video_command(struct usb_device *udev, int index, void *buf, int buflen)
{
#ifdef __KERNEL__
return usb_control_msg(udev,
usb_sndctrlpipe(udev, 0),
SET_EP_STREAM_CTL,
......@@ -192,9 +191,6 @@ static inline int send_video_command(struct usb_device *udev, int index, void *b
VIDEO_OUTPUT_CONTROL_FORMATTER,
index,
buf, buflen, HZ);
#else
return 0;
#endif
}
......
......@@ -55,7 +55,6 @@
#endif
/* Function prototypes and driver templates */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
/* hotplug device table support */
static __devinitdata struct usb_device_id pwc_device_table [] = {
......@@ -83,20 +82,6 @@ static struct usb_driver pwc_driver =
disconnect: usb_pwc_disconnect, /* disconnect() */
};
#else
static void *usb_pwc_probe(struct usb_device *udev, unsigned int ifnum);
static void usb_pwc_disconnect(struct usb_device *udev, void *ptr);
static struct usb_driver pwc_driver =
{
name: "Philips webcam", /* name */
probe: usb_pwc_probe, /* probe() */
disconnect: usb_pwc_disconnect, /* disconnect() */
};
#endif
static int default_size = PSZ_QCIF;
static int default_fps = 10;
static int default_palette = VIDEO_PALETTE_RGB24; /* This is normal for webcams */
......@@ -122,16 +107,10 @@ static int pwc_video_ioctl(struct video_device *vdev, unsigned int cmd, void *a
static int pwc_video_mmap(struct video_device *dev, const char *adr, unsigned long size);
static struct video_device pwc_template = {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,3)
owner: NULL,
#endif
owner: THIS_MODULE,
name: "Philips Webcam", /* Filled in later */
type: VID_TYPE_CAPTURE,
#ifdef VID_HARDWARE_PWC
hardware: VID_HARDWARE_PWC, /* Let's pretend for now */
#else
hardware: 0, /* 2.2.14 backport (?) */
#endif
open: pwc_video_open,
close: pwc_video_close,
read: pwc_video_read,
......@@ -612,14 +591,8 @@ static inline void pwc_next_image(struct pwc_device *pdev)
static int pwc_set_palette(struct pwc_device *pdev, int pal)
{
if ( pal == VIDEO_PALETTE_RGB24
|| pal == VIDEO_PALETTE_RGB32
|| pal == (VIDEO_PALETTE_RGB24 | 0x80)
|| pal == (VIDEO_PALETTE_RGB32 | 0x80)
|| pal == VIDEO_PALETTE_YUYV
|| pal == VIDEO_PALETTE_YUV422
|| pal == VIDEO_PALETTE_YUV420
|| pal == VIDEO_PALETTE_YUV420P
if (
pal == VIDEO_PALETTE_YUV420
#if PWC_DEBUG
|| pal == VIDEO_PALETTE_RAW
#endif
......@@ -949,17 +922,12 @@ static int pwc_video_open(struct video_device *vdev, int mode)
Trace(TRACE_OPEN, "video_open called(0x%p, 0%o).\n", vdev, mode);
if (vdev == NULL) {
Err("video_open() called with NULL structure?\n");
return -EFAULT;
}
if (vdev == NULL)
BUG();
pdev = (struct pwc_device *)vdev->priv;
if (pdev == NULL) {
Err("video_open() called with NULL pwc_device.\n");
return -EFAULT;
}
if (pdev == NULL)
BUG();
MOD_INC_USE_COUNT;
down(&pdev->modlock);
if (!pdev->usb_init) {
Trace(TRACE_OPEN, "Doing first time initialization.\n");
......@@ -981,7 +949,6 @@ static int pwc_video_open(struct video_device *vdev, int mode)
i = pwc_allocate_buffers(pdev);
if (i < 0) {
Trace(TRACE_OPEN, "Failed to allocate memory.\n");
MOD_DEC_USE_COUNT;
up(&pdev->modlock);
return i;
}
......@@ -1026,14 +993,12 @@ static int pwc_video_open(struct video_device *vdev, int mode)
}
if (i) {
Trace(TRACE_OPEN, "Second attempt at set_video_mode failed.\n");
MOD_DEC_USE_COUNT;
up(&pdev->modlock);
return i;
}
i = usb_set_interface(pdev->udev, 0, pdev->valternate);
if (i) {
Trace(TRACE_OPEN, "Failed to set alternate interface = %d.\n", i);
MOD_DEC_USE_COUNT;
up(&pdev->modlock);
return -EINVAL;
}
......@@ -1064,15 +1029,8 @@ static void pwc_video_close(struct video_device *vdev)
int i;
Trace(TRACE_OPEN, "video_close called(0x%p).\n", vdev);
if (vdev == NULL) {
Err("video_close() called with NULL structure?\n");
return;
}
pdev = (struct pwc_device *)vdev->priv;
if (pdev == NULL) {
Err("video_close() called with NULL pwc_device.\n");
return;
}
if (pdev->vopen == 0)
Info("video_close() called on closed device?\n");
......@@ -1116,10 +1074,12 @@ static void pwc_video_close(struct video_device *vdev)
/* wake up _disconnect() routine */
if (pdev->unplugged)
wake_up(&pdev->remove_ok);
MOD_DEC_USE_COUNT;
}
/*
* FIXME: what about two parallel reads ????
*/
static long pwc_video_read(struct video_device *vdev, char *buf, unsigned long count, int noblock)
{
struct pwc_device *pdev;
......@@ -1309,6 +1269,9 @@ static int pwc_video_ioctl(struct video_device *vdev, unsigned int cmd, void *ar
if (copy_from_user(&p, arg, sizeof(p)))
return -EFAULT;
/*
* FIXME: Suppose we are mid read
*/
pwc_set_brightness(pdev, p.brightness);
pwc_set_contrast(pdev, p.contrast);
pwc_set_gamma(pdev, p.whiteness);
......@@ -1482,6 +1445,8 @@ static int pwc_video_ioctl(struct video_device *vdev, unsigned int cmd, void *ar
conflict with read(), but any programmer that uses
read() and mmap() simultaneously should be given
a job at Micro$oft. As janitor.
FIXME: needs auditing for safety.
*/
while (pdev->full_frames == NULL) {
interruptible_sleep_on(&pdev->frameq);
......@@ -1521,11 +1486,9 @@ static int pwc_video_mmap(struct video_device *vdev, const char *adr, unsigned l
unsigned long page, pos;
Trace(TRACE_READ, "mmap(0x%p, 0x%p, %lu) called.\n", vdev, adr, size);
if (vdev == NULL)
return -EFAULT;
pdev = vdev->priv;
if (pdev == NULL)
return -EFAULT;
/* FIXME - audit mmap during a read */
pos = (unsigned long)pdev->image_data;
while (size > 0) {
......@@ -1551,11 +1514,7 @@ static int pwc_video_mmap(struct video_device *vdev, const char *adr, unsigned l
* is loaded.
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
static void *usb_pwc_probe(struct usb_device *udev, unsigned int ifnum, const struct usb_device_id *id)
#else
static void *usb_pwc_probe(struct usb_device *udev, unsigned int ifnum)
#endif
{
struct pwc_device *pdev = NULL;
struct video_device *vdev;
......@@ -1579,8 +1538,6 @@ static void *usb_pwc_probe(struct usb_device *udev, unsigned int ifnum)
vendor_id = udev->descriptor.idVendor;
product_id = udev->descriptor.idProduct;
if (vendor_id != 0x0471 && vendor_id != 0x069A)
return NULL; /* Not Philips or Askey, for sure. */
if (vendor_id == 0x0471) {
switch (product_id) {
......@@ -1621,7 +1578,7 @@ static void *usb_pwc_probe(struct usb_device *udev, unsigned int ifnum)
break;
}
}
if (vendor_id == 0x069A) {
else if (vendor_id == 0x069A) {
switch(product_id) {
case 0x0001:
Info("Askey VC010 type 1 USB webcam detected.\n");
......@@ -1632,6 +1589,8 @@ static void *usb_pwc_probe(struct usb_device *udev, unsigned int ifnum)
break;
}
}
else return NULL; /* Not Philips or Askey, for sure. */
if (udev->descriptor.bNumConfigurations > 1)
Info("Warning: more than 1 configuration available.\n");
......@@ -1665,9 +1624,7 @@ static void *usb_pwc_probe(struct usb_device *udev, unsigned int ifnum)
}
memcpy(vdev, &pwc_template, sizeof(pwc_template));
sprintf(vdev->name, "Philips %d webcam", pdev->type);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,3)
SET_MODULE_OWNER(vdev);
#endif
pdev->vdev = vdev;
vdev->priv = pdev;
......@@ -1827,22 +1784,10 @@ static int __init usb_pwc_init(void)
}
if (palette) {
/* Determine default palette */
if (!strcmp(palette, "bgr24"))
default_palette = VIDEO_PALETTE_RGB24;
else if (!strcmp(palette, "rgb24"))
default_palette = VIDEO_PALETTE_RGB24 | 0x80;
else if (!strcmp(palette, "bgr32"))
default_palette = VIDEO_PALETTE_RGB32;
else if (!strcmp(palette, "rgb32"))
default_palette = VIDEO_PALETTE_RGB32 | 0x80;
else if (!strcmp(palette, "yuyv"))
default_palette = VIDEO_PALETTE_YUYV;
else if (!strcmp(palette, "yuv420"))
if (!strcmp(palette, "yuv420"))
default_palette = VIDEO_PALETTE_YUV420;
else if (!strcmp(palette, "yuv420p"))
default_palette = VIDEO_PALETTE_YUV420P;
else {
Err("Palette not recognized: try palette=[bgr24 | rgb24 | bgr32 | rgb32 | yuyv | yuv420 | yuv420p].\n");
Err("Palette not recognized: try palette=yuv420.\n");
return -EINVAL;
}
Info("Default palette set to %d.\n", default_palette);
......
......@@ -21,14 +21,12 @@
themselves. It also has a decompressor wrapper function.
*/
#include "ccvt.h"
#include "vcvt.h"
#include "pwc.h"
#include "pwc-uncompress.h"
/* This contains a list of all registered decompressors */
LIST_HEAD(pwc_decompressor_list);
static LIST_HEAD(pwc_decompressor_list);
/* Should the pwc_decompress structure ever change, we increase the
version number so that we don't get nasty surprises, or can
......@@ -126,50 +124,14 @@ int pwc_decompress(struct pwc_device *pdev)
if (pdev->image.x == pdev->view.x && pdev->image.y == pdev->view.y) {
/* Sizes matches; make it quick */
switch(pdev->vpalette) {
case VIDEO_PALETTE_RGB24 | 0x80:
ccvt_420i_rgb24(pdev->image.x, pdev->image.y, yuv, image);
break;
case VIDEO_PALETTE_RGB24:
ccvt_420i_bgr24(pdev->image.x, pdev->image.y, yuv, image);
break;
case VIDEO_PALETTE_RGB32 | 0x80:
ccvt_420i_rgb32(pdev->image.x, pdev->image.y, yuv, image);
break;
case VIDEO_PALETTE_RGB32:
ccvt_420i_bgr32(pdev->image.x, pdev->image.y, yuv, image);
break;
case VIDEO_PALETTE_YUYV:
case VIDEO_PALETTE_YUV422:
ccvt_420i_yuyv(pdev->image.x, pdev->image.y, yuv, image);
break;
case VIDEO_PALETTE_YUV420:
memcpy(image, yuv, pdev->image.size);
break;
case VIDEO_PALETTE_YUV420P:
n = pdev->image.x * pdev->image.y;
ccvt_420i_420p(pdev->image.x, pdev->image.y, yuv, image, image + n, image + n + (n / 4));
break;
}
}
else {
/* Size mismatch; use viewport conversion routines */
switch(pdev->vpalette) {
case VIDEO_PALETTE_RGB24 | 0x80:
vcvt_420i_rgb24(pdev->image.x, pdev->image.y, pdev->view.x, yuv, image + pdev->offset.size);
break;
case VIDEO_PALETTE_RGB24:
vcvt_420i_bgr24(pdev->image.x, pdev->image.y, pdev->view.x, yuv, image + pdev->offset.size);
break;
case VIDEO_PALETTE_RGB32 | 0x80:
vcvt_420i_rgb32(pdev->image.x, pdev->image.y, pdev->view.x, yuv, image + pdev->offset.size);
break;
case VIDEO_PALETTE_RGB32:
vcvt_420i_bgr32(pdev->image.x, pdev->image.y, pdev->view.x, yuv, image + pdev->offset.size);
break;
case VIDEO_PALETTE_YUYV:
case VIDEO_PALETTE_YUV422:
vcvt_420i_yuyv(pdev->image.x, pdev->image.y, pdev->view.x, yuv, image + pdev->offset.size);
break;
case VIDEO_PALETTE_YUV420:
dst = image + pdev->offset.size;
w = pdev->view.x * 6;
......@@ -180,41 +142,16 @@ int pwc_decompress(struct pwc_device *pdev)
yuv += c;
}
break;
case VIDEO_PALETTE_YUV420P:
n = pdev->view.x * pdev->view.y;
vcvt_420i_420p(pdev->image.x, pdev->image.y, pdev->view.x, yuv,
image + pdev->offset.size,
image + n + pdev->offset.size / 4,
image + n + n / 4 + pdev->offset.size / 4);
break;
}
}
return 0;
}
/* wrapper functions.
By using these wrapper functions and exporting them with no VERSIONING,
I can be sure the pwcx.o module will load on all systems.
*/
void *pwc_kmalloc(size_t size, int priority)
{
return kmalloc(size, priority);
}
void pwc_kfree(const void *pointer)
{
kfree(pointer);
}
/* Make sure these functions are available for the decompressor plugin
both when this code is compiled into the kernel or as a module.
We are using unversioned names!
*/
EXPORT_SYMBOL_NOVERS(pwc_decompressor_version);
EXPORT_SYMBOL_NOVERS(pwc_register_decompressor);
EXPORT_SYMBOL_NOVERS(pwc_unregister_decompressor);
EXPORT_SYMBOL_NOVERS(pwc_find_decompressor);
EXPORT_SYMBOL_NOVERS(pwc_kmalloc);
EXPORT_SYMBOL_NOVERS(pwc_kfree);
EXPORT_SYMBOL(pwc_register_decompressor);
EXPORT_SYMBOL(pwc_unregister_decompressor);
EXPORT_SYMBOL(pwc_find_decompressor);
......@@ -18,27 +18,16 @@
#ifndef PWC_H
#define PWC_H
#ifdef __KERNEL__
#include <linux/config.h>
#include <linux/module.h>
#include <asm/semaphore.h>
#include <asm/errno.h>
#include <linux/usb.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#else
#include <errno.h>
#include <sys/types.h>
#define printk printf
#define KERN_DEBUG "<7>"
#define KERN_INFO "<6>"
#define KERN_ERR "<3>"
#endif
#include <linux/config.h>
#include <linux/videodev.h>
#include <linux/wait.h>
#include <asm/semaphore.h>
#include <asm/errno.h>
/* Defines and structures for the Philips webcam */
/* Used for checking memory corruption/pointer validation */
#define PWC_MAGIC 0x89DC10ABUL
......@@ -56,10 +45,10 @@
#define TRACE_SIZE 0x0040
#define TRACE_SEQUENCE 0x1000
#define Trace(R, A...) if (pwc_trace & R) printk(KERN_DEBUG PWC_NAME " " ##A)
#define Debug(A...) printk(KERN_DEBUG PWC_NAME " " ##A)
#define Info(A...) printk(KERN_INFO PWC_NAME " " ##A)
#define Err(A...) printk(KERN_ERR PWC_NAME " " ##A)
#define Trace(R, A...) if (pwc_trace & R) printk(KERN_DEBUG PWC_NAME " " A)
#define Debug(A...) printk(KERN_DEBUG PWC_NAME " " A)
#define Info(A...) printk(KERN_INFO PWC_NAME " " A)
#define Err(A...) printk(KERN_ERR PWC_NAME " " A)
/* Defines for ToUCam cameras */
......@@ -122,9 +111,7 @@ struct pwc_iso_buf
void *data;
int length;
int read;
#ifdef __KERNEL__
purb_t urb;
#endif
};
/* intermediate buffers with raw data from the USB cam */
......@@ -218,7 +205,6 @@ struct pwc_device
int image_read_pos; /* In case we read data in pieces, keep track of were we are in the imagebuffer */
int image_used[MAX_IMAGES]; /* For MCAPTURE and SYNC */
#ifdef __KERNEL__
/* Kernel specific structures. These were once moved to the end
of the structure and padded with bytes after I found out
some of these have different sizes in different kernel versions.
......@@ -237,7 +223,6 @@ struct pwc_device
#if PWC_INT_PIPE
void *usb_int_handler; /* for the interrupt endpoint */
#endif
#endif
};
/* Enumeration of image sizes */
......
/*
*
* Hardware accelerated Matrox Millennium I, II, Mystique, G100, G200, G400 and G450.
*
* (c) 1998-2001 Petr Vandrovec <vandrove@vc.cvut.cz>
*
* Version: 1.52 2001/05/25
*
*/
#include "matroxfb_maven.h"
#include "matroxfb_crtc2.h"
#include "matroxfb_misc.h"
......@@ -649,6 +659,9 @@ static int matroxfb_dh_regit(CPMINFO struct matroxfb_dh_fb_info* m2info) {
void* oldcrtc2;
d = kmalloc(sizeof(*d), GFP_KERNEL);
if (!d) {
return -ENOMEM;
}
memset(d, 0, sizeof(*d));
......@@ -800,7 +813,7 @@ static void matroxfb_crtc2_exit(void) {
matroxfb_unregister_driver(&crtc2);
}
MODULE_AUTHOR("(c) 1999,2000 Petr Vandrovec <vandrove@vc.cvut.cz>");
MODULE_AUTHOR("(c) 1999-2001 Petr Vandrovec <vandrove@vc.cvut.cz>");
MODULE_DESCRIPTION("Matrox G400 CRTC2 driver");
module_init(matroxfb_crtc2_init);
module_exit(matroxfb_crtc2_exit);
......
......@@ -626,7 +626,7 @@ int inode_has_buffers(struct inode *inode)
to do in order to release the ramdisk memory is to destroy dirty buffers.
These are two special cases. Normal usage imply the device driver
to issue a sync on the device (without waiting I/O completation) and
to issue a sync on the device (without waiting I/O completion) and
then an invalidate_buffers call that doesn't trash dirty buffers. */
void __invalidate_buffers(kdev_t dev, int destroy_dirty_buffers)
{
......@@ -760,7 +760,12 @@ static void refill_freelist(int size)
balance_dirty(NODEV);
if (free_shortage())
page_launder(GFP_BUFFER, 0);
grow_buffers(size);
if (!grow_buffers(size)) {
wakeup_bdflush(1);
current->policy |= SCHED_YIELD;
__set_current_state(TASK_RUNNING);
schedule();
}
}
void init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
......@@ -1025,12 +1030,13 @@ struct buffer_head * getblk(kdev_t dev, int block, int size)
write_unlock(&hash_table_lock);
spin_unlock(&lru_list_lock);
refill_freelist(size);
/* FIXME: getblk should fail if there's not enough memory */
goto repeat;
}
/* -1 -> no need to flush
0 -> async flush
1 -> sync flush (wait for I/O completation) */
1 -> sync flush (wait for I/O completion) */
int balance_dirty_state(kdev_t dev)
{
unsigned long dirty, tot, hard_dirty_limit, soft_dirty_limit;
......@@ -1419,6 +1425,7 @@ static void create_empty_buffers(struct page *page, kdev_t dev, unsigned long bl
{
struct buffer_head *bh, *head, *tail;
/* FIXME: create_buffers should fail if there's not enough memory */
head = create_buffers(page, blocksize, 1);
if (page->buffers)
BUG();
......@@ -2341,11 +2348,9 @@ int try_to_free_buffers(struct page * page, int wait)
spin_lock(&free_list[index].lock);
tmp = bh;
do {
struct buffer_head *p = tmp;
tmp = tmp->b_this_page;
if (buffer_busy(p))
if (buffer_busy(tmp))
goto busy_buffer_page;
tmp = tmp->b_this_page;
} while (tmp != bh);
spin_lock(&unused_list_lock);
......
......@@ -405,133 +405,110 @@ static void proc_kill_inodes(struct proc_dir_entry *de)
file_list_unlock();
}
struct proc_dir_entry *proc_symlink(const char *name,
struct proc_dir_entry *parent, const char *dest)
static struct proc_dir_entry *proc_create(struct proc_dir_entry **parent,
const char *name,
mode_t mode,
nlink_t nlink)
{
struct proc_dir_entry *ent = NULL;
const char *fn = name;
int len;
if (!parent && xlate_proc_name(name, &parent, &fn) != 0)
/* make sure name is valid */
if (!name || !strlen(name)) goto out;
if (!(*parent) && xlate_proc_name(name, parent, &fn) != 0)
goto out;
len = strlen(fn);
ent = kmalloc(sizeof(struct proc_dir_entry) + len + 1, GFP_KERNEL);
if (!ent)
goto out;
if (!ent) goto out;
memset(ent, 0, sizeof(struct proc_dir_entry));
memcpy(((char *) ent) + sizeof(*ent), fn, len + 1);
memcpy(((char *) ent) + sizeof(struct proc_dir_entry), fn, len + 1);
ent->name = ((char *) ent) + sizeof(*ent);
ent->namelen = len;
ent->nlink = 1;
ent->mode = S_IFLNK|S_IRUGO|S_IWUGO|S_IXUGO;
ent->mode = mode;
ent->nlink = nlink;
out:
return ent;
}
struct proc_dir_entry *proc_symlink(const char *name,
struct proc_dir_entry *parent, const char *dest)
{
struct proc_dir_entry *ent;
ent = proc_create(&parent,name,
(S_IFLNK | S_IRUGO | S_IWUGO | S_IXUGO),1);
if (ent) {
ent->data = kmalloc((ent->size=strlen(dest))+1, GFP_KERNEL);
if (!ent->data) {
if (ent->data) {
strcpy((char*)ent->data,dest);
proc_register(parent, ent);
} else {
kfree(ent);
ent = NULL;
goto out;
}
strcpy((char*)ent->data,dest);
proc_register(parent, ent);
out:
}
return ent;
}
struct proc_dir_entry *proc_mknod(const char *name, mode_t mode,
struct proc_dir_entry *parent, kdev_t rdev)
{
struct proc_dir_entry *ent = NULL;
const char *fn = name;
int len;
if (!parent && xlate_proc_name(name, &parent, &fn) != 0)
goto out;
len = strlen(fn);
struct proc_dir_entry *ent;
ent = kmalloc(sizeof(struct proc_dir_entry) + len + 1, GFP_KERNEL);
if (!ent)
goto out;
memset(ent, 0, sizeof(struct proc_dir_entry));
memcpy(((char *) ent) + sizeof(*ent), fn, len + 1);
ent->name = ((char *) ent) + sizeof(*ent);
ent->namelen = len;
ent->nlink = 1;
ent->mode = mode;
ent = proc_create(&parent,name,mode,1);
if (ent) {
ent->rdev = rdev;
proc_register(parent, ent);
out:
}
return ent;
}
struct proc_dir_entry *proc_mkdir(const char *name, struct proc_dir_entry *parent)
{
struct proc_dir_entry *ent = NULL;
const char *fn = name;
int len;
struct proc_dir_entry *ent;
if (!parent && xlate_proc_name(name, &parent, &fn) != 0)
goto out;
len = strlen(fn);
ent = kmalloc(sizeof(struct proc_dir_entry) + len + 1, GFP_KERNEL);
if (!ent)
goto out;
memset(ent, 0, sizeof(struct proc_dir_entry));
memcpy(((char *) ent) + sizeof(*ent), fn, len + 1);
ent->name = ((char *) ent) + sizeof(*ent);
ent->namelen = len;
ent = proc_create(&parent,name,
(S_IFDIR | S_IRUGO | S_IXUGO),2);
if (ent) {
ent->proc_fops = &proc_dir_operations;
ent->proc_iops = &proc_dir_inode_operations;
ent->nlink = 2;
ent->mode = S_IFDIR | S_IRUGO | S_IXUGO;
proc_register(parent, ent);
out:
}
return ent;
}
struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode,
struct proc_dir_entry *parent)
{
struct proc_dir_entry *ent = NULL;
const char *fn = name;
int len;
if (!parent && xlate_proc_name(name, &parent, &fn) != 0)
goto out;
len = strlen(fn);
ent = kmalloc(sizeof(struct proc_dir_entry) + len + 1, GFP_KERNEL);
if (!ent)
goto out;
memset(ent, 0, sizeof(struct proc_dir_entry));
memcpy(((char *) ent) + sizeof(*ent), fn, len + 1);
ent->name = ((char *) ent) + sizeof(*ent);
ent->namelen = len;
struct proc_dir_entry *ent;
nlink_t nlink;
if (S_ISDIR(mode)) {
if ((mode & S_IALLUGO) == 0)
mode |= S_IRUGO | S_IXUGO;
ent->proc_fops = &proc_dir_operations;
ent->proc_iops = &proc_dir_inode_operations;
ent->nlink = 2;
nlink = 2;
} else {
if ((mode & S_IFMT) == 0)
mode |= S_IFREG;
if ((mode & S_IALLUGO) == 0)
mode |= S_IRUGO;
ent->nlink = 1;
nlink = 1;
}
ent->mode = mode;
ent = proc_create(&parent,name,mode,nlink);
if (ent) {
if (S_ISDIR(mode)) {
ent->proc_fops = &proc_dir_operations;
ent->proc_iops = &proc_dir_inode_operations;
}
proc_register(parent, ent);
out:
}
return ent;
}
......
......@@ -282,6 +282,21 @@ struct file_system_type *get_fs_type(const char *name)
static LIST_HEAD(vfsmntlist);
struct vfsmount *alloc_vfsmnt(void)
{
struct vfsmount *mnt = kmalloc(sizeof(struct vfsmount), GFP_KERNEL);
if (mnt) {
memset(mnt, 0, sizeof(struct vfsmount));
atomic_set(&mnt->mnt_count,1);
INIT_LIST_HEAD(&mnt->mnt_clash);
INIT_LIST_HEAD(&mnt->mnt_child);
INIT_LIST_HEAD(&mnt->mnt_mounts);
INIT_LIST_HEAD(&mnt->mnt_list);
mnt->mnt_owner = current->uid;
}
return mnt;
}
static void detach_mnt(struct vfsmount *mnt, struct nameidata *old_nd)
{
old_nd->dentry = mnt->mnt_mountpoint;
......@@ -314,13 +329,6 @@ static void attach_mnt(struct vfsmount *mnt, struct nameidata *nd)
* Potential reason for failure (aside of trivial lack of memory) is a
* deleted mountpoint. Caller must hold ->i_zombie on mountpoint
* dentry (if any).
*
* Node is marked as MNT_VISIBLE (visible in /proc/mounts) unless both
* @nd and @devname are %NULL. It works since we pass non-%NULL @devname
* when we are mounting root and kern_mount() filesystems are deviceless.
* If we will get a kern_mount() filesystem with nontrivial @devname we
* will have to pass the visibility flag explicitly, so if we will add
* support for such beasts we'll have to change prototype.
*/
static struct vfsmount *add_vfsmnt(struct nameidata *nd,
......@@ -331,13 +339,9 @@ static struct vfsmount *add_vfsmnt(struct nameidata *nd,
struct super_block *sb = root->d_inode->i_sb;
char *name;
mnt = kmalloc(sizeof(struct vfsmount), GFP_KERNEL);
mnt = alloc_vfsmnt();
if (!mnt)
goto out;
memset(mnt, 0, sizeof(struct vfsmount));
if (nd || dev_name)
mnt->mnt_flags = MNT_VISIBLE;
/* It may be NULL, but who cares? */
if (dev_name) {
......@@ -347,8 +351,6 @@ static struct vfsmount *add_vfsmnt(struct nameidata *nd,
mnt->mnt_devname = name;
}
}
mnt->mnt_owner = current->uid;
atomic_set(&mnt->mnt_count,1);
mnt->mnt_sb = sb;
spin_lock(&dcache_lock);
......@@ -361,13 +363,12 @@ static struct vfsmount *add_vfsmnt(struct nameidata *nd,
} else {
mnt->mnt_mountpoint = mnt->mnt_root;
mnt->mnt_parent = mnt;
INIT_LIST_HEAD(&mnt->mnt_child);
INIT_LIST_HEAD(&mnt->mnt_clash);
}
INIT_LIST_HEAD(&mnt->mnt_mounts);
list_add(&mnt->mnt_instances, &sb->s_mounts);
list_add(&mnt->mnt_list, vfsmntlist.prev);
spin_unlock(&dcache_lock);
if (sb->s_type->fs_flags & FS_SINGLE)
get_filesystem(sb->s_type);
out:
return mnt;
fail:
......@@ -500,8 +501,6 @@ int get_filesystem_info( char *buf )
for (p = vfsmntlist.next; p != &vfsmntlist; p = p->next) {
struct vfsmount *tmp = list_entry(p, struct vfsmount, mnt_list);
if (!(tmp->mnt_flags & MNT_VISIBLE))
continue;
path = d_path(tmp->mnt_root, tmp, buffer, PAGE_SIZE);
if (!path)
continue;
......@@ -855,7 +854,6 @@ static struct super_block *get_sb_single(struct file_system_type *fs_type,
sb = fs_type->kern_mnt->mnt_sb;
if (!sb)
BUG();
get_filesystem(fs_type);
do_remount_sb(sb, flags, data);
return sb;
}
......@@ -947,21 +945,31 @@ static int do_remount_sb(struct super_block *sb, int flags, char *data)
struct vfsmount *kern_mount(struct file_system_type *type)
{
kdev_t dev = get_unnamed_dev();
struct super_block *sb;
struct vfsmount *mnt;
if (!dev)
struct vfsmount *mnt = alloc_vfsmnt();
kdev_t dev;
if (!mnt)
return ERR_PTR(-ENOMEM);
dev = get_unnamed_dev();
if (!dev) {
kfree(mnt);
return ERR_PTR(-EMFILE);
}
sb = read_super(dev, NULL, type, 0, NULL, 0);
if (!sb) {
put_unnamed_dev(dev);
kfree(mnt);
return ERR_PTR(-EINVAL);
}
mnt = add_vfsmnt(NULL, sb->s_root, NULL);
if (!mnt) {
kill_super(sb);
return ERR_PTR(-ENOMEM);
}
mnt->mnt_sb = sb;
mnt->mnt_root = dget(sb->s_root);
mnt->mnt_mountpoint = mnt->mnt_root;
mnt->mnt_parent = mnt;
spin_lock(&dcache_lock);
list_add(&mnt->mnt_instances, &sb->s_mounts);
spin_unlock(&dcache_lock);
type->kern_mnt = mnt;
return mnt;
}
......@@ -1158,8 +1166,6 @@ static int do_loopback(char *old_name, char *new_name)
goto out2;
err = -ENOMEM;
if (old_nd.mnt->mnt_sb->s_type->fs_flags & FS_SINGLE)
get_filesystem(old_nd.mnt->mnt_sb->s_type);
down(&mount_sem);
/* there we go */
......@@ -1170,8 +1176,6 @@ static int do_loopback(char *old_name, char *new_name)
err = 0;
up(&new_nd.dentry->d_inode->i_zombie);
up(&mount_sem);
if (err && old_nd.mnt->mnt_sb->s_type->fs_flags & FS_SINGLE)
put_filesystem(old_nd.mnt->mnt_sb->s_type);
out2:
path_release(&new_nd);
out1:
......@@ -1362,8 +1366,6 @@ long do_mount(char * dev_name, char * dir_name, char *type_page,
return retval;
fail:
if (fstype->fs_flags & FS_SINGLE)
put_filesystem(fstype);
kill_super(sb);
goto unlock_out;
}
......
......@@ -12,8 +12,6 @@
#define _LINUX_MOUNT_H
#ifdef __KERNEL__
#define MNT_VISIBLE 1
struct vfsmount
{
struct dentry *mnt_mountpoint; /* dentry of mountpoint */
......
......@@ -201,13 +201,21 @@
/* Power Management Registers */
#define PCI_PM_PMC 2 /* PM Capabilities Register */
#define PCI_PM_CAP_VER_MASK 0x0007 /* Version */
#define PCI_PM_CAP_PME_CLOCK 0x0008 /* PME clock required */
#define PCI_PM_CAP_AUX_POWER 0x0010 /* Auxilliary power support */
#define PCI_PM_CAP_RESERVED 0x0010 /* Reserved field */
#define PCI_PM_CAP_DSI 0x0020 /* Device specific initialization */
#define PCI_PM_CAP_AUX_POWER 0x01C0 /* Auxilliary power support mask */
#define PCI_PM_CAP_D1 0x0200 /* D1 power state support */
#define PCI_PM_CAP_D2 0x0400 /* D2 power state support */
#define PCI_PM_CAP_PME 0x0800 /* PME pin supported */
#define PCI_PM_CAP_PME_MASK 0xF800 /* PME Mask of all supported states */
#define PCI_PM_CAP_PME_D0 0x0800 /* PME# from D0 */
#define PCI_PM_CAP_PME_D1 0x1000 /* PME# from D1 */
#define PCI_PM_CAP_PME_D2 0x2000 /* PME# from D2 */
#define PCI_PM_CAP_PME_D3 0x4000 /* PME# from D3 (hot) */
#define PCI_PM_CAP_PME_D3cold 0x8000 /* PME# from D3 (cold) */
#define PCI_PM_CTRL 4 /* PM control and status register */
#define PCI_PM_CTRL_STATE_MASK 0x0003 /* Current power state (D0 to D3) */
#define PCI_PM_CTRL_PME_ENABLE 0x0100 /* PME pin enable */
......
......@@ -159,6 +159,19 @@ void kunmap_high(struct page *page)
spin_unlock(&kmap_lock);
}
#define POOL_SIZE 32
/*
* This lock gets no contention at all, normally.
*/
static spinlock_t emergency_lock = SPIN_LOCK_UNLOCKED;
int nr_emergency_pages;
static LIST_HEAD(emergency_pages);
int nr_emergency_bhs;
static LIST_HEAD(emergency_bhs);
/*
* Simple bounce buffer support for highmem pages.
* This will be moved to the block layer in 2.5.
......@@ -203,17 +216,72 @@ static inline void copy_to_high_bh_irq (struct buffer_head *to,
/*
 * Common completion handler for the bounce-buffer read and write
 * paths: complete the original (highmem) buffer_head, then recycle
 * the bounce page and bounce bh into the emergency pool when it has
 * room, otherwise release them.  May run in interrupt context,
 * hence the irqsave locking around the pool.
 *
 * NOTE(review): the first __free_page(bh->b_page) statement below
 * appears to be pre-patch residue from the diff this listing was
 * rendered from -- the page is freed or pooled again just after,
 * which would be a double free.  Verify against the original
 * mm/highmem.c before trusting this listing.
 */
static inline void bounce_end_io (struct buffer_head *bh, int uptodate)
{
	struct page *page;
	struct buffer_head *bh_orig = (struct buffer_head *)(bh->b_private);
	unsigned long flags;

	bh_orig->b_end_io(bh_orig, uptodate);
	__free_page(bh->b_page);
	page = bh->b_page;
	spin_lock_irqsave(&emergency_lock, flags);
	if (nr_emergency_pages >= POOL_SIZE)
		__free_page(page);
	else {
		/*
		 * We are abusing page->list to manage
		 * the highmem emergency pool:
		 */
		list_add(&page->list, &emergency_pages);
		nr_emergency_pages++;
	}
	if (nr_emergency_bhs >= POOL_SIZE) {
#ifdef HIGHMEM_DEBUG
		/* Don't clobber the constructed slab cache */
		init_waitqueue_head(&bh->b_wait);
#endif
		kmem_cache_free(bh_cachep, bh);
	} else {
		/*
		 * Ditto in the bh case, here we abuse b_inode_buffers:
		 */
		list_add(&bh->b_inode_buffers, &emergency_bhs);
		nr_emergency_bhs++;
	}
	spin_unlock_irqrestore(&emergency_lock, flags);
}
/*
 * Pre-fill the emergency reserve used by the bounce-buffer code:
 * up to POOL_SIZE pages and POOL_SIZE buffer heads are set aside so
 * highmem I/O can still make progress when GFP_BUFFER allocations
 * fail.  Runs once at boot via __initcall(); always returns 0 --
 * a partially filled pool is tolerated (it only shrinks the
 * reserve), so allocation failure here merely logs and stops.
 */
static __init int init_emergency_pool(void)
{
	spin_lock_irq(&emergency_lock);
	while (nr_emergency_pages < POOL_SIZE) {
		struct page * page = alloc_page(GFP_ATOMIC);
		if (!page) {
			printk("couldn't refill highmem emergency pages");
			break;
		}
		/* page->list is reused to chain reserved pages */
		list_add(&page->list, &emergency_pages);
		nr_emergency_pages++;
	}
	while (nr_emergency_bhs < POOL_SIZE) {
		struct buffer_head * bh = kmem_cache_alloc(bh_cachep, SLAB_ATOMIC);
		if (!bh) {
			printk("couldn't refill highmem emergency bhs");
			break;
		}
		/* b_inode_buffers is reused to chain reserved bhs */
		list_add(&bh->b_inode_buffers, &emergency_bhs);
		nr_emergency_bhs++;
	}
	spin_unlock_irq(&emergency_lock);
	printk("allocated %d pages and %d bhs reserved for the highmem bounces\n",
	       nr_emergency_pages, nr_emergency_bhs);

	return 0;
}
__initcall(init_emergency_pool);
static void bounce_end_io_write (struct buffer_head *bh, int uptodate)
{
bounce_end_io(bh, uptodate);
......@@ -228,6 +296,82 @@ static void bounce_end_io_read (struct buffer_head *bh, int uptodate)
bounce_end_io(bh, uptodate);
}
/*
 * Get a page for a bounce buffer.  Try a normal GFP_BUFFER
 * allocation first; on failure poke bdflush, fall back to the
 * emergency reserve, and as a last resort yield the CPU until I/O
 * completes and retry.  Never returns NULL -- it loops until a
 * page is obtained.
 */
struct page *alloc_bounce_page (void)
{
	struct page *page;

	for (;;) {
		struct list_head *entry;

		page = alloc_page(GFP_BUFFER);
		if (page)
			break;

		/*
		 * Allocation failed: kick the VM so it makes progress
		 * while we dip into the emergency rations.
		 */
		wakeup_bdflush(0);

		/* Try the reserved pool next. */
		spin_lock_irq(&emergency_lock);
		entry = emergency_pages.next;
		if (entry != &emergency_pages) {
			page = list_entry(entry, struct page, list);
			list_del(entry);
			nr_emergency_pages--;
		}
		spin_unlock_irq(&emergency_lock);
		if (page)
			break;

		/* Nothing left: wait for I/O completion, then retry. */
		run_task_queue(&tq_disk);
		current->policy |= SCHED_YIELD;
		__set_current_state(TASK_RUNNING);
		schedule();
	}
	return page;
}
/*
 * Get a buffer_head for a bounce buffer.  Mirrors
 * alloc_bounce_page(): slab allocation first, then the emergency
 * reserve, then yield-and-retry until one becomes available.
 * Never returns NULL.
 */
struct buffer_head *alloc_bounce_bh (void)
{
	struct buffer_head *bh;

	for (;;) {
		struct list_head *entry;

		bh = kmem_cache_alloc(bh_cachep, SLAB_BUFFER);
		if (bh)
			break;

		/*
		 * Allocation failed: kick the VM so it makes progress
		 * while we dip into the emergency rations.
		 */
		wakeup_bdflush(0);

		/* Try the reserved pool next. */
		spin_lock_irq(&emergency_lock);
		entry = emergency_bhs.next;
		if (entry != &emergency_bhs) {
			bh = list_entry(entry, struct buffer_head,
					b_inode_buffers);
			list_del(entry);
			nr_emergency_bhs--;
		}
		spin_unlock_irq(&emergency_lock);
		if (bh)
			break;

		/* Nothing left: wait for I/O completion, then retry. */
		run_task_queue(&tq_disk);
		current->policy |= SCHED_YIELD;
		__set_current_state(TASK_RUNNING);
		schedule();
	}
	return bh;
}
struct buffer_head * create_bounce(int rw, struct buffer_head * bh_orig)
{
struct page *page;
......@@ -236,24 +380,15 @@ struct buffer_head * create_bounce(int rw, struct buffer_head * bh_orig)
if (!PageHighMem(bh_orig->b_page))
return bh_orig;
repeat_bh:
bh = kmem_cache_alloc(bh_cachep, SLAB_BUFFER);
if (!bh) {
wakeup_bdflush(1); /* Sets task->state to TASK_RUNNING */
goto repeat_bh;
}
bh = alloc_bounce_bh();
/*
* This is wasteful for 1k buffers, but this is a stopgap measure
* and we are being ineffective anyway. This approach simplifies
* things immensely. On boxes with more than 4GB RAM this should
* not be an issue anyway.
*/
repeat_page:
page = alloc_page(GFP_BUFFER);
if (!page) {
wakeup_bdflush(1); /* Sets task->state to TASK_RUNNING */
goto repeat_page;
}
page = alloc_bounce_page();
set_bh_page(bh, page, 0);
bh->b_next = NULL;
......
......@@ -251,10 +251,10 @@ static struct page * __alloc_pages_limit(zonelist_t *zonelist,
water_mark = z->pages_high;
}
if (z->free_pages + z->inactive_clean_pages > water_mark) {
if (z->free_pages + z->inactive_clean_pages >= water_mark) {
struct page *page = NULL;
/* If possible, reclaim a page directly. */
if (direct_reclaim && z->free_pages < z->pages_min + 8)
if (direct_reclaim)
page = reclaim_page(z);
/* If that fails, fall back to rmqueue. */
if (!page)
......@@ -299,21 +299,6 @@ struct page * __alloc_pages(zonelist_t *zonelist, unsigned long order)
if (order == 0 && (gfp_mask & __GFP_WAIT))
direct_reclaim = 1;
/*
* If we are about to get low on free pages and we also have
* an inactive page shortage, wake up kswapd.
*/
if (inactive_shortage() > inactive_target / 2 && free_shortage())
wakeup_kswapd();
/*
* If we are about to get low on free pages and cleaning
* the inactive_dirty pages would fix the situation,
* wake up bdflush.
*/
else if (free_shortage() && nr_inactive_dirty_pages > free_shortage()
&& nr_inactive_dirty_pages >= freepages.high)
wakeup_bdflush(0);
try_again:
/*
* First, see if we have any zones with lots of free memory.
......
......@@ -871,8 +871,11 @@ static int do_try_to_free_pages(unsigned int gfp_mask, int user)
* before we get around to moving them to the other
* list, so this is a relatively cheap operation.
*/
if (free_shortage())
if (free_shortage()) {
ret += page_launder(gfp_mask, user);
shrink_dcache_memory(DEF_PRIORITY, gfp_mask);
shrink_icache_memory(DEF_PRIORITY, gfp_mask);
}
/*
* If needed, we move pages from the active list
......@@ -882,23 +885,9 @@ static int do_try_to_free_pages(unsigned int gfp_mask, int user)
ret += refill_inactive(gfp_mask, user);
/*
* Delete pages from the inode and dentry caches and
* reclaim unused slab cache if memory is low.
*/
if (free_shortage()) {
shrink_dcache_memory(DEF_PRIORITY, gfp_mask);
shrink_icache_memory(DEF_PRIORITY, gfp_mask);
} else {
/*
* Illogical, but true. At least for now.
*
* If we're _not_ under shortage any more, we
* reap the caches. Why? Because a noticeable
* part of the caches are the buffer-heads,
* which we'll want to keep if under shortage.
* Reclaim unused slab cache if memory is low.
*/
kmem_cache_reap(gfp_mask);
}
return ret;
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment