Commit 0e1e2d82 authored Sep 10, 2002 by Anton Blanchard
Merge samba.org:/scratch/anton/linux-2.5
into samba.org:/scratch/anton/linux-2.5_bar
parents b2a5f08a 09589177
Showing 50 changed files with 866 additions and 404 deletions (+866, -404)
arch/i386/kernel/init_task.c (+1, -1)
arch/i386/pci/fixup.c (+17, -0)
arch/ppc/mm/fault.c (+1, -1)
arch/sparc/mm/fault.c (+1, -1)
drivers/block/loop.c (+3, -2)
drivers/block/rd.c (+13, -6)
drivers/pci/probe.c (+11, -15)
drivers/pci/quirks.c (+12, -0)
drivers/pci/setup-res.c (+1, -1)
drivers/scsi/qlogicfc.c (+2, -15)
drivers/scsi/qlogicfc.h (+1, -1)
drivers/scsi/st.c (+7, -7)
fs/affs/file.c (+9, -1)
fs/buffer.c (+41, -32)
fs/driverfs/inode.c (+8, -5)
fs/exec.c (+10, -39)
fs/ext2/dir.c (+5, -6)
fs/ext3/inode.c (+6, -24)
fs/fat/inode.c (+15, -2)
fs/jffs/inode-v23.c (+7, -4)
fs/jffs2/file.c (+3, -1)
fs/jfs/jfs_metapage.c (+0, -1)
fs/minix/dir.c (+7, -5)
fs/namei.c (+2, -1)
fs/partitions/check.c (+1, -2)
fs/ramfs/inode.c (+9, -5)
fs/reiserfs/inode.c (+13, -9)
fs/reiserfs/stree.c (+3, -3)
fs/reiserfs/tail_conversion.c (+3, -2)
fs/sysv/dir.c (+3, -0)
include/asm-i386/highmem.h (+3, -3)
include/asm-i386/spinlock.h (+2, -0)
include/asm-i386/tlbflush.h (+14, -4)
include/asm-ppc/hardirq.h (+2, -0)
include/asm-ppc/highmem.h (+5, -1)
include/asm-sparc/hardirq.h (+6, -0)
include/asm-sparc/highmem.h (+5, -1)
include/linux/highmem.h (+2, -2)
include/linux/init_task.h (+3, -2)
include/linux/pci.h (+9, -1)
include/linux/preempt.h (+11, -17)
include/linux/sched.h (+34, -19)
init/do_mounts.c (+19, -8)
kernel/exit.c (+12, -13)
kernel/fork.c (+11, -0)
kernel/sched.c (+9, -0)
kernel/signal.c (+415, -122)
mm/filemap.c (+83, -12)
mm/page_alloc.c (+4, -5)
mm/vmscan.c (+2, -2)
arch/i386/kernel/init_task.c

@@ -10,7 +10,7 @@
 static struct fs_struct init_fs = INIT_FS;
 static struct files_struct init_files = INIT_FILES;
-static struct signal_struct init_signals = INIT_SIGNALS;
+static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 struct mm_struct init_mm = INIT_MM(init_mm);
 /*
arch/i386/pci/fixup.c

@@ -166,6 +166,22 @@ static void __init pci_fixup_via_northbridge_bug(struct pci_dev *d)
 	}
 }

+/*
+ * For some reasons Intel decided that certain parts of their
+ * 815, 845 and some other chipsets must look like PCI-to-PCI bridges
+ * while they are obviously not. The 82801 family (AA, AB, BAM/CAM,
+ * BA/CA/DB and E) PCI bridges are actually HUB-to-PCI ones, according
+ * to Intel terminology. These devices do forward all addresses from
+ * system to PCI bus no matter what are their window settings, so they are
+ * "transparent" (or subtractive decoding) from programmers point of view.
+ */
+static void __init pci_fixup_transparent_bridge(struct pci_dev *dev)
+{
+	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI &&
+	    (dev->device & 0xff00) == 0x2400)
+		dev->transparent = 1;
+}
+
 struct pci_fixup pcibios_fixups[] = {
 	{ PCI_FIXUP_HEADER,	PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82451NX,	pci_fixup_i450nx },
 	{ PCI_FIXUP_HEADER,	PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82454GX,	pci_fixup_i450gx },
@@ -183,5 +199,6 @@ struct pci_fixup pcibios_fixups[] = {
 	{ PCI_FIXUP_HEADER,	PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8361,		pci_fixup_via_northbridge_bug },
 	{ PCI_FIXUP_HEADER,	PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8367_0,	pci_fixup_via_northbridge_bug },
 	{ PCI_FIXUP_HEADER,	PCI_VENDOR_ID_NCR,	PCI_DEVICE_ID_NCR_53C810,	pci_fixup_ncr53c810 },
+	{ PCI_FIXUP_HEADER,	PCI_VENDOR_ID_INTEL,	PCI_ANY_ID,			pci_fixup_transparent_bridge },
 	{ 0 }
 };
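Annotation: the mask test above keys on the Intel 82801 ("ICH") hub-to-PCI bridges named in the comment, whose PCI device IDs share the 0x24xx prefix. A minimal restatement with concrete IDs; the example IDs are illustrative and not taken from this patch:

	/* e.g. 0x2418 and 0x244e are 82801-family bridge IDs (examples only) */
	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI &&
	    (dev->device & 0xff00) == 0x2400)	/* any Intel 0x24xx bridge */
		dev->transparent = 1;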
arch/ppc/mm/fault.c

@@ -102,7 +102,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 #endif /* !CONFIG_4xx */
 #endif /* CONFIG_XMON || CONFIG_KGDB */

-	if (in_interrupt() || mm == NULL) {
+	if (in_atomic() || mm == NULL) {
 		bad_page_fault(regs, address, SIGSEGV);
 		return;
 	}
arch/sparc/mm/fault.c

@@ -233,7 +233,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
 	 * If we're in an interrupt or have no user
 	 * context, we must not take the fault..
 	 */
-	if (in_interrupt() || !mm)
+	if (in_atomic() || !mm)
 		goto no_context;

 	down_read(&mm->mmap_sem);
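Annotation: both fault-handler hunks above rely on the in_atomic() test that this same commit introduces in the per-arch hardirq.h headers (see the include/asm-ppc/hardirq.h and include/asm-sparc/hardirq.h sections below). Unlike in_interrupt(), it also catches code running with an elevated preempt count, which is how the new atomic kmaps mark themselves. The definition, as added later in this diff:

	#if CONFIG_PREEMPT
	# define in_atomic()	(preempt_count() != kernel_locked())
	#else
	# define in_atomic()	(preempt_count() != 0)
	#endif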
drivers/block/loop.c

@@ -210,8 +210,7 @@ do_lo_send(struct loop_device *lo, struct bio_vec *bvec, int bsize, loff_t pos)
 		goto fail;
 	if (aops->prepare_write(file, page, offset, offset + size))
 		goto unlock;
-	kaddr = page_address(page);
-	flush_dcache_page(page);
+	kaddr = kmap(page);
 	transfer_result = lo_do_transfer(lo, WRITE, kaddr + offset, data, size, IV);
 	if (transfer_result) {
 		/*
@@ -221,6 +220,8 @@ do_lo_send(struct loop_device *lo, struct bio_vec *bvec, int bsize, loff_t pos)
 		printk(KERN_ERR "loop: transfer error block %ld\n", index);
 		memset(kaddr + offset, 0, size);
 	}
+	flush_dcache_page(page);
+	kunmap(page);
 	if (aops->commit_write(file, page, offset, offset + size))
 		goto unlock;
 	if (transfer_result)
drivers/block/rd.c

@@ -109,9 +109,11 @@ int rd_blocksize = BLOCK_SIZE; /* blocksize of the RAM disks */
 static int ramdisk_readpage(struct file *file, struct page * page)
 {
 	if (!PageUptodate(page)) {
-		memset(kmap(page), 0, PAGE_CACHE_SIZE);
-		kunmap(page);
+		void *kaddr = kmap_atomic(page, KM_USER0);
+
+		memset(kaddr, 0, PAGE_CACHE_SIZE);
+		flush_dcache_page(page);
+		kunmap_atomic(kaddr, KM_USER0);
 		SetPageUptodate(page);
 	}
 	unlock_page(page);
@@ -121,9 +123,11 @@ static int ramdisk_readpage(struct file *file, struct page * page)
 static int ramdisk_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
 {
 	if (!PageUptodate(page)) {
-		void *addr = page_address(page);
-		memset(addr, 0, PAGE_CACHE_SIZE);
+		void *kaddr = kmap_atomic(page, KM_USER0);
+
+		memset(kaddr, 0, PAGE_CACHE_SIZE);
+		flush_dcache_page(page);
+		kunmap_atomic(kaddr, KM_USER0);
 		SetPageUptodate(page);
 	}
 	SetPageDirty(page);
@@ -178,8 +182,11 @@ static int rd_blkdev_pagecache_IO(int rw, struct bio_vec *vec,
 	err = 0;
 	if (!PageUptodate(page)) {
-		memset(kmap(page), 0, PAGE_CACHE_SIZE);
-		kunmap(page);
+		void *kaddr = kmap_atomic(page, KM_USER0);
+
+		memset(kaddr, 0, PAGE_CACHE_SIZE);
+		flush_dcache_page(page);
+		kunmap_atomic(kaddr, KM_USER0);
 		SetPageUptodate(page);
 	}
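Annotation: the rd.c hunks above are the first of many instances of the same conversion in this commit. The pattern, as a standalone sketch (KM_USER0 is one of the fixed per-CPU atomic-kmap slots):

	void *kaddr = kmap_atomic(page, KM_USER0);	/* may not sleep from here on */
	memset(kaddr, 0, PAGE_CACHE_SIZE);		/* touch the page via the temporary mapping */
	flush_dcache_page(page);			/* keep virtually-indexed D-caches coherent */
	kunmap_atomic(kaddr, KM_USER0);			/* release the slot; sleeping is legal again */

Unlike kmap()/kunmap(), the atomic variant never blocks, which is why the fault handlers earlier in this diff now test in_atomic().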
drivers/pci/probe.c

@@ -128,6 +128,13 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
 	if (!dev)		/* It's a host bus, nothing to read */
 		return;

+	if (dev->transparent) {
+		printk("Transparent bridge - %s\n", dev->name);
+		for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++)
+			child->resource[i] = child->parent->resource[i];
+		return;
+	}
+
 	for (i = 0; i < 3; i++)
 		child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];
@@ -149,13 +156,6 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
 		res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
 		res->start = base;
 		res->end = limit + 0xfff;
-	} else {
-		/*
-		 * Ugh. We don't know enough about this bridge. Just assume
-		 * that it's entirely transparent.
-		 */
-		printk(KERN_ERR "Unknown bridge resource %d: assuming transparent\n", 0);
-		child->resource[0] = child->parent->resource[0];
 	}

 	res = child->resource[1];
@@ -167,10 +167,6 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
 		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
 		res->start = base;
 		res->end = limit + 0xfffff;
-	} else {
-		/* See comment above. Same thing */
-		printk(KERN_ERR "Unknown bridge resource %d: assuming transparent\n", 1);
-		child->resource[1] = child->parent->resource[1];
 	}

 	res = child->resource[2];
@@ -197,10 +193,6 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
 		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM | IORESOURCE_PREFETCH;
 		res->start = base;
 		res->end = limit + 0xfffff;
-	} else {
-		/* See comments above */
-		printk(KERN_ERR "Unknown bridge resource %d: assuming transparent\n", 2);
-		child->resource[2] = child->parent->resource[2];
 	}
 }
@@ -389,6 +381,10 @@ int pci_setup_device(struct pci_dev * dev)
 	case PCI_HEADER_TYPE_BRIDGE:		    /* bridge header */
 		if (class != PCI_CLASS_BRIDGE_PCI)
 			goto bad;
+		/* The PCI-to-PCI bridge spec requires that subtractive
+		   decoding (i.e. transparent) bridge must have programming
+		   interface code of 0x01. */
+		dev->transparent = ((class & 0xff) == 1);
 		pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
 		break;
drivers/pci/quirks.c

@@ -471,6 +471,11 @@ static void __init quirk_dunord ( struct pci_dev * dev )
 	r->end = 0xffffff;
 }

+static void __init quirk_transparent_bridge(struct pci_dev *dev)
+{
+	dev->transparent = 1;
+}
+
 /*
  *  The main table of quirks.
  */
@@ -525,6 +530,13 @@ static struct pci_fixup pci_fixups[] __initdata = {
 	{ PCI_FIXUP_FINAL,	PCI_VENDOR_ID_AMD,	PCI_DEVICE_ID_AMD_VIPER_7410,	quirk_amd_ioapic },
 	{ PCI_FIXUP_FINAL,	PCI_VENDOR_ID_AMD,	PCI_DEVICE_ID_AMD_FE_GATE_700C,	quirk_amd_ordering },

+	/*
+	 * i82380FB mobile docking controller: its PCI-to-PCI bridge
+	 * is subtractive decoding (transparent), and does indicate this
+	 * in the ProgIf. Unfortunately, the ProgIf value is wrong - 0x80
+	 * instead of 0x01.
+	 */
+	{ PCI_FIXUP_HEADER,	PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82380FB,	quirk_transparent_bridge },
+
 	{ 0 }
 };
drivers/pci/setup-res.c

@@ -73,7 +73,7 @@ static int pci_assign_bus_resource(const struct pci_bus *bus,
 	int i;

 	type_mask |= IORESOURCE_IO | IORESOURCE_MEM;
-	for (i = 0; i < 4; i++) {
+	for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
 		struct resource *r = bus->resource[i];
 		if (!r)
 			continue;
drivers/scsi/qlogicfc.c

@@ -1342,18 +1342,11 @@ int isp2x00_queuecommand(Scsi_Cmnd * Cmnd, void (*done) (Scsi_Cmnd *))
 	num_free = QLOGICFC_REQ_QUEUE_LEN - REQ_QUEUE_DEPTH(in_ptr, out_ptr);
 	num_free = (num_free > 2) ? num_free - 2 : 0;
-	host->can_queue = hostdata->queued + num_free;
+	host->can_queue = host->host_busy + num_free;
 	if (host->can_queue > QLOGICFC_REQ_QUEUE_LEN)
 		host->can_queue = QLOGICFC_REQ_QUEUE_LEN;
 	host->sg_tablesize = QLOGICFC_MAX_SG(num_free);
-	/* this is really gross */
-	if (host->can_queue <= host->host_busy){
-		if (host->can_queue + 2 < host->host_busy)
-			DEBUG(printk("qlogicfc%d.c crosses its fingers.\n", hostdata->host_id));
-		host->can_queue = host->host_busy + 1;
-	}

 	LEAVE("isp2x00_queuecommand");

 	return 0;
@@ -1623,17 +1616,11 @@ void isp2x00_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
 	num_free = QLOGICFC_REQ_QUEUE_LEN - REQ_QUEUE_DEPTH(in_ptr, out_ptr);
 	num_free = (num_free > 2) ? num_free - 2 : 0;
-	host->can_queue = hostdata->queued + num_free;
+	host->can_queue = host->host_busy + num_free;
 	if (host->can_queue > QLOGICFC_REQ_QUEUE_LEN)
 		host->can_queue = QLOGICFC_REQ_QUEUE_LEN;
 	host->sg_tablesize = QLOGICFC_MAX_SG(num_free);
-	if (host->can_queue <= host->host_busy){
-		if (host->can_queue + 2 < host->host_busy)
-			DEBUG(printk("qlogicfc%d : crosses its fingers.\n", hostdata->host_id));
-		host->can_queue = host->host_busy + 1;
-	}

 	outw(HCCR_CLEAR_RISC_INTR, host->io_port + HOST_HCCR);
 	LEAVE_INTR("isp2x00_intr_handler");
 }
drivers/scsi/qlogicfc.h

@@ -65,7 +65,7 @@
 #define DATASEGS_PER_COMMAND 2
 #define DATASEGS_PER_CONT 5

-#define QLOGICFC_REQ_QUEUE_LEN	127	/* must be power of two - 1 */
+#define QLOGICFC_REQ_QUEUE_LEN	255	/* must be power of two - 1 */
 #define QLOGICFC_MAX_SG(ql)	(DATASEGS_PER_COMMAND + (((ql) > 0) ? DATASEGS_PER_CONT*((ql) - 1) : 0))
 #define QLOGICFC_CMD_PER_LUN	8
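Annotation: the "must be power of two - 1" constraint exists so ring arithmetic can use a mask instead of a modulo. A hypothetical illustration; the macro names here are invented for the sketch and are not the driver's own:

	#define RING_LEN		255	/* 2^8 - 1 */
	#define RING_DEPTH(in, out)	(((in) - (out)) & RING_LEN)

With 255 instead of 127, the driver can keep twice as many requests outstanding without changing the depth arithmetic.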
drivers/scsi/st.c

@@ -184,7 +184,7 @@ static struct Scsi_Device_Template st_template = {
 static int st_compression(Scsi_Tape *, int);

 static int find_partition(Scsi_Tape *);
-static int update_partition(Scsi_Tape *);
+static int switch_partition(Scsi_Tape *);

 static int st_int_ioctl(Scsi_Tape *, unsigned int, unsigned long);
@@ -1028,9 +1028,9 @@ static int st_flush(struct file *filp)
 	}

 	if (STp->can_partitions &&
-	    (result2 = update_partition(STp)) < 0) {
+	    (result2 = switch_partition(STp)) < 0) {
 		DEBC(printk(ST_DEB_MSG
-			    "st%d: update_partition at close failed.\n", dev));
+			    "st%d: switch_partition at close failed.\n", dev));
 		if (result == 0)
 			result = result2;
 		goto out;
@@ -1206,7 +1206,7 @@ static ssize_t rw_checks(Scsi_Tape *STp, struct file *filp, size_t count, loff_t
 	} ) /* end DEB */

 	if (STp->can_partitions &&
-	    (retval = update_partition(STp)) < 0)
+	    (retval = switch_partition(STp)) < 0)
 		goto out;

 	if (STp->block_size == 0 && STp->max_block > 0 &&
@@ -2904,7 +2904,7 @@ static int find_partition(Scsi_Tape *STp)

 /* Change the partition if necessary */
-static int update_partition(Scsi_Tape *STp)
+static int switch_partition(Scsi_Tape *STp)
 {
 	ST_partstat *STps;
@@ -3239,7 +3239,7 @@ static int st_ioctl(struct inode *inode, struct file *file,
 	}

 	if (STp->can_partitions && STp->ready == ST_READY &&
-	    (i = update_partition(STp)) < 0) {
+	    (i = switch_partition(STp)) < 0) {
 		retval = i;
 		goto out;
 	}
@@ -3260,7 +3260,7 @@ static int st_ioctl(struct inode *inode, struct file *file,
 		goto out;
 	}

 	if (STp->can_partitions &&
-	    (i = update_partition(STp)) < 0) {
+	    (i = switch_partition(STp)) < 0) {
 		retval = i;
 		goto out;
 	}
fs/affs/file.c

@@ -27,6 +27,7 @@
 #include <linux/fs.h>
 #include <linux/amigaffs.h>
 #include <linux/mm.h>
+#include <linux/highmem.h>
 #include <linux/pagemap.h>
 #include <linux/buffer_head.h>
@@ -518,6 +519,7 @@ affs_do_readpage_ofs(struct file *file, struct page *page, unsigned from, unsign
 	pr_debug("AFFS: read_page(%u, %ld, %d, %d)\n", (u32)inode->i_ino, page->index, from, to);
 	if (from > to || to > PAGE_CACHE_SIZE)
 		BUG();
+	kmap(page);
 	data = page_address(page);
 	bsize = AFFS_SB(sb)->s_data_blksize;
 	tmp = (page->index << PAGE_CACHE_SHIFT) + from;
@@ -537,6 +539,8 @@ affs_do_readpage_ofs(struct file *file, struct page *page, unsigned from, unsign
 		from += tmp;
 		boff = 0;
 	}
+	flush_dcache_page(page);
+	kunmap(page);
 	return 0;
 }
@@ -656,7 +660,11 @@ static int affs_prepare_write_ofs(struct file *file, struct page *page, unsigned
 		return err;
 	}
 	if (to < PAGE_CACHE_SIZE) {
-		memset(page_address(page) + to, 0, PAGE_CACHE_SIZE - to);
+		char *kaddr = kmap_atomic(page, KM_USER0);
+
+		memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
+		flush_dcache_page(page);
+		kunmap_atomic(kaddr, KM_USER0);
 		if (size > offset + to) {
 			if (size < offset + PAGE_CACHE_SIZE)
 				tmp = size & ~PAGE_CACHE_MASK;
fs/buffer.c

@@ -1784,6 +1784,7 @@ static int __block_write_full_page(struct inode *inode,
 	if (err == 0)
 		return ret;
 	return err;
+
 recover:
 	/*
 	 * ENOSPC, or some other error. We may already have added some
@@ -1795,7 +1796,8 @@ static int __block_write_full_page(struct inode *inode,
 	bh = head;
 	/* Recovery: lock and submit the mapped buffers */
 	do {
-		if (buffer_mapped(bh)) {
+		get_bh(bh);
+		if (buffer_mapped(bh) && buffer_dirty(bh)) {
 			lock_buffer(bh);
 			mark_buffer_async_write(bh);
 		} else {
@@ -1805,21 +1807,21 @@ static int __block_write_full_page(struct inode *inode,
 			 */
 			clear_buffer_dirty(bh);
 		}
-		bh = bh->b_this_page;
-	} while (bh != head);
+	} while ((bh = bh->b_this_page) != head);
 	SetPageError(page);
+	BUG_ON(PageWriteback(page));
+	SetPageWriteback(page);
+	unlock_page(page);
 	do {
 		struct buffer_head *next = bh->b_this_page;
 		if (buffer_async_write(bh)) {
 			set_buffer_uptodate(bh);
 			clear_buffer_dirty(bh);
 			submit_bh(WRITE, bh);
 			nr_underway++;
 		}
+		put_bh(bh);
 		bh = next;
 	} while (bh != head);
-	BUG_ON(PageWriteback(page));
-	SetPageWriteback(page);
-	unlock_page(page);
 	goto done;
 }
@@ -1831,7 +1833,6 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
 	int err = 0;
 	unsigned blocksize, bbits;
 	struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
-	char *kaddr = kmap(page);

 	BUG_ON(!PageLocked(page));
 	BUG_ON(from > PAGE_CACHE_SIZE);
@@ -1872,13 +1873,19 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
 				set_buffer_uptodate(bh);
 				continue;
 			}
-			if (block_end > to)
-				memset(kaddr+to, 0, block_end-to);
-			if (block_start < from)
-				memset(kaddr+block_start, 0, from-block_start);
-			if (block_end > to || block_start < from)
-				flush_dcache_page(page);
+			if (block_end > to || block_start < from) {
+				void *kaddr;
+
+				kaddr = kmap_atomic(page, KM_USER0);
+				if (block_end > to)
+					memset(kaddr + to, 0, block_end - to);
+				if (block_start < from)
+					memset(kaddr + block_start, 0, from - block_start);
+				flush_dcache_page(page);
+				kunmap_atomic(kaddr, KM_USER0);
+			}
 			continue;
 		}
 	}
@@ -1917,10 +1924,14 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
 		if (block_start >= to)
 			break;
 		if (buffer_new(bh)) {
+			void *kaddr;
+
 			clear_buffer_new(bh);
 			if (buffer_uptodate(bh))
 				buffer_error();
+			kaddr = kmap_atomic(page, KM_USER0);
 			memset(kaddr+block_start, 0, bh->b_size);
+			kunmap_atomic(kaddr, KM_USER0);
 			set_buffer_uptodate(bh);
 			mark_buffer_dirty(bh);
 		}
@@ -2006,9 +2017,10 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
 			SetPageError(page);
 		}
 		if (!buffer_mapped(bh)) {
-			memset(kmap(page) + i*blocksize, 0, blocksize);
+			void *kaddr = kmap_atomic(page, KM_USER0);
+			memset(kaddr + i*blocksize, 0, blocksize);
 			flush_dcache_page(page);
-			kunmap(page);
+			kunmap_atomic(kaddr, KM_USER0);
 			set_buffer_uptodate(bh);
 			continue;
 		}
@@ -2116,7 +2128,7 @@ int cont_prepare_write(struct page *page, unsigned offset,
 	long status;
 	unsigned zerofrom;
 	unsigned blocksize = 1 << inode->i_blkbits;
-	char *kaddr;
+	void *kaddr;

 	while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
 		status = -ENOMEM;
@@ -2138,12 +2150,12 @@ int cont_prepare_write(struct page *page, unsigned offset,
 						PAGE_CACHE_SIZE, get_block);
 		if (status)
 			goto out_unmap;
-		kaddr = page_address(new_page);
+		kaddr = kmap_atomic(new_page, KM_USER0);
 		memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
 		flush_dcache_page(new_page);
+		kunmap_atomic(kaddr, KM_USER0);
 		__block_commit_write(inode, new_page, zerofrom, PAGE_CACHE_SIZE);
-		kunmap(new_page);
 		unlock_page(new_page);
 		page_cache_release(new_page);
 	}
@@ -2168,21 +2180,20 @@ int cont_prepare_write(struct page *page, unsigned offset,
 	status = __block_prepare_write(inode, page, zerofrom, to, get_block);
 	if (status)
 		goto out1;
-	kaddr = page_address(page);
 	if (zerofrom < offset) {
+		kaddr = kmap_atomic(page, KM_USER0);
 		memset(kaddr+zerofrom, 0, offset-zerofrom);
 		flush_dcache_page(page);
+		kunmap_atomic(kaddr, KM_USER0);
 		__block_commit_write(inode, page, zerofrom, offset);
 	}
 	return 0;
out1:
 	ClearPageUptodate(page);
-	kunmap(page);
 	return status;

out_unmap:
 	ClearPageUptodate(new_page);
-	kunmap(new_page);
 	unlock_page(new_page);
 	page_cache_release(new_page);
out:
@@ -2194,10 +2205,8 @@ int block_prepare_write(struct page *page, unsigned from, unsigned to,
 {
 	struct inode *inode = page->mapping->host;
 	int err = __block_prepare_write(inode, page, from, to, get_block);
-	if (err) {
+	if (err)
 		ClearPageUptodate(page);
-		kunmap(page);
-	}
 	return err;
 }
@@ -2205,7 +2214,6 @@ int block_commit_write(struct page *page, unsigned from, unsigned to)
 {
 	struct inode *inode = page->mapping->host;
 	__block_commit_write(inode,page,from,to);
-	kunmap(page);
 	return 0;
 }
@@ -2215,7 +2223,6 @@ int generic_commit_write(struct file *file, struct page *page,
 	struct inode *inode = page->mapping->host;
 	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
 	__block_commit_write(inode,page,from,to);
-	kunmap(page);
 	if (pos > inode->i_size) {
 		inode->i_size = pos;
 		mark_inode_dirty(inode);
@@ -2232,6 +2239,7 @@ int block_truncate_page(struct address_space *mapping,
 	struct inode *inode = mapping->host;
 	struct page *page;
 	struct buffer_head *bh;
+	void *kaddr;
 	int err;

 	blocksize = 1 << inode->i_blkbits;
@@ -2284,9 +2292,10 @@ int block_truncate_page(struct address_space *mapping,
 			goto unlock;
 	}

-	memset(kmap(page) + offset, 0, length);
+	kaddr = kmap_atomic(page, KM_USER0);
+	memset(kaddr + offset, 0, length);
 	flush_dcache_page(page);
-	kunmap(page);
+	kunmap_atomic(kaddr, KM_USER0);

 	mark_buffer_dirty(bh);
 	err = 0;
@@ -2306,7 +2315,7 @@ int block_write_full_page(struct page *page, get_block_t *get_block)
 	struct inode * const inode = page->mapping->host;
 	const unsigned long end_index = inode->i_size >> PAGE_CACHE_SHIFT;
 	unsigned offset;
-	char *kaddr;
+	void *kaddr;

 	/* Is the page fully inside i_size? */
 	if (page->index < end_index)
@@ -2326,10 +2335,10 @@ int block_write_full_page(struct page *page, get_block_t *get_block)
 	 * the page size, the remaining memory is zeroed when mapped, and
 	 * writes to that region are not written out to the file."
 	 */
-	kaddr = kmap(page);
+	kaddr = kmap_atomic(page, KM_USER0);
 	memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
 	flush_dcache_page(page);
-	kunmap(page);
+	kunmap_atomic(kaddr, KM_USER0);
 	return __block_write_full_page(inode, page, get_block);
 }
fs/driverfs/inode.c

@@ -59,9 +59,11 @@ static int mount_count = 0;
 static int driverfs_readpage(struct file *file, struct page * page)
 {
 	if (!PageUptodate(page)) {
-		memset(kmap(page), 0, PAGE_CACHE_SIZE);
-		kunmap(page);
+		void *kaddr = kmap_atomic(page, KM_USER0);
+
+		memset(kaddr, 0, PAGE_CACHE_SIZE);
+		flush_dcache_page(page);
+		kunmap_atomic(kaddr, KM_USER0);
 		SetPageUptodate(page);
 	}
 	unlock_page(page);
@@ -70,10 +72,12 @@ static int driverfs_readpage(struct file *file, struct page * page)
 static int driverfs_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
 {
-	void *addr = kmap(page);
 	if (!PageUptodate(page)) {
-		memset(addr, 0, PAGE_CACHE_SIZE);
+		void *kaddr = kmap_atomic(page, KM_USER0);
+
+		memset(kaddr, 0, PAGE_CACHE_SIZE);
+		flush_dcache_page(page);
+		kunmap_atomic(kaddr, KM_USER0);
 		SetPageUptodate(page);
 	}
 	return 0;
@@ -85,7 +89,6 @@ static int driverfs_commit_write(struct file *file, struct page *page, unsigned
 	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;

 	set_page_dirty(page);
-	kunmap(page);
 	if (pos > inode->i_size)
 		inode->i_size = pos;
 	return 0;
fs/exec.c

@@ -504,6 +504,8 @@ static inline int make_private_signals(void)
 {
 	struct signal_struct * newsig;

+	remove_thread_group(current, current->sig);
+
 	if (atomic_read(&current->sig->count) <= 1)
 		return 0;
 	newsig = kmem_cache_alloc(sigact_cachep, GFP_KERNEL);
@@ -512,6 +514,8 @@ static inline int make_private_signals(void)
 	spin_lock_init(&newsig->siglock);
 	atomic_set(&newsig->count, 1);
 	memcpy(newsig->action, current->sig->action, sizeof(newsig->action));
+	init_sigpending(&newsig->shared_pending);
+
 	spin_lock_irq(&current->sigmask_lock);
 	current->sig = newsig;
 	spin_unlock_irq(&current->sigmask_lock);
@@ -575,42 +579,10 @@ static inline void flush_old_files(struct files_struct * files)
  */
 static void de_thread(struct task_struct *tsk)
 {
-	struct task_struct *sub;
-	struct list_head *head, *ptr;
-	struct siginfo info;
-	int pause;
-
-	write_lock_irq(&tasklist_lock);
-	if (tsk->tgid != tsk->pid) {
-		/* subsidiary thread - just escapes the group */
-		list_del_init(&tsk->thread_group);
-		tsk->tgid = tsk->pid;
-		pause = 0;
-	} else {
-		/* master thread - kill all subsidiary threads */
-		info.si_signo = SIGKILL;
-		info.si_errno = 0;
-		info.si_code = SI_DETHREAD;
-		info.si_pid = current->pid;
-		info.si_uid = current->uid;
-
-		head = tsk->thread_group.next;
-		list_del_init(&tsk->thread_group);
-
-		list_for_each(ptr, head) {
-			sub = list_entry(ptr, struct task_struct, thread_group);
-			send_sig_info(SIGKILL, &info, sub);
-		}
-		pause = 1;
-	}
-	write_unlock_irq(&tasklist_lock);
-
-	/* give the subsidiary threads a chance to clean themselves up */
-	if (pause)
-		yield();
+	if (!list_empty(&tsk->thread_group))
+		BUG();
+	/* An exec() starts a new thread group: */
+	tsk->tgid = tsk->pid;
 }

 int flush_old_exec(struct linux_binprm * bprm)
@@ -633,6 +605,8 @@ int flush_old_exec(struct linux_binprm * bprm)
 	if (retval)
 		goto mmap_failed;

 	/* This is the point of no return */
+	de_thread(current);
+
 	release_old_signals(oldsig);

 	current->sas_ss_sp = current->sas_ss_size = 0;
@@ -651,9 +625,6 @@ int flush_old_exec(struct linux_binprm * bprm)
 	flush_thread();

-	if (!list_empty(&current->thread_group))
-		de_thread(current);
-
 	if (bprm->e_uid != current->euid || bprm->e_gid != current->egid ||
 	    permission(bprm->file->f_dentry->d_inode, MAY_READ))
 		current->mm->dumpable = 0;
fs/ext2/dir.c

@@ -571,8 +571,8 @@ int ext2_make_empty(struct inode *inode, struct inode *parent)
 	struct page *page = grab_cache_page(mapping, 0);
 	unsigned chunk_size = ext2_chunk_size(inode);
 	struct ext2_dir_entry_2 * de;
-	char * base;
 	int err;
+	void *kaddr;

 	if (!page)
 		return -ENOMEM;
@@ -581,22 +581,21 @@ int ext2_make_empty(struct inode *inode, struct inode *parent)
 		unlock_page(page);
 		goto fail;
 	}
-	base = page_address(page);
-	de = (struct ext2_dir_entry_2 *) base;
+	kaddr = kmap_atomic(page, KM_USER0);
+	de = (struct ext2_dir_entry_2 *)kaddr;
 	de->name_len = 1;
 	de->rec_len = cpu_to_le16(EXT2_DIR_REC_LEN(1));
 	memcpy (de->name, ".\0\0", 4);
 	de->inode = cpu_to_le32(inode->i_ino);
 	ext2_set_de_type (de, inode);

-	de = (struct ext2_dir_entry_2 *) (base + EXT2_DIR_REC_LEN(1));
+	de = (struct ext2_dir_entry_2 *)(kaddr + EXT2_DIR_REC_LEN(1));
 	de->name_len = 2;
 	de->rec_len = cpu_to_le16(chunk_size - EXT2_DIR_REC_LEN(1));
 	de->inode = cpu_to_le32(parent->i_ino);
 	memcpy (de->name, "..\0", 4);
 	ext2_set_de_type (de, inode);
+	kunmap_atomic(kaddr, KM_USER0);

 	err = ext2_commit_chunk(page, 0, chunk_size);
fail:
 	page_cache_release(page);
fs/ext3/inode.c

@@ -1082,16 +1082,6 @@ static int ext3_prepare_write(struct file *file, struct page *page,
 	if (ext3_should_journal_data(inode)) {
 		ret = walk_page_buffers(handle, page_buffers(page),
 				from, to, NULL, do_journal_get_write_access);
-		if (ret) {
-			/*
-			 * We're going to fail this prepare_write(),
-			 * so commit_write() will not be called.
-			 * We need to undo block_prepare_write()'s kmap().
-			 * AKPM: Do we need to clear PageUptodate? I don't
-			 * think so.
-			 */
-			kunmap(page);
-		}
 	}
prepare_write_failed:
 	if (ret)
@@ -1151,7 +1141,6 @@ static int ext3_commit_write(struct file *file, struct page *page,
 			from, to, &partial, commit_write_fn);
 		if (!partial)
 			SetPageUptodate(page);
-		kunmap(page);
 		if (pos > inode->i_size)
 			inode->i_size = pos;
 		EXT3_I(inode)->i_state |= EXT3_STATE_JDATA;
@@ -1162,17 +1151,8 @@ static int ext3_commit_write(struct file *file, struct page *page,
 		}
 		/* Be careful here if generic_commit_write becomes a
 		 * required invocation after block_prepare_write. */
-		if (ret == 0) {
+		if (ret == 0)
 			ret = generic_commit_write(file, page, from, to);
-		} else {
-			/*
-			 * block_prepare_write() was called, but we're not
-			 * going to call generic_commit_write().  So we
-			 * need to perform generic_commit_write()'s kunmap
-			 * by hand.
-			 */
-			kunmap(page);
-		}
 	}
 	if (inode->i_size > EXT3_I(inode)->i_disksize) {
 		EXT3_I(inode)->i_disksize = inode->i_size;
@@ -1535,6 +1515,7 @@ static int ext3_block_truncate_page(handle_t *handle,
 	struct page *page;
 	struct buffer_head *bh;
 	int err;
+	void *kaddr;

 	blocksize = inode->i_sb->s_blocksize;
 	length = offset & (blocksize - 1);
@@ -1590,10 +1571,11 @@ static int ext3_block_truncate_page(handle_t *handle,
 		if (err)
 			goto unlock;
 	}

-	memset(kmap(page) + offset, 0, length);
+	kaddr = kmap_atomic(page, KM_USER0);
+	memset(kaddr + offset, 0, length);
 	flush_dcache_page(page);
-	kunmap(page);
+	kunmap_atomic(kaddr, KM_USER0);

 	BUFFER_TRACE(bh, "zeroed end of block");
fs/fat/inode.c

@@ -982,11 +982,24 @@ static int fat_readpage(struct file *file, struct page *page)
 {
 	return block_read_full_page(page, fat_get_block);
 }
-static int fat_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
+
+static int fat_prepare_write(struct file *file, struct page *page,
+			     unsigned from, unsigned to)
 {
+	kmap(page);
 	return cont_prepare_write(page, from, to, fat_get_block,
 				  &MSDOS_I(page->mapping->host)->mmu_private);
 }
+
+static int fat_commit_write(struct file *file, struct page *page,
+			    unsigned from, unsigned to)
+{
+	kunmap(page);
+	return generic_commit_write(file, page, from, to);
+}
+
 static int _fat_bmap(struct address_space *mapping, long block)
 {
 	return generic_block_bmap(mapping, block, fat_get_block);
@@ -996,7 +1009,7 @@ static struct address_space_operations fat_aops = {
 	writepage: fat_writepage,
 	sync_page: block_sync_page,
 	prepare_write: fat_prepare_write,
-	commit_write: generic_commit_write,
+	commit_write: fat_commit_write,
 	bmap: _fat_bmap
 };
fs/jffs/inode-v23.c

@@ -47,6 +47,7 @@
 #include <linux/stat.h>
 #include <linux/blkdev.h>
 #include <linux/quotaops.h>
+#include <linux/highmem.h>
 #include <linux/smp_lock.h>
 #include <asm/semaphore.h>
 #include <asm/byteorder.h>
@@ -751,7 +752,6 @@ jffs_do_readpage_nolock(struct file *file, struct page *page)
 	get_page(page);
 	/* Don't SetPageLocked(page), should be locked already */
-	buf = page_address(page);
 	ClearPageUptodate(page);
 	ClearPageError(page);
@@ -760,8 +760,10 @@ jffs_do_readpage_nolock(struct file *file, struct page *page)
 	read_len = 0;
 	result = 0;
 	offset = page->index << PAGE_CACHE_SHIFT;
+	kmap(page);
+	buf = page_address(page);
 	if (offset < inode->i_size) {
 		read_len = min_t(long, inode->i_size - offset, PAGE_SIZE);
 		r = jffs_read_data(f, buf, offset, read_len);
@@ -779,6 +781,8 @@ jffs_do_readpage_nolock(struct file *file, struct page *page)
 	/* This handles the case of partial or no read in above */
 	if (read_len < PAGE_SIZE)
 		memset(buf + read_len, 0, PAGE_SIZE - read_len);
+	flush_dcache_page(page);
+	kunmap(page);
 	D3(printk(KERN_NOTICE "readpage(): up biglock\n"));
 	up(&c->fmc->biglock);
@@ -788,9 +792,8 @@ jffs_do_readpage_nolock(struct file *file, struct page *page)
 	} else {
 		SetPageUptodate(page);
 	}
-	flush_dcache_page(page);

-	put_page(page);
+	page_cache_release(page);
 	D3(printk("jffs_readpage(): Leaving...\n"));
fs/jffs2/file.c

@@ -17,6 +17,7 @@
 #include <linux/fs.h>
 #include <linux/time.h>
 #include <linux/pagemap.h>
+#include <linux/highmem.h>
 #include <linux/crc32.h>
 #include <linux/jffs2.h>
 #include "nodelist.h"
@@ -381,9 +382,10 @@ int jffs2_commit_write (struct file *filp, struct page *pg, unsigned start, unsi
 	ri->isize = (uint32_t)inode->i_size;
 	ri->atime = ri->ctime = ri->mtime = CURRENT_TIME;

-	/* We rely on the fact that generic_file_write() currently kmaps the page for us. */
+	kmap(pg);
 	ret = jffs2_write_inode_range(c, f, ri, page_address(pg) + start,
 				      (pg->index << PAGE_CACHE_SHIFT) + start,
 				      end - start, &writtenlen);
+	kunmap(pg);

 	if (ret) {
 		/* There was an error writing. */
fs/jfs/jfs_metapage.c

@@ -403,7 +403,6 @@ static void __write_metapage(metapage_t * mp)
 	if (rc) {
 		jERROR(1, ("prepare_write return %d!\n", rc));
 		ClearPageUptodate(mp->page);
-		kunmap(mp->page);
 		unlock_page(mp->page);
 		clear_bit(META_dirty, &mp->flag);
 		return;
fs/minix/dir.c

@@ -7,6 +7,7 @@
  */

 #include "minix.h"
+#include <linux/highmem.h>
 #include <linux/smp_lock.h>

 typedef struct minix_dir_entry minix_dirent;
@@ -261,7 +262,7 @@ int minix_delete_entry(struct minix_dir_entry *de, struct page *page)
 {
 	struct address_space *mapping = page->mapping;
 	struct inode *inode = (struct inode*)mapping->host;
-	char *kaddr = (char*)page_address(page);
+	char *kaddr = page_address(page);
 	unsigned from = (char*)de - kaddr;
 	unsigned to = from + minix_sb(inode->i_sb)->s_dirsize;
 	int err;
@@ -286,7 +287,7 @@ int minix_make_empty(struct inode *inode, struct inode *dir)
 	struct page *page = grab_cache_page(mapping, 0);
 	struct minix_sb_info *sbi = minix_sb(inode->i_sb);
 	struct minix_dir_entry *de;
-	char *base;
+	char *kaddr;
 	int err;

 	if (!page)
@@ -297,15 +298,16 @@ int minix_make_empty(struct inode *inode, struct inode *dir)
 		goto fail;
 	}

-	base = (char*)page_address(page);
-	memset(base, 0, PAGE_CACHE_SIZE);
+	kaddr = kmap_atomic(page, KM_USER0);
+	memset(kaddr, 0, PAGE_CACHE_SIZE);

-	de = (struct minix_dir_entry *)base;
+	de = (struct minix_dir_entry *)kaddr;
 	de->inode = inode->i_ino;
 	strcpy(de->name, ".");
 	de = minix_next_entry(de, sbi);
 	de->inode = dir->i_ino;
 	strcpy(de->name, "..");
+	kunmap_atomic(kaddr, KM_USER0);

 	err = dir_commit_chunk(page, 0, 2 * sbi->s_dirsize);
fail:
fs/namei.c

@@ -2200,8 +2200,9 @@ int page_symlink(struct inode *inode, const char *symname, int len)
 	err = mapping->a_ops->prepare_write(NULL, page, 0, len-1);
 	if (err)
 		goto fail_map;
-	kaddr = page_address(page);
+	kaddr = kmap_atomic(page, KM_USER0);
 	memcpy(kaddr, symname, len-1);
+	kunmap_atomic(kaddr, KM_USER0);
 	mapping->a_ops->commit_write(NULL, page, 0, len-1);
 	/*
 	 * Notice that we are _not_ going to block here - end of page is
fs/partitions/check.c

@@ -213,7 +213,6 @@ static void driverfs_remove_partitions(struct gendisk *hd)
 static void check_partition(struct gendisk *hd, struct block_device *bdev)
 {
 	devfs_handle_t de = NULL;
-	dev_t dev = bdev->bd_dev;
 	char buf[64];
 	struct parsed_partitions *state;
 	int i;
@@ -254,7 +253,7 @@ static void check_partition(struct gendisk *hd, struct block_device *bdev)
 #if CONFIG_BLK_DEV_MD
 		if (!state->parts[j-1].flags)
 			continue;
-		md_autodetect_dev(dev+j);
+		md_autodetect_dev(bdev->bd_dev+j);
 #endif
 	}
 	return;
fs/ramfs/inode.c

@@ -26,6 +26,7 @@
 #include <linux/module.h>
 #include <linux/fs.h>
 #include <linux/pagemap.h>
+#include <linux/highmem.h>
 #include <linux/init.h>
 #include <linux/string.h>
 #include <linux/smp_lock.h>
@@ -47,8 +48,10 @@ static struct inode_operations ramfs_dir_inode_operations;
 static int ramfs_readpage(struct file *file, struct page * page)
 {
 	if (!PageUptodate(page)) {
-		memset(kmap(page), 0, PAGE_CACHE_SIZE);
-		kunmap(page);
+		char *kaddr = kmap_atomic(page, KM_USER0);
+
+		memset(kaddr, 0, PAGE_CACHE_SIZE);
+		kunmap_atomic(kaddr, KM_USER0);
+		flush_dcache_page(page);
 		SetPageUptodate(page);
 	}
@@ -58,10 +61,12 @@ static int ramfs_readpage(struct file *file, struct page * page)
 static int ramfs_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
 {
-	void *addr = kmap(page);
 	if (!PageUptodate(page)) {
-		memset(addr, 0, PAGE_CACHE_SIZE);
+		char *kaddr = kmap_atomic(page, KM_USER0);
+
+		memset(kaddr, 0, PAGE_CACHE_SIZE);
+		flush_dcache_page(page);
+		kunmap_atomic(kaddr, KM_USER0);
 		SetPageUptodate(page);
 	}
 	SetPageDirty(page);
@@ -73,7 +78,6 @@ static int ramfs_commit_write(struct file *file, struct page *page, unsigned off
 	struct inode *inode = page->mapping->host;
 	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;

-	kunmap(page);
 	if (pos > inode->i_size)
 		inode->i_size = pos;
 	return 0;
fs/reiserfs/inode.c

@@ -7,6 +7,7 @@
 #include <linux/reiserfs_fs.h>
 #include <linux/smp_lock.h>
 #include <linux/pagemap.h>
+#include <linux/highmem.h>
 #include <asm/uaccess.h>
 #include <asm/unaligned.h>
 #include <linux/buffer_head.h>
@@ -1692,8 +1693,6 @@ static int grab_tail_page(struct inode *p_s_inode,
 	if (error)
 		goto unlock;

-    kunmap(page) ; /* mapped by block_prepare_write */
-
 	head = page_buffers(page);
 	bh = head;
 	do {
@@ -1788,10 +1787,13 @@ void reiserfs_truncate_file(struct inode *p_s_inode, int update_timestamps) {
     length = offset & (blocksize - 1);
     /* if we are not on a block boundary */
     if (length) {
+	char *kaddr;
+
 	length = blocksize - length;
-	memset((char *)kmap(page) + offset, 0, length);
+	kaddr = kmap_atomic(page, KM_USER0);
+	memset(kaddr + offset, 0, length);
 	flush_dcache_page(page);
-	kunmap(page);
+	kunmap_atomic(kaddr, KM_USER0);
 	if (buffer_mapped(bh) && bh->b_blocknr != 0) {
 	    mark_buffer_dirty(bh);
 	}
@@ -1941,23 +1943,25 @@ static int reiserfs_write_full_page(struct page *page) {
     struct buffer_head *arr[PAGE_CACHE_SIZE/512];
     int nr = 0;

-    if (!page_has_buffers(page)) {
+    if (!page_has_buffers(page))
 	block_prepare_write(page, 0, 0, NULL);
-	kunmap(page);
-    }

    /* last page in the file, zero out any contents past the
    ** last byte in the file
    */
    if (page->index >= end_index) {
+	char *kaddr;
+
 	last_offset = inode->i_size & (PAGE_CACHE_SIZE - 1);
 	/* no file contents in this page */
 	if (page->index >= end_index + 1 || !last_offset) {
 	    error = -EIO;
 	    goto fail;
 	}
-	memset((char *)kmap(page)+last_offset, 0, PAGE_CACHE_SIZE-last_offset);
+	kaddr = kmap_atomic(page, KM_USER0);
+	memset(kaddr + last_offset, 0, PAGE_CACHE_SIZE - last_offset);
 	flush_dcache_page(page);
-	kunmap(page);
+	kunmap_atomic(kaddr, KM_USER0);
    }
    head = page_buffers(page);
    bh = head;
fs/reiserfs/stree.c

@@ -1284,15 +1284,15 @@ int reiserfs_delete_item (struct reiserfs_transaction_handle *th,
	**
	** p_s_un_bh is from the page cache (all unformatted nodes are
	** from the page cache) and might be a highmem page. So, we
-	** can't use p_s_un_bh->b_data. But, the page has already been
-	** kmapped, so we can use page_address()
+	** can't use p_s_un_bh->b_data.
	** -clm
	*/

-	data = page_address(p_s_un_bh->b_page) ;
+	data = kmap_atomic(p_s_un_bh->b_page, KM_USER0);
	off = ((le_ih_k_offset(&s_ih) - 1) & (PAGE_CACHE_SIZE - 1));
	memcpy(data + off, B_I_PITEM(PATH_PLAST_BUFFER(p_s_path), &s_ih), n_ret_value);
+	kunmap_atomic(data, KM_USER0);
    }

    /* Perform balancing after all resources have been collected at once. */
fs/reiserfs/tail_conversion.c

@@ -122,11 +122,12 @@ int direct2indirect (struct reiserfs_transaction_handle *th, struct inode * inod
    }

    /* if we've copied bytes from disk into the page, we need to zero
    ** out the unused part of the block (it was not up to date before)
-   ** the page is still kmapped (by whoever called reiserfs_get_block)
    */
    if (up_to_date_bh) {
	unsigned pgoff = (tail_offset + total_tail - 1) & (PAGE_CACHE_SIZE - 1);
-	memset(page_address(unbh->b_page) + pgoff, 0, n_blk_size - total_tail) ;
+	char *kaddr = kmap_atomic(up_to_date_bh->b_page, KM_USER0);
+	memset(kaddr + pgoff, 0, n_blk_size - total_tail);
+	kunmap_atomic(kaddr, KM_USER0);
    }

    REISERFS_I(inode)->i_first_direct_byte = U32_MAX;
fs/sysv/dir.c

@@ -14,6 +14,7 @@
  */

 #include <linux/pagemap.h>
+#include <linux/highmem.h>
 #include <linux/smp_lock.h>
 #include "sysv.h"
@@ -273,6 +274,7 @@ int sysv_make_empty(struct inode *inode, struct inode *dir)
 	if (!page)
 		return -ENOMEM;
+	kmap(page);
 	err = mapping->a_ops->prepare_write(NULL, page, 0, 2 * SYSV_DIRSIZE);
 	if (err) {
 		unlock_page(page);
@@ -291,6 +293,7 @@ int sysv_make_empty(struct inode *inode, struct inode *dir)
 	err = dir_commit_chunk(page, 0, 2 * SYSV_DIRSIZE);
fail:
+	kunmap(page);
 	page_cache_release(page);
 	return err;
 }
include/asm-i386/highmem.h

@@ -81,7 +81,7 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
 	enum fixed_addresses idx;
 	unsigned long vaddr;

-	preempt_disable();
+	inc_preempt_count();
 	if (page < highmem_start_page)
 		return page_address(page);
@@ -104,7 +104,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
 	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

 	if (vaddr < FIXADDR_START) { // FIXME
-		preempt_enable();
+		dec_preempt_count();
 		return;
 	}
@@ -119,7 +119,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
 	__flush_tlb_one(vaddr);
 #endif

-	preempt_enable();
+	dec_preempt_count();
 }

 #endif /* __KERNEL__ */
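Annotation: switching from preempt_disable()/preempt_enable() to raw preempt-count operations matters on kernels built without CONFIG_PREEMPT, where preempt_disable() compiles away to nothing: the count would never record the atomic section, and the in_atomic() checks added to the fault handlers earlier in this diff could not see it. A simplified sketch of the invariant this buys (illustrative pseudocode, not kernel source):

	void *kaddr = kmap_atomic(page, KM_USER0);	/* preempt_count()++ on every config */
	/* here in_atomic() is true, even without CONFIG_PREEMPT */
	kunmap_atomic(kaddr, KM_USER0);			/* preempt_count()-- */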
include/asm-i386/spinlock.h

@@ -158,6 +158,8 @@ typedef struct {
 #define rwlock_init(x)	do { *(x) = RW_LOCK_UNLOCKED; } while(0)

+#define rwlock_is_locked(x)	((x)->lock != RW_LOCK_BIAS)
+
 /*
  * On x86, we implement read-write locks as a 32-bit counter
  * with the high bit (sign) being the "contended" bit.
include/asm-i386/tlbflush.h

@@ -45,11 +45,21 @@ extern unsigned long pgkern_mask;
 		__flush_tlb(); \
 	} while (0)

-#ifndef CONFIG_X86_INVLPG
-#define __flush_tlb_one(addr) __flush_tlb()
+#define cpu_has_invlpg	(boot_cpu_data.x86 > 3)
+
+#define __flush_tlb_single(addr) \
+	__asm__ __volatile__("invlpg %0": :"m" (*(char *) addr))
+
+#ifdef CONFIG_X86_INVLPG
+# define __flush_tlb_one(addr) __flush_tlb_single(addr)
 #else
-#define __flush_tlb_one(addr) \
-__asm__ __volatile__("invlpg %0": :"m" (*(char *) addr))
+# define __flush_tlb_one(addr)					\
+	do {							\
+		if (cpu_has_invlpg)				\
+			__flush_tlb_single(addr);		\
+		else						\
+			__flush_tlb();				\
+	} while (0)
 #endif

 /*
include/asm-ppc/hardirq.h

@@ -85,8 +85,10 @@ typedef struct {
 #define irq_enter()	(preempt_count() += HARDIRQ_OFFSET)

 #if CONFIG_PREEMPT
+# define in_atomic()	(preempt_count() != kernel_locked())
 # define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
 #else
+# define in_atomic()	(preempt_count() != 0)
 # define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
 #endif
 #define irq_exit() \
include/asm-ppc/highmem.h

@@ -88,6 +88,7 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
 	unsigned int idx;
 	unsigned long vaddr;

+	inc_preempt_count();
 	if (page < highmem_start_page)
 		return page_address(page);
@@ -109,8 +110,10 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
 	unsigned int idx = type + KM_TYPE_NR*smp_processor_id();

-	if (vaddr < KMAP_FIX_BEGIN) // FIXME
+	if (vaddr < KMAP_FIX_BEGIN) { // FIXME
+		dec_preempt_count();
 		return;
+	}

 	if (vaddr != KMAP_FIX_BEGIN + idx * PAGE_SIZE)
 		BUG();
@@ -122,6 +125,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
 	pte_clear(kmap_pte+idx);
 	flush_tlb_page(0, vaddr);
 #endif
+	dec_preempt_count();
 }

 #endif /* __KERNEL__ */
include/asm-sparc/hardirq.h

@@ -113,6 +113,12 @@ do { \
 #define irq_exit()	br_read_unlock(BR_GLOBALIRQ_LOCK)
 #endif

+#if CONFIG_PREEMPT
+# define in_atomic()	(preempt_count() != kernel_locked())
+#else
+# define in_atomic()	(preempt_count() != 0)
+#endif
+
 #ifndef CONFIG_SMP
 #define synchronize_irq()	barrier()
include/asm-sparc/highmem.h

@@ -83,6 +83,7 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
 	unsigned long idx;
 	unsigned long vaddr;

+	inc_preempt_count();
 	if (page < highmem_start_page)
 		return page_address(page);
@@ -116,8 +117,10 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
 	unsigned long vaddr = (unsigned long) kvaddr;
 	unsigned long idx = type + KM_TYPE_NR*smp_processor_id();

-	if (vaddr < FIX_KMAP_BEGIN) // FIXME
+	if (vaddr < FIX_KMAP_BEGIN) { // FIXME
+		dec_preempt_count();
 		return;
+	}

 	if (vaddr != FIX_KMAP_BEGIN + idx * PAGE_SIZE)
 		BUG();
@@ -142,6 +145,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
 	flush_tlb_all();
 #endif
 #endif
+	dec_preempt_count();
 }

 #endif /* __KERNEL__ */
include/linux/highmem.h

@@ -24,8 +24,8 @@ static inline void *kmap(struct page *page) { return page_address(page); }

 #define kunmap(page) do { (void) (page); } while (0)

-#define kmap_atomic(page, idx)		kmap(page)
-#define kunmap_atomic(page, idx)	kunmap(page)
+#define kmap_atomic(page, idx)		page_address(page)
+#define kunmap_atomic(addr, idx)	do { } while (0)

 #endif /* CONFIG_HIGHMEM */
include/linux/init_task.h

@@ -29,10 +29,11 @@
 	.mmlist		= LIST_HEAD_INIT(name.mmlist),			\
 }

-#define INIT_SIGNALS { \
+#define INIT_SIGNALS(sig) {						\
 	.count		= ATOMIC_INIT(1), 				\
 	.action		= { {{0,}}, },					\
-	.siglock	= SPIN_LOCK_UNLOCKED 				\
+	.siglock	= SPIN_LOCK_UNLOCKED,				\
+	.shared_pending	= { NULL, &sig.shared_pending.head, {{0}}},	\
 }

 /*
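Annotation: INIT_SIGNALS now takes the variable being initialized so the initializer can point shared_pending at its own list head. Usage, as seen in the arch/i386/kernel/init_task.c hunk at the top of this diff:

	static struct signal_struct init_signals = INIT_SIGNALS(init_signals);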
include/linux/pci.h

@@ -386,6 +386,9 @@ struct pci_dev {
 	int		ro;		/* ISAPnP: read only */
 	unsigned short	regs;		/* ISAPnP: supported registers */

+	/* These fields are used by common fixups */
+	unsigned short	transparent:1;	/* Transparent PCI bridge */
+
 	int (*prepare)(struct pci_dev *dev);	/* ISAPnP hooks */
 	int (*activate)(struct pci_dev *dev);
 	int (*deactivate)(struct pci_dev *dev);
@@ -406,6 +409,10 @@ struct pci_dev {
 #define PCI_ROM_RESOURCE	6
 #define PCI_BRIDGE_RESOURCES	7
 #define PCI_NUM_RESOURCES	11
+
+#ifndef PCI_BUS_NUM_RESOURCES
+#define PCI_BUS_NUM_RESOURCES	4
+#endif

 #define PCI_REGION_FLAG_MASK	0x0fU	/* These bits of resource flags tell us the PCI region flags */
@@ -415,7 +422,8 @@ struct pci_bus {
 	struct list_head children;	/* list of child buses */
 	struct list_head devices;	/* list of devices on this bus */
 	struct pci_dev	*self;		/* bridge device as seen by parent */
-	struct resource	*resource[4];	/* address space routed to this bus */
+	struct resource	*resource[PCI_BUS_NUM_RESOURCES];
+					/* address space routed to this bus */

 	struct pci_ops	*ops;		/* configuration access functions */
 	void		*sysdata;	/* hook for sys-specific extension */
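Annotation: the #ifndef guard lets an architecture supply its own bus resource count before <linux/pci.h> is included, while the generic default stays at the old array size of 4. A hypothetical override; the value and header are invented for illustration:

	/* in a hypothetical arch header included ahead of <linux/pci.h> */
	#define PCI_BUS_NUM_RESOURCES 8	/* this platform routes more windows per bus */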
include/linux/preempt.h

 #ifndef __LINUX_PREEMPT_H
 #define __LINUX_PREEMPT_H

+/*
+ * include/linux/preempt.h - macros for accessing and manipulating
+ * preempt_count (used for kernel preemption, interrupt count, etc.)
+ */
+
 #include <linux/config.h>

-#define preempt_count() (current_thread_info()->preempt_count)
+#define preempt_count()	(current_thread_info()->preempt_count)

 #define inc_preempt_count() \
 do { \
@@ -31,36 +36,25 @@ do { \
 	barrier(); \
 } while (0)

-#define preempt_enable() \
+#define preempt_check_resched() \
 do { \
-	preempt_enable_no_resched(); \
 	if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
 		preempt_schedule(); \
 } while (0)

-#define preempt_check_resched() \
+#define preempt_enable() \
 do { \
-	if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
-		preempt_schedule(); \
+	preempt_enable_no_resched(); \
+	preempt_check_resched(); \
 } while (0)

-#define inc_preempt_count_non_preempt()	do { } while (0)
-#define dec_preempt_count_non_preempt()	do { } while (0)
-
 #else

 #define preempt_disable()		do { } while (0)
-#define preempt_enable_no_resched()	do {} while(0)
+#define preempt_enable_no_resched()	do { } while (0)
 #define preempt_enable()		do { } while (0)
 #define preempt_check_resched()		do { } while (0)

-/*
- * Sometimes we want to increment the preempt count, but we know that it's
- * already incremented if the kernel is compiled for preemptibility.
- */
-#define inc_preempt_count_non_preempt()	inc_preempt_count()
-#define dec_preempt_count_non_preempt()	dec_preempt_count()
-
 #endif

 #endif /* __LINUX_PREEMPT_H */
include/linux/sched.h

@@ -211,6 +211,11 @@ struct signal_struct {
 	atomic_t		count;
 	struct k_sigaction	action[_NSIG];
 	spinlock_t		siglock;
+
+	/* current thread group signal load-balancing target: */
+	task_t			*curr_target;
+
+	struct sigpending	shared_pending;
 };

 /*
@@ -356,7 +361,7 @@ struct task_struct {
 	spinlock_t sigmask_lock;	/* Protects signal and blocked */
 	struct signal_struct *sig;

-	sigset_t blocked;
+	sigset_t blocked, real_blocked, shared_unblocked;
 	struct sigpending pending;

 	unsigned long sas_ss_sp;
@@ -431,6 +436,7 @@ extern void set_cpus_allowed(task_t *p, unsigned long new_mask);
 extern void set_user_nice(task_t *p, long nice);
 extern int task_prio(task_t *p);
 extern int task_nice(task_t *p);
+extern int task_curr(task_t *p);
 extern int idle_cpu(int cpu);

 void yield(void);
@@ -535,7 +541,7 @@ extern void proc_caches_init(void);
 extern void flush_signals(struct task_struct *);
 extern void flush_signal_handlers(struct task_struct *);
 extern void sig_exit(int, int, struct siginfo *);
-extern int dequeue_signal(sigset_t *, siginfo_t *);
+extern int dequeue_signal(struct sigpending *pending, sigset_t *mask, siginfo_t *info);
 extern void block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask);
 extern void unblock_all_signals(void);
@@ -654,6 +660,7 @@ extern void exit_thread(void);
 extern void exit_mm(struct task_struct *);
 extern void exit_files(struct task_struct *);
 extern void exit_sighand(struct task_struct *);
+extern void remove_thread_group(struct task_struct *tsk, struct signal_struct *sig);

 extern void reparent_to_init(void);
 extern void daemonize(void);
@@ -786,8 +793,29 @@ static inline struct task_struct *younger_sibling(struct task_struct *p)
 #define for_each_thread(task) \
 	for (task = next_thread(current) ; task != current ; task = next_thread(task))

-#define next_thread(p) \
-	list_entry((p)->thread_group.next, struct task_struct, thread_group)
+static inline task_t *next_thread(task_t *p)
+{
+	if (!p->sig)
+		BUG();
+#if CONFIG_SMP
+	if (!spin_is_locked(&p->sig->siglock) &&
+				!rwlock_is_locked(&tasklist_lock))
+		BUG();
+#endif
+	return list_entry((p)->thread_group.next, task_t, thread_group);
+}
+
+static inline task_t *prev_thread(task_t *p)
+{
+	if (!p->sig)
+		BUG();
+#if CONFIG_SMP
+	if (!spin_is_locked(&p->sig->siglock) &&
+				!rwlock_is_locked(&tasklist_lock))
+		BUG();
+#endif
+	return list_entry((p)->thread_group.prev, task_t, thread_group);
+}

 #define thread_group_leader(p)	(p->pid == p->tgid)
@@ -903,21 +931,8 @@ static inline void cond_resched(void)
    This is required every time the blocked sigset_t changes.
    Athread cathreaders should have t->sigmask_lock.  */

-static inline void recalc_sigpending_tsk(struct task_struct *t)
-{
-	if (has_pending_signals(&t->pending.signal, &t->blocked))
-		set_tsk_thread_flag(t, TIF_SIGPENDING);
-	else
-		clear_tsk_thread_flag(t, TIF_SIGPENDING);
-}
-
-static inline void recalc_sigpending(void)
-{
-	if (has_pending_signals(&current->pending.signal, &current->blocked))
-		set_thread_flag(TIF_SIGPENDING);
-	else
-		clear_thread_flag(TIF_SIGPENDING);
-}
+extern FASTCALL(void recalc_sigpending_tsk(struct task_struct *t));
+extern void recalc_sigpending(void);

 /*
  * Wrappers for p->thread_info->cpu access. No-op on UP.
init/do_mounts.c

@@ -28,6 +28,7 @@ extern asmlinkage long sys_mount(char *dev_name, char *dir_name, char *type,
 				 unsigned long flags, void *data);
 extern asmlinkage long sys_mkdir(const char *name, int mode);
 extern asmlinkage long sys_chdir(const char *name);
+extern asmlinkage long sys_fchdir(int fd);
 extern asmlinkage long sys_chroot(const char *name);
 extern asmlinkage long sys_unlink(const char *name);
 extern asmlinkage long sys_symlink(const char *old, const char *new);
@@ -730,17 +731,13 @@ static void __init mount_root(void)
 }

 #ifdef CONFIG_BLK_DEV_INITRD
+static int old_fd, root_fd;
+
 static int do_linuxrc(void * shell)
 {
 	static char *argv[] = { "linuxrc", NULL, };
 	extern char * envp_init[];

-	sys_chdir("/root");
-	sys_mount(".", "/", NULL, MS_MOVE, NULL);
-	sys_chroot(".");
-	mount_devfs_fs ();
-
+	close(old_fd);
+	close(root_fd);
 	close(0);
 	close(1);
 	close(2);
 	setsid();
 	(void) open("/dev/console", O_RDWR, 0);
@@ -758,9 +755,16 @@ static void __init handle_initrd(void)
 	int i, pid;

 	create_dev("/dev/root.old", Root_RAM0, NULL);
 	/* mount initrd on rootfs' /root */
 	mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
 	sys_mkdir("/old", 0700);
-	sys_chdir("/old");
+	root_fd = open("/", 0, 0);
+	old_fd = open("/old", 0, 0);
+
+	/* move initrd over / and chdir/chroot in initrd root */
+	sys_chdir("/root");
+	sys_mount(".", "/", NULL, MS_MOVE, NULL);
+	sys_chroot(".");
+	mount_devfs_fs ();

 	pid = kernel_thread(do_linuxrc, "/linuxrc", SIGCHLD);
 	if (pid > 0) {
@@ -768,7 +772,14 @@ static void __init handle_initrd(void)
 			yield();
 	}

-	sys_mount("..", ".", NULL, MS_MOVE, NULL);
+	/* move initrd to rootfs' /old */
+	sys_fchdir(old_fd);
+	sys_mount("/", ".", NULL, MS_MOVE, NULL);
+	/* switch root and cwd back to / of rootfs */
+	sys_fchdir(root_fd);
+	sys_chroot(".");
+	close(old_fd);
+	close(root_fd);
 	sys_umount("/old/dev", 0);

 	if (real_root_dev == Root_RAM0) {
kernel/exit.c
View file @
0e1e2d82
...
...
@@ -36,7 +36,6 @@ static inline void __unhash_process(struct task_struct *p)
nr_threads
--
;
unhash_pid
(
p
);
REMOVE_LINKS
(
p
);
list_del
(
&
p
->
thread_group
);
p
->
pid
=
0
;
proc_dentry
=
p
->
proc_dentry
;
if
(
unlikely
(
proc_dentry
!=
NULL
))
{
...
...
@@ -73,6 +72,7 @@ static void release_task(struct task_struct * p)
}
BUG_ON
(
!
list_empty
(
&
p
->
ptrace_list
)
||
!
list_empty
(
&
p
->
ptrace_children
));
unhash_process
(
p
);
exit_sighand
(
p
);
release_thread
(
p
);
if
(
p
!=
current
)
{
...
...
@@ -244,7 +244,8 @@ void daemonize(void)
static void reparent_thread(task_t *p, task_t *reaper, task_t *child_reaper)
{
	/* We dont want people slaying init */
	p->exit_signal = SIGCHLD;
	if (p->exit_signal != -1)
		p->exit_signal = SIGCHLD;
	p->self_exec_id++;

	/* Make sure we're not reparenting to ourselves */
...
@@ -412,18 +413,15 @@ void exit_mm(struct task_struct *tsk)
 */
static inline void forget_original_parent(struct task_struct * father)
{
	struct task_struct *p, *reaper;
	struct task_struct *p, *reaper = father;
	struct list_head *_p;

	read_lock(&tasklist_lock);
	write_lock_irq(&tasklist_lock);

	/* Next in our thread group, if they're not already exiting */
	reaper = father;
	do {
		reaper = next_thread(reaper);
		if (!(reaper->flags & PF_EXITING))
			break;
	} while (reaper != father);
	if (father->exit_signal != -1)
		reaper = prev_thread(reaper);
	else
		reaper = child_reaper;
	if (reaper == father)
		reaper = child_reaper;
...
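The reaper search walks the circular thread-group list starting from the dying parent and settles on the first sibling that is not itself exiting, falling back to child_reaper (init) when the whole group is on its way out. A self-contained toy version of just that sibling walk (it deliberately omits the exit_signal/prev_thread branch; struct layout and names are illustrative):

#include <stdio.h>

struct toy_task {
	const char *name;
	int exiting;
	struct toy_task *next;	/* circular thread-group list */
};

/* Mirrors the do/while search above: advance past exiting siblings,
 * stop at the first live one, fall back to init otherwise. */
static struct toy_task *pick_reaper(struct toy_task *father,
				    struct toy_task *child_reaper)
{
	struct toy_task *reaper = father;

	do {
		reaper = reaper->next;
		if (!reaper->exiting)
			break;
	} while (reaper != father);

	if (reaper == father)	/* every sibling is exiting too */
		reaper = child_reaper;
	return reaper;
}

int main(void)
{
	struct toy_task init_task = { "init", 0, NULL };
	struct toy_task a = { "a", 1, NULL };
	struct toy_task b = { "b", 0, NULL };
	struct toy_task father = { "father", 1, NULL };

	father.next = &a;
	a.next = &b;
	b.next = &father;	/* close the circle */

	printf("reaper: %s\n", pick_reaper(&father, &init_task)->name); /* "b" */
	return 0;
}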
@@ -444,7 +442,7 @@ static inline void forget_original_parent(struct task_struct * father)
		p = list_entry(_p, struct task_struct, ptrace_list);
		reparent_thread(p, reaper, child_reaper);
	}
	read_unlock(&tasklist_lock);
	write_unlock_irq(&tasklist_lock);
}

static inline void zap_thread(task_t *p, task_t *father, int traced)
...
@@ -604,7 +602,6 @@ NORET_TYPE void do_exit(long code)
	__exit_files(tsk);
	__exit_fs(tsk);
	exit_namespace(tsk);
	exit_sighand(tsk);
	exit_thread();

	if (current->leader)
...
@@ -763,6 +760,8 @@ asmlinkage long sys_wait4(pid_t pid,unsigned int * stat_addr, int options, struc
			if (options & __WNOTHREAD)
				break;
			tsk = next_thread(tsk);
			if (tsk->sig != current->sig)
				BUG();
		} while (tsk != current);
		read_unlock(&tasklist_lock);
		if (flag) {
...
kernel/fork.c
...
@@ -630,6 +630,9 @@ static inline int copy_sighand(unsigned long clone_flags, struct task_struct * t
	spin_lock_init(&sig->siglock);
	atomic_set(&sig->count, 1);
	memcpy(tsk->sig->action, current->sig->action, sizeof(tsk->sig->action));
	sig->curr_target = NULL;
	init_sigpending(&sig->shared_pending);

	return 0;
}
...
@@ -664,6 +667,12 @@ static struct task_struct *copy_process(unsigned long clone_flags,
	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return ERR_PTR(-EINVAL);

	/*
	 * Thread groups must share signals as well:
	 */
	if (clone_flags & CLONE_THREAD)
		clone_flags |= CLONE_SIGHAND;

	retval = security_ops->task_create(clone_flags);
	if (retval)
		goto fork_out;
...
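Two invariants are enforced at the top of copy_process() here: a fresh namespace (CLONE_NEWNS) cannot be combined with shared fs state (CLONE_FS), and joining a thread group (CLONE_THREAD) forces a shared signal-handler table (CLONE_SIGHAND). A standalone toy check; the TOY_* flag values are illustrative, not the kernel's:

#include <errno.h>
#include <stdio.h>

#define TOY_CLONE_NEWNS		0x1
#define TOY_CLONE_FS		0x2
#define TOY_CLONE_SIGHAND	0x4
#define TOY_CLONE_THREAD	0x8

static int toy_check_clone_flags(unsigned long *flags)
{
	/* A new namespace means private fs state; asking for both a new
	 * namespace and shared fs state is contradictory. */
	if ((*flags & (TOY_CLONE_NEWNS | TOY_CLONE_FS)) ==
			(TOY_CLONE_NEWNS | TOY_CLONE_FS))
		return -EINVAL;
	/* Thread groups must share signals as well. */
	if (*flags & TOY_CLONE_THREAD)
		*flags |= TOY_CLONE_SIGHAND;
	return 0;
}

int main(void)
{
	unsigned long flags = TOY_CLONE_THREAD;

	if (toy_check_clone_flags(&flags) == 0)
		printf("flags now 0x%lx (SIGHAND forced)\n", flags);
	return 0;
}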
@@ -843,8 +852,10 @@ static struct task_struct *copy_process(unsigned long clone_flags,
	p->parent = p->real_parent;
	if (clone_flags & CLONE_THREAD) {
		spin_lock(&current->sig->siglock);
		p->tgid = current->tgid;
		list_add(&p->thread_group, &current->thread_group);
		spin_unlock(&current->sig->siglock);
	}
	SET_LINKS(p);
...
kernel/sched.c
...
@@ -1335,6 +1335,15 @@ int task_nice(task_t *p)
	return TASK_NICE(p);
}

/**
 * task_curr - is this task currently executing on a CPU?
 * @p: the task in question.
 */
int task_curr(task_t *p)
{
	return cpu_curr(task_cpu(p)) == p;
}

/**
 * idle_cpu - is a given cpu idle currently?
 * @cpu: the processor in question.
...
kernel/signal.c
mm/filemap.c
...
@@ -1036,7 +1036,52 @@ void do_generic_file_read(struct file * filp, loff_t *ppos, read_descriptor_t *
	UPDATE_ATIME(inode);
}

int file_read_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size)
/*
 * Fault a userspace page into pagetables.  Return non-zero on a fault.
 *
 * FIXME: this assumes that two userspace pages are always sufficient.  That's
 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
 */
static inline int fault_in_pages_writeable(char *uaddr, int size)
{
	int ret;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	ret = __put_user(0, uaddr);
	if (ret == 0) {
		char *end = uaddr + size - 1;

		/*
		 * If the page was already mapped, this will get a cache miss
		 * for sure, so try to avoid doing it.
		 */
		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __put_user(0, end);
	}
	return ret;
}

static inline void fault_in_pages_readable(const char *uaddr, int size)
{
	volatile char c;
	int ret;

	ret = __get_user(c, (char *)uaddr);
	if (ret == 0) {
		const char *end = uaddr + size - 1;

		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			__get_user(c, (char *)end);
	}
}

int file_read_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size)
{
	char *kaddr;
	unsigned long left, count = desc->count;
...
@@ -1044,14 +1089,28 @@ int file_read_actor(read_descriptor_t * desc, struct page *page, unsigned long o
	if (size > count)
		size = count;

	/*
	 * Faults on the destination of a read are common, so do it before
	 * taking the kmap.
	 */
	if (!fault_in_pages_writeable(desc->buf, size)) {
		kaddr = kmap_atomic(page, KM_USER0);
		left = __copy_to_user(desc->buf, kaddr + offset, size);
		kunmap_atomic(kaddr, KM_USER0);
		if (left == 0)
			goto success;
	}

	/* Do it the slow way */
	kaddr = kmap(page);
	left = __copy_to_user(desc->buf, kaddr + offset, size);
	kunmap(page);
	if (left) {
		size -= left;
		desc->error = -EFAULT;
	}
success:
	desc->count = count - size;
	desc->written += size;
	desc->buf += size;
...
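The point of the prefault: the fast path copies under kmap_atomic(), where taking a page fault is unsafe, so the code touches the first and last byte of the user buffer beforehand to make those pages resident, keeping the sleeping kmap() path as a fallback. A userspace analogue of the first/last-byte touch, a minimal sketch assuming a 4 KiB page size (real code would query sysconf(_SC_PAGESIZE)):

#include <stdio.h>
#include <string.h>

#define TOY_PAGE_SIZE	4096UL
#define TOY_PAGE_MASK	(~(TOY_PAGE_SIZE - 1))

/* Touch the first byte of the buffer and, when it lands on a different
 * page, the last byte too -- the same trick fault_in_pages_writeable()
 * plays before the non-sleeping copy. */
static void prefault_writeable(volatile char *buf, size_t size)
{
	buf[0] = buf[0];
	if (((unsigned long)buf & TOY_PAGE_MASK) !=
	    ((unsigned long)(buf + size - 1) & TOY_PAGE_MASK))
		buf[size - 1] = buf[size - 1];
}

int main(void)
{
	static char dst[2 * TOY_PAGE_SIZE];	/* spans at least two pages */

	prefault_writeable(dst, sizeof(dst));	/* both pages now resident */
	memset(dst, 'x', sizeof(dst));
	printf("first=%c last=%c\n", dst[0], dst[sizeof(dst) - 1]);
	return 0;
}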
@@ -1838,6 +1897,26 @@ inline void remove_suid(struct dentry *dentry)
	}
}

static inline int filemap_copy_from_user(struct page *page, unsigned long offset, const char *buf, unsigned bytes)
{
	char *kaddr;
	int left;

	kaddr = kmap_atomic(page, KM_USER0);
	left = __copy_from_user(kaddr + offset, buf, bytes);
	kunmap_atomic(kaddr, KM_USER0);
	if (left != 0) {
		/* Do it the slow way */
		kaddr = kmap(page);
		left = __copy_from_user(kaddr + offset, buf, bytes);
		kunmap(page);
	}
	return left;
}

/*
 * Write to a file through the page cache.
 *
...
@@ -1990,7 +2069,6 @@ ssize_t generic_file_write_nolock(struct file *file, const char *buf,
	unsigned long index;
	unsigned long offset;
	long page_fault;
	char *kaddr;

	offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
	index = pos >> PAGE_CACHE_SHIFT;
...
@@ -2004,10 +2082,7 @@ ssize_t generic_file_write_nolock(struct file *file, const char *buf,
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 */
		{
			volatile unsigned char dummy;
			__get_user(dummy, buf);
			__get_user(dummy, buf + bytes - 1);
		}
		fault_in_pages_readable(buf, bytes);

		page = __grab_cache_page(mapping, index, &cached_page, &lru_pvec);
		if (!page) {
...
@@ -2015,22 +2090,19 @@ ssize_t generic_file_write_nolock(struct file *file, const char *buf,
			break;
		}

		kaddr = kmap(page);
		status = a_ops->prepare_write(file, page, offset, offset+bytes);
		if (unlikely(status)) {
			/*
			 * prepare_write() may have instantiated a few blocks
			 * outside i_size.  Trim these off again.
			 */
			kunmap(page);
			unlock_page(page);
			page_cache_release(page);
			if (pos + bytes > inode->i_size)
				vmtruncate(inode, inode->i_size);
			break;
		}

		page_fault = __copy_from_user(kaddr+offset, buf, bytes);
		flush_dcache_page(page);
		page_fault = filemap_copy_from_user(page, offset, buf, bytes);
		status = a_ops->commit_write(file, page, offset, offset+bytes);
		if (unlikely(page_fault)) {
			status = -EFAULT;
...
@@ -2045,7 +2117,6 @@ ssize_t generic_file_write_nolock(struct file *file, const char *buf,
				buf += status;
			}
		}
		kunmap(page);
		if (!PageReferenced(page))
			SetPageReferenced(page);
		unlock_page(page);
...
mm/page_alloc.c
...
@@ -782,10 +782,9 @@ void __init free_area_init_core(int nid, pg_data_t *pgdat, struct page **gmap,
	const unsigned long zone_required_alignment = 1UL << (MAX_ORDER-1);

	totalpages = 0;
	for (i = 0; i < MAX_NR_ZONES; i++) {
		unsigned long size = zones_size[i];
		totalpages += size;
	}
	for (i = 0; i < MAX_NR_ZONES; i++)
		totalpages += zones_size[i];
	realtotalpages = totalpages;
	if (zholes_size)
		for (i = 0; i < MAX_NR_ZONES; i++)
...
@@ -823,7 +822,7 @@ void __init free_area_init_core(int nid, pg_data_t *pgdat, struct page **gmap,
		if (zholes_size)
			realsize -= zholes_size[j];
		printk("zone(%lu): %lu pages.\n", j, size);
		printk("%s zone: %lu pages\n", zone_names[j], realsize);
		zone->size = size;
		zone->name = zone_names[j];
		spin_lock_init(&zone->lock);
...
mm/vmscan.c
...
@@ -483,7 +483,7 @@ shrink_zone(struct zone *zone, int priority,
	ratio = (unsigned long)nr_pages * zone->nr_active /
			((zone->nr_inactive | 1) * 2);
	atomic_add(ratio+1, &zone->refill_counter);
	if (atomic_read(&zone->refill_counter) > SWAP_CLUSTER_MAX) {
	while (atomic_read(&zone->refill_counter) > SWAP_CLUSTER_MAX) {
		atomic_sub(SWAP_CLUSTER_MAX, &zone->refill_counter);
		refill_inactive_zone(zone, SWAP_CLUSTER_MAX);
	}
...
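The if-to-while change means accumulated refill credit is now drained in SWAP_CLUSTER_MAX-sized batches until the counter falls back under the threshold, rather than at most one batch per call. A toy model of the drain loop, with an illustrative BATCH size standing in for SWAP_CLUSTER_MAX:

#include <stdio.h>

#define BATCH 32

static int credit;	/* stands in for zone->refill_counter */

/* Add credit, then drain it batch by batch while enough remains
 * (the old code drained at most one batch per call). */
static void add_credit(int n)
{
	credit += n;
	while (credit > BATCH) {
		credit -= BATCH;
		printf("refill %d pages\n", BATCH);	/* refill_inactive_zone() */
	}
}

int main(void)
{
	add_credit(100);	/* drains three full batches */
	printf("left over: %d credits\n", credit);	/* 4 */
	return 0;
}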
@@ -517,7 +517,7 @@ shrink_caches(struct zone *classzone, int priority,
	first_classzone = classzone->zone_pgdat->node_zones;
	zone = classzone;
	while (zone >= first_classzone) {
	while (zone >= first_classzone && nr_pages > 0) {
		if (zone->free_pages <= zone->pages_high) {
			nr_pages = shrink_zone(zone, priority, gfp_mask, nr_pages);
...