Commit bd91ad93, authored Jun 13, 2003 by Linus Torvalds
Merge bk://linux-dj.bkbits.net/agpgart
into home.transmeta.com:/home/torvalds/v2.5/linux

Parents: 40588f73, 5ba15264
Showing 67 changed files with 888 additions and 896 deletions.
arch/alpha/vmlinux.lds.S (+3, -0)
arch/arm/vmlinux-armo.lds.in (+1, -0)
arch/arm/vmlinux-armv.lds.in (+1, -0)
arch/cris/vmlinux.lds.S (+6, -1)
arch/h8300/platform/h8300h/generic/rom.ld (+1, -0)
arch/i386/vmlinux.lds.S (+1, -0)
arch/ia64/vmlinux.lds.S (+4, -0)
arch/m68k/vmlinux-std.lds (+1, -0)
arch/m68k/vmlinux-sun3.lds (+1, -0)
arch/m68knommu/vmlinux.lds.S (+1, -3)
arch/mips/vmlinux.lds.S (+1, -0)
arch/mips64/vmlinux.lds.S (+1, -0)
arch/parisc/vmlinux.lds.S (+1, -0)
arch/ppc/vmlinux.lds.S (+2, -0)
arch/ppc64/vmlinux.lds.S (+1, -0)
arch/s390/vmlinux.lds.S (+1, -0)
arch/sh/vmlinux.lds.S (+1, -0)
arch/sparc/vmlinux.lds.S (+1, -0)
arch/sparc64/vmlinux.lds.S (+1, -0)
arch/x86_64/Kconfig (+12, -0)
arch/x86_64/ia32/ia32entry.S (+1, -0)
arch/x86_64/kernel/acpi/boot.c (+4, -2)
arch/x86_64/kernel/apic.c (+7, -1)
arch/x86_64/kernel/bluesmoke.c (+2, -12)
arch/x86_64/kernel/pci-gart.c (+2, -13)
arch/x86_64/kernel/process.c (+5, -1)
arch/x86_64/kernel/smpboot.c (+8, -3)
arch/x86_64/kernel/time.c (+224, -34)
arch/x86_64/kernel/traps.c (+1, -1)
arch/x86_64/kernel/vsyscall.c (+13, -5)
arch/x86_64/lib/clear_page.S (+11, -21)
arch/x86_64/lib/copy_page.S (+83, -66)
arch/x86_64/lib/csum-copy.S (+134, -160)
arch/x86_64/lib/csum-partial.c (+60, -40)
arch/x86_64/lib/csum-wrappers.c (+40, -22)
arch/x86_64/lib/memcpy.S (+35, -60)
arch/x86_64/lib/memset.S (+30, -27)
arch/x86_64/vmlinux.lds.S (+4, -3)
drivers/char/rocket.c (+36, -251)
drivers/char/rocket.h (+2, -22)
drivers/char/rocket_int.h (+2, -44)
drivers/ieee1394/dv1394.c (+3, -4)
drivers/ieee1394/eth1394.c (+7, -1)
drivers/ieee1394/ohci1394.c (+1, -1)
drivers/pci/pci.ids (+2, -0)
drivers/scsi/scsi_scan.c (+2, -2)
fs/fs-writeback.c (+14, -1)
fs/namei.c (+1, -1)
include/asm-generic/vmlinux.lds.h (+6, -0)
include/asm-i386/fixmap.h (+8, -0)
include/asm-x86_64/checksum.h (+11, -1)
include/asm-x86_64/fixmap.h (+2, -0)
include/asm-x86_64/mc146818rtc.h (+5, -0)
include/asm-x86_64/processor.h (+1, -1)
include/asm-x86_64/proto.h (+2, -0)
include/asm-x86_64/timex.h (+29, -1)
include/asm-x86_64/vsyscall.h (+10, -8)
include/linux/init.h (+6, -0)
include/linux/pci_ids.h (+2, -0)
include/linux/security.h (+0, -39)
init/main.c (+1, -1)
kernel/sys.c (+7, -13)
mm/memory.c (+8, -6)
security/capability.c (+1, -9)
security/dummy.c (+0, -12)
security/root_plug.c (+1, -2)
security/security.c (+12, -1)
arch/alpha/vmlinux.lds.S
@@ -74,6 +74,9 @@ SECTIONS
    __con_initcall_end = .;
  }
  . = ALIGN(8);
  SECURITY_INIT
  . = ALIGN(64);
  __per_cpu_start = .;
  .data.percpu : { *(.data.percpu) }
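The SECURITY_INIT macro used by this and most of the following linker scripts comes from include/asm-generic/vmlinux.lds.h, which this commit also touches (+6, -0) but whose diff is not shown on this page. As an assumption (not visible in this diff), it likely expands to the dedicated security_initcall output section, roughly:

/* Assumed expansion of SECURITY_INIT; groups the security_initcall entries
 * between start/end markers so security modules can be run like other
 * initcalls at boot. */
#define SECURITY_INIT                            \
        .security_initcall.init : {              \
                __security_initcall_start = .;   \
                *(.security_initcall.init)       \
                __security_initcall_end = .;     \
        }

The per-architecture diffs below mostly just drop this macro into each vmlinux linker script; arch/cris and arch/ia64 spell the section out by hand instead.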
arch/arm/vmlinux-armo.lds.in
@@ -43,6 +43,7 @@ SECTIONS
    __con_initcall_start = .;
    *(.con_initcall.init)
    __con_initcall_end = .;
    SECURITY_INIT
    . = ALIGN(32768);
    __init_end = .;
  }
arch/arm/vmlinux-armv.lds.in
@@ -53,6 +53,7 @@ SECTIONS
    __con_initcall_start = .;
    *(.con_initcall.init)
    __con_initcall_end = .;
    SECURITY_INIT
    . = ALIGN(32);
    __initramfs_start = .;
    usr/built-in.o(.init.ramfs)
arch/cris/vmlinux.lds.S
@@ -74,7 +74,12 @@ SECTIONS
    __con_initcall_start = .;
    *(.con_initcall.init)
    __con_initcall_end = .;
  }
  .security_initcall.init : {
    __security_initcall_start = .;
    *(.security_initcall.init)
    __security_initcall_end = .;
  }
  /* We fill to the next page, so we can discard all init pages
     without needing to consider what payload might be appended
     to the kernel image. */
arch/h8300/platform/h8300h/generic/rom.ld
@@ -83,6 +83,7 @@ SECTIONS
    ___con_initcall_start = .;
    *(.con_initcall.init)
    ___con_initcall_end = .;
    SECURITY_INIT
    . = ALIGN(4);
    ___initramfs_start = .;
    *(.init.ramfs)
arch/i386/vmlinux.lds.S
@@ -81,6 +81,7 @@ SECTIONS
  __con_initcall_start = .;
  .con_initcall.init : { *(.con_initcall.init) }
  __con_initcall_end = .;
  SECURITY_INIT
  . = ALIGN(4);
  __alt_instructions = .;
  .altinstructions : { *(.altinstructions) }
arch/ia64/vmlinux.lds.S
@@ -141,6 +141,10 @@ SECTIONS
  .con_initcall.init : AT(ADDR(.con_initcall.init) - PAGE_OFFSET)
    { *(.con_initcall.init) }
  __con_initcall_end = .;
  __security_initcall_start = .;
  .security_initcall.init : AT(ADDR(.security_initcall.init) - PAGE_OFFSET)
    { *(.security_initcall.init) }
  __security_initcall_end = .;
  . = ALIGN(PAGE_SIZE);
  __init_end = .;
arch/m68k/vmlinux-std.lds
@@ -67,6 +67,7 @@ SECTIONS
  __con_initcall_start = .;
  .con_initcall.init : { *(.con_initcall.init) }
  __con_initcall_end = .;
  SECURITY_INIT
  . = ALIGN(8192);
  __initramfs_start = .;
  .init.ramfs : { *(.init.ramfs) }
arch/m68k/vmlinux-sun3.lds
@@ -61,6 +61,7 @@ __init_begin = .;
  __con_initcall_start = .;
  .con_initcall.init : { *(.con_initcall.init) }
  __con_initcall_end = .;
  SECURITY_INIT
  . = ALIGN(8192);
  __initramfs_start = .;
  .init.ramfs : { *(.init.ramfs) }
arch/m68knommu/vmlinux.lds.S
@@ -277,9 +277,7 @@ SECTIONS {
    __con_initcall_start = .;
    *(.con_initcall.init)
    __con_initcall_end = .;
    __security_initcall_start = .;
    *(.security_initcall.init)
    __security_initcall_end = .;
    SECURITY_INIT
    . = ALIGN(4);
    __initramfs_start = .;
    *(.init.ramfs)
arch/mips/vmlinux.lds.S
@@ -54,6 +54,7 @@ SECTIONS
  __con_initcall_start = .;
  .con_initcall.init : { *(.con_initcall.init) }
  __con_initcall_end = .;
  SECURITY_INIT
  . = ALIGN(4096);	/* Align double page for init_task_union */
  __init_end = .;
arch/mips64/vmlinux.lds.S
@@ -53,6 +53,7 @@ SECTIONS
  __con_initcall_start = .;
  .con_initcall.init : { *(.con_initcall.init) }
  __con_initcall_end = .;
  SECURITY_INIT
  . = ALIGN(4096);	/* Align double page for init_task_union */
  __init_end = .;
arch/parisc/vmlinux.lds.S
@@ -80,6 +80,7 @@ SECTIONS
  __con_initcall_start = .;
  .con_initcall.init : { *(.con_initcall.init) }
  __con_initcall_end = .;
  SECURITY_INIT
  . = ALIGN(4096);
  __initramfs_start = .;
  .init.ramfs : { *(.init.ramfs) }
arch/ppc/vmlinux.lds.S
@@ -119,6 +119,8 @@ SECTIONS
  .con_initcall.init : { *(.con_initcall.init) }
  __con_initcall_end = .;
  SECURITY_INIT
  __start___ftr_fixup = .;
  __ftr_fixup : { *(__ftr_fixup) }
  __stop___ftr_fixup = .;
arch/ppc64/vmlinux.lds.S
@@ -104,6 +104,7 @@ SECTIONS
  __con_initcall_start = .;
  .con_initcall.init : { *(.con_initcall.init) }
  __con_initcall_end = .;
  SECURITY_INIT
  . = ALIGN(4096);
  __initramfs_start = .;
  .init.ramfs : { *(.init.ramfs) }
arch/s390/vmlinux.lds.S
@@ -94,6 +94,7 @@ SECTIONS
  __con_initcall_start = .;
  .con_initcall.init : { *(.con_initcall.init) }
  __con_initcall_end = .;
  SECURITY_INIT
  . = ALIGN(256);
  __initramfs_start = .;
  .init.ramfs : { *(.init.ramfs) }
arch/sh/vmlinux.lds.S
@@ -71,6 +71,7 @@ SECTIONS
  __con_initcall_start = .;
  .con_initcall.init : { *(.con_initcall.init) }
  __con_initcall_end = .;
  SECURITY_INIT
  __machvec_start = .;
  .machvec.init : { *(.machvec.init) }
  __machvec_end = .;
arch/sparc/vmlinux.lds.S
@@ -62,6 +62,7 @@ SECTIONS
  __con_initcall_start = .;
  .con_initcall.init : { *(.con_initcall.init) }
  __con_initcall_end = .;
  SECURITY_INIT
  . = ALIGN(4096);
  __initramfs_start = .;
  .init.ramfs : { *(.init.ramfs) }
arch/sparc64/vmlinux.lds.S
@@ -68,6 +68,7 @@ SECTIONS
  __con_initcall_start = .;
  .con_initcall.init : { *(.con_initcall.init) }
  __con_initcall_end = .;
  SECURITY_INIT
  . = ALIGN(8192);
  __initramfs_start = .;
  .init.ramfs : { *(.init.ramfs) }
arch/x86_64/Kconfig
@@ -52,6 +52,18 @@ config EARLY_PRINTK
      klogd/syslogd or the X server. You should normally N here, unless
      you want to debug such a crash.

config HPET_TIMER
    bool
    default y
    help
      Use the IA-PC HPET (High Precision Event Timer) to manage
      time in preference to the PIT and RTC, if a HPET is
      present. The HPET provides a stable time base on SMP
      systems, unlike the RTC, but it is more expensive to access,
      as it is off-chip. You can find the HPET spec at
      <http://www.intel.com/labs/platcomp/hpet/hpetspec.htm>.
      If unsure, say Y.

config GENERIC_ISA_DMA
    bool
arch/x86_64/ia32/ia32entry.S
@@ -47,6 +47,7 @@
ENTRY(ia32_cstar_target)
    swapgs
    movl %esp,%r8d
    movq %r8,%gs:pda_oldrsp
    movq %gs:pda_kernelstack,%rsp
    sti
    SAVE_ARGS 8,1
arch/x86_64/kernel/acpi/boot.c
@@ -244,9 +244,11 @@ acpi_parse_hpet (
        return -1;
    }

    hpet.address = hpet_tbl->addr.addrl |
        ((long) hpet_tbl->addr.addrh << 32);
    vxtime.hpet_address = hpet_tbl->addr.addrl |
        ((long) hpet_tbl->addr.addrh << 32);

    printk(KERN_INFO "acpi: HPET id: %#x base: %#lx\n",
           hpet_tbl->id, hpet.address);
    printk(KERN_INFO "acpi: HPET id: %#x base: %#lx\n",
           hpet_tbl->id, vxtime.hpet_address);

    return 0;
}
arch/x86_64/kernel/apic.c
@@ -690,7 +690,13 @@ static void setup_APIC_timer(unsigned int clocks)
    }

    /* wait for irq slice */
    {
    if (vxtime.hpet_address) {
        int trigger = hpet_readl(HPET_T0_CMP);
        while (hpet_readl(HPET_COUNTER) >= trigger)
            /* do nothing */ ;
        while (hpet_readl(HPET_COUNTER) < trigger)
            /* do nothing */ ;
    } else {
        int c1, c2;
        outb_p(0x00, 0x43);
        c2 = inb_p(0x40);
arch/x86_64/kernel/bluesmoke.c
@@ -363,22 +363,12 @@ static void __init k8_mcheck_init(struct cpuinfo_x86 *c)
    machine_check_vector = k8_machine_check;
    for (i = 0; i < banks; i++) {
        u64 val = ((1UL << i) & disabled_banks) ? 0 : ~0UL;
        if (val && i == 4)
            val = k8_nb_flags;
        wrmsrl(MSR_IA32_MC0_CTL + 4*i, val);
        wrmsrl(MSR_IA32_MC0_STATUS + 4*i, 0);
    }
    nb = find_k8_nb();
    if (nb != NULL) {
        u32 reg, reg2;
        pci_read_config_dword(nb, 0x40, &reg);
        pci_write_config_dword(nb, 0x40, k8_nb_flags);
        pci_read_config_dword(nb, 0x44, &reg2);
        pci_write_config_dword(nb, 0x44, reg2);
        printk(KERN_INFO "Machine Check for K8 Northbridge %d enabled (%x,%x)\n",
               nb->devfn, reg, reg2);
        ignored_banks |= (1UL << 4);
    }
    set_in_cr4(X86_CR4_MCE);

    if (mcheck_interval && (smp_processor_id() == 0)) {
arch/x86_64/kernel/pci-gart.c
@@ -173,12 +173,10 @@ void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
    if (iommu_page == -1)
        goto error;

    /* Fill in the GATT, allocating pages as needed. */
    /* Fill in the GATT */
    for (i = 0; i < size; i++) {
        unsigned long phys_mem;
        void *mem = memory + i*PAGE_SIZE;
        if (i > 0)
            atomic_inc(&virt_to_page(mem)->count);
        phys_mem = virt_to_phys(mem);
        BUG_ON(phys_mem & ~PHYSICAL_PAGE_MASK);
        iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);

@@ -206,16 +204,14 @@ void pci_free_consistent(struct pci_dev *hwdev, size_t size,
    size = round_up(size, PAGE_SIZE);
    if (bus >= iommu_bus_base && bus <= iommu_bus_base + iommu_size) {
        unsigned pages = size >> PAGE_SHIFT;
        int i;
        iommu_page = (bus - iommu_bus_base) >> PAGE_SHIFT;
        vaddr = __va(GPTE_DECODE(iommu_gatt_base[iommu_page]));
#ifdef CONFIG_IOMMU_DEBUG
        int i;
        for (i = 0; i < pages; i++) {
            u64 pte = iommu_gatt_base[iommu_page + i];
            BUG_ON((pte & GPTE_VALID) == 0);
            iommu_gatt_base[iommu_page + i] = 0;
        }
#endif
        free_iommu(iommu_page, pages);
    }
    free_pages((unsigned long)vaddr, get_order(size));

@@ -319,11 +315,6 @@ dma_addr_t pci_map_single(struct pci_dev *dev, void *addr, size_t size, int dir)
     */
    iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
#ifdef CONFIG_IOMMU_DEBUG
    /* paranoia check */
    BUG_ON(GPTE_DECODE(iommu_gatt_base[iommu_page + i]) != phys_mem);
#endif
#ifdef CONFIG_IOMMU_LEAK
    /* XXX need eventually caller of pci_map_sg */
    if (iommu_leak_tab)

@@ -350,7 +341,6 @@ void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
        return;
    iommu_page = (dma_addr - iommu_bus_base) >> PAGE_SHIFT;
    npages = round_up(size + (dma_addr & ~PAGE_MASK), PAGE_SIZE) >> PAGE_SHIFT;
#ifdef CONFIG_IOMMU_DEBUG
    int i;
    for (i = 0; i < npages; i++) {
        iommu_gatt_base[iommu_page + i] = 0;

@@ -359,7 +349,6 @@ void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
        iommu_leak_tab[iommu_page + i] = 0;
#endif
    }
#endif
    free_iommu(iommu_page, npages);
}
arch/x86_64/kernel/process.c
@@ -150,7 +150,7 @@ __setup("idle=", idle_setup);
/* Prints also some state that isn't saved in the pt_regs */
void show_regs(struct pt_regs *regs)
void __show_regs(struct pt_regs *regs)
{
    unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
    unsigned int fsindex, gsindex;

@@ -192,7 +192,11 @@ void show_regs(struct pt_regs * regs)
           fs, fsindex, gs, gsindex, shadowgs);
    printk("CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, es, cr0);
    printk("CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4);
}

void show_regs(struct pt_regs *regs)
{
    __show_regs(regs);
    show_trace(&regs->rsp);
}
arch/x86_64/kernel/smpboot.c
@@ -67,6 +67,8 @@ struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
/* Set when the idlers are all forked */
int smp_threads_ready;

extern void time_init_smp(void);

/*
 * Trampoline 80x86 program as an array.
 */

@@ -760,7 +762,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
        if (APIC_init_uniprocessor())
            printk(KERN_NOTICE "Local APIC not detected."
                   " Using dummy APIC emulation.\n");
        return;
        goto smp_done;
    }

@@ -784,7 +786,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
        cpu_online_map = phys_cpu_present_map = 1;
        phys_cpu_present_map = 1;
        disable_apic = 1;
        return;
        goto smp_done;
    }

    verify_local_APIC();

@@ -799,7 +801,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
        cpu_online_map = phys_cpu_present_map = 1;
        phys_cpu_present_map = 1;
        disable_apic = 1;
        return;
        goto smp_done;
    }

    connect_bsp_APIC();

@@ -883,6 +885,9 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
     */
    if (cpu_has_tsc && cpucount)
        synchronize_tsc_bp();

 smp_done:
    time_init_smp();
}

/* These are wrappers to interface to the new boot process. Someone
arch/x86_64/kernel/time.c
@@ -24,6 +24,7 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/bcd.h>
#include <asm/pgtable.h>
#include <asm/vsyscall.h>
#include <asm/timex.h>
#ifdef CONFIG_X86_LOCAL_APIC

@@ -35,41 +36,61 @@ u64 jiffies_64 = INITIAL_JIFFIES;
extern int using_apic_timer;

spinlock_t rtc_lock = SPIN_LOCK_UNLOCKED;
spinlock_t i8253_lock = SPIN_LOCK_UNLOCKED;

extern int using_apic_timer;
extern void smp_local_timer_interrupt(struct pt_regs *regs);

#undef HPET_HACK_ENABLE_DANGEROUS

unsigned int cpu_khz;                  /* TSC clocks / usec, not used here */
unsigned long hpet_period;             /* fsecs / HPET clock */
unsigned long hpet_tick;               /* HPET clocks / interrupt */
int hpet_report_lost_ticks;            /* command line option */
unsigned long vxtime_hz = 1193182;
int report_lost_ticks;                 /* command line option */

struct hpet_data __hpet __section_hpet;        /* address, quotient, trigger, hz */
struct vxtime_data __vxtime __section_vxtime;  /* for vsyscalls */

volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
unsigned long __wall_jiffies __section_wall_jiffies = INITIAL_JIFFIES;
struct timespec __xtime __section_xtime;
struct timezone __sys_tz __section_sys_tz;

static inline void rdtscll_sync(unsigned long *tsc)
{
#ifdef CONFIG_SMP
    sync_core();
#endif
    rdtscll(*tsc);
}

/*
 * do_gettimeoffset() returns microseconds since last timer interrupt was
 * triggered by hardware. A memory read of HPET is slower than a register read
 * of TSC, but much more reliable. It's also synchronized to the timer
 * interrupt. Note that do_gettimeoffset() may return more than hpet_tick, if a
 * timer interrupt has happened already, but vxtime.trigger wasn't updated yet.
 * This is not a problem, because jiffies hasn't updated either. They are bound
 * together by xtime_lock.
 */

inline unsigned int do_gettimeoffset(void)
static inline unsigned int do_gettimeoffset_tsc(void)
{
    unsigned long t;
    sync_core();
    rdtscll(t);
    return (t - hpet.last_tsc) * (1000000L / HZ) / hpet.ticks + hpet.offset;
    unsigned long x;
    rdtscll_sync(&t);
    x = ((t - vxtime.last_tsc) * vxtime.tsc_quot) >> 32;
    return x;
}

static inline unsigned int do_gettimeoffset_hpet(void)
{
    return ((hpet_readl(HPET_COUNTER) - vxtime.last) * vxtime.quot) >> 32;
}

unsigned int (*do_gettimeoffset)(void) = do_gettimeoffset_tsc;

/*
 * This version of gettimeofday() has microsecond resolution and better than
 * microsecond precision, as we're using at least a 10 MHz (usually 14.31818

@@ -87,7 +108,8 @@ void do_gettimeofday(struct timeval *tv)
        sec = xtime.tv_sec;
        usec = xtime.tv_nsec / 1000;
        t = (jiffies - wall_jiffies) * (1000000L / HZ) + do_gettimeoffset();
        usec += t;
    } while (read_seqretry(&xtime_lock, seq));

@@ -107,7 +129,7 @@ void do_settimeofday(struct timeval *tv)
    write_seqlock_irq(&xtime_lock);

    tv->tv_usec -= do_gettimeoffset() +
        (jiffies - wall_jiffies) * tick_usec;
        (jiffies - wall_jiffies) * (USEC_PER_SEC / HZ);

    while (tv->tv_usec < 0) {
        tv->tv_usec += 1000000;

@@ -178,8 +200,8 @@ static void set_rtc_mmss(unsigned long nowtime)
        CMOS_WRITE(real_seconds, RTC_SECONDS);
        CMOS_WRITE(real_minutes, RTC_MINUTES);
    } else
        printk(KERN_WARNING "time.c: can't update CMOS clock "
               "from %d to %d\n", cmos_minutes, real_minutes);

/*
 * The following flags have to be released exactly in this order, otherwise the

@@ -198,6 +220,8 @@ static void set_rtc_mmss(unsigned long nowtime)
static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
    static unsigned long rtc_update = 0;
    unsigned long tsc, lost = 0;
    int delay, offset = 0;

/*
 * Here we are in the timer irq handler. We have irqs locally disabled (so we

@@ -208,17 +232,53 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
    write_seqlock(&xtime_lock);

    if (vxtime.hpet_address) {
        offset = hpet_readl(HPET_T0_CMP) - hpet_tick;
        delay = hpet_readl(HPET_COUNTER) - offset;
    } else {
        spin_lock(&i8253_lock);
        outb_p(0x00, 0x43);
        delay = inb_p(0x40);
        delay |= inb(0x40) << 8;
        spin_unlock(&i8253_lock);
        delay = LATCH - 1 - delay;
    }

    sync_core();
    rdtscll(t);
    hpet.offset = (t - hpet.last_tsc) * (1000000L / HZ) / hpet.ticks +
        hpet.offset - 1000000L / HZ;
    if (hpet.offset >= 1000000L / HZ)
        hpet.offset = 0;
    hpet.ticks = min_t(long, max_t(long, (t - hpet.last_tsc) * (1000000L / HZ) /
        (1000000L / HZ - hpet.offset), cpu_khz * 1000/HZ * 15/16),
        cpu_khz * 1000/HZ * 16/15);
    hpet.last_tsc = t;

    rdtscll_sync(&tsc);

    if (vxtime.mode == VXTIME_HPET) {
        if (offset - vxtime.last > hpet_tick)
            lost = (offset - vxtime.last) / hpet_tick - 1;
        vxtime.last = offset;
    } else {
        offset = (((tsc - vxtime.last_tsc) * vxtime.tsc_quot) >> 32)
            - (USEC_PER_SEC / HZ);
        if (offset < 0)
            offset = 0;
        if (offset > (USEC_PER_SEC / HZ)) {
            lost = offset / (USEC_PER_SEC / HZ);
            offset %= (USEC_PER_SEC / HZ);
        }
        vxtime.last_tsc = tsc - vxtime.quot * delay / vxtime.tsc_quot;
        if ((((tsc - vxtime.last_tsc) * vxtime.tsc_quot) >> 32) < offset)
            vxtime.last_tsc = tsc -
                (((long) offset << 32) / vxtime.tsc_quot) - 1;
    }

    if (lost) {
        if (report_lost_ticks)
            printk(KERN_WARNING "time.c: Lost %ld timer "
                   "tick(s)! (rip %016lx)\n",
                   (offset - vxtime.last) / hpet_tick - 1, regs->rip);
        jiffies += lost;
    }

@@ -244,7 +304,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 * If we have an externally synchronized Linux clock, then update CMOS clock
 * accordingly every ~11 minutes. set_rtc_mmss() will be called in the jiffy
 * closest to exactly 500 ms before the next second. If the update fails, we
 * don't care, as it'll be updated on the next turn, and the problem (time way
 * off) isn't likely to go away much sooner anyway.
 */

@@ -263,6 +323,7 @@ unsigned long get_cmos_time(void)
{
    unsigned int timeout, year, mon, day, hour, min, sec;
    unsigned char last, this;
    unsigned long flags;

@@ -272,7 +333,7 @@ unsigned long get_cmos_time(void)
 * standard 8.3 MHz ISA bus.
 */
    spin_lock(&rtc_lock);
    spin_lock_irqsave(&rtc_lock, flags);
    timeout = 1000000;
    last = this = 0;

@@ -295,7 +356,7 @@ unsigned long get_cmos_time(void)
    mon = CMOS_READ(RTC_MONTH);
    year = CMOS_READ(RTC_YEAR);
    spin_unlock(&rtc_lock);
    spin_unlock_irqrestore(&rtc_lock, flags);

@@ -326,6 +387,32 @@ unsigned long get_cmos_time(void)
#define TICK_COUNT 100000000

static unsigned int __init hpet_calibrate_tsc(void)
{
    int tsc_start, hpet_start;
    int tsc_now, hpet_now;
    unsigned long flags;

    local_irq_save(flags);
    local_irq_disable();

    hpet_start = hpet_readl(HPET_COUNTER);
    rdtscl(tsc_start);

    do {
        local_irq_disable();
        hpet_now = hpet_readl(HPET_COUNTER);
        sync_core();
        rdtscl(tsc_now);
        local_irq_restore(flags);
    } while ((tsc_now - tsc_start) < TICK_COUNT &&
             (hpet_now - hpet_start) < TICK_COUNT);

    return (tsc_now - tsc_start) * 1000000000L
        / ((hpet_now - hpet_start) * hpet_period / 1000);
}

/*
 * pit_calibrate_tsc() uses the speaker output (channel 2) of
 * the PIT. This is better than using the timer interrupt output,

@@ -339,10 +426,9 @@ static unsigned int __init pit_calibrate_tsc(void)
    unsigned long start, end;
    unsigned long flags;

    outb((inb(0x61) & ~0x02) | 0x01, 0x61);
    spin_lock_irqsave(&i8253_lock, flags);
    local_irq_save(flags);
    local_irq_disable();
    outb((inb(0x61) & ~0x02) | 0x01, 0x61);

    outb(0xb0, 0x43);
    outb((1193182 / (1000 / 50)) & 0xff, 0x42);

@@ -353,42 +439,146 @@ static unsigned int __init pit_calibrate_tsc(void)
    sync_core();
    rdtscll(end);
    local_irq_restore(flags);
    spin_unlock_irqrestore(&i8253_lock, flags);

    return (end - start) / 50;
}

static int hpet_init(void)
{
    unsigned int cfg, id;

    if (!vxtime.hpet_address)
        return -1;
    set_fixmap_nocache(FIX_HPET_BASE, vxtime.hpet_address);

    /* Read the period, compute tick and quotient. */
    id = hpet_readl(HPET_ID);
    if (!(id & HPET_ID_VENDOR) || !(id & HPET_ID_NUMBER) ||
        !(id & HPET_ID_LEGSUP))
        return -1;

    hpet_period = hpet_readl(HPET_PERIOD);
    if (hpet_period < 100000 || hpet_period > 100000000)
        return -1;

    hpet_tick = (1000000000L * (USEC_PER_SEC / HZ) + hpet_period / 2) /
        hpet_period;

    /* Stop the timers and reset the main counter. */
    cfg = hpet_readl(HPET_CFG);
    cfg &= ~(HPET_CFG_ENABLE | HPET_CFG_LEGACY);
    hpet_writel(cfg, HPET_CFG);
    hpet_writel(0, HPET_COUNTER);
    hpet_writel(0, HPET_COUNTER + 4);

    /* Set up timer 0, as periodic with first interrupt to happen at
       hpet_tick, and period also hpet_tick. */
    hpet_writel(HPET_T0_ENABLE | HPET_T0_PERIODIC | HPET_T0_SETVAL |
                HPET_T0_32BIT, HPET_T0_CFG);
    hpet_writel(hpet_tick, HPET_T0_CMP);
    hpet_writel(hpet_tick, HPET_T0_CMP);

    /* Go! */
    cfg |= HPET_CFG_ENABLE | HPET_CFG_LEGACY;
    hpet_writel(cfg, HPET_CFG);

    return 0;
}

void __init pit_init(void)
{
    unsigned long flags;

    spin_lock_irqsave(&i8253_lock, flags);
    outb_p(0x34, 0x43);           /* binary, mode 2, LSB/MSB, ch 0 */
    outb_p(LATCH & 0xff, 0x40);   /* LSB */
    outb_p(LATCH >> 8, 0x40);     /* MSB */
    spin_unlock_irqrestore(&i8253_lock, flags);
}

int __init time_setup(char *str)
{
    hpet_report_lost_ticks = 1;
    report_lost_ticks = 1;
    return 1;
}

static struct irqaction irq0 = { timer_interrupt, SA_INTERRUPT, 0,
                                 "timer", NULL, NULL };

extern void __init config_acpi_tables(void);

void __init time_init(void)
{
    char *timename;

#ifdef HPET_HACK_ENABLE_DANGEROUS
    if (!vxtime.hpet_address) {
        printk(KERN_WARNING "time.c: WARNING: Enabling HPET base "
               "manually!\n");
        outl(0x800038a0, 0xcf8);
        outl(0xff000001, 0xcfc);
        outl(0x800038a0, 0xcf8);
        hpet_address = inl(0xcfc) & 0xfffffffe;
        printk(KERN_WARNING "time.c: WARNING: Enabled HPET "
               "at %#lx.\n", hpet_address);
    }
#endif

    xtime.tv_sec = get_cmos_time();
    xtime.tv_nsec = 0;

    if (!hpet_init()) {
        vxtime_hz = (1000000000000000L + hpet_period / 2) / hpet_period;
        cpu_khz = hpet_calibrate_tsc();
        timename = "HPET";
    } else {
        pit_init();
        printk(KERN_INFO "time.c: Using 1.1931816 MHz PIT timer.\n");
        cpu_khz = pit_calibrate_tsc();
        timename = "PIT";
    }

    printk(KERN_INFO "time.c: Using %ld.%06ld MHz %s timer.\n",
           vxtime_hz / 1000000, vxtime_hz % 1000000, timename);
    printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n",
           cpu_khz / 1000, cpu_khz % 1000);

    hpet.ticks = cpu_khz * (1000 / HZ);
    rdtscll(hpet.last_tsc);
    vxtime.mode = VXTIME_TSC;
    vxtime.quot = (1000000L << 32) / vxtime_hz;
    vxtime.tsc_quot = (1000L << 32) / cpu_khz;
    vxtime.hz = vxtime_hz;
    rdtscll_sync(&vxtime.last_tsc);

    setup_irq(0, &irq0);
}

void __init time_init_smp(void)
{
    char *timetype;

    if (vxtime.hpet_address) {
        timetype = "HPET";
        vxtime.last = hpet_readl(HPET_T0_CMP) - hpet_tick;
        vxtime.mode = VXTIME_HPET;
        do_gettimeoffset = do_gettimeoffset_hpet;
    } else {
        timetype = "PIT/TSC";
        vxtime.mode = VXTIME_TSC;
    }
    printk(KERN_INFO "time.c: Using %s based timekeeping.\n", timetype);
}

__setup("report_lost_ticks", time_setup);
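The time.c changes above key everything off the vxtime/__vxtime state declared in include/asm-x86_64/vsyscall.h, which this commit also changes (+10, -8) but whose diff is not shown here. A hypothetical sketch of what struct vxtime_data plausibly holds, inferred only from the fields referenced in the code above (hpet_address, hz, last, last_tsc, quot, tsc_quot, mode) -- the real header may differ:

/* Hypothetical reconstruction of struct vxtime_data, based solely on the
 * fields used in arch/x86_64/kernel/time.c above; not copied from the header. */
struct vxtime_data {
        long hpet_address;      /* HPET base address from ACPI */
        unsigned long hz;       /* timer source frequency (vxtime_hz) */
        int last;               /* last HPET comparator value seen */
        unsigned long last_tsc; /* TSC value at the last timer tick */
        long quot;              /* (usec << 32) / vxtime_hz, HPET offset scaling */
        long tsc_quot;          /* (msec << 32) / cpu_khz, TSC offset scaling */
        int mode;               /* VXTIME_TSC or VXTIME_HPET */
};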
arch/x86_64/kernel/traps.c
@@ -263,7 +263,7 @@ void show_registers(struct pt_regs *regs)
    rsp = regs->rsp;

    printk("CPU %d ", cpu);
    show_regs(regs);
    __show_regs(regs);
    printk("Process %s (pid: %d, stackpage=%08lx)\n",
           cur->comm, cur->pid, 4096 + (unsigned long) cur);
arch/x86_64/kernel/vsyscall.c
@@ -78,13 +78,21 @@ static force_inline void do_vgettimeofday(struct timeval *tv)
    do {
        sequence = read_seqbegin(&__xtime_lock);

        sync_core();
        rdtscll(t);
        sec = __xtime.tv_sec;
        usec = (__xtime.tv_nsec / 1000) +
            (__jiffies - __wall_jiffies) * (1000000 / HZ) +
            (t - __hpet.last_tsc) * (1000000 / HZ) / __hpet.ticks + __hpet.offset;
            (__jiffies - __wall_jiffies) * (1000000 / HZ);

        if (__vxtime.mode == VXTIME_TSC) {
            sync_core();
            rdtscll(t);
            usec += ((t - __vxtime.last_tsc) * __vxtime.tsc_quot) >> 32;
        } else {
#if 0
            usec += ((readl(fix_to_virt(VSYSCALL_HPET) + 0xf0) -
                      __vxtime.last) * __vxtime.quot) >> 32;
#endif
        }
    } while (read_seqretry(&__xtime_lock, sequence));

    tv->tv_sec = sec + usec / 1000000;
arch/x86_64/lib/clear_page.S
/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 */
#include <linux/linkage.h>

/*
 * Zero a page.
 * rdi	page
 */
ENTRY(clear_page)
    .globl clear_page
    .p2align 4
clear_page:
    xorl %eax,%eax
    movl $4096/128,%ecx
    movl $128,%edx
loop:
    movl $4096/64,%ecx
    .p2align 4
.Lloop:
    decl %ecx
#define PUT(x) movq %rax,x*8(%rdi)
    PUT(0)
    movq %rax,(%rdi)
    PUT(1)
    PUT(2)
    PUT(3)
@@ -21,17 +20,8 @@ loop:
    PUT(5)
    PUT(6)
    PUT(7)
    PUT(8)
    ...
    PUT(15)
    addq %rdx,%rdi
    decl %ecx
    jnz loop
    sfence
    leaq 64(%rdi),%rdi
    jnz .Lloop
    nop
    ret
arch/x86_64/lib/copy_page.S
/* Copyright 2002 Andi Kleen, SuSE Labs. */
/* Written 2003 by Andi Kleen, based on a kernel by Evandro Menezes */

#include <linux/linkage.h>
#include <linux/config.h>

#ifdef CONFIG_PREEMPT
#warning "check your fpu context saving!"
#endif

/* Don't use streaming store because it's better when the target
   ends up in cache. */

/* Could vary the prefetch distance based on SMP/UP */

/*
 * Copy a page.
 *
 * rdi	destination page
 * rsi	source page
 *
 * src/dst must be aligned to 16 bytes.
 *
 * Warning: in case of super lazy FP save this needs to be preempt_stop
 */
    .globl copy_page
    .p2align
    .p2align 4
copy_page:
    prefetchnta (%rsi)
    prefetchnta 64(%rsi)
    movq %rsp,%rax
    subq $16*4,%rsp
    andq $~15,%rsp
    movdqa %xmm0,(%rsp)
    movdqa %xmm1,16(%rsp)
    movdqa %xmm2,32(%rsp)
    movdqa %xmm3,48(%rsp)
    movl $(4096/128)-2,%ecx
    movl $128,%edx
loop:
    prefetchnta (%rsi)
    prefetchnta 64(%rsi)
loop_no_prefetch:
    movdqa (%rsi),%xmm0
    movdqa 16(%rsi),%xmm1
    movdqa 32(%rsi),%xmm2
    movdqa 48(%rsi),%xmm3
    movntdq %xmm0,(%rdi)
    movntdq %xmm1,16(%rdi)
    movntdq %xmm2,32(%rdi)
    movntdq %xmm3,48(%rdi)
    movdqa 64(%rsi),%xmm0
    ...
    movntdq %xmm3,112(%rdi)
    prefetch (%rsi)
    prefetch 1*64(%rsi)
    ...
    prefetchw 4*64(%rdi)
    subq $3*8,%rsp
    movq %rbx,(%rsp)
    movq %r12,1*8(%rsp)
    movq %r13,2*8(%rsp)
    movl $(4096/64)-5,%ecx
    .p2align 4
.Loop64:
    dec %rcx
    movq (%rsi),%rax
    movq 8(%rsi),%rbx
    movq 16(%rsi),%rdx
    movq 24(%rsi),%r8
    movq 32(%rsi),%r9
    movq 40(%rsi),%r10
    movq 48(%rsi),%r11
    movq 56(%rsi),%r12
    prefetch 5*64(%rsi)
    movq %rax,(%rdi)
    movq %rbx,8(%rdi)
    movq %rdx,16(%rdi)
    movq %r8,24(%rdi)
    movq %r9,32(%rdi)
    movq %r10,40(%rdi)
    movq %r11,48(%rdi)
    movq %r12,56(%rdi)
    addq %rdx,%rdi
    addq %rdx,%rsi
    prefetchw 5*64(%rdi)
    leaq 64(%rsi),%rsi
    leaq 64(%rdi),%rdi
    jnz .Loop64
    movl $5,%ecx
    .p2align 4
.Loop2:
    decl %ecx
    jns loop
    cmpl $-1,%ecx
    je loop_no_prefetch
    sfence
    movdqa (%rsp),%xmm0
    movdqa 16(%rsp),%xmm1
    movdqa 32(%rsp),%xmm2
    movdqa 48(%rsp),%xmm3
    movq %rax,%rsp
    movq (%rsi),%rax
    movq 8(%rsi),%rbx
    ...
    movq %r12,56(%rdi)
    leaq 64(%rdi),%rdi
    leaq 64(%rsi),%rsi
    jnz .Loop2
    movq (%rsp),%rbx
    movq 1*8(%rsp),%r12
    movq 2*8(%rsp),%r13
    addq $3*8,%rsp
    ret
arch/x86_64/lib/csum-copy.S
/*
 * Copyright 2002 Andi Kleen
 * Copyright 2002,2003 Andi Kleen, SuSE Labs.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
@@ -8,7 +8,6 @@
#include <linux/linkage.h>
#include <asm/errno.h>

// #define FIX_ALIGNMENT 1

/*
 * Checksum copy with exception handling.
 * On exceptions src_err_ptr or dst_err_ptr is set to -EFAULT and the
@@ -26,17 +25,14 @@
 * eax  64bit sum. undefined in case of exception.
 *
 * Wrappers need to take care of valid exception sum and zeroing.
 * They also should align source or destination to 8 bytes.
 */

/* for now - should vary this based on direction */
#define prefetch prefetcht2
#define movnti movq

    .macro source
10:
    .section __ex_table,"a"
    .align 8
    .quad 10b,bad_source
    .quad 10b,.Lbad_source
    .previous
    .endm
@@ -44,57 +40,74 @@
20:
    .section __ex_table,"a"
    .align 8
    .quad 20b,bad_dest
    .quad 20b,.Lbad_dest
    .previous
    .endm

    .macro ignore L=.Lignore
30:
    .section __ex_table,"a"
    .align 8
    .quad 30b,\L
    .previous
    .endm

    .globl csum_partial_copy_generic
    .p2align
    .p2align 4
csum_partial_copy_generic:
    prefetchnta (%rdi)
    cmpl $3*64,%edx
    jle .Lignore

    ignore
    prefetch (%rdi)
    ignore
    prefetch 1*64(%rdi)
    ...
    ignore
    prefetchw 4*64(%rsi)

.Lignore:
    subq $7*8,%rsp
    movq %rbx,2*8(%rsp)
    movq %r12,3*8(%rsp)
    movq %r14,4*8(%rsp)
    movq %r13,5*8(%rsp)
    movq %rbp,6*8(%rsp)
    movq %r8,(%rsp)
    movq %r9,1*8(%rsp)
    pushq %rbx
    pushq %r12
    pushq %r14
    pushq %r15
    movq %r8,%r14
    movq %r9,%r15
    movl %ecx,%eax
    movl %edx,%ecx
#ifdef FIX_ALIGNMENT
    /* align source to 8 bytes */
    movl %edi,%r8d
    andl $7,%r8d
    jnz bad_alignment
after_bad_alignment:
#endif
    movl $64,%r10d
    xorl %r9d,%r9d
    movq %rcx,%r12
    shrq $6,%r12
    /* loopcounter is maintained as one less to test efficiently for the
       previous to last iteration. This is needed to stop the prefetching. */
    decq %r12
    js handle_tail        /* < 64 */
    jz loop_no_prefetch   /* = 64 + X */
    jz .Lhandle_tail      /* < 64 */
    clc

    /* main loop. clear in 64 byte blocks */
    /* tries hard not to prefetch over the boundary */
    /* r10: 64, r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
    /* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
    /* r11: temp3, rdx: temp4, r12 loopcnt */
    /* r10: temp5, rbp: temp6, r14 temp7, r13 temp8 */
    .p2align
loop:
    /* Could prefetch more than one loop, but then it would be even trickier
       to avoid prefetching over the boundary. The hardware prefetch should
       take care of this anyways. The reason for this prefetch is just the
       non temporal hint to avoid cache pollution. Hopefully this will be
       handled properly by the hardware. */
    prefetchnta 64(%rdi)
loop_no_prefetch:
    .p2align 4
.Lloop:
    source
    movq (%rdi),%rbx
    source
@@ -104,175 +117,136 @@ loop_no_prefetch:
    source
    movq 24(%rdi),%rdx
    dest
    movnti %rbx,(%rsi)
    dest
    movnti %r8,8(%rsi)
    ...
    addq %rbx,%rax
    ignore 2f
    prefetch 5*64(%rdi)
2:
    adcq %rbx,%rax
    adcq %r8,%rax
    adcq %r11,%rax
    adcq %rdx,%rax
    ...
    adcq %r13,%rax
    decl %r12d
    movq %rbx,(%rsi)
    movq %r8,8(%rsi)
    movq %r11,16(%rsi)
    movq %rdx,24(%rsi)
    adcq %r9,%rax	/* add in carry */
    ignore 3f
    prefetchw 5*64(%rsi)
3:
    addq %r10,%rdi
    addq %r10,%rsi
    leaq 64(%rdi),%rdi
    leaq 64(%rsi),%rsi
    decq %r12
    jz loop_no_prefetch	/* previous to last iteration? */
    jns loop
    jnz .Lloop
    adcq %r9,%rax

    /* do last upto 56 bytes */
handle_tail:
.Lhandle_tail:
    /* ecx: count */
    movl %ecx,%r10d
    andl $63,%ecx
    shrl $3,%ecx
    jz fold
    jz .Lfold
    clc
    movl $8,%edx
loop_8:
    .p2align 4
.Lloop_8:
    source
    movq (%rdi),%rbx
    adcq %rbx,%rax
    dest
    movnti %rbx,(%rsi)
    movq %rbx,(%rsi)
    leaq 8(%rsi),%rsi	/* preserve carry */
    leaq 8(%rdi),%rdi
    decl %ecx
    jnz .Lloop_8
    adcq %r9,%rax	/* add in carry */

fold:
.Lfold:
    /* reduce checksum to 32bits */
    movl %eax,%ebx
    shrq $32,%rax
    addq %rbx,%rax
    addl %ebx,%eax
    adcl %r9d,%eax

    /* do last upto 6 bytes */
handle_7:
.Lhandle_7:
    movl %r10d,%ecx
    andl $7,%ecx
    shrl $1,%ecx
    jz handle_1
    jz .Lhandle_1
    movl $2,%edx
    xorl %ebx,%ebx
    clc
loop_1:
    .p2align 4
.Lloop_1:
    source
    movw (%rdi),%bx
    adcl %ebx,%eax
    dest
    movw %bx,(%rsi)
    leaq 2(%rdi),%rdi
    leaq 2(%rsi),%rsi
    decl %ecx
    jnz .Lloop_1
    adcl %r9d,%eax	/* add in carry */

    /* handle last odd byte */
handle_1:
.Lhandle_1:
    testl $1,%r10d
    jz ende
    jz .Lende
    xorl %ebx,%ebx
    source
    movb (%rdi),%bl
    dest
    movb %bl,(%rsi)
    addl %ebx,%eax
    adcl %r9d,%eax	/* carry */

ende:
    sfence
    popq %r15
    popq %r14
    popq %r12
    popq %rbx
.Lende:
    movq 2*8(%rsp),%rbx
    movq 3*8(%rsp),%r12
    movq 4*8(%rsp),%r14
    movq 5*8(%rsp),%r13
    movq 6*8(%rsp),%rbp
    addq $7*8,%rsp
    ret

#ifdef FIX_ALIGNMENT
    /* align source to 8 bytes. */
    /* r8d: unalignedness, ecx len */
bad_alignment:
    testl $1,%edi
    jnz odd_source
    /* compute distance to next aligned position */
    movl $8,%r8d
    xchgl %r8d,%ecx
    subl %r8d,%ecx
    /* handle unaligned part */
    shrl $1,%ecx
    xorl %ebx,%ebx
    movl $2,%r10d
align_loop:
    source
    movw (%rdi),%bx
    addq %rbx,%rax	/* carry cannot happen */
    dest
    movw %bx,(%rsi)
    addq %r10,%rdi
    addq %r10,%rsi
    decl %ecx
    jnz align_loop
    jmp after_bad_alignment

    /* weird case. need to swap the sum at the end because the spec requires
       16 bit words of the sum to be always paired. handle it recursively
       because it should be rather rare. */
odd_source:
    /* copy odd byte */
    xorl %ebx,%ebx
    source
    movb (%rdi),%bl
    addl %ebx,%eax	/* add to old checksum */
    adcl $0,%ecx
    dest
    movb %al,(%rsi)
    /* fix arguments */
    movl %eax,%ecx
    incq %rsi
    incq %rdi
    decq %rdx
    call csum_partial_copy_generic
    bswap %eax	/* this should work, but check */
    jmp ende
#endif

    /* Exception handlers. Very simple, zeroing is done in the wrappers */
bad_source:
    movl $-EFAULT,(%r14)
    jmp ende
.Lbad_source:
    movq (%rsp),%rax
    movl $-EFAULT,(%rax)
    jmp .Lende

bad_dest:
    movl $-EFAULT,(%r15)
    jmp ende
.Lbad_dest:
    movq 8(%rsp),%rax
    movl $-EFAULT,(%rax)
    jmp .Lende
arch/x86_64/lib/csum-partial.c
@@ -7,35 +7,39 @@
#include <linux/compiler.h>
#include <linux/module.h>
#include <asm/checksum.h>

/* Better way for this sought */
static inline unsigned short from64to16(unsigned long x)
{
    /* add up 32-bit words for 33 bits */
    x = (x & 0xffffffff) + (x >> 32);
    /* add up 16-bit and 17-bit words for 17+c bits */
    x = (x & 0xffff) + (x >> 16);
    /* add up 16-bit and 2-bit for 16+c bit */
    x = (x & 0xffff) + (x >> 16);
    /* add up carry.. */
    x = (x & 0xffff) + (x >> 16);
    return x;
}

#define __force_inline inline __attribute__((always_inline))

static inline unsigned short from32to16(unsigned a)
{
    unsigned short b = a >> 16;
    asm("addw %w2,%w0\n\t"
        "adcw $0,%w0\n"
        : "=r" (b)
        : "0" (b), "r" (a));
    return b;
}

/*
 * Do a 64-bit checksum on an arbitrary memory area.
 * Returns a 32bit checksum.
 *
 * This isn't a great routine, but it's not _horrible_ either.
 * We rely on the compiler to unroll.
 * This isn't as time critical as it used to be because many NICs
 * do hardware checksumming these days.
 *
 * Things tried and found to not make it faster:
 *   Manual Prefetching
 *   Unrolling to an 128 bytes inner loop.
 *   Using interleaving with more registers to break the carry chains.
 */
static inline unsigned do_csum(const unsigned char * buff, int len)
static __force_inline unsigned do_csum(const unsigned char *buff, unsigned len)
{
    int odd, count;
    unsigned odd, count;
    unsigned long result = 0;

    if (len <= 0)
        goto out;
    if (unlikely(len == 0))
        return result;
    odd = 1 & (unsigned long) buff;
    if (unlikely(odd)) {
        result = *buff << 8;
@@ -45,7 +49,7 @@
    count = len >> 1;		/* nr of 16-bit words.. */
    if (count) {
        if (2 & (unsigned long) buff) {
            result += *(unsigned short *) buff;
            count--;
            len -= 2;
            buff += 2;
@@ -59,18 +63,41 @@
            buff += 4;
        }
        count >>= 1;		/* nr of 64-bit words.. */
        if (count) {
            /* main loop using 64byte blocks */
            unsigned long zero = 0;
            do {
                asm("  addq %1,%0\n"
                    "  adcq %2,%0\n"
            unsigned count64 = count >> 3;
            while (count64) {
                asm("addq 0*8(%[src]),%[res]\n\t"
                    "adcq 1*8(%[src]),%[res]\n\t"
                    "adcq 2*8(%[src]),%[res]\n\t"
                    "adcq 3*8(%[src]),%[res]\n\t"
                    "adcq 4*8(%[src]),%[res]\n\t"
                    "adcq 5*8(%[src]),%[res]\n\t"
                    "adcq 6*8(%[src]),%[res]\n\t"
                    "adcq 7*8(%[src]),%[res]\n\t"
                    "adcq %[zero],%[res]"
                    : [res] "=r" (result)
                    : [src] "r" (buff), [zero] "r" (zero), "[res]" (result));
                buff += 64;
                count64--;
            }

            /* last upto 7 8byte blocks */
            count %= 8;
            while (count) {
                asm("addq %1,%0\n\t"
                    "adcq %2,%0\n"
                    : "=r" (result)
                    : "m" (*(unsigned long *)buff), "r" (zero), "0" (result));
                --count;
                buff += 8;
            }
            result = (result & 0xffffffff) + (result >> 32);
        }
        result = add32_with_carry(result >> 32, result & 0xffffffff);
        if (len & 4) {
            result += *(unsigned int *) buff;
            buff += 4;
@@ -83,10 +110,11 @@
    }
    if (len & 1)
        result += *buff;
    result = from64to16(result);
    if (unlikely(odd))
        return ((result >> 8) & 0xff) | ((result & 0xff) << 8);
out:
    result = add32_with_carry(result >> 32, result & 0xffffffff);
    if (unlikely(odd)) {
        result = from32to16(result);
        result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
    }
    return result;
}
@@ -102,18 +130,11 @@
 *
 * it's best to have buff aligned on a 64-bit boundary
 */
unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
unsigned csum_partial(const unsigned char *buff, unsigned len, unsigned sum)
{
    unsigned result = do_csum(buff, len);
    /* add in old sum, and carry.. */
    asm("addl %1,%0\n\t"
        "adcl $0,%0" : "=r" (result) : "r" (sum), "0" (result));
    return result;
    return add32_with_carry(do_csum(buff, len), sum);
}

//EXPORT_SYMBOL(csum_partial);

/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
@@ -123,4 +144,3 @@ unsigned short ip_compute_csum(unsigned char * buff, int len)
    return ~csum_partial(buff, len, 0);
}
EXPORT_SYMBOL(ip_compute_csum);
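The rewritten do_csum() and csum_partial() above, and the wrappers that follow, lean on add32_with_carry(), which this commit adds to include/asm-x86_64/checksum.h (+11, -1, diff not shown on this page). A minimal sketch of what such a helper would look like, assuming the obvious add-with-carry-fold implementation rather than the exact header contents:

/* Assumed shape of add32_with_carry(): add two 32-bit values and fold the
 * carry back in, preserving the ones-complement sum property that the
 * checksum code above depends on. */
static inline unsigned add32_with_carry(unsigned a, unsigned b)
{
        asm("addl %2,%0\n\t"
            "adcl $0,%0"        /* fold the carry back into the result */
            : "=r" (a)
            : "0" (a), "r" (b));
        return a;
}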
arch/x86_64/lib/csum-wrappers.c
/* Copyright 2002 Andi Kleen, SuSE Labs.
/* Copyright 2002,2003 Andi Kleen, SuSE Labs.
 * Subject to the GNU Public License v.2
 *
 * Wrappers of assembly checksum functions for x86-64.
@@ -7,18 +7,6 @@
#include <asm/checksum.h>
#include <linux/module.h>

/* Better way for this sought */
static inline unsigned from64to32(unsigned long x)
{
    /* add up 32-bit words for 33 bits */
    x = (x & 0xffffffff) + (x >> 32);
    /* add up 16-bit and 17-bit words for 17+c bits */
    x = (x & 0xffff) + (x >> 16);
    /* add up 16-bit and 2-bit for 16+c bit */
    x = (x & 0xffff) + (x >> 16);
    return x;
}

/**
 * csum_partial_copy_from_user - Copy and checksum from user space.
 * @src: source address (user space)
@@ -36,14 +24,32 @@ csum_partial_copy_from_user(const char *src, char *dst,
{
    *errp = 0;
    if (likely(access_ok(VERIFY_READ, src, len))) {
        unsigned long sum;
        sum = csum_partial_copy_generic(src, dst, len, isum, errp, NULL);
        /* Why 6, not 7? To handle odd addresses aligned we
           would need to do considerable complications to fix the
           checksum which is defined as an 16bit accumulator. The
           fix alignment code is primarily for performance
           compatibility with 32bit and that will handle odd
           addresses slowly too. */
        if (unlikely((unsigned long)src & 6)) {
            while (((unsigned long)src & 6) && len >= 2) {
                __u16 val16;
                *errp = __get_user(val16, (__u16 *)src);
                if (*errp)
                    return isum;
                *(__u16 *)dst = val16;
                isum = add32_with_carry(isum, val16);
                src += 2;
                dst += 2;
                len -= 2;
            }
        }
        isum = csum_partial_copy_generic(src, dst, len, isum, errp, NULL);
        if (likely(*errp == 0))
            return from64to32(sum);
            return isum;
    }
    *errp = -EFAULT;
    memset(dst, 0, len);
    return 0;
    return isum;
}
EXPORT_SYMBOL(csum_partial_copy_from_user);

@@ -67,8 +73,22 @@ csum_partial_copy_to_user(const char *src, char *dst,
        *errp = -EFAULT;
        return 0;
    }
    if (unlikely((unsigned long)dst & 6)) {
        while (((unsigned long)dst & 6) && len >= 2) {
            __u16 val16 = *(__u16 *)src;
            isum = add32_with_carry(isum, val16);
            *errp = __put_user(val16, (__u16 *)dst);
            if (*errp)
                return isum;
            src += 2;
            dst += 2;
            len -= 2;
        }
    }
    *errp = 0;
    return from64to32(csum_partial_copy_generic(src, dst, len, isum, NULL, errp));
    return csum_partial_copy_generic(src, dst, len, isum, NULL, errp);
}
EXPORT_SYMBOL(csum_partial_copy_to_user);

@@ -85,11 +105,9 @@ EXPORT_SYMBOL(csum_partial_copy_to_user);
unsigned int
csum_partial_copy_nocheck(const char *src, char *dst, int len, unsigned int sum)
{
    return from64to32(csum_partial_copy_generic(src, dst, len, sum, NULL, NULL));
    return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
}
//EXPORT_SYMBOL(csum_partial_copy_nocheck);

unsigned short csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr,
                               __u32 len, unsigned short proto, unsigned int sum)
{
@@ -103,7 +121,7 @@ unsigned short csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr,
        " adcq $0,%[sum]\n"
        : [sum] "=r" (sum64)
        : "[sum]" (rest), [saddr] "r" (saddr), [daddr] "r" (daddr));
    return csum_fold(from64to32(sum64));
    return csum_fold(add32_with_carry(sum64 & 0xffffffff, sum64 >> 32));
}
EXPORT_SYMBOL(csum_ipv6_magic);
arch/x86_64/lib/memcpy.S
@@ -12,103 +12,78 @@
 * rax	original destination
 */

// #define FIX_ALIGNMENT
    .globl __memcpy
    .globl memcpy
    .p2align
    .p2align 4
__memcpy:
memcpy:
    pushq %rbx
    movq %rdi,%rax

#ifdef FIX_ALIGNMENT
    movl %edi,%ecx
    andl $7,%ecx
    jnz bad_alignment
after_bad_alignment:
#endif

    movq %rdx,%rcx
    movl $64,%ebx
    shrq $6,%rcx
    jz handle_tail
    movl %edx,%ecx
    shrl $6,%ecx
    jz .Lhandle_tail

    .p2align 4
.Lloop_64:
    decl %ecx
loop_64:
    movq (%rsi),%r11
    movq 8(%rsi),%r8
    movq %r11,(%rdi)
    movq %r8,1*8(%rdi)
    movq 2*8(%rsi),%r9
    movq 3*8(%rsi),%r10
    movq %r9,2*8(%rdi)
    movq %r10,3*8(%rdi)
    movq 4*8(%rsi),%r11
    movq 5*8(%rsi),%r8
    movq %r11,4*8(%rdi)
    movq %r8,5*8(%rdi)
    movq 6*8(%rsi),%r9
    movq 7*8(%rsi),%r10
    movq %r9,6*8(%rdi)
    movq %r10,7*8(%rdi)
    addq %rbx,%rsi
    addq %rbx,%rdi
    decl %ecx
    jnz loop_64
    leaq 64(%rsi),%rsi
    leaq 64(%rdi),%rdi
    jnz .Lloop_64

handle_tail:
.Lhandle_tail:
    movl %edx,%ecx
    andl $63,%ecx
    shrl $3,%ecx
    jz handle_7
    movl $8,%ebx
    jz .Lhandle_7
    .p2align 4
.Lloop_8:
    decl %ecx
    movq (%rsi),%r8
    movq %r8,(%rdi)
    leaq 8(%rdi),%rdi
    leaq 8(%rsi),%rsi
    jnz .Lloop_8

handle_7:
.Lhandle_7:
    movl %edx,%ecx
    andl $7,%ecx
    jz ende
    jz .Lende
    .p2align 4
.Lloop_1:
    movb (%rsi),%r8b
    movb %r8b,(%rdi)
    incq %rdi
    incq %rsi
    decl %ecx
    jnz .Lloop_1

ende:
    sfence
.Lende:
    popq %rbx
    ret

#ifdef FIX_ALIGNMENT
    /* align destination */
    /* This is simpleminded. For bigger blocks it may make sense to align
       src and dst to their aligned subset and handle the rest separately */
bad_alignment:
    movl $8,%r9d
    subl %ecx,%r9d
    movl %r9d,%ecx
    subq %r9,%rdx
    js small_alignment
    jz small_alignment
align_1:
    movb (%rsi),%r8b
    movb %r8b,(%rdi)
    incq %rdi
    incq %rsi
    decl %ecx
    jnz align_1
    jmp after_bad_alignment
small_alignment:
    addq %r9,%rdx
    jmp handle_7
#endif
arch/x86_64/lib/memset.S
@@ -11,7 +11,7 @@
 */
    .globl __memset
    .globl memset
    .p2align
    .p2align 4
memset:
__memset:
    movq %rdi,%r10

@@ -25,15 +25,16 @@ __memset:
    /* align dst */
    movl %edi,%r9d
    andl $7,%r9d
    jnz bad_alignment
    jnz .Lbad_alignment
.Lafter_bad_alignment:

    movq %r11,%rcx
    movl $64,%r8d
    shrq $6,%rcx
    jz handle_tail
    movl %r11d,%ecx
    shrl $6,%ecx
    jz .Lhandle_tail

    .p2align 4
.Lloop_64:
    decl %ecx
    movq %rax,(%rdi)
    movq %rax,8(%rdi)
    movq %rax,16(%rdi)
@@ -42,43 +43,45 @@ loop_64:
    movq %rax,40(%rdi)
    movq %rax,48(%rdi)
    movq %rax,56(%rdi)
    addq %r8,%rdi
    leaq 64(%rdi),%rdi
    jnz .Lloop_64

    /* Handle tail in loops. The loops should be faster than hard to
       predict jump tables. */
    .p2align 4
.Lhandle_tail:
    movl %r11d,%ecx
    andl $63&(~7),%ecx
    jz .Lhandle_7
    shrl $3,%ecx
    .p2align 4
.Lloop_8:
    decl %ecx
    movq %rax,(%rdi)
    leaq 8(%rdi),%rdi
    jnz .Lloop_8

.Lhandle_7:
    movl %r11d,%ecx
    andl $7,%ecx
    jz .Lende
    .p2align 4
.Lloop_1:
    decl %ecx
    movb %al,(%rdi)
    leaq 1(%rdi),%rdi
    jnz .Lloop_1

.Lende:
    movq %r10,%rax
    ret

.Lbad_alignment:
    cmpq $7,%r11
    jbe .Lhandle_7
    movq %rax,(%rdi)	/* unaligned store */
    movq $8,%r8
    subq %r9,%r8
    addq %r8,%rdi
    subq %r8,%r11
    jmp .Lafter_bad_alignment
arch/x86_64/vmlinux.lds.S
@@ -50,10 +50,10 @@ SECTIONS
  .xtime_lock : AT((LOADADDR(.vsyscall_0) + SIZEOF(.vsyscall_0) + 63) & ~(63)) { *(.xtime_lock) }
  xtime_lock = LOADADDR(.xtime_lock);
  . = ALIGN(16);
  .hpet : AT((LOADADDR(.xtime_lock) + SIZEOF(.xtime_lock) + 15) & ~(15)) { *(.hpet) }
  hpet = LOADADDR(.hpet);
  .vxtime : AT((LOADADDR(.xtime_lock) + SIZEOF(.xtime_lock) + 15) & ~(15)) { *(.vxtime) }
  vxtime = LOADADDR(.vxtime);
  . = ALIGN(16);
  .wall_jiffies : AT((LOADADDR(.hpet) + SIZEOF(.hpet) + 15) & ~(15)) { *(.wall_jiffies) }
  .wall_jiffies : AT((LOADADDR(.vxtime) + SIZEOF(.vxtime) + 15) & ~(15)) { *(.wall_jiffies) }
  wall_jiffies = LOADADDR(.wall_jiffies);
  . = ALIGN(16);
  .sys_tz : AT((LOADADDR(.wall_jiffies) + SIZEOF(.wall_jiffies) + 15) & ~(15)) { *(.sys_tz) }

@@ -105,6 +105,7 @@ SECTIONS
  __con_initcall_start = .;
  .con_initcall.init : { *(.con_initcall.init) }
  __con_initcall_end = .;
  SECURITY_INIT
  . = ALIGN(4096);
  __initramfs_start = .;
  .init.ramfs : { *(.init.ramfs) }
drivers/char/rocket.c
@@ -40,23 +40,12 @@
  */

 /****** Defines ******/
 #include <linux/config.h>
-#include <linux/version.h>
-
-#ifdef PCI_NUM_RESOURCES
-#define PCI_BASE_ADDRESS(dev, r) ((dev)->resource[r].start)
-#else
-#define PCI_BASE_ADDRESS(dev, r) ((dev)->base_address[r])
-#endif
-
-#ifndef VERSION_CODE
-# define VERSION_CODE(vers,rel,seq) ( ((vers)<<16) | ((rel)<<8) | (seq) )
-#endif
-
-#if LINUX_VERSION_CODE < VERSION_CODE(2,2,9)	/* No version < 2.2 */
-# error "This kernel is too old: not supported by this file"
-#endif
-
 #define ROCKET_PARANOIA_CHECK
 #define ROCKET_DISABLE_SIMUSAGE
@@ -72,45 +61,12 @@
 #undef REV_PCI_ORDER
 #undef ROCKET_DEBUG_IO

-/* CAUTION!!!!! The TIME_STAT Function relies on the Pentium 64 bit
- * register.  For various reasons related to 1.2.13, the test for this
- * register is omitted from this driver.  If you are going to enable
- * this option, make sure you are running a Pentium CPU and that a
- * cat of /proc/cpuinfo shows ability TS Counters as Yes.  Warning part
- * done, don't cry to me if you enable this options and things won't
- * work.  If it gives you any problems, then disable the option.  The code
- * in this function is pretty straight forward, if it breaks on your
- * CPU, there is probably something funny about your CPU.
- */
-#undef TIME_STAT		/* For performing timing statistics on driver. */
-				/* Produces printks, one every TIME_COUNTER loops, eats */
-				/* some of your CPU time.  Good for testing or */
-				/* other checking, otherwise, leave it undefed */
-				/* Doug Ledford */
-#define TIME_STAT_CPU 100	/* This needs to be set to your processor speed */
-				/* For example, 100Mhz CPU, set this to 100 */
-#define TIME_COUNTER 180000	/* This is how many iterations to run before */
-				/* performing the printk statements. */
-				/* 6000 = 1 minute, 360000 = 1 hour, etc. */
-				/* Since time_stat is long long, this */
-				/* Can be really high if you want :) */
-#undef TIME_STAT_VERBOSE	/* Undef this if you want a terse log message. */
-
-#if LINUX_VERSION_CODE < VERSION_CODE(2,4,0)
-#define TTY_DRIVER_NO_DEVFS 0
-#endif
-
 #define POLL_PERIOD HZ/100	/*  Polling period .01 seconds (10ms) */

 /****** Kernel includes ******/

-#ifdef MODVERSIONS
-#if LINUX_VERSION_CODE < VERSION_CODE(2,5,00)
-#include <linux/modversions.h>
-#else
-#include <config/modversions.h>
-#endif
-#endif
-
 #include <linux/module.h>
@@ -118,13 +74,7 @@
 #include <linux/major.h>
 #include <linux/kernel.h>
 #include <linux/signal.h>
-#if LINUX_VERSION_CODE < VERSION_CODE(2,4,0)
-#include <linux/malloc.h>
-#else
 #include <linux/slab.h>
-#endif
 #include <linux/mm.h>
 #include <linux/sched.h>
 #include <linux/timer.h>
@@ -153,20 +103,8 @@
 #include "rocket_int.h"
 #include "rocket.h"

-#ifdef LOCAL_ROCKET_H
-#include "version.h"
-#else
-#define ROCKET_VERSION "2.08"
-#define ROCKET_DATE "02-June-2003"
-#endif				/* LOCAL_ROCKET_H */
-
-/*
- * All of the compatibilty code so we can compile serial.c against
- * older kernels is hidden in rocket_compat.h
- */
-#if defined(LOCAL_ROCKET_H) || (LINUX_VERSION_CODE < VERSION_CODE(2,3,23))
-#include "rocket_compat.h"
-#endif
+#define ROCKET_VERSION "2.09"
+#define ROCKET_DATE "12-June-2003"

 /****** RocketPort Local Variables ******/
@@ -205,13 +143,6 @@ static int is_PCI[NUM_BOARDS];
 static rocketModel_t rocketModel[NUM_BOARDS];
 static int max_board;

-#ifdef TIME_STAT
-static unsigned long long time_stat;
-static unsigned long time_stat_short;
-static unsigned long time_stat_long;
-static unsigned long time_counter;
-#endif
-
 /*
  *  The following arrays define the interrupt bits corresponding to each AIOP.
  *  These bits are different between the ISA and regular PCI boards and the
@@ -241,7 +172,7 @@ static unsigned char lineNumbers[MAX_RP_PORTS];
 static unsigned long nextLineNumber;

 /*****  RocketPort Static Prototypes   *********/
-static int __init init_ISA(int i, int *reserved_controller);
+static int __init init_ISA(int i);
 static void rp_wait_until_sent(struct tty_struct *tty, int timeout);
 static void rp_flush_buffer(struct tty_struct *tty);
 static void rmSpeakerReset(CONTROLLER_T * CtlP, unsigned long model);
@@ -564,16 +495,6 @@ static void rp_do_poll(unsigned long dummy)
 	unsigned char AiopMask;
 	Word_t bit;

-#ifdef TIME_STAT
-	unsigned long low = 0, high = 0, loop_time;
-	unsigned long long time_stat_tmp = 0, time_stat_tmp2 = 0;
-
-	__asm__(".byte 0x0f,0x31":"=a" (low), "=d" (high));
-	time_stat_tmp = high;
-	time_stat_tmp <<= 32;
-	time_stat_tmp += low;
-#endif				/* TIME_STAT */
-
 	/*  Walk through all the boards (ctrl's) */
 	for (ctrl = 0; ctrl < max_board; ctrl++) {
 		if (rcktpt_io_addr[ctrl] <= 0)
@@ -635,48 +556,6 @@ static void rp_do_poll(unsigned long dummy)
 	 */
 	if (atomic_read(&rp_num_ports_open))
 		mod_timer(&rocket_timer, jiffies + POLL_PERIOD);
-
-#ifdef TIME_STAT
-	__asm__(".byte 0x0f,0x31":"=a" (low), "=d" (high));
-	time_stat_tmp2 = high;
-	time_stat_tmp2 <<= 32;
-	time_stat_tmp2 += low;
-	time_stat_tmp2 -= time_stat_tmp;
-	time_stat += time_stat_tmp2;
-	if (time_counter == 0)
-		time_stat_short = time_stat_long = time_stat_tmp2;
-	else {
-		if (time_stat_tmp2 < time_stat_short)
-			time_stat_short = time_stat_tmp2;
-		else if (time_stat_tmp2 > time_stat_long)
-			time_stat_long = time_stat_tmp2;
-	}
-	if (++time_counter == TIME_COUNTER) {
-		loop_time = (unsigned long) (((unsigned long) (time_stat >> 32) * ((unsigned long) (0xffffffff) / (TIME_STAT_CPU * TIME_COUNTER))) + ((unsigned long) time_stat / (TIME_STAT_CPU * TIME_COUNTER)));
-#ifdef TIME_STAT_VERBOSE
-		printk(KERN_INFO "rp_do_poll: Interrupt Timings\n");
-		printk(KERN_INFO "     %5ld iterations; %ld us min,\n", (long) TIME_COUNTER, (time_stat_short / TIME_STAT_CPU));
-		printk(KERN_INFO "     %5ld us max, %ld us average per iteration.\n", (time_stat_long / TIME_STAT_CPU), loop_time);
-		printk(KERN_INFO "We want to use < 5,000 us for an iteration.\n");
-#else				/* TIME_STAT_VERBOSE */
-		printk(KERN_INFO "rp: %ld loops: %ld min, %ld max, %ld us/loop.\n", (long) TIME_COUNTER, (time_stat_short / TIME_STAT_CPU), (time_stat_long / TIME_STAT_CPU), loop_time);
-#endif				/* TIME_STAT_VERBOSE */
-		time_counter = time_stat = 0;
-		time_stat_short = time_stat_long = 0;
-	}
-#endif				/* TIME_STAT */
 }

 /*
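The TIME_STAT instrumentation removed above timed each poll loop by reading the CPU timestamp counter through the raw opcode bytes 0x0f,0x31. A minimal sketch, not part of this commit, of the same read written with the rdtsc mnemonic:

/* Illustrative only: equivalent of the deleted ".byte 0x0f,0x31" inline
 * asm.  RDTSC returns the 64-bit timestamp counter in EDX:EAX. */
static inline unsigned long long read_tsc(void)
{
	unsigned int low, high;
	__asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high));
	return ((unsigned long long) high << 32) | low;
}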
@@ -762,10 +641,8 @@ static void init_r_port(int board, int aiop, int chan, struct pci_dev *pci_dev)
 	spin_lock_init(&info->slock);
 	sema_init(&info->write_sem, 1);
 	rp_table[line] = info;
-#if LINUX_VERSION_CODE > VERSION_CODE(2,5,0)
 	if (pci_dev)
 		tty_register_device(rocket_driver, line, &pci_dev->dev);
-#endif
 }

 /*
@@ -1039,12 +916,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
 	CHANNEL_t *cp;
 	unsigned long page;

-#if LINUX_VERSION_CODE > VERSION_CODE(2,5,0)
 	line = TTY_GET_LINE(tty);
-#else
-	line = MINOR(tty->device) - TTY_DRIVER_MINOR_START(tty);
-#endif
 	if ((line < 0) || (line >= MAX_RP_PORTS) || ((info = rp_table[line]) == NULL))
 		return -ENXIO;
@@ -1070,9 +942,6 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
 	info->tty = tty;

 	if (info->count++ == 0) {
-#if ((LINUX_VERSION_CODE < VERSION_CODE(2,5,0)) && defined(MODULE))
-		MOD_INC_USE_COUNT;
-#endif
 		atomic_inc(&rp_num_ports_open);

 #ifdef ROCKET_DEBUG_OPEN
@@ -1254,10 +1123,6 @@ static void rp_close(struct tty_struct *tty, struct file *filp)
 	info->flags &= ~(ROCKET_INITIALIZED | ROCKET_CLOSING | ROCKET_NORMAL_ACTIVE);
 	tty->closing = 0;
 	wake_up_interruptible(&info->close_wait);

-#if ((LINUX_VERSION_CODE < VERSION_CODE(2,5,0)) && defined(MODULE))
-	MOD_DEC_USE_COUNT;
-#endif
-
 	atomic_dec(&rp_num_ports_open);

 #ifdef ROCKET_DEBUG_OPEN
@@ -1406,8 +1271,6 @@ static int set_modem_info(struct r_port *info, unsigned int cmd,
 	return 0;
 }

-#if LINUX_VERSION_CODE > VERSION_CODE(2,5,0)
-
 /*
  * Returns the state of the serial modem control lines.  These next 2 functions
  * are the way kernel versions > 2.5 handle modem control lines rather than IOCTLs.
@@ -1450,8 +1313,6 @@ static int rp_tiocmset(struct tty_struct *tty, struct file *file,
 	return 0;
 }
-#endif				/* Linux > 2.5 */
-
 static int get_config(struct r_port *info, struct rocket_config *retinfo)
 {
 	struct rocket_config tmp;
@@ -1781,12 +1642,8 @@ static void rp_hangup(struct tty_struct *tty)
 	rp_flush_buffer(tty);
 	if (info->flags & ROCKET_CLOSING)
 		return;
-	if (info->count) {
-#if ((LINUX_VERSION_CODE < VERSION_CODE(2,5,0)) && defined(MODULE))
-		MOD_DEC_USE_COUNT;
-#endif
+	if (info->count)
 		atomic_dec(&rp_num_ports_open);
-	}
 	clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);

 	info->count = 0;
@@ -2344,17 +2201,9 @@ __init int register_PCI(int i, struct pci_dev *dev)
 	for (aiop = 0; aiop < max_num_aiops; aiop++)
 		ctlp->AiopNumChan[aiop] = ports_per_aiop;

-#if LINUX_VERSION_CODE < VERSION_CODE(2,3,99)
-	printk(KERN_INFO "Comtrol PCI controller #%d ID 0x%x found at 0x%lx, "
-	       "%d AIOP(s) (%s)\n", i, dev->device, rcktpt_io_addr[i], num_aiops, rocketModel[i].modelString);
-#else
 	printk("Comtrol PCI controller #%d ID 0x%x found in bus:slot:fn %s at address %04lx, "
 	       "%d AIOP(s) (%s)\n", i, dev->device, dev->slot_name, rcktpt_io_addr[i], num_aiops, rocketModel[i].modelString);
-#endif
 	printk(KERN_INFO "Installing %s, creating /dev/ttyR%d - %ld\n",
 	       rocketModel[i].modelString, rocketModel[i].startingPortNumber,
@@ -2391,9 +2240,6 @@ __init int register_PCI(int i, struct pci_dev *dev)
 	return (1);
 }

-#if LINUX_VERSION_CODE > VERSION_CODE(2,3,99)	/*  Linux version 2.4 and greater */
-
 /*
  *  Probes for PCI cards, inits them if found
  *  Input:   board_found = number of ISA boards already found, or the
@@ -2413,51 +2259,6 @@ static int __init init_PCI(int boards_found)
 	return (count);
 }

-#else				/* Linux version 2.2 */
-
-/*
- *  Linux 2.2 pci_find_device() does not allow a search of all devices for a certain vendor,
- *  you have to try each device ID.  Comtrol device ID's are 0x0000 -0x000F for the original
- *  boards.  Newer board are 0x08xx (see upci_ids[]).
- */
-static int __init init_PCI(int boards_found)
-{
-	int j, count = 0;
-	struct pci_dev *dev = NULL;
-	static int upci_ids[] = { PCI_DEVICE_ID_URP32INTF, PCI_DEVICE_ID_URP8INTF,
-		PCI_DEVICE_ID_URP16INTF, PCI_DEVICE_ID_CRP16INTF,
-		PCI_DEVICE_ID_URP8OCTA, PCI_DEVICE_ID_UPCI_RM3_8PORT,
-		PCI_DEVICE_ID_UPCI_RM3_4PORT
-	};
-#define NUM_UPCI_IDS (sizeof(upci_ids) / sizeof(upci_ids[0]))
-
-	/*  Try finding devices with PCI ID's 0x0000 - 0x000F */
-	for (j = 0; j < 16; j++) {
-		while ((dev = pci_find_device(PCI_VENDOR_ID_RP, j, dev))) {
-			register_PCI(count + boards_found, dev);
-			count++;
-		}
-	}
-
-	/*  Now try finding the UPCI devices, which have PCI ID's 0x0800 - 0x080F */
-	for (j = 0; j < NUM_UPCI_IDS; j++) {
-		while ((dev = pci_find_device(PCI_VENDOR_ID_RP, upci_ids[j], dev))) {
-			register_PCI(count + boards_found, dev);
-			count++;
-		}
-	}
-	return (count);
-}
-#endif				/* Linux version 2.2/2.4 */
-
 #endif				/* CONFIG_PCI */

 /*
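Only the Linux 2.2 probe loop is deleted here; the retained 2.4 path (above this hunk, not shown in the diff) scans every Comtrol device in a single pass. A hedged sketch of that single-loop probe, reconstructed from the surrounding code rather than copied from the commit:

/* Illustrative only: on 2.4+ kernels pci_find_device() accepts PCI_ANY_ID,
 * so one loop covers both the 0x000x and 0x08xx RocketPort device IDs. */
static int __init init_PCI_sketch(int boards_found)
{
	struct pci_dev *dev = NULL;
	int count = 0;

	while ((dev = pci_find_device(PCI_VENDOR_ID_RP, PCI_ANY_ID, dev))) {
		if (register_PCI(count + boards_found, dev))
			count++;
	}
	return count;
}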
@@ -2465,7 +2266,7 @@ static int __init init_PCI(int boards_found)
  *  Input:   i = the board number to look for
  *  Returns: 1 if board found, 0 else
  */
-static int __init init_ISA(int i, int *reserved_controller)
+static int __init init_ISA(int i)
 {
 	int num_aiops, num_chan = 0, total_num_chan = 0;
 	int aiop, chan;
@@ -2473,20 +2274,16 @@ static int __init init_ISA(int i, int *reserved_controller)
 	CONTROLLER_t *ctlp;
 	char *type_string;

-	if (rcktpt_io_addr[i] == 0 || controller == 0)
+	/* If io_addr is zero, no board configured */
+	if (rcktpt_io_addr[i] == 0)
 		return (0);

-	if (check_region(rcktpt_io_addr[i], 64)) {
-		printk(KERN_INFO "RocketPort board address 0x%lx in use...\n", rcktpt_io_addr[i]);
+	/*  Reserve the IO region */
+	if (!request_region(rcktpt_io_addr[i], 64, "Comtrol RocketPort")) {
+		printk(KERN_INFO "Unable to reserve IO region for configured ISA RocketPort at address 0x%lx, board not installed...\n", rcktpt_io_addr[i]);
+		rcktpt_io_addr[i] = 0;
 		return (0);
 	}

-	if (rcktpt_io_addr[i] + 0x40 == controller) {
-		*reserved_controller = 1;
-		request_region(rcktpt_io_addr[i], 68, "Comtrol RocketPort");
-	} else {
-		request_region(rcktpt_io_addr[i], 64, "Comtrol RocketPort");
-	}
-
 	ctlp = sCtlNumToCtlPtr(i);
@@ -2522,24 +2319,22 @@ static int __init init_ISA(int i, int *reserved_controller)
 	for (aiop = 0; aiop < MAX_AIOPS_PER_BOARD; aiop++)
 		aiopio[aiop] = rcktpt_io_addr[i] + (aiop * 0x400);

 	num_aiops = sInitController(ctlp, i, controller + (i * 0x400), aiopio, MAX_AIOPS_PER_BOARD, 0, FREQ_DIS, 0);

 	if (ctlp->boardType == ROCKET_TYPE_PC104) {
 		sEnAiop(ctlp, 2);	/* only one AIOPIC, but these */
 		sEnAiop(ctlp, 3);	/* CSels used for other stuff */
 	}
+
+	/*  If something went wrong initing the AIOP's release the ISA IO memory */
 	if (num_aiops <= 0) {
-		if (rcktpt_io_addr[i] + 0x40 == controller) {
-			*reserved_controller = 0;
-			release_region(rcktpt_io_addr[i], 68);
-		} else {
-			release_region(rcktpt_io_addr[i], 64);
-		}
+		release_region(rcktpt_io_addr[i], 64);
 		rcktpt_io_addr[i] = 0;
 		return (0);
 	}
 	rocketModel[i].startingPortNumber = nextLineNumber;
 	for (aiop = 0; aiop < num_aiops; aiop++) {
 		sResetAiopByNum(ctlp, aiop);
 		sEnAiop(ctlp, aiop);
@@ -2565,9 +2360,9 @@ static int __init init_ISA(int i, int *reserved_controller)
 	rocketModel[i].numPorts = total_num_chan;
 	rocketModel[i].model = MODEL_ISA;

-	printk(KERN_INFO "  Comtrol ISA controller #%d found at 0x%lx, "
-	       "%d AIOPs %s\n", i, rcktpt_io_addr[i], num_aiops, type_string);
+	printk(KERN_INFO "  RocketPort ISA card #%d found at 0x%lx - %d AIOPs %s\n", i, rcktpt_io_addr[i], num_aiops, type_string);
 	printk(KERN_INFO "Installing %s, creating /dev/ttyR%d - %ld\n",
 	       rocketModel[i].modelString, rocketModel[i].startingPortNumber,
@@ -2595,10 +2390,8 @@ static struct tty_operations rocket_ops = {
 	.break_ctl = rp_break,
 	.send_xchar = rp_send_xchar,
 	.wait_until_sent = rp_wait_until_sent,
-#if (LINUX_VERSION_CODE > VERSION_CODE(2,5,0))
 	.tiocmget = rp_tiocmget,
 	.tiocmset = rp_tiocmset,
-#endif				/* Kernel > 2.5 */
 };

 /*
@@ -2607,7 +2400,6 @@ static struct tty_operations rocket_ops = {
 int __init rp_init(void)
 {
 	int retval, pci_boards_found, isa_boards_found, i;
-	int reserved_controller = 0;

 	printk(KERN_INFO "RocketPort device driver module, version %s, %s\n", ROCKET_VERSION, ROCKET_DATE);
@@ -2634,12 +2426,20 @@ int __init rp_init(void)
 	nextLineNumber = 0;
 	memset(rocketModel, 0, sizeof(rocketModel));

-	if (board1 && controller == 0)
-		controller = board1 + 0x40;
+	/*
+	 *  If board 1 is non-zero, there is at least one ISA configured.  If controller is
+	 *  zero, use the default controller IO address of board1 + 0x40.
+	 */
+	if (board1) {
+		if (controller == 0)
+			controller = board1 + 0x40;
+	} else {
+		controller = 0;	/*  Used as a flag, meaning no ISA boards */
+	}

-	if (controller && check_region(controller, 4)) {
-		printk(KERN_INFO "Controller IO addresses in use, unloading driver.\n");
-		put_tty_driver(rocket_driver);
+	/*  If an ISA card is configured, reserve the 4 byte IO space for the Mudbac controller */
+	if (controller && (!request_region(controller, 4, "Comtrol RocketPort"))) {
+		printk(KERN_INFO "Unable to reserve IO region for first configured ISA RocketPort controller 0x%lx.  Driver exiting\n", controller);
 		return -EBUSY;
 	}
@@ -2663,9 +2463,7 @@ int __init rp_init(void)
 	 * driver with the tty layer.
 	 */
-#if (LINUX_VERSION_CODE > VERSION_CODE(2,5,0))
 	rocket_driver->owner = THIS_MODULE;
-#endif				/* Kernel > 2.5 */
 	rocket_driver->flags = TTY_DRIVER_NO_DEVFS;
 	rocket_driver->devfs_name = "tts/R";
 	rocket_driver->name = "ttyR";
@@ -2701,7 +2499,7 @@ int __init rp_init(void)
 	pci_boards_found = 0;
 	for (i = 0; i < NUM_BOARDS; i++) {
-		if (init_ISA(i, &reserved_controller))
+		if (init_ISA(i))
 			isa_boards_found++;
 	}
@@ -2720,13 +2518,6 @@ int __init rp_init(void)
 		return -ENXIO;
 	}

-	if (isa_boards_found) {
-		if (reserved_controller == 0)
-			request_region(controller, 4, "Comtrol RocketPort");
-	} else {
-		controller = 0;
-	}
-
 	return 0;
 }
@@ -2736,7 +2527,6 @@ static void rp_cleanup_module(void)
 {
 	int retval;
 	int i;
-	int released_controller = 0;

 	del_timer_sync(&rocket_timer);
@@ -2754,14 +2544,9 @@ static void rp_cleanup_module(void)
 	for (i = 0; i < NUM_BOARDS; i++) {
 		if (rcktpt_io_addr[i] <= 0 || is_PCI[i])
 			continue;
-		if (rcktpt_io_addr[i] + 0x40 == controller) {
-			released_controller++;
-			release_region(rcktpt_io_addr[i], 68);
-		} else {
-			release_region(rcktpt_io_addr[i], 64);
-		}
+		release_region(rcktpt_io_addr[i], 64);
 	}
-	if (controller && released_controller == 0)
+	if (controller)
 		release_region(controller, 4);
 }
 #endif
drivers/char/rocket.h
 /*
- * rocket.h --- the exported interface of the rocket driver to
- *	its configuration program.
+ * rocket.h --- the exported interface of the rocket driver to its configuration program.
  *
  * Written by Theodore Ts'o, Copyright 1997.
+ * Copyright 1997 Comtrol Corporation.
  *
- * Copyright 1994, 1997, 2003 Comtrol Corporation.  All Rights Reserved.
- *
- * The following source code is subject to Comtrol Corporation's
- * Developer's License Agreement.
- *
- * This source code is protected by United States copyright law and
- * international copyright treaties.
- *
- * This source code may only be used to develop software products that
- * will operate with Comtrol brand hardware.
- *
- * You may not reproduce nor distribute this source code in its original
- * form but must produce a derivative work which includes portions of
- * this source code only.
- *
- * The portions of this source code which you use in your derivative
- * work must bear Comtrol's copyright notice:
- *
- *	Copyright 1994 Comtrol Corporation.
- *
  */

 /*  Model Information Struct */
drivers/char/rocket_int.h
@@ -2,26 +2,7 @@
  * rocket_int.h --- internal header file for rocket.c
  *
  * Written by Theodore Ts'o, Copyright 1997.
- *
- * Copyright 1994, 1997, 2003 Comtrol Corporation.  All Rights Reserved.
- *
- * The following source code is subject to Comtrol Corporation's
- * Developer's License Agreement.
- *
- * This source code is protected by United States copyright law and
- * international copyright treaties.
- *
- * This source code may only be used to develop software products that
- * will operate with Comtrol brand hardware.
- *
- * You may not reproduce nor distribute this source code in its original
- * form but must produce a derivative work which includes portions of
- * this source code only.
- *
- * The portions of this source code which you use in your derivative
- * work must bear Comtrol's copyright notice:
- *
- *	Copyright 1994 Comtrol Corporation.
+ * Copyright 1997 Comtrol Corporation.
  *
  */
@@ -98,17 +79,9 @@ static inline unsigned short sInW(unsigned short port)
 #define sInW(a) (inw_p(a))
 #endif				/* ROCKET_DEBUG_IO */

-/* This is used to move arrays of bytes so byte swapping isn't
- * appropriate. On Linux 2.3 and above outsw is the same as
- * outsw_ns, but we use the old form for compatibility with
- * old kernels. */
-#if defined(__BIG_ENDIAN) && (LINUX_VERSION_CODE < VERSION_CODE(2,3,0))
-#define sOutStrW(port, addr, count) if (count) outsw_ns(port, addr, count)
-#define sInStrW(port, addr, count) if (count) insw_ns(port, addr, count)
-#else
+/* This is used to move arrays of bytes so byte swapping isn't appropriate. */
 #define sOutStrW(port, addr, count) if (count) outsw(port, addr, count)
 #define sInStrW(port, addr, count) if (count) insw(port, addr, count)
-#endif

 #define CTL_SIZE 8
 #define AIOP_CTL_SIZE 4
@@ -1318,11 +1291,7 @@ struct r_port {
 /* Compact PCI device */
 #define PCI_DEVICE_ID_CRP16INTF		0x0903	/* Rocketport Compact PCI 16 port w/external I/F */

-/* Taking care of some kernel incompatibilities... */
-#if LINUX_VERSION_CODE > VERSION_CODE(2,5,68)
-
 #define TTY_GET_LINE(t) t->index
 #define TTY_DRIVER_MINOR_START(t) t->driver->minor_start
 #define TTY_DRIVER_SUBTYPE(t) t->driver->subtype
 #define TTY_DRIVER_NAME(t) t->driver->name
@@ -1330,15 +1299,4 @@ struct r_port {
 #define TTY_DRIVER_FLUSH_BUFFER_EXISTS(t) t->driver->flush_buffer
 #define TTY_DRIVER_FLUSH_BUFFER(t) t->driver->flush_buffer(t)
-
-#else
-
-#define TTY_GET_LINE(t) minor(t->device) - TTY_DRIVER_MINOR_START(t)
-#define TTY_DRIVER_MINOR_START(t) t->driver.minor_start
-#define TTY_DRIVER_SUBTYPE(t) t->driver.subtype
-#define TTY_DRIVER_NAME(t) t->driver.name
-#define TTY_DRIVER_NAME_BASE(t) t->driver.name_base
-#define TTY_DRIVER_FLUSH_BUFFER_EXISTS(t) t->driver.flush_buffer
-#define TTY_DRIVER_FLUSH_BUFFER(t) t->driver.flush_buffer(t)
-
-#endif
drivers/ieee1394/dv1394.c
@@ -2289,16 +2289,15 @@ static void ir_tasklet_func(unsigned long data)
 			/* get the descriptor based on packet_buffer cursor */
 			f = video->frames[video->current_packet / MAX_PACKETS];
 			block = &(f->descriptor_pool[video->current_packet % MAX_PACKETS]);
-			xferstatus = le16_to_cpu(block->u.in.il.q[3] >> 16);
+			xferstatus = le32_to_cpu(block->u.in.il.q[3]) >> 16;
 			xferstatus &= 0x1F;
-			irq_printk("ir_tasklet_func: xferStatus/resCount [%d] = 0x%08x\n", i, le32_to_cpu(block->u.in.il.q[3]) );

 			/* get the current frame */
 			f = video->frames[video->active_frame];

 			/* exclude empty packet */
 			if (packet_length > 8 && xferstatus == 0x11) {
+				irq_printk("ir_tasklet_func: xferStatus/resCount [%d] = 0x%08x\n", i, le32_to_cpu(block->u.in.il.q[3]) );
 				/* check for start of frame */
 				/* DRD> Changed to check section type ([0]>>5==0)
 				   and dif sequence ([1]>>4==0) */
@@ -2380,7 +2379,7 @@ static void ir_tasklet_func(unsigned long data)
 			} else {
 				prev->u.in.il.q[0] |= 3 << 20;	/* enable interrupt */
 			}

-		prev->u.in.il.q[2] = (cpu_to_le32(next_dma) | 1); /* set Z=1 */
+		prev->u.in.il.q[2] = cpu_to_le32(next_dma | 1); /* set Z=1 */
 		wmb();

 		/* wake up DMA in case it fell asleep */
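The xferStatus fix above matters on big-endian hosts: q[3] is a little-endian 32-bit word, so the byte-order conversion has to happen before the shift. A minimal sketch, not from the commit, of the corrected extraction:

/* Illustrative only: convert the whole little-endian word first, then take
 * the high half.  Shifting the raw LE word and converting only 16 bits
 * would pick the wrong half of the word on a big-endian CPU. */
u32 raw = le32_to_cpu(block->u.in.il.q[3]);
u32 xferstatus = (raw >> 16) & 0x1F;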
drivers/ieee1394/eth1394.c
@@ -86,7 +86,7 @@
 #define TRACE() printk(KERN_ERR "eth1394:%s[%d] ---- TRACE\n", __FUNCTION__, __LINE__)

-static char version[] __devinitdata = "$Rev: 945 $ Ben Collins <bcollins@debian.org>";
+static char version[] __devinitdata = "$Rev: 951 $ Ben Collins <bcollins@debian.org>";

 struct fragment_info {
 	struct list_head list;
@@ -1424,6 +1424,12 @@ static int ether1394_tx (struct sk_buff *skb, struct net_device *dev)
 	struct packet_task *ptask;
 	struct node_entry *ne;

+	if (skb_is_nonlinear(skb)) {
+		ret = skb_linearize(skb, kmflags);
+		if (ret)
+			goto fail;
+	}
+
 	ptask = kmem_cache_alloc(packet_task_cache, kmflags);
 	if (ptask == NULL) {
 		ret = -ENOMEM;
drivers/ieee1394/ohci1394.c
@@ -164,7 +164,7 @@ printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
 printk(level "%s_%d: " fmt "\n" , OHCI1394_DRIVER_NAME, card , ## args)

-static char version[] __devinitdata = "$Rev: 938 $ Ben Collins <bcollins@debian.org>";
+static char version[] __devinitdata = "$Rev: 948 $ Ben Collins <bcollins@debian.org>";

 /* Module Parameters */
 static int phys_dma = 1;
drivers/pci/pci.ids
@@ -3752,6 +3752,8 @@
 	0005  Rocketport 8 port w/octa cable
 	0006  Rocketport 8 port w/RJ11 connectors
 	0007  Rocketport 4 port w/RJ11 connectors
+	0008  Rocketport 8 port w/ DB78 SNI (Siemens) connector
+	0009  Rocketport 16 port w/ DB78 SNI (Siemens) connector
 	000a  Rocketport Plus 4 port
 	000b  Rocketport Plus 8 port
 	000c  RocketModem 6 port
drivers/scsi/scsi_scan.c
@@ -619,12 +619,12 @@ static int scsi_add_lun(Scsi_Device *sdev, char *inq_result, int *bflags)
 	if (inq_result[7] & 0x10)
 		sdev->sdtr = 1;

-	scsi_device_register(sdev);
-
 	sprintf(sdev->devfs_name, "scsi/host%d/bus%d/target%d/lun%d",
 		sdev->host->host_no, sdev->channel, sdev->id, sdev->lun);
+
+	scsi_device_register(sdev);

 	/*
 	 * End driverfs/devfs code.
 	 */
fs/fs-writeback.c
@@ -260,8 +260,21 @@ sync_sb_inodes(struct super_block *sb, struct writeback_control *wbc)
 		struct address_space *mapping = inode->i_mapping;
 		struct backing_dev_info *bdi = mapping->backing_dev_info;

-		if (bdi->memory_backed)
-			continue;
+		if (bdi->memory_backed) {
+			if (sb == blockdev_superblock) {
+				/*
+				 * Dirty memory-backed blockdev: the ramdisk
+				 * driver does this.
+				 */
+				list_move(&inode->i_list, &sb->s_dirty);
+				continue;
+			}
+			/*
+			 * Assume that all inodes on this superblock are memory
+			 * backed.  Skip the superblock.
+			 */
+			break;
+		}

 		if (wbc->nonblocking && bdi_write_congested(bdi)) {
 			wbc->encountered_congestion = 1;
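The new branch only fires for queues that are memory backed. A hedged sketch, not from this commit and with field names assumed from that era's struct backing_dev_info, of the kind of device that sets the flag:

/* Illustrative only: a ramdisk-style driver marks its queue memory_backed,
 * meaning its dirty pages cannot be written out to free memory; with this
 * change such dirty blockdev inodes are simply re-queued on s_dirty. */
static struct backing_dev_info ramdisk_bdi_sketch = {
	.ra_pages	= 0,	/* no read-ahead */
	.memory_backed	= 1,	/* does not contribute to dirty memory */
};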
fs/namei.c
@@ -325,7 +325,7 @@ static inline int exec_permission_lite(struct inode *inode)
 	return -EACCES;
 ok:
-	return security_inode_permission_lite(inode, MAY_EXEC);
+	return security_inode_permission(inode, MAY_EXEC);
 }

 /*
include/asm-generic/vmlinux.lds.h
@@ -45,3 +45,9 @@
 		*(__ksymtab_strings)				\
 	}
+
+#define SECURITY_INIT						\
+	.security_initcall.init : {				\
+		__security_initcall_start = .;			\
+		*(.security_initcall.init)			\
+		__security_initcall_end = .;			\
+	}
include/asm-i386/fixmap.h
@@ -107,6 +107,14 @@ extern void __set_fixmap (enum fixed_addresses idx,
 #define __fix_to_virt(x)	(FIXADDR_TOP - ((x) << PAGE_SHIFT))
 #define __virt_to_fix(x)	((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)

+/*
+ * This is the range that is readable by user mode, and things
+ * acting like user mode such as get_user_pages.
+ */
+#define FIXADDR_USER_START	(__fix_to_virt(FIX_VSYSCALL))
+#define FIXADDR_USER_END	(FIXADDR_USER_START + PAGE_SIZE)
+
 extern void __this_fixmap_does_not_exist(void);

 /*
include/asm-x86_64/checksum.h
@@ -125,7 +125,7 @@ csum_tcpudp_magic(unsigned long saddr, unsigned long daddr,
  * Before filling it in it needs to be csum_fold()'ed.
  * buff should be aligned to a 64bit boundary if possible.
  */
-extern unsigned int csum_partial(const unsigned char *buff, int len, unsigned int sum);
+extern unsigned int csum_partial(const unsigned char *buff, unsigned len, unsigned int sum);

 #define  _HAVE_ARCH_COPY_AND_CSUM_FROM_USER 1
 #define HAVE_CSUM_COPY_USER 1
@@ -179,4 +179,14 @@ extern unsigned short
 csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr,
 		__u32 len, unsigned short proto, unsigned int sum);

+static inline unsigned add32_with_carry(unsigned a, unsigned b)
+{
+	asm("addl %2,%0\n\t"
+	    "adcl $0,%0"
+	    : "=r" (a)
+	    : "0" (a), "r" (b));
+	return a;
+}
+
 #endif
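add32_with_carry() is the end-around-carry step of the Internet checksum done in two instructions. A minimal sketch, not part of the header, of folding a 64-bit accumulator with it:

/* Illustrative only: fold a 64-bit running sum into the 32-bit form that
 * csum_fold() expects, wrapping the carry back in as the checksum requires. */
static inline unsigned fold64_sketch(unsigned long sum64)
{
	return add32_with_carry((unsigned) sum64, (unsigned) (sum64 >> 32));
}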
include/asm-x86_64/fixmap.h
@@ -35,6 +35,8 @@
 enum fixed_addresses {
 	VSYSCALL_LAST_PAGE,
 	VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
+	VSYSCALL_HPET,
+	FIX_HPET_BASE,
 #ifdef CONFIG_X86_LOCAL_APIC
 	FIX_APIC_BASE,	/* local (CPU) APIC) -- required for SMP or not */
 #endif
include/asm-x86_64/mc146818rtc.h
@@ -24,6 +24,11 @@ outb_p((addr),RTC_PORT(0)); \
 outb_p((val),RTC_PORT(1)); \
 })

+#ifndef CONFIG_HPET_TIMER
 #define RTC_IRQ 8
+#else
+/* Temporary workaround due to IRQ routing problem. */
+#define RTC_IRQ 0
+#endif

 #endif /* _ASM_MC146818RTC_H */
include/asm-x86_64/processor.h
@@ -313,7 +313,7 @@ extern inline void sync_core(void)
 #define ARCH_HAS_PREFETCH
 static inline void prefetch(void *x)
 {
-	asm volatile("2: prefetchnta %0\n1:\t"
+	asm volatile("2: prefetcht0 %0\n1:\t"
		     ".section __ex_table,\"a\"\n\t"
		     " .align 8\n\t"
		     " .quad 2b,1b\n\t"
include/asm-x86_64/proto.h
@@ -54,6 +54,8 @@ extern void swap_low_mappings(void);
 extern void oops_begin(void);
 extern void die(const char *, struct pt_regs *, long);
 extern void __die(const char *str, struct pt_regs *regs, long err);
+extern void __show_regs(struct pt_regs *regs);
+extern void show_regs(struct pt_regs *regs);

 extern int map_syscall32(struct mm_struct *mm, unsigned long address);
 extern char *syscall32_page;
include/asm-x86_64/timex.h
@@ -30,6 +30,34 @@ static inline cycles_t get_cycles (void)
 extern unsigned int cpu_khz;

-extern struct hpet_data hpet;
+/*
+ * Documentation on HPET can be found at:
+ *      http://www.intel.com/ial/home/sp/pcmmspec.htm
+ *      ftp://download.intel.com/ial/home/sp/mmts098.pdf
+ */
+
+#define HPET_ID		0x000
+#define HPET_PERIOD	0x004
+#define HPET_CFG	0x010
+#define HPET_STATUS	0x020
+#define HPET_COUNTER	0x0f0
+#define HPET_T0_CFG	0x100
+#define HPET_T0_CMP	0x108
+#define HPET_T0_ROUTE	0x110
+
+#define HPET_ID_VENDOR	0xffff0000
+#define HPET_ID_LEGSUP	0x00008000
+#define HPET_ID_NUMBER	0x00000f00
+#define HPET_ID_REV	0x000000ff
+
+#define HPET_CFG_ENABLE	0x001
+#define HPET_CFG_LEGACY	0x002
+
+#define HPET_T0_ENABLE		0x004
+#define HPET_T0_PERIODIC	0x008
+#define HPET_T0_SETVAL		0x040
+#define HPET_T0_32BIT		0x100
+
+extern struct vxtime_data vxtime;

 #endif
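These offsets index the memory-mapped HPET register block. A hedged sketch of probing the capability register; the hpet_readl() helper comes from the include/asm-x86_64/vsyscall.h hunk further down, and the function itself is illustrative rather than part of the commit:

/* Illustrative only: read the ID/capability register through the
 * FIX_HPET_BASE fixmap and test the legacy-replacement bit, which decides
 * whether HPET timer 0 may take over the PIT interrupt. */
static int hpet_has_legacy_routing(void)
{
	unsigned int id = hpet_readl(HPET_ID);
	return (id & HPET_ID_LEGSUP) != 0;
}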
include/asm-x86_64/vsyscall.h
@@ -15,7 +15,7 @@ enum vsyscall_num {
 #ifdef __KERNEL__

-#define __section_hpet __attribute__ ((unused, __section__ (".hpet"), aligned(16)))
+#define __section_vxtime __attribute__ ((unused, __section__ (".vxtime"), aligned(16)))
 #define __section_wall_jiffies __attribute__ ((unused, __section__ (".wall_jiffies"), aligned(16)))
 #define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
 #define __section_sys_tz __attribute__ ((unused, __section__ (".sys_tz"), aligned(16)))
@@ -23,22 +23,24 @@ enum vsyscall_num {
 #define __section_xtime __attribute__ ((unused, __section__ (".xtime"), aligned(16)))
 #define __section_xtime_lock __attribute__ ((unused, __section__ (".xtime_lock"), aligned(L1_CACHE_BYTES)))

+#define VXTIME_TSC	1
+#define VXTIME_HPET	2
+
-struct hpet_data {
-	long address;			/* base address */
+struct vxtime_data {
+	long hpet_address;		/* HPET base address */
 	unsigned long hz;		/* HPET clocks / sec */
-	int trigger;			/* value at last interrupt */
 	int last;
-	int offset;
 	unsigned long last_tsc;
 	long ticks;
+	long quot;
+	long tsc_quot;
+	int mode;
 };

+#define hpet_readl(a)           readl(fix_to_virt(FIX_HPET_BASE) + a)
+#define hpet_writel(d,a)        writel(d, fix_to_virt(FIX_HPET_BASE) + a)
+
 /* vsyscall space (readonly) */
-extern struct hpet_data __hpet;
+extern struct vxtime_data __vxtime;
 extern struct timespec __xtime;
 extern volatile unsigned long __jiffies;
 extern unsigned long __wall_jiffies;
@@ -46,7 +48,7 @@ extern struct timezone __sys_tz;
 extern seqlock_t __xtime_lock;

 /* kernel space (writeable) */
-extern struct hpet_data hpet;
+extern struct vxtime_data vxtime;
 extern unsigned long wall_jiffies;
 extern struct timezone sys_tz;
 extern int sysctl_vsyscall;
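The read-only, __-prefixed copies above are what vsyscall code running in user context dereferences. A minimal sketch, not from the commit, of the seqlock-protected read pattern they enable:

/* Illustrative only: retry until the timer interrupt is not concurrently
 * updating the time variables that live in the vsyscall page. */
static inline void vread_xtime_sketch(struct timespec *ts)
{
	unsigned long seq;

	do {
		seq = read_seqbegin(&__xtime_lock);
		*ts = __xtime;
	} while (read_seqretry(&__xtime_lock, seq));
}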
include/linux/init.h
@@ -64,6 +64,7 @@ typedef int (*initcall_t)(void);
 typedef void (*exitcall_t)(void);

 extern initcall_t __con_initcall_start, __con_initcall_end;
+extern initcall_t __security_initcall_start, __security_initcall_end;

 #endif

 #ifndef MODULE
@@ -96,6 +97,9 @@ extern initcall_t __con_initcall_start, __con_initcall_end;
 #define console_initcall(fn) \
 	static initcall_t __initcall_##fn __attribute__ ((unused,__section__ (".con_initcall.init")))=fn

+#define security_initcall(fn) \
+	static initcall_t __initcall_##fn __attribute__ ((unused,__section__ (".security_initcall.init"))) = fn
+
 struct obs_kernel_param {
 	const char *str;
 	int (*setup_func)(char *);
@@ -143,6 +147,8 @@ struct obs_kernel_param {
 #define device_initcall(fn)		module_init(fn)
 #define late_initcall(fn)		module_init(fn)

+#define security_initcall(fn)		module_init(fn)
+
 /* These macros create a dummy inline: gcc 2.9x does not count alias
  as usage, hence the `unused function' warning when __init functions
  are declared static. We use the dummy __*_module_inline functions
include/linux/pci_ids.h
@@ -1399,6 +1399,8 @@
 #define PCI_DEVICE_ID_RP8OCTA		0x0005
 #define PCI_DEVICE_ID_RP8J		0x0006
 #define PCI_DEVICE_ID_RP4J		0x0007
+#define PCI_DEVICE_ID_RP8SNI		0x0008
+#define PCI_DEVICE_ID_RP16SNI		0x0009
 #define PCI_DEVICE_ID_RPP4		0x000A
 #define PCI_DEVICE_ID_RPP8		0x000B
 #define PCI_DEVICE_ID_RP8M		0x000C
include/linux/security.h
@@ -46,7 +46,6 @@ extern void cap_capset_set (struct task_struct *target, kernel_cap_t *effective,
 extern int cap_bprm_set_security (struct linux_binprm *bprm);
 extern void cap_bprm_compute_creds (struct linux_binprm *bprm);
 extern int cap_task_post_setuid (uid_t old_ruid, uid_t old_euid, uid_t old_suid, int flags);
-extern void cap_task_kmod_set_label (void);
 extern void cap_task_reparent_to_init (struct task_struct *p);
 extern int cap_syslog (int type);
@@ -328,16 +327,6 @@ struct swap_info_struct;
  *	@inode contains the inode structure to check.
  *	@mask contains the permission mask.
  *	Return 0 if permission is granted.
- * @inode_permission_lite:
- *	Check permission before accessing an inode.  This hook is
- *	currently only called when checking MAY_EXEC access during
- *	pathname resolution.  The dcache lock is held and thus modules
- *	that could sleep or contend the lock should return -EAGAIN to
- *	inform the kernel to drop the lock and try again calling the
- *	full permission hook.
- *	@inode contains the inode structure to check.
- *	@mask contains the permission mask.
- *	Return 0 if permission is granted.
  * @inode_setattr:
  *	Check permission before setting file attributes.  Note that the kernel
  *	call to notify_change is performed from several locations, whenever
@@ -607,10 +596,6 @@ struct swap_info_struct;
  *	@arg4 contains a argument.
  *	@arg5 contains a argument.
  *	Return 0 if permission is granted.
- * @task_kmod_set_label:
- *	Set the security attributes in current->security for the kernel module
- *	loader thread, so that it has the permissions needed to perform its
- *	function.
  * @task_reparent_to_init:
  *	Set the security attributes in @p->security for a kernel thread that
  *	is being reparented to the init task.
@@ -1057,7 +1042,6 @@ struct security_operations {
 	int (*inode_readlink) (struct dentry *dentry);
 	int (*inode_follow_link) (struct dentry *dentry, struct nameidata *nd);
 	int (*inode_permission) (struct inode *inode, int mask);
-	int (*inode_permission_lite) (struct inode *inode, int mask);
 	int (*inode_setattr)	(struct dentry *dentry, struct iattr *attr);
 	int (*inode_getattr) (struct vfsmount *mnt, struct dentry *dentry);
 	void (*inode_delete) (struct inode *inode);
@@ -1111,7 +1095,6 @@ struct security_operations {
 	int (*task_prctl) (int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5);
-	void (*task_kmod_set_label) (void);
 	void (*task_reparent_to_init) (struct task_struct *p);
 	void (*task_to_inode)(struct task_struct *p, struct inode *inode);
@@ -1471,12 +1454,6 @@ static inline int security_inode_permission (struct inode *inode, int mask)
 	return security_ops->inode_permission (inode, mask);
 }

-static inline int security_inode_permission_lite (struct inode *inode, int mask)
-{
-	return security_ops->inode_permission_lite (inode, mask);
-}
-
 static inline int security_inode_setattr (struct dentry *dentry, struct iattr *attr)
 {
@@ -1692,11 +1669,6 @@ static inline int security_task_prctl (int option, unsigned long arg2,
 	return security_ops->task_prctl (option, arg2, arg3, arg4, arg5);
 }

-static inline void security_task_kmod_set_label (void)
-{
-	security_ops->task_kmod_set_label ();
-}
-
 static inline void security_task_reparent_to_init (struct task_struct *p)
 {
 	security_ops->task_reparent_to_init (p);
@@ -2108,12 +2080,6 @@ static inline int security_inode_permission (struct inode *inode, int mask)
 	return 0;
 }

-static inline int security_inode_permission_lite (struct inode *inode, int mask)
-{
-	return 0;
-}
-
 static inline int security_inode_setattr (struct dentry *dentry, struct iattr *attr)
 {
@@ -2321,11 +2287,6 @@ static inline int security_task_prctl (int option, unsigned long arg2,
 	return 0;
 }

-static inline void security_task_kmod_set_label (void)
-{
-	cap_task_kmod_set_label ();
-}
-
 static inline void security_task_reparent_to_init (struct task_struct *p)
 {
 	cap_task_reparent_to_init (p);
init/main.c
@@ -439,8 +439,8 @@ asmlinkage void __init start_kernel(void)
 	pte_chain_init();
 	fork_init(num_physpages);
 	proc_caches_init();
-	security_scaffolding_startup();
 	buffer_init();
+	security_scaffolding_startup();
 	vfs_caches_init(num_physpages);
 	radix_tree_init();
 	signals_init();
kernel/sys.c
@@ -831,13 +831,11 @@ asmlinkage long sys_getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid)
 asmlinkage long sys_setfsuid(uid_t uid)
 {
 	int old_fsuid;
-	int retval;
-
-	retval = security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS);
-	if (retval)
-		return retval;

 	old_fsuid = current->fsuid;
+	if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS))
+		return old_fsuid;
+
 	if (uid == current->uid || uid == current->euid ||
 	    uid == current->suid || uid == current->fsuid ||
 	    capable(CAP_SETUID))
@@ -850,9 +848,7 @@ asmlinkage long sys_setfsuid(uid_t uid)
 		current->fsuid = uid;
 	}

-	retval = security_task_post_setuid(old_fsuid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS);
-	if (retval)
-		return retval;
+	security_task_post_setuid(old_fsuid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS);

 	return old_fsuid;
 }
@@ -863,13 +859,11 @@ asmlinkage long sys_setfsuid(uid_t uid)
 asmlinkage long sys_setfsgid(gid_t gid)
 {
 	int old_fsgid;
-	int retval;
-
-	retval = security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_FS);
-	if (retval)
-		return retval;

 	old_fsgid = current->fsgid;
+	if (security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_FS))
+		return old_fsgid;
+
 	if (gid == current->gid || gid == current->egid ||
 	    gid == current->sgid || gid == current->fsgid ||
 	    capable(CAP_SETGID))
mm/memory.c
@@ -689,15 +689,16 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		vma = find_extend_vma(mm, start);

-#ifdef FIXADDR_START
-		if (!vma && start >= FIXADDR_START && start < FIXADDR_TOP) {
+#ifdef FIXADDR_USER_START
+		if (!vma && start >= FIXADDR_USER_START && start < FIXADDR_USER_END) {
 			static struct vm_area_struct fixmap_vma = {
 				/* Catch users - if there are any valid
 				   ones, we can make this be "&init_mm" or
 				   something.  */
 				.vm_mm = NULL,
-				.vm_start = FIXADDR_START,
-				.vm_end = FIXADDR_TOP,
+				.vm_start = FIXADDR_USER_START,
+				.vm_end = FIXADDR_USER_END,
 				.vm_page_prot = PAGE_READONLY,
 				.vm_flags = VM_READ | VM_EXEC,
 			};
@@ -705,6 +706,8 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 			pgd_t *pgd;
 			pmd_t *pmd;
 			pte_t *pte;
+			if (write) /* user fixmap pages are read-only */
+				return i ? : -EFAULT;
 			pgd = pgd_offset_k(pg);
 			if (!pgd)
 				return i ? : -EFAULT;
@@ -712,8 +715,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 			if (!pmd)
 				return i ? : -EFAULT;
 			pte = pte_offset_kernel(pmd, pg);
-			if (!pte || !pte_present(*pte) || !pte_user(*pte) ||
-			    !(write ? pte_write(*pte) : pte_read(*pte)))
+			if (!pte || !pte_present(*pte))
 				return i ? : -EFAULT;
 			if (pages) {
 				pages[i] = pte_page(*pte);
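With the FIXADDR_USER range in place, get_user_pages() can hand out the vsyscall page for read access even though no real VMA covers it. A hedged sketch of what a core-dump or ptrace read path may now do; the call signature is the one of this kernel generation, the usage itself is illustrative:

/* Illustrative only: a read-only pin of the user-visible fixmap page
 * succeeds via the synthetic fixmap_vma above; asking for write access
 * still fails with -EFAULT because that page is mapped read-only. */
struct page *page;
int got = get_user_pages(current, current->mm, FIXADDR_USER_START,
			 1 /* one page */, 0 /* read */, 0 /* no force */,
			 &page, NULL);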
security/capability.c
@@ -248,12 +248,6 @@ int cap_task_post_setuid (uid_t old_ruid, uid_t old_euid, uid_t old_suid,
 	return 0;
 }

-void cap_task_kmod_set_label (void)
-{
-	cap_set_full (current->cap_effective);
-	return;
-}
-
 void cap_task_reparent_to_init (struct task_struct *p)
 {
 	p->cap_effective = CAP_INIT_EFF_SET;
@@ -278,7 +272,6 @@ EXPORT_SYMBOL(cap_capset_set);
 EXPORT_SYMBOL(cap_bprm_set_security);
 EXPORT_SYMBOL(cap_bprm_compute_creds);
 EXPORT_SYMBOL(cap_task_post_setuid);
-EXPORT_SYMBOL(cap_task_kmod_set_label);
 EXPORT_SYMBOL(cap_task_reparent_to_init);
 EXPORT_SYMBOL(cap_syslog);
@@ -298,7 +291,6 @@ static struct security_operations capability_ops = {
 	.bprm_set_security = cap_bprm_set_security,
 	.task_post_setuid = cap_task_post_setuid,
-	.task_kmod_set_label = cap_task_kmod_set_label,
 	.task_reparent_to_init = cap_task_reparent_to_init,
 	.syslog = cap_syslog,
@@ -348,7 +340,7 @@ static void __exit capability_exit (void)
 	}
 }

-module_init(capability_init);
+security_initcall(capability_init);
 module_exit(capability_exit);

 MODULE_DESCRIPTION("Standard Linux Capabilities Security Module");
security/dummy.c
@@ -308,11 +308,6 @@ static int dummy_inode_permission (struct inode *inode, int mask)
 	return 0;
 }

-static int dummy_inode_permission_lite (struct inode *inode, int mask)
-{
-	return 0;
-}
-
 static int dummy_inode_setattr (struct dentry *dentry, struct iattr *iattr)
 {
 	return 0;
@@ -517,11 +512,6 @@ static int dummy_task_prctl (int option, unsigned long arg2, unsigned long arg3,
 	return 0;
 }

-static void dummy_task_kmod_set_label (void)
-{
-	return;
-}
-
 static void dummy_task_reparent_to_init (struct task_struct *p)
 {
 	p->euid = p->fsuid = 0;
@@ -831,7 +821,6 @@ void security_fixup_ops (struct security_operations *ops)
 	set_to_dummy_if_null(ops, inode_readlink);
 	set_to_dummy_if_null(ops, inode_follow_link);
 	set_to_dummy_if_null(ops, inode_permission);
-	set_to_dummy_if_null(ops, inode_permission_lite);
 	set_to_dummy_if_null(ops, inode_setattr);
 	set_to_dummy_if_null(ops, inode_getattr);
 	set_to_dummy_if_null(ops, inode_delete);
@@ -871,7 +860,6 @@ void security_fixup_ops (struct security_operations *ops)
 	set_to_dummy_if_null(ops, task_wait);
 	set_to_dummy_if_null(ops, task_kill);
 	set_to_dummy_if_null(ops, task_prctl);
-	set_to_dummy_if_null(ops, task_kmod_set_label);
 	set_to_dummy_if_null(ops, task_reparent_to_init);
 	set_to_dummy_if_null(ops, task_to_inode);
 	set_to_dummy_if_null(ops, ipc_permission);
security/root_plug.c
@@ -94,7 +94,6 @@ static struct security_operations rootplug_security_ops = {
 	.bprm_set_security = cap_bprm_set_security,
 	.task_post_setuid = cap_task_post_setuid,
-	.task_kmod_set_label = cap_task_kmod_set_label,
 	.task_reparent_to_init = cap_task_reparent_to_init,

 	.bprm_check_security = rootplug_bprm_check_security,
@@ -135,7 +134,7 @@ static void __exit rootplug_exit (void)
 	printk (KERN_INFO "Root Plug module removed\n");
 }

-module_init(rootplug_init);
+security_initcall(rootplug_init);
 module_exit(rootplug_exit);

 MODULE_DESCRIPTION("Root Plug sample LSM module, written for Linux Journal article");
security/security.c
@@ -38,12 +38,22 @@ static inline int verify (struct security_operations *ops)
 	return 0;
 }

+static void __init do_security_initcalls(void)
+{
+	initcall_t *call;
+	call = &__security_initcall_start;
+	while (call < &__security_initcall_end) {
+		(*call)();
+		call++;
+	}
+}
+
 /**
  * security_scaffolding_startup - initialzes the security scaffolding framework
  *
  * This should be called early in the kernel initialization sequence.
  */
-int security_scaffolding_startup (void)
+int __init security_scaffolding_startup (void)
 {
 	printk (KERN_INFO "Security Scaffold v" SECURITY_SCAFFOLD_VERSION " initialized\n");
@@ -55,6 +65,7 @@ int security_scaffolding_startup (void)
 	}

 	security_ops = &dummy_security_ops;
+	do_security_initcalls();

 	return 0;
 }
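A hedged sketch, with a hypothetical module name and ops table, of the full chain this commit wires up, from the security_initcall() macro to do_security_initcalls():

/* Illustrative only: security_initcall() places a pointer to my_lsm_init
 * in the .security_initcall.init section; the SECURITY_INIT linker macro
 * collects that section between __security_initcall_start/_end; and
 * do_security_initcalls() walks the range when start_kernel() runs
 * security_scaffolding_startup(). */
static int __init my_lsm_init(void)
{
	return register_security(&my_lsm_ops);	/* my_lsm_ops: hypothetical */
}
security_initcall(my_lsm_init);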