Commit 4d7048f5 authored by Linus Torvalds

Merge tag 'xtensa-20191201' of git://github.com/jcmvbkbc/linux-xtensa

Pull Xtensa updates from Max Filippov:

 - add support for execute in place (XIP) kernels

 - improvements in inline assembly: use named arguments and "m"
   constraints where possible (a short sketch of this change follows
   the list)

 - improve stack dumping

 - clean up system_call code and syscall tracing

 - various small fixes and cleanups
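
As a rough illustration of the inline-assembly item above (a hand-written
sketch based on the atomic.h hunks in this merge, not code copied verbatim
from them; the function names are made up for illustration): positional
operands such as %0/%1 addressed through an "a" register are replaced by
named operands, and the memory the asm touches is described with an
"m"/"+m" constraint so the compiler can see the access:

    /* Old style: positional operands, address passed as a register
     * operand (hypothetical wrapper name). */
    static inline void example_atomic_add_old(int i, atomic_t *v)
    {
            unsigned long tmp;
            int result;

            __asm__ __volatile__(
                    "1:     l32i    %1, %3, 0\n"
                    "       wsr     %1, scompare1\n"
                    "       add     %0, %1, %2\n"
                    "       s32c1i  %0, %3, 0\n"
                    "       bne     %0, %1, 1b\n"
                    : "=&a" (result), "=&a" (tmp)
                    : "a" (i), "a" (v)
                    : "memory");
    }

    /* New style: named operands, memory operand with a "+m" constraint
     * (hypothetical wrapper name). */
    static inline void example_atomic_add_new(int i, atomic_t *v)
    {
            unsigned long tmp;
            int result;

            __asm__ __volatile__(
                    "1:     l32i    %[tmp], %[mem]\n"
                    "       wsr     %[tmp], scompare1\n"
                    "       add     %[result], %[tmp], %[i]\n"
                    "       s32c1i  %[result], %[mem]\n"
                    "       bne     %[result], %[tmp], 1b\n"
                    : [result] "=&a" (result), [tmp] "=&a" (tmp),
                      [mem] "+m" (*v)
                    : [i] "a" (i)
                    : "memory");
    }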

* tag 'xtensa-20191201' of git://github.com/jcmvbkbc/linux-xtensa: (30 commits)
  xtensa: clean up system_call/xtensa_rt_sigreturn interaction
  xtensa: fix system_call interaction with ptrace
  xtensa: rearrange syscall tracing
  xtensa: fix syscall_set_return_value
  xtensa: drop unneeded headers from coprocessor.S
  xtensa: entry: Remove unneeded need_resched() loop
  xtensa: use MEMBLOCK_ALLOC_ANYWHERE for KASAN shadow map
  xtensa: fix TLB sanity checker
  xtensa: get rid of __ARCH_USE_5LEVEL_HACK
  xtensa: mm: fix PMD folding implementation
  xtensa: make stack dump size configurable
  xtensa: improve stack dumping
  xtensa: use "m" constraint instead of "r" in futex.h assembly
  xtensa: use "m" constraint instead of "a" in cmpxchg.h assembly
  xtensa: use named assembly arguments in cmpxchg.h
  xtensa: use "m" constraint instead of "a" in atomic.h assembly
  xtensa: use named assembly arguments in atomic.h
  xtensa: use "m" constraint instead of "a" in bitops.h assembly
  xtensa: use named assembly arguments in bitops.h
  xtensa: use macros to generate *_bit and test_and_*_bit functions
  ...
parents 043cf468 9d9043f6
@@ -30,5 +30,5 @@
 | um:        | TODO |
 | unicore32: | TODO |
 | x86:       |  ok  |
-| xtensa:    | TODO |
+| xtensa:    |  ok  |
 -----------------------
@@ -21,8 +21,8 @@ config XTENSA
        select GENERIC_PCI_IOMAP
        select GENERIC_SCHED_CLOCK
        select GENERIC_STRNCPY_FROM_USER if KASAN
-       select HAVE_ARCH_JUMP_LABEL
-       select HAVE_ARCH_KASAN if MMU
+       select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
+       select HAVE_ARCH_KASAN if MMU && !XIP_KERNEL
        select HAVE_ARCH_TRACEHOOK
        select HAVE_DEBUG_KMEMLEAK
        select HAVE_DMA_CONTIGUOUS
@@ -215,151 +215,6 @@ config HOTPLUG_CPU
          Say N if you want to disable CPU hotplug.
config INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
bool "Initialize Xtensa MMU inside the Linux kernel code"
depends on !XTENSA_VARIANT_FSF && !XTENSA_VARIANT_DC232B
default y if XTENSA_VARIANT_DC233C || XTENSA_VARIANT_CUSTOM
help
Earlier version initialized the MMU in the exception vector
before jumping to _startup in head.S and had an advantage that
it was possible to place a software breakpoint at 'reset' and
then enter your normal kernel breakpoints once the MMU was mapped
to the kernel mappings (0XC0000000).
This unfortunately won't work for U-Boot and likely also wont
work for using KEXEC to have a hot kernel ready for doing a
KDUMP.
So now the MMU is initialized in head.S but it's necessary to
use hardware breakpoints (gdb 'hbreak' cmd) to break at _startup.
xt-gdb can't place a Software Breakpoint in the 0XD region prior
to mapping the MMU and after mapping even if the area of low memory
was mapped gdb wouldn't remove the breakpoint on hitting it as the
PC wouldn't match. Since Hardware Breakpoints are recommended for
Linux configurations it seems reasonable to just assume they exist
and leave this older mechanism for unfortunate souls that choose
not to follow Tensilica's recommendation.
Selecting this will cause U-Boot to set the KERNEL Load and Entry
address at 0x00003000 instead of the mapped std of 0xD0003000.
If in doubt, say Y.
config MEMMAP_CACHEATTR
hex "Cache attributes for the memory address space"
depends on !MMU
default 0x22222222
help
These cache attributes are set up for noMMU systems. Each hex digit
specifies cache attributes for the corresponding 512MB memory
region: bits 0..3 -- for addresses 0x00000000..0x1fffffff,
bits 4..7 -- for addresses 0x20000000..0x3fffffff, and so on.
Cache attribute values are specific for the MMU type.
For region protection MMUs:
1: WT cached,
2: cache bypass,
4: WB cached,
f: illegal.
For ful MMU:
bit 0: executable,
bit 1: writable,
bits 2..3:
0: cache bypass,
1: WB cache,
2: WT cache,
3: special (c and e are illegal, f is reserved).
For MPU:
0: illegal,
1: WB cache,
2: WB, no-write-allocate cache,
3: WT cache,
4: cache bypass.
config KSEG_PADDR
hex "Physical address of the KSEG mapping"
depends on INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX && MMU
default 0x00000000
help
This is the physical address where KSEG is mapped. Please refer to
the chosen KSEG layout help for the required address alignment.
Unpacked kernel image (including vectors) must be located completely
within KSEG.
Physical memory below this address is not available to linux.
If unsure, leave the default value here.
config KERNEL_LOAD_ADDRESS
hex "Kernel load address"
default 0x60003000 if !MMU
default 0x00003000 if MMU && INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
default 0xd0003000 if MMU && !INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
help
This is the address where the kernel is loaded.
It is virtual address for MMUv2 configurations and physical address
for all other configurations.
If unsure, leave the default value here.
config VECTORS_OFFSET
hex "Kernel vectors offset"
default 0x00003000
help
This is the offset of the kernel image from the relocatable vectors
base.
If unsure, leave the default value here.
choice
prompt "KSEG layout"
depends on MMU
default XTENSA_KSEG_MMU_V2
config XTENSA_KSEG_MMU_V2
bool "MMUv2: 128MB cached + 128MB uncached"
help
MMUv2 compatible kernel memory map: TLB way 5 maps 128MB starting
at KSEG_PADDR to 0xd0000000 with cache and to 0xd8000000
without cache.
KSEG_PADDR must be aligned to 128MB.
config XTENSA_KSEG_256M
bool "256MB cached + 256MB uncached"
depends on INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
help
TLB way 6 maps 256MB starting at KSEG_PADDR to 0xb0000000
with cache and to 0xc0000000 without cache.
KSEG_PADDR must be aligned to 256MB.
config XTENSA_KSEG_512M
bool "512MB cached + 512MB uncached"
depends on INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
help
TLB way 6 maps 512MB starting at KSEG_PADDR to 0xa0000000
with cache and to 0xc0000000 without cache.
KSEG_PADDR must be aligned to 256MB.
endchoice
config HIGHMEM
bool "High Memory Support"
depends on MMU
help
Linux can use the full amount of RAM in the system by
default. However, the default MMUv2 setup only maps the
lowermost 128 MB of memory linearly to the areas starting
at 0xd0000000 (cached) and 0xd8000000 (uncached).
When there are more than 128 MB memory in the system not
all of it can be "permanently mapped" by the kernel.
The physical memory that's not permanently mapped is called
"high memory".
If you are compiling a kernel which will never run on a
machine with more than 128 MB total physical RAM, answer
N here.
If unsure, say Y.
config FAST_SYSCALL_XTENSA
        bool "Enable fast atomic syscalls"
        default n
@@ -446,6 +301,9 @@ config XTENSA_CALIBRATE_CCOUNT
config SERIAL_CONSOLE
        def_bool n

+config PLATFORM_HAVE_XIP
+       def_bool n
+
menu "Platform options"

choice
@@ -472,6 +330,7 @@ config XTENSA_PLATFORM_XTFPGA
        select PLATFORM_WANT_DEFAULT_MEM if !MMU
        select SERIAL_CONSOLE
        select XTENSA_CALIBRATE_CCOUNT
+       select PLATFORM_HAVE_XIP
        help
          XTFPGA is the name of Tensilica board family (LX60, LX110, LX200, ML605).
          This hardware is capable of running a full Linux distribution.
@@ -563,34 +422,6 @@ config SIMDISK1_FILENAME
          Another simulated disk in a host file for a buildroot-independent
          storage.
config FORCE_MAX_ZONEORDER
int "Maximum zone order"
default "11"
help
The kernel memory allocator divides physically contiguous memory
blocks into "zones", where each zone is a power of two number of
pages. This option selects the largest power of two that the kernel
keeps in the memory allocator. If you need to allocate very large
blocks of physically contiguous memory, then you may need to
increase this value.
This config option is actually maximum order plus one. For example,
a value of 11 means that the largest free memory block is 2^10 pages.
config PLATFORM_WANT_DEFAULT_MEM
def_bool n
config DEFAULT_MEM_START
hex
prompt "PAGE_OFFSET/PHYS_OFFSET" if !MMU && PLATFORM_WANT_DEFAULT_MEM
default 0x60000000 if PLATFORM_WANT_DEFAULT_MEM
default 0x00000000
help
This is the base address used for both PAGE_OFFSET and PHYS_OFFSET
in noMMU configurations.
If unsure, leave the default value here.
config XTFPGA_LCD
        bool "Enable XTFPGA LCD driver"
        depends on XTENSA_PLATFORM_XTFPGA
@@ -621,6 +452,221 @@ config XTFPGA_LCD_8BIT_ACCESS
          only be used with 8-bit interface. Please consult prototyping user
          guide for your board for the correct interface width.
comment "Kernel memory layout"
config INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
bool "Initialize Xtensa MMU inside the Linux kernel code"
depends on !XTENSA_VARIANT_FSF && !XTENSA_VARIANT_DC232B
default y if XTENSA_VARIANT_DC233C || XTENSA_VARIANT_CUSTOM
help
Earlier version initialized the MMU in the exception vector
before jumping to _startup in head.S and had an advantage that
it was possible to place a software breakpoint at 'reset' and
then enter your normal kernel breakpoints once the MMU was mapped
to the kernel mappings (0XC0000000).
          This unfortunately won't work for U-Boot and likely also won't
work for using KEXEC to have a hot kernel ready for doing a
KDUMP.
So now the MMU is initialized in head.S but it's necessary to
use hardware breakpoints (gdb 'hbreak' cmd) to break at _startup.
xt-gdb can't place a Software Breakpoint in the 0XD region prior
to mapping the MMU and after mapping even if the area of low memory
was mapped gdb wouldn't remove the breakpoint on hitting it as the
PC wouldn't match. Since Hardware Breakpoints are recommended for
Linux configurations it seems reasonable to just assume they exist
and leave this older mechanism for unfortunate souls that choose
not to follow Tensilica's recommendation.
Selecting this will cause U-Boot to set the KERNEL Load and Entry
address at 0x00003000 instead of the mapped std of 0xD0003000.
If in doubt, say Y.
config XIP_KERNEL
bool "Kernel Execute-In-Place from ROM"
depends on PLATFORM_HAVE_XIP
help
Execute-In-Place allows the kernel to run from non-volatile storage
directly addressable by the CPU, such as NOR flash. This saves RAM
space since the text section of the kernel is not loaded from flash
to RAM. Read-write sections, such as the data section and stack,
are still copied to RAM. The XIP kernel is not compressed since
it has to run directly from flash, so it will take more space to
store it. The flash address used to link the kernel object files,
and for storing it, is configuration dependent. Therefore, if you
say Y here, you must know the proper physical address where to
store the kernel image depending on your own flash memory usage.
Also note that the make target becomes "make xipImage" rather than
"make Image" or "make uImage". The final kernel binary to put in
ROM memory will be arch/xtensa/boot/xipImage.
If unsure, say N.
config MEMMAP_CACHEATTR
hex "Cache attributes for the memory address space"
depends on !MMU
default 0x22222222
help
These cache attributes are set up for noMMU systems. Each hex digit
specifies cache attributes for the corresponding 512MB memory
region: bits 0..3 -- for addresses 0x00000000..0x1fffffff,
bits 4..7 -- for addresses 0x20000000..0x3fffffff, and so on.
Cache attribute values are specific for the MMU type.
For region protection MMUs:
1: WT cached,
2: cache bypass,
4: WB cached,
f: illegal.
          For full MMU:
bit 0: executable,
bit 1: writable,
bits 2..3:
0: cache bypass,
1: WB cache,
2: WT cache,
3: special (c and e are illegal, f is reserved).
For MPU:
0: illegal,
1: WB cache,
2: WB, no-write-allocate cache,
3: WT cache,
4: cache bypass.
config KSEG_PADDR
hex "Physical address of the KSEG mapping"
depends on INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX && MMU
default 0x00000000
help
This is the physical address where KSEG is mapped. Please refer to
the chosen KSEG layout help for the required address alignment.
Unpacked kernel image (including vectors) must be located completely
within KSEG.
Physical memory below this address is not available to linux.
If unsure, leave the default value here.
config KERNEL_VIRTUAL_ADDRESS
hex "Kernel virtual address"
depends on MMU && XIP_KERNEL
default 0xd0003000
help
This is the virtual address where the XIP kernel is mapped.
XIP kernel may be mapped into KSEG or KIO region, virtual address
provided here must match kernel load address provided in
KERNEL_LOAD_ADDRESS.
config KERNEL_LOAD_ADDRESS
hex "Kernel load address"
default 0x60003000 if !MMU
default 0x00003000 if MMU && INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
default 0xd0003000 if MMU && !INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
help
This is the address where the kernel is loaded.
It is virtual address for MMUv2 configurations and physical address
for all other configurations.
If unsure, leave the default value here.
config VECTORS_OFFSET
hex "Kernel vectors offset"
default 0x00003000
depends on !XIP_KERNEL
help
This is the offset of the kernel image from the relocatable vectors
base.
If unsure, leave the default value here.
config XIP_DATA_ADDR
hex "XIP kernel data virtual address"
depends on XIP_KERNEL
default 0x00000000
help
This is the virtual address where XIP kernel data is copied.
It must be within KSEG if MMU is used.
config PLATFORM_WANT_DEFAULT_MEM
def_bool n
config DEFAULT_MEM_START
hex
prompt "PAGE_OFFSET/PHYS_OFFSET" if !MMU && PLATFORM_WANT_DEFAULT_MEM
default 0x60000000 if PLATFORM_WANT_DEFAULT_MEM
default 0x00000000
help
This is the base address used for both PAGE_OFFSET and PHYS_OFFSET
in noMMU configurations.
If unsure, leave the default value here.
choice
prompt "KSEG layout"
depends on MMU
default XTENSA_KSEG_MMU_V2
config XTENSA_KSEG_MMU_V2
bool "MMUv2: 128MB cached + 128MB uncached"
help
MMUv2 compatible kernel memory map: TLB way 5 maps 128MB starting
at KSEG_PADDR to 0xd0000000 with cache and to 0xd8000000
without cache.
KSEG_PADDR must be aligned to 128MB.
config XTENSA_KSEG_256M
bool "256MB cached + 256MB uncached"
depends on INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
help
TLB way 6 maps 256MB starting at KSEG_PADDR to 0xb0000000
with cache and to 0xc0000000 without cache.
KSEG_PADDR must be aligned to 256MB.
config XTENSA_KSEG_512M
bool "512MB cached + 512MB uncached"
depends on INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
help
TLB way 6 maps 512MB starting at KSEG_PADDR to 0xa0000000
with cache and to 0xc0000000 without cache.
KSEG_PADDR must be aligned to 256MB.
endchoice
config HIGHMEM
bool "High Memory Support"
depends on MMU
help
Linux can use the full amount of RAM in the system by
default. However, the default MMUv2 setup only maps the
lowermost 128 MB of memory linearly to the areas starting
at 0xd0000000 (cached) and 0xd8000000 (uncached).
When there are more than 128 MB memory in the system not
all of it can be "permanently mapped" by the kernel.
The physical memory that's not permanently mapped is called
"high memory".
If you are compiling a kernel which will never run on a
machine with more than 128 MB total physical RAM, answer
N here.
If unsure, say Y.
config FORCE_MAX_ZONEORDER
int "Maximum zone order"
default "11"
help
The kernel memory allocator divides physically contiguous memory
blocks into "zones", where each zone is a power of two number of
pages. This option selects the largest power of two that the kernel
keeps in the memory allocator. If you need to allocate very large
blocks of physically contiguous memory, then you may need to
increase this value.
This config option is actually maximum order plus one. For example,
a value of 11 means that the largest free memory block is 2^10 pages.
endmenu

menu "Power management options"
......
@@ -31,3 +31,10 @@ config S32C1I_SELFTEST
          It is easy to make wrong hardware configuration, this test should catch it early.
          Say 'N' on stable hardware.
config PRINT_STACK_DEPTH
int "Stack depth to print" if DEBUG_KERNEL
default 64
help
This option allows you to set the stack depth that the kernel
prints in stack traces.
@@ -87,7 +87,7 @@ drivers-$(CONFIG_OPROFILE) += arch/xtensa/oprofile/

boot := arch/xtensa/boot

-all Image zImage uImage: vmlinux
+all Image zImage uImage xipImage: vmlinux
        $(Q)$(MAKE) $(build)=$(boot) $@

archheaders:
@@ -97,4 +97,5 @@ define archhelp
        @echo '* Image      - Kernel ELF image with reset vector'
        @echo '* zImage     - Compressed kernel image (arch/xtensa/boot/images/zImage.*)'
        @echo '* uImage     - U-Boot wrapped image'
+       @echo '  xipImage   - XIP image'
endef
@@ -29,6 +29,7 @@ all: $(boot-y)
Image: boot-elf
zImage: boot-redboot
uImage: $(obj)/uImage
+xipImage: $(obj)/xipImage

boot-elf boot-redboot: $(addprefix $(obj)/,$(subdir-y))
        $(Q)$(MAKE) $(build)=$(obj)/$@ $(MAKECMDGOALS)
@@ -50,3 +51,7 @@ UIMAGE_COMPRESSION = gzip
$(obj)/uImage: vmlinux.bin.gz FORCE
        $(call if_changed,uimage)
        $(Q)$(kecho) '  Kernel: $@ is ready'
+
+$(obj)/xipImage: vmlinux FORCE
+       $(call if_changed,objcopy)
+       $(Q)$(kecho) '  Kernel: $@ is ready'
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_PREEMPT=y
CONFIG_IRQ_TIME_ACCOUNTING=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_MEMCG=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_DEVICE=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_DEBUG=y
CONFIG_NAMESPACES=y
CONFIG_SCHED_AUTOGROUP=y
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
CONFIG_PROFILING=y
CONFIG_XTENSA_VARIANT_DC233C=y
CONFIG_XTENSA_UNALIGNED_USER=y
CONFIG_XIP_KERNEL=y
CONFIG_XIP_DATA_ADDR=0xd0000000
CONFIG_KERNEL_VIRTUAL_ADDRESS=0xe6000000
CONFIG_KERNEL_LOAD_ADDRESS=0xf6000000
CONFIG_XTENSA_KSEG_512M=y
CONFIG_HIGHMEM=y
CONFIG_XTENSA_PLATFORM_XTFPGA=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=0x38000000@0"
CONFIG_USE_OF=y
CONFIG_BUILTIN_DTB_SOURCE="kc705"
# CONFIG_PARSE_BOOTPARAM is not set
CONFIG_OPROFILE=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
# CONFIG_COMPACTION is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
# CONFIG_IPV6 is not set
CONFIG_NETFILTER=y
# CONFIG_WIRELESS is not set
CONFIG_UEVENT_HELPER=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_STANDALONE is not set
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_NETDEVICES=y
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_VENDOR_AURORA is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_SAMSUNG is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SMSC is not set
# CONFIG_NET_VENDOR_STMICRO is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_MARVELL_PHY=y
# CONFIG_WLAN is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
CONFIG_DEVKMEM=y
CONFIG_SERIAL_8250=y
# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
CONFIG_WATCHDOG_NOWAYOUT=y
CONFIG_SOFT_WATCHDOG=y
# CONFIG_VGA_CONSOLE is not set
# CONFIG_USB_SUPPORT is not set
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_EXT3_FS=y
CONFIG_FANOTIFY=y
CONFIG_VFAT_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_NFS_FS=y
CONFIG_NFS_V4=y
CONFIG_NFS_SWAP=y
CONFIG_ROOT_NFS=y
CONFIG_SUNRPC_DEBUG=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_CRYPTO_ECHAINIV=y
CONFIG_CRYPTO_DEFLATE=y
CONFIG_CRYPTO_LZO=y
CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_INFO=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_SCHED_DEBUG is not set
CONFIG_SCHEDSTATS=y
CONFIG_DEBUG_RT_MUTEXES=y
CONFIG_DEBUG_SPINLOCK=y
CONFIG_DEBUG_MUTEXES=y
CONFIG_DEBUG_ATOMIC_SLEEP=y
CONFIG_STACKTRACE=y
CONFIG_RCU_TRACE=y
# CONFIG_FTRACE is not set
# CONFIG_S32C1I_SELFTEST is not set
@@ -11,6 +11,7 @@ generic-y += exec.h
generic-y += extable.h
generic-y += fb.h
generic-y += hardirq.h
+generic-y += hw_irq.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += kdebug.h
@@ -30,6 +31,7 @@ generic-y += qspinlock.h
generic-y += sections.h
generic-y += topology.h
generic-y += trace_clock.h
+generic-y += user.h
generic-y += vga.h
generic-y += word-at-a-time.h
generic-y += xor.h
...@@ -64,13 +64,13 @@ static inline void atomic_##op(int i, atomic_t *v) \ ...@@ -64,13 +64,13 @@ static inline void atomic_##op(int i, atomic_t *v) \
int result; \ int result; \
\ \
__asm__ __volatile__( \ __asm__ __volatile__( \
"1: l32ex %1, %3\n" \ "1: l32ex %[tmp], %[addr]\n" \
" " #op " %0, %1, %2\n" \ " " #op " %[result], %[tmp], %[i]\n" \
" s32ex %0, %3\n" \ " s32ex %[result], %[addr]\n" \
" getex %0\n" \ " getex %[result]\n" \
" beqz %0, 1b\n" \ " beqz %[result], 1b\n" \
: "=&a" (result), "=&a" (tmp) \ : [result] "=&a" (result), [tmp] "=&a" (tmp) \
: "a" (i), "a" (v) \ : [i] "a" (i), [addr] "a" (v) \
: "memory" \ : "memory" \
); \ ); \
} \ } \
...@@ -82,14 +82,14 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ ...@@ -82,14 +82,14 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
int result; \ int result; \
\ \
__asm__ __volatile__( \ __asm__ __volatile__( \
"1: l32ex %1, %3\n" \ "1: l32ex %[tmp], %[addr]\n" \
" " #op " %0, %1, %2\n" \ " " #op " %[result], %[tmp], %[i]\n" \
" s32ex %0, %3\n" \ " s32ex %[result], %[addr]\n" \
" getex %0\n" \ " getex %[result]\n" \
" beqz %0, 1b\n" \ " beqz %[result], 1b\n" \
" " #op " %0, %1, %2\n" \ " " #op " %[result], %[tmp], %[i]\n" \
: "=&a" (result), "=&a" (tmp) \ : [result] "=&a" (result), [tmp] "=&a" (tmp) \
: "a" (i), "a" (v) \ : [i] "a" (i), [addr] "a" (v) \
: "memory" \ : "memory" \
); \ ); \
\ \
...@@ -103,13 +103,13 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \ ...@@ -103,13 +103,13 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
int result; \ int result; \
\ \
__asm__ __volatile__( \ __asm__ __volatile__( \
"1: l32ex %1, %3\n" \ "1: l32ex %[tmp], %[addr]\n" \
" " #op " %0, %1, %2\n" \ " " #op " %[result], %[tmp], %[i]\n" \
" s32ex %0, %3\n" \ " s32ex %[result], %[addr]\n" \
" getex %0\n" \ " getex %[result]\n" \
" beqz %0, 1b\n" \ " beqz %[result], 1b\n" \
: "=&a" (result), "=&a" (tmp) \ : [result] "=&a" (result), [tmp] "=&a" (tmp) \
: "a" (i), "a" (v) \ : [i] "a" (i), [addr] "a" (v) \
: "memory" \ : "memory" \
); \ ); \
\ \
...@@ -124,13 +124,14 @@ static inline void atomic_##op(int i, atomic_t * v) \ ...@@ -124,13 +124,14 @@ static inline void atomic_##op(int i, atomic_t * v) \
int result; \ int result; \
\ \
__asm__ __volatile__( \ __asm__ __volatile__( \
"1: l32i %1, %3, 0\n" \ "1: l32i %[tmp], %[mem]\n" \
" wsr %1, scompare1\n" \ " wsr %[tmp], scompare1\n" \
" " #op " %0, %1, %2\n" \ " " #op " %[result], %[tmp], %[i]\n" \
" s32c1i %0, %3, 0\n" \ " s32c1i %[result], %[mem]\n" \
" bne %0, %1, 1b\n" \ " bne %[result], %[tmp], 1b\n" \
: "=&a" (result), "=&a" (tmp) \ : [result] "=&a" (result), [tmp] "=&a" (tmp), \
: "a" (i), "a" (v) \ [mem] "+m" (*v) \
: [i] "a" (i) \
: "memory" \ : "memory" \
); \ ); \
} \ } \
...@@ -142,14 +143,15 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \ ...@@ -142,14 +143,15 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \
int result; \ int result; \
\ \
__asm__ __volatile__( \ __asm__ __volatile__( \
"1: l32i %1, %3, 0\n" \ "1: l32i %[tmp], %[mem]\n" \
" wsr %1, scompare1\n" \ " wsr %[tmp], scompare1\n" \
" " #op " %0, %1, %2\n" \ " " #op " %[result], %[tmp], %[i]\n" \
" s32c1i %0, %3, 0\n" \ " s32c1i %[result], %[mem]\n" \
" bne %0, %1, 1b\n" \ " bne %[result], %[tmp], 1b\n" \
" " #op " %0, %0, %2\n" \ " " #op " %[result], %[result], %[i]\n" \
: "=&a" (result), "=&a" (tmp) \ : [result] "=&a" (result), [tmp] "=&a" (tmp), \
: "a" (i), "a" (v) \ [mem] "+m" (*v) \
: [i] "a" (i) \
: "memory" \ : "memory" \
); \ ); \
\ \
...@@ -163,13 +165,14 @@ static inline int atomic_fetch_##op(int i, atomic_t * v) \ ...@@ -163,13 +165,14 @@ static inline int atomic_fetch_##op(int i, atomic_t * v) \
int result; \ int result; \
\ \
__asm__ __volatile__( \ __asm__ __volatile__( \
"1: l32i %1, %3, 0\n" \ "1: l32i %[tmp], %[mem]\n" \
" wsr %1, scompare1\n" \ " wsr %[tmp], scompare1\n" \
" " #op " %0, %1, %2\n" \ " " #op " %[result], %[tmp], %[i]\n" \
" s32c1i %0, %3, 0\n" \ " s32c1i %[result], %[mem]\n" \
" bne %0, %1, 1b\n" \ " bne %[result], %[tmp], 1b\n" \
: "=&a" (result), "=&a" (tmp) \ : [result] "=&a" (result), [tmp] "=&a" (tmp), \
: "a" (i), "a" (v) \ [mem] "+m" (*v) \
: [i] "a" (i) \
: "memory" \ : "memory" \
); \ ); \
\ \
...@@ -184,14 +187,14 @@ static inline void atomic_##op(int i, atomic_t * v) \ ...@@ -184,14 +187,14 @@ static inline void atomic_##op(int i, atomic_t * v) \
unsigned int vval; \ unsigned int vval; \
\ \
__asm__ __volatile__( \ __asm__ __volatile__( \
" rsil a15, "__stringify(TOPLEVEL)"\n"\ " rsil a15, "__stringify(TOPLEVEL)"\n" \
" l32i %0, %2, 0\n" \ " l32i %[result], %[mem]\n" \
" " #op " %0, %0, %1\n" \ " " #op " %[result], %[result], %[i]\n" \
" s32i %0, %2, 0\n" \ " s32i %[result], %[mem]\n" \
" wsr a15, ps\n" \ " wsr a15, ps\n" \
" rsync\n" \ " rsync\n" \
: "=&a" (vval) \ : [result] "=&a" (vval), [mem] "+m" (*v) \
: "a" (i), "a" (v) \ : [i] "a" (i) \
: "a15", "memory" \ : "a15", "memory" \
); \ ); \
} \ } \
...@@ -203,13 +206,13 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \ ...@@ -203,13 +206,13 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \
\ \
__asm__ __volatile__( \ __asm__ __volatile__( \
" rsil a15,"__stringify(TOPLEVEL)"\n" \ " rsil a15,"__stringify(TOPLEVEL)"\n" \
" l32i %0, %2, 0\n" \ " l32i %[result], %[mem]\n" \
" " #op " %0, %0, %1\n" \ " " #op " %[result], %[result], %[i]\n" \
" s32i %0, %2, 0\n" \ " s32i %[result], %[mem]\n" \
" wsr a15, ps\n" \ " wsr a15, ps\n" \
" rsync\n" \ " rsync\n" \
: "=&a" (vval) \ : [result] "=&a" (vval), [mem] "+m" (*v) \
: "a" (i), "a" (v) \ : [i] "a" (i) \
: "a15", "memory" \ : "a15", "memory" \
); \ ); \
\ \
...@@ -223,13 +226,14 @@ static inline int atomic_fetch_##op(int i, atomic_t * v) \ ...@@ -223,13 +226,14 @@ static inline int atomic_fetch_##op(int i, atomic_t * v) \
\ \
__asm__ __volatile__( \ __asm__ __volatile__( \
" rsil a15,"__stringify(TOPLEVEL)"\n" \ " rsil a15,"__stringify(TOPLEVEL)"\n" \
" l32i %0, %3, 0\n" \ " l32i %[result], %[mem]\n" \
" " #op " %1, %0, %2\n" \ " " #op " %[tmp], %[result], %[i]\n" \
" s32i %1, %3, 0\n" \ " s32i %[tmp], %[mem]\n" \
" wsr a15, ps\n" \ " wsr a15, ps\n" \
" rsync\n" \ " rsync\n" \
: "=&a" (vval), "=&a" (tmp) \ : [result] "=&a" (vval), [tmp] "=&a" (tmp), \
: "a" (i), "a" (v) \ [mem] "+m" (*v) \
: [i] "a" (i) \
: "a15", "memory" \ : "a15", "memory" \
); \ ); \
\ \
......
...@@ -98,247 +98,112 @@ static inline unsigned long __fls(unsigned long word) ...@@ -98,247 +98,112 @@ static inline unsigned long __fls(unsigned long word)
#if XCHAL_HAVE_EXCLUSIVE #if XCHAL_HAVE_EXCLUSIVE
static inline void set_bit(unsigned int bit, volatile unsigned long *p) #define BIT_OP(op, insn, inv) \
{ static inline void op##_bit(unsigned int bit, volatile unsigned long *p)\
unsigned long tmp; { \
unsigned long mask = 1UL << (bit & 31); unsigned long tmp; \
unsigned long mask = 1UL << (bit & 31); \
p += bit >> 5; \
p += bit >> 5; \
__asm__ __volatile__( \
"1: l32ex %0, %2\n" __asm__ __volatile__( \
" or %0, %0, %1\n" "1: l32ex %[tmp], %[addr]\n" \
" s32ex %0, %2\n" " "insn" %[tmp], %[tmp], %[mask]\n" \
" getex %0\n" " s32ex %[tmp], %[addr]\n" \
" beqz %0, 1b\n" " getex %[tmp]\n" \
: "=&a" (tmp) " beqz %[tmp], 1b\n" \
: "a" (mask), "a" (p) : [tmp] "=&a" (tmp) \
: "memory"); : [mask] "a" (inv mask), [addr] "a" (p) \
} : "memory"); \
}
static inline void clear_bit(unsigned int bit, volatile unsigned long *p)
{ #define TEST_AND_BIT_OP(op, insn, inv) \
unsigned long tmp; static inline int \
unsigned long mask = 1UL << (bit & 31); test_and_##op##_bit(unsigned int bit, volatile unsigned long *p) \
{ \
p += bit >> 5; unsigned long tmp, value; \
unsigned long mask = 1UL << (bit & 31); \
__asm__ __volatile__( \
"1: l32ex %0, %2\n" p += bit >> 5; \
" and %0, %0, %1\n" \
" s32ex %0, %2\n" __asm__ __volatile__( \
" getex %0\n" "1: l32ex %[value], %[addr]\n" \
" beqz %0, 1b\n" " "insn" %[tmp], %[value], %[mask]\n" \
: "=&a" (tmp) " s32ex %[tmp], %[addr]\n" \
: "a" (~mask), "a" (p) " getex %[tmp]\n" \
: "memory"); " beqz %[tmp], 1b\n" \
} : [tmp] "=&a" (tmp), [value] "=&a" (value) \
: [mask] "a" (inv mask), [addr] "a" (p) \
static inline void change_bit(unsigned int bit, volatile unsigned long *p) : "memory"); \
{ \
unsigned long tmp; return value & mask; \
unsigned long mask = 1UL << (bit & 31);
p += bit >> 5;
__asm__ __volatile__(
"1: l32ex %0, %2\n"
" xor %0, %0, %1\n"
" s32ex %0, %2\n"
" getex %0\n"
" beqz %0, 1b\n"
: "=&a" (tmp)
: "a" (mask), "a" (p)
: "memory");
}
static inline int
test_and_set_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long tmp, value;
unsigned long mask = 1UL << (bit & 31);
p += bit >> 5;
__asm__ __volatile__(
"1: l32ex %1, %3\n"
" or %0, %1, %2\n"
" s32ex %0, %3\n"
" getex %0\n"
" beqz %0, 1b\n"
: "=&a" (tmp), "=&a" (value)
: "a" (mask), "a" (p)
: "memory");
return value & mask;
}
static inline int
test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long tmp, value;
unsigned long mask = 1UL << (bit & 31);
p += bit >> 5;
__asm__ __volatile__(
"1: l32ex %1, %3\n"
" and %0, %1, %2\n"
" s32ex %0, %3\n"
" getex %0\n"
" beqz %0, 1b\n"
: "=&a" (tmp), "=&a" (value)
: "a" (~mask), "a" (p)
: "memory");
return value & mask;
}
static inline int
test_and_change_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long tmp, value;
unsigned long mask = 1UL << (bit & 31);
p += bit >> 5;
__asm__ __volatile__(
"1: l32ex %1, %3\n"
" xor %0, %1, %2\n"
" s32ex %0, %3\n"
" getex %0\n"
" beqz %0, 1b\n"
: "=&a" (tmp), "=&a" (value)
: "a" (mask), "a" (p)
: "memory");
return value & mask;
} }
#elif XCHAL_HAVE_S32C1I #elif XCHAL_HAVE_S32C1I
static inline void set_bit(unsigned int bit, volatile unsigned long *p) #define BIT_OP(op, insn, inv) \
{ static inline void op##_bit(unsigned int bit, volatile unsigned long *p)\
unsigned long tmp, value; { \
unsigned long mask = 1UL << (bit & 31); unsigned long tmp, value; \
unsigned long mask = 1UL << (bit & 31); \
p += bit >> 5; \
p += bit >> 5; \
__asm__ __volatile__( \
"1: l32i %1, %3, 0\n" __asm__ __volatile__( \
" wsr %1, scompare1\n" "1: l32i %[value], %[mem]\n" \
" or %0, %1, %2\n" " wsr %[value], scompare1\n" \
" s32c1i %0, %3, 0\n" " "insn" %[tmp], %[value], %[mask]\n" \
" bne %0, %1, 1b\n" " s32c1i %[tmp], %[mem]\n" \
: "=&a" (tmp), "=&a" (value) " bne %[tmp], %[value], 1b\n" \
: "a" (mask), "a" (p) : [tmp] "=&a" (tmp), [value] "=&a" (value), \
: "memory"); [mem] "+m" (*p) \
} : [mask] "a" (inv mask) \
: "memory"); \
static inline void clear_bit(unsigned int bit, volatile unsigned long *p) }
{
unsigned long tmp, value; #define TEST_AND_BIT_OP(op, insn, inv) \
unsigned long mask = 1UL << (bit & 31); static inline int \
test_and_##op##_bit(unsigned int bit, volatile unsigned long *p) \
p += bit >> 5; { \
unsigned long tmp, value; \
__asm__ __volatile__( unsigned long mask = 1UL << (bit & 31); \
"1: l32i %1, %3, 0\n" \
" wsr %1, scompare1\n" p += bit >> 5; \
" and %0, %1, %2\n" \
" s32c1i %0, %3, 0\n" __asm__ __volatile__( \
" bne %0, %1, 1b\n" "1: l32i %[value], %[mem]\n" \
: "=&a" (tmp), "=&a" (value) " wsr %[value], scompare1\n" \
: "a" (~mask), "a" (p) " "insn" %[tmp], %[value], %[mask]\n" \
: "memory"); " s32c1i %[tmp], %[mem]\n" \
" bne %[tmp], %[value], 1b\n" \
: [tmp] "=&a" (tmp), [value] "=&a" (value), \
[mem] "+m" (*p) \
: [mask] "a" (inv mask) \
: "memory"); \
\
return tmp & mask; \
} }
static inline void change_bit(unsigned int bit, volatile unsigned long *p) #else
{
unsigned long tmp, value;
unsigned long mask = 1UL << (bit & 31);
p += bit >> 5;
__asm__ __volatile__(
"1: l32i %1, %3, 0\n"
" wsr %1, scompare1\n"
" xor %0, %1, %2\n"
" s32c1i %0, %3, 0\n"
" bne %0, %1, 1b\n"
: "=&a" (tmp), "=&a" (value)
: "a" (mask), "a" (p)
: "memory");
}
static inline int #define BIT_OP(op, insn, inv)
test_and_set_bit(unsigned int bit, volatile unsigned long *p) #define TEST_AND_BIT_OP(op, insn, inv)
{
unsigned long tmp, value;
unsigned long mask = 1UL << (bit & 31);
p += bit >> 5;
__asm__ __volatile__(
"1: l32i %1, %3, 0\n"
" wsr %1, scompare1\n"
" or %0, %1, %2\n"
" s32c1i %0, %3, 0\n"
" bne %0, %1, 1b\n"
: "=&a" (tmp), "=&a" (value)
: "a" (mask), "a" (p)
: "memory");
return tmp & mask;
}
static inline int #include <asm-generic/bitops/atomic.h>
test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long tmp, value;
unsigned long mask = 1UL << (bit & 31);
p += bit >> 5;
__asm__ __volatile__(
"1: l32i %1, %3, 0\n"
" wsr %1, scompare1\n"
" and %0, %1, %2\n"
" s32c1i %0, %3, 0\n"
" bne %0, %1, 1b\n"
: "=&a" (tmp), "=&a" (value)
: "a" (~mask), "a" (p)
: "memory");
return tmp & mask;
}
static inline int #endif /* XCHAL_HAVE_S32C1I */
test_and_change_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long tmp, value;
unsigned long mask = 1UL << (bit & 31);
p += bit >> 5;
__asm__ __volatile__(
"1: l32i %1, %3, 0\n"
" wsr %1, scompare1\n"
" xor %0, %1, %2\n"
" s32c1i %0, %3, 0\n"
" bne %0, %1, 1b\n"
: "=&a" (tmp), "=&a" (value)
: "a" (mask), "a" (p)
: "memory");
return tmp & mask;
}
#else #define BIT_OPS(op, insn, inv) \
BIT_OP(op, insn, inv) \
TEST_AND_BIT_OP(op, insn, inv)
#include <asm-generic/bitops/atomic.h> BIT_OPS(set, "or", )
BIT_OPS(clear, "and", ~)
BIT_OPS(change, "xor", )
#endif /* XCHAL_HAVE_S32C1I */ #undef BIT_OPS
#undef BIT_OP
#undef TEST_AND_BIT_OP
#include <asm-generic/bitops/find.h> #include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/le.h> #include <asm-generic/bitops/le.h>
......
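
To make the macro-generated bitops above easier to follow, this is roughly
what one instantiation, BIT_OP(set, "or", ), expands to on a core with
S32C1I (a hand-expanded sketch with whitespace adjusted, not literal
preprocessor output):

    static inline void set_bit(unsigned int bit, volatile unsigned long *p)
    {
            unsigned long tmp, value;
            unsigned long mask = 1UL << (bit & 31);

            p += bit >> 5;

            __asm__ __volatile__(
                    "1:     l32i    %[value], %[mem]\n"
                    "       wsr     %[value], scompare1\n"
                    "       or      %[tmp], %[value], %[mask]\n"
                    "       s32c1i  %[tmp], %[mem]\n"
                    "       bne     %[tmp], %[value], 1b\n"
                    : [tmp] "=&a" (tmp), [value] "=&a" (value),
                      [mem] "+m" (*p)
                    : [mask] "a" (mask)
                    : "memory");
    }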
@@ -31,4 +31,10 @@

#define ARCH_DMA_MINALIGN       L1_CACHE_BYTES

+/*
+ * R/O after init is actually writable, it cannot go to .rodata
+ * according to vmlinux linker script.
+ */
+#define __ro_after_init __read_mostly
+
#endif  /* _XTENSA_CACHE_H */
@@ -27,25 +27,25 @@ __cmpxchg_u32(volatile int *p, int old, int new)
        unsigned long tmp, result;

        __asm__ __volatile__(
-                       "1:     l32ex   %0, %3\n"
-                       "       bne     %0, %4, 2f\n"
-                       "       mov     %1, %2\n"
-                       "       s32ex   %1, %3\n"
-                       "       getex   %1\n"
-                       "       beqz    %1, 1b\n"
+                       "1:     l32ex   %[result], %[addr]\n"
+                       "       bne     %[result], %[cmp], 2f\n"
+                       "       mov     %[tmp], %[new]\n"
+                       "       s32ex   %[tmp], %[addr]\n"
+                       "       getex   %[tmp]\n"
+                       "       beqz    %[tmp], 1b\n"
                        "2:\n"
-                       : "=&a" (result), "=&a" (tmp)
-                       : "a" (new), "a" (p), "a" (old)
+                       : [result] "=&a" (result), [tmp] "=&a" (tmp)
+                       : [new] "a" (new), [addr] "a" (p), [cmp] "a" (old)
                        : "memory"
                        );

        return result;
#elif XCHAL_HAVE_S32C1I
        __asm__ __volatile__(
-                       "       wsr     %2, scompare1\n"
-                       "       s32c1i  %0, %1, 0\n"
-                       : "+a" (new)
-                       : "a" (p), "a" (old)
+                       "       wsr     %[cmp], scompare1\n"
+                       "       s32c1i  %[new], %[mem]\n"
+                       : [new] "+a" (new), [mem] "+m" (*p)
+                       : [cmp] "a" (old)
                        : "memory"
                        );
@@ -53,14 +53,14 @@ __cmpxchg_u32(volatile int *p, int old, int new)
#else
        __asm__ __volatile__(
                        "       rsil    a15, "__stringify(TOPLEVEL)"\n"
-                       "       l32i    %0, %1, 0\n"
-                       "       bne     %0, %2, 1f\n"
-                       "       s32i    %3, %1, 0\n"
+                       "       l32i    %[old], %[mem]\n"
+                       "       bne     %[old], %[cmp], 1f\n"
+                       "       s32i    %[new], %[mem]\n"
                        "1:\n"
                        "       wsr     a15, ps\n"
                        "       rsync\n"
-                       : "=&a" (old)
-                       : "a" (p), "a" (old), "r" (new)
+                       : [old] "=&a" (old), [mem] "+m" (*p)
+                       : [cmp] "a" (old), [new] "r" (new)
                        : "a15", "memory");
        return old;
#endif
@@ -129,13 +129,13 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
        unsigned long tmp, result;

        __asm__ __volatile__(
-                       "1:     l32ex   %0, %3\n"
-                       "       mov     %1, %2\n"
-                       "       s32ex   %1, %3\n"
-                       "       getex   %1\n"
-                       "       beqz    %1, 1b\n"
-                       : "=&a" (result), "=&a" (tmp)
-                       : "a" (val), "a" (m)
+                       "1:     l32ex   %[result], %[addr]\n"
+                       "       mov     %[tmp], %[val]\n"
+                       "       s32ex   %[tmp], %[addr]\n"
+                       "       getex   %[tmp]\n"
+                       "       beqz    %[tmp], 1b\n"
+                       : [result] "=&a" (result), [tmp] "=&a" (tmp)
+                       : [val] "a" (val), [addr] "a" (m)
                        : "memory"
                        );
@@ -143,13 +143,14 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
#elif XCHAL_HAVE_S32C1I
        unsigned long tmp, result;
        __asm__ __volatile__(
-                       "1:     l32i    %1, %2, 0\n"
-                       "       mov     %0, %3\n"
-                       "       wsr     %1, scompare1\n"
-                       "       s32c1i  %0, %2, 0\n"
-                       "       bne     %0, %1, 1b\n"
-                       : "=&a" (result), "=&a" (tmp)
-                       : "a" (m), "a" (val)
+                       "1:     l32i    %[tmp], %[mem]\n"
+                       "       mov     %[result], %[val]\n"
+                       "       wsr     %[tmp], scompare1\n"
+                       "       s32c1i  %[result], %[mem]\n"
+                       "       bne     %[result], %[tmp], 1b\n"
+                       : [result] "=&a" (result), [tmp] "=&a" (tmp),
+                         [mem] "+m" (*m)
+                       : [val] "a" (val)
                        : "memory"
                        );
        return result;
@@ -157,12 +158,12 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
        unsigned long tmp;
        __asm__ __volatile__(
                        "       rsil    a15, "__stringify(TOPLEVEL)"\n"
-                       "       l32i    %0, %1, 0\n"
-                       "       s32i    %2, %1, 0\n"
+                       "       l32i    %[tmp], %[mem]\n"
+                       "       s32i    %[val], %[mem]\n"
                        "       wsr     a15, ps\n"
                        "       rsync\n"
-                       : "=&a" (tmp)
-                       : "a" (m), "a" (val)
+                       : [tmp] "=&a" (tmp), [mem] "+m" (*m)
+                       : [val] "a" (val)
                        : "a15", "memory");
        return tmp;
#endif
......
@@ -78,8 +78,10 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr)

#define kmap_get_fixmap_pte(vaddr) \
        pte_offset_kernel( \
-               pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), \
-               (vaddr) \
-       )
+               pmd_offset(pud_offset(p4d_offset(pgd_offset_k(vaddr), \
+                                                (vaddr)), \
+                                     (vaddr)), \
+                          (vaddr)), \
+               (vaddr))
#endif

@@ -43,10 +43,10 @@
#elif XCHAL_HAVE_S32C1I
#define __futex_atomic_op(insn, ret, old, uaddr, arg) \
        __asm__ __volatile( \
-       "1:     l32i    %[oldval], %[addr], 0\n" \
+       "1:     l32i    %[oldval], %[mem]\n" \
                insn "\n" \
        "       wsr     %[oldval], scompare1\n" \
-       "2:     s32c1i  %[newval], %[addr], 0\n" \
+       "2:     s32c1i  %[newval], %[mem]\n" \
        "       bne     %[newval], %[oldval], 1b\n" \
        "       movi    %[newval], 0\n" \
        "3:\n" \
@@ -60,9 +60,9 @@
        "       .section __ex_table,\"a\"\n" \
        "       .long 1b, 5b, 2b, 5b\n" \
        "       .previous\n" \
-       : [oldval] "=&r" (old), [newval] "=&r" (ret) \
-       : [addr] "r" (uaddr), [oparg] "r" (arg), \
-         [fault] "I" (-EFAULT) \
+       : [oldval] "=&r" (old), [newval] "=&r" (ret), \
+         [mem] "+m" (*(uaddr)) \
+       : [oparg] "r" (arg), [fault] "I" (-EFAULT) \
        : "memory")
#endif
......
/*
* include/asm-xtensa/hw_irq.h
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file "COPYING" in the main directory of
* this archive for more details.
*
* Copyright (C) 2002 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_HW_IRQ_H
#define _XTENSA_HW_IRQ_H
#endif
@@ -23,6 +23,7 @@
#ifndef _XTENSA_INITIALIZE_MMU_H
#define _XTENSA_INITIALIZE_MMU_H

+#include <linux/init.h>
#include <asm/pgtable.h>
#include <asm/vectors.h>
@@ -183,7 +184,7 @@
#endif

#if XCHAL_HAVE_MPU
-       .data
+       __REFCONST
        .align  4
.Lattribute_table:
        .long 0x000000, 0x1fff00, 0x1ddf00, 0x1eef00
......
@@ -11,6 +11,7 @@
#ifndef _XTENSA_KMEM_LAYOUT_H
#define _XTENSA_KMEM_LAYOUT_H

+#include <asm/core.h>
#include <asm/types.h>

#ifdef CONFIG_MMU
@@ -65,6 +66,34 @@

#endif
/* KIO definition */
#if XCHAL_HAVE_PTP_MMU
#define XCHAL_KIO_CACHED_VADDR 0xe0000000
#define XCHAL_KIO_BYPASS_VADDR 0xf0000000
#define XCHAL_KIO_DEFAULT_PADDR 0xf0000000
#else
#define XCHAL_KIO_BYPASS_VADDR XCHAL_KIO_PADDR
#define XCHAL_KIO_DEFAULT_PADDR 0x90000000
#endif
#define XCHAL_KIO_SIZE 0x10000000
#if (!XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY) && defined(CONFIG_OF)
#define XCHAL_KIO_PADDR xtensa_get_kio_paddr()
#ifndef __ASSEMBLY__
extern unsigned long xtensa_kio_paddr;
static inline unsigned long xtensa_get_kio_paddr(void)
{
return xtensa_kio_paddr;
}
#endif
#else
#define XCHAL_KIO_PADDR XCHAL_KIO_DEFAULT_PADDR
#endif
/* KERNEL_STACK definition */
#ifndef CONFIG_KASAN
#define KERNEL_STACK_SHIFT      13
#else
......
@@ -169,7 +169,18 @@ static inline unsigned long ___pa(unsigned long va)
        if (off >= XCHAL_KSEG_SIZE)
                off -= XCHAL_KSEG_SIZE;

+#ifndef CONFIG_XIP_KERNEL
        return off + PHYS_OFFSET;
+#else
+       if (off < XCHAL_KSEG_SIZE)
+               return off + PHYS_OFFSET;
+
+       off -= XCHAL_KSEG_SIZE;
+       if (off >= XCHAL_KIO_SIZE)
+               off -= XCHAL_KIO_SIZE;
+
+       return off + XCHAL_KIO_PADDR;
+#endif
}
#define __pa(x) ___pa((unsigned long)(x))
#else
......
@@ -8,7 +8,6 @@
#ifndef _XTENSA_PGTABLE_H
#define _XTENSA_PGTABLE_H

-#define __ARCH_USE_5LEVEL_HACK
#include <asm/page.h>
#include <asm/kmem_layout.h>
#include <asm-generic/pgtable-nopmd.h>
@@ -371,9 +370,6 @@ ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)

#define pgd_index(address)      ((address) >> PGDIR_SHIFT)

-/* Find an entry in the second-level page table.. */
-#define pmd_offset(dir,address) ((pmd_t*)(dir))
-
/* Find an entry in the third-level page table.. */
#define pte_index(address)      (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset_kernel(dir,addr) \
......
@@ -195,6 +195,7 @@ struct thread_struct {
/* Clearing a0 terminates the backtrace. */
#define start_thread(regs, new_pc, new_sp) \
        do { \
+               unsigned long syscall = (regs)->syscall; \
                memset((regs), 0, sizeof(*(regs))); \
                (regs)->pc = (new_pc); \
                (regs)->ps = USER_PS_VALUE; \
@@ -204,7 +205,7 @@ struct thread_struct {
                (regs)->depc = 0; \
                (regs)->windowbase = 0; \
                (regs)->windowstart = 1; \
-               (regs)->syscall = NO_SYSCALL; \
+               (regs)->syscall = syscall; \
        } while (0)

/* Forward declaration */
......
@@ -51,7 +51,7 @@ static inline void syscall_set_return_value(struct task_struct *task,
                                            struct pt_regs *regs,
                                            int error, long val)
{
-       regs->areg[0] = (long) error ? error : val;
+       regs->areg[2] = (long) error ? error : val;
}

#define SYSCALL_MAX_ARGS 6
@@ -79,7 +79,7 @@ static inline void syscall_set_arguments(struct task_struct *task,
                regs->areg[reg[i]] = args[i];
}

-asmlinkage long xtensa_rt_sigreturn(struct pt_regs*);
+asmlinkage long xtensa_rt_sigreturn(void);
asmlinkage long xtensa_shmat(int, char __user *, int);
asmlinkage long xtensa_fadvise64_64(int, int,
                                    unsigned long long, unsigned long long);
......
@@ -132,13 +132,13 @@ do { \
#define __check_align_1 ""

#define __check_align_2 \
-       "   _bbci.l %[addr], 0, 1f      \n"     \
+       "   _bbci.l %[mem] * 0, 1f      \n"     \
        "   movi    %[err], %[efault]   \n"     \
        "   _j      2f                  \n"

#define __check_align_4 \
-       "   _bbsi.l %[addr], 0, 0f      \n"     \
-       "   _bbci.l %[addr], 1, 1f      \n"     \
+       "   _bbsi.l %[mem] * 0, 0f      \n"     \
+       "   _bbci.l %[mem] * 0 + 1, 1f  \n"     \
        "0: movi    %[err], %[efault]   \n"     \
        "   _j      2f                  \n"
@@ -154,7 +154,7 @@ do { \
#define __put_user_asm(x_, addr_, err_, align, insn, cb)\
__asm__ __volatile__(                                   \
        __check_align_##align                           \
-       "1: "insn"  %[x], %[addr], 0    \n"     \
+       "1: "insn"  %[x], %[mem]        \n"     \
        "2:                             \n"     \
        "   .section  .fixup,\"ax\"     \n"     \
        "   .align 4                    \n"     \
@@ -167,8 +167,8 @@ __asm__ __volatile__(                                   \
        "   .section  __ex_table,\"a\"  \n"     \
        "   .long   1b, 5b              \n"     \
        "   .previous"                          \
-       :[err] "+r"(err_), [tmp] "=r"(cb)       \
-       :[x] "r"(x_), [addr] "r"(addr_), [efault] "i"(-EFAULT))
+       :[err] "+r"(err_), [tmp] "=r"(cb), [mem] "=m"(*(addr_)) \
+       :[x] "r"(x_), [efault] "i"(-EFAULT))

#define __get_user_nocheck(x, ptr, size)                \
({                                                      \
@@ -222,7 +222,7 @@ do { \
        u32 __x = 0;                                    \
        __asm__ __volatile__(                           \
                __check_align_##align                   \
-               "1: "insn"  %[x], %[addr], 0    \n"     \
+               "1: "insn"  %[x], %[mem]        \n"     \
                "2:                             \n"     \
                "   .section  .fixup,\"ax\"     \n"     \
                "   .align 4                    \n"     \
@@ -236,7 +236,7 @@ do { \
                "   .long   1b, 5b              \n"     \
                "   .previous"                          \
                :[err] "+r"(err_), [tmp] "=r"(cb), [x] "+r"(__x) \
-               :[addr] "r"(addr_), [efault] "i"(-EFAULT));     \
+               :[mem] "m"(*(addr_)), [efault] "i"(-EFAULT));   \
        (x_) = (__force __typeof__(*(addr_)))__x;       \
} while (0)
......
/*
* include/asm-xtensa/user.h
*
* Xtensa Processor version.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_USER_H
#define _XTENSA_USER_H
/* This file usually defines a 'struct user' structure. However, it is only
 * used for a.out files, which are not supported on Xtensa.
 */
*/
#endif /* _XTENSA_USER_H */
@@ -21,50 +21,18 @@
#include <asm/core.h>
#include <asm/kmem_layout.h>

-#if XCHAL_HAVE_PTP_MMU
-#define XCHAL_KIO_CACHED_VADDR          0xe0000000
-#define XCHAL_KIO_BYPASS_VADDR          0xf0000000
-#define XCHAL_KIO_DEFAULT_PADDR         0xf0000000
-#else
-#define XCHAL_KIO_BYPASS_VADDR          XCHAL_KIO_PADDR
-#define XCHAL_KIO_DEFAULT_PADDR         0x90000000
-#endif
-#define XCHAL_KIO_SIZE                  0x10000000
-
-#if (!XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY) && defined(CONFIG_OF)
-#define XCHAL_KIO_PADDR                 xtensa_get_kio_paddr()
-#ifndef __ASSEMBLY__
-extern unsigned long xtensa_kio_paddr;
-
-static inline unsigned long xtensa_get_kio_paddr(void)
-{
-       return xtensa_kio_paddr;
-}
-#endif
-#else
-#define XCHAL_KIO_PADDR                 XCHAL_KIO_DEFAULT_PADDR
-#endif
-
-#if defined(CONFIG_MMU)
-
-#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
-/* Image Virtual Start Address */
-#define KERNELOFFSET                    (XCHAL_KSEG_CACHED_VADDR + \
-                                         CONFIG_KERNEL_LOAD_ADDRESS - \
-                                         XCHAL_KSEG_PADDR)
-#else
-#define KERNELOFFSET                    CONFIG_KERNEL_LOAD_ADDRESS
-#endif
-
-#else /* !defined(CONFIG_MMU) */
-/* MMU Not being used - Virtual == Physical */
-
-/* Location of the start of the kernel text, _start */
-#define KERNELOFFSET                    CONFIG_KERNEL_LOAD_ADDRESS
-
-#endif /* CONFIG_MMU */
+#if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
+#ifdef CONFIG_KERNEL_VIRTUAL_ADDRESS
+#define KERNELOFFSET                    CONFIG_KERNEL_VIRTUAL_ADDRESS
+#else
+#define KERNELOFFSET                    (CONFIG_KERNEL_LOAD_ADDRESS + \
+                                         XCHAL_KSEG_CACHED_VADDR - \
+                                         XCHAL_KSEG_PADDR)
+#endif
+#else
+#define KERNELOFFSET                    CONFIG_KERNEL_LOAD_ADDRESS
+#endif

#define RESET_VECTOR1_VADDR             (XCHAL_RESET_VECTOR1_VADDR)
#ifdef CONFIG_VECTORS_OFFSET
#define VECBASE_VADDR                   (KERNELOFFSET - CONFIG_VECTORS_OFFSET)
......
@@ -15,17 +15,9 @@
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/asmmacro.h>
-#include <asm/processor.h>
#include <asm/coprocessor.h>
-#include <asm/thread_info.h>
-#include <asm/asm-uaccess.h>
-#include <asm/unistd.h>
-#include <asm/ptrace.h>
#include <asm/current.h>
-#include <asm/pgtable.h>
-#include <asm/page.h>
-#include <asm/signal.h>
-#include <asm/tlbflush.h>
+#include <asm/regs.h>

#if XTENSA_HAVE_COPROCESSORS
......
@@ -529,7 +529,7 @@ common_exception_return:
        l32i    a4, a2, TI_PRE_COUNT
        bnez    a4, 4f
        call4   preempt_schedule_irq
-       j       1b
+       j       4f
#endif

#if XTENSA_FAKE_NMI
@@ -1876,8 +1876,7 @@ ENDPROC(fast_store_prohibited)

ENTRY(system_call)

-       /* reserve 4 bytes on stack for function parameter */
-       abi_entry(4)
+       abi_entry_default

        /* regs->syscall = regs->areg[2] */
@@ -1892,11 +1891,10 @@ ENTRY(system_call)
        mov     a6, a2
        call4   do_syscall_trace_enter
+       beqz    a6, .Lsyscall_exit
        l32i    a7, a2, PT_SYSCALL

1:
-       s32i    a7, a1, 4

        /* syscall = sys_call_table[syscall_nr] */

        movi    a4, sys_call_table
@@ -1906,8 +1904,6 @@ ENTRY(system_call)
        addx4   a4, a7, a4
        l32i    a4, a4, 0

-       movi    a5, sys_ni_syscall;
-       beq     a4, a5, 1f
-
        /* Load args: arg0 - arg5 are passed via regs. */
@@ -1918,25 +1914,19 @@ ENTRY(system_call)
        l32i    a10, a2, PT_AREG8
        l32i    a11, a2, PT_AREG9

-       /* Pass one additional argument to the syscall: pt_regs (on stack) */
-       s32i    a2, a1, 0
-
        callx4  a4

1:      /* regs->areg[2] = return_value */

        s32i    a6, a2, PT_AREG2
        bnez    a3, 1f
-       abi_ret(4)
+.Lsyscall_exit:
+       abi_ret_default

1:
-       l32i    a4, a1, 4
-       l32i    a3, a2, PT_SYSCALL
-       s32i    a4, a2, PT_SYSCALL
        mov     a6, a2
        call4   do_syscall_trace_leave
-       s32i    a3, a2, PT_SYSCALL
-       abi_ret(4)
+       abi_ret_default

ENDPROC(system_call)
......
@@ -260,6 +260,13 @@ ENTRY(_startup)
        ___invalidate_icache_all a2 a3
        isync

+#ifdef CONFIG_XIP_KERNEL
+       /* Setup bootstrap CPU stack in XIP kernel */
+       movi    a1, start_info
+       l32i    a1, a1, 0
+#endif
+
        movi    a6, 0
        xsr     a6, excsave1
@@ -355,7 +362,7 @@ ENDPROC(cpu_restart)
 * DATA section
 */

-       .section ".data.init.refok"
+       __REFDATA
        .align  4
ENTRY(start_info)
        .long   init_thread_union + KERNEL_STACK_SIZE
......
@@ -264,6 +264,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
                                &regs->areg[XCHAL_NUM_AREGS - len/4], len);
                }

+               childregs->syscall = regs->syscall;
+
                /* The thread pointer is passed in the '4th argument' (= a5) */
                if (clone_flags & CLONE_SETTLS)
                        childregs->threadptr = childregs->areg[5];
......
@@ -542,14 +542,28 @@ long arch_ptrace(struct task_struct *child, long request,
        return ret;
}

-void do_syscall_trace_enter(struct pt_regs *regs)
+void do_syscall_trace_leave(struct pt_regs *regs);
+int do_syscall_trace_enter(struct pt_regs *regs)
{
+       if (regs->syscall == NO_SYSCALL)
+               regs->areg[2] = -ENOSYS;
+
        if (test_thread_flag(TIF_SYSCALL_TRACE) &&
-           tracehook_report_syscall_entry(regs))
+           tracehook_report_syscall_entry(regs)) {
+               regs->areg[2] = -ENOSYS;
                regs->syscall = NO_SYSCALL;
+               return 0;
+       }
+
+       if (regs->syscall == NO_SYSCALL) {
+               do_syscall_trace_leave(regs);
+               return 0;
+       }

        if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
                trace_sys_enter(regs, syscall_get_nr(current, regs));
+
+       return 1;
}

void do_syscall_trace_leave(struct pt_regs *regs)
......
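The function above establishes a new contract that the system_call assembly in the entry.S hunk consumes via "beqz a6, .Lsyscall_exit": do_syscall_trace_enter() returns non-zero when the handler should run and zero when the syscall must be skipped, with regs->areg[2] already holding the return value. A rough userspace model of that decision, with made-up regs fields purely for illustration (not the kernel's pt_regs):

#include <errno.h>
#include <stdio.h>

#define NO_SYSCALL (-1)

struct fake_regs {
	long syscall;	/* requested syscall number, NO_SYSCALL if invalid */
	long areg2;	/* models regs->areg[2], the syscall return value  */
};

/* Returns 1 if the syscall handler should run, 0 if it must be skipped. */
static int trace_enter(struct fake_regs *regs, int tracer_aborts)
{
	if (regs->syscall == NO_SYSCALL)
		regs->areg2 = -ENOSYS;

	if (tracer_aborts) {		/* tracer rejected the syscall */
		regs->areg2 = -ENOSYS;
		regs->syscall = NO_SYSCALL;
		return 0;
	}

	if (regs->syscall == NO_SYSCALL)
		return 0;		/* invalid number: skip the handler */

	return 1;			/* run sys_call_table[syscall] */
}

int main(void)
{
	struct fake_regs ok = { .syscall = 42, .areg2 = 0 };
	struct fake_regs bad = { .syscall = NO_SYSCALL, .areg2 = 0 };
	int run;

	run = trace_enter(&ok, 0);
	printf("valid syscall: run=%d\n", run);

	run = trace_enter(&bad, 0);
	printf("invalid syscall: run=%d, return value=%ld\n", run, bad.areg2);
	return 0;
}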
@@ -308,6 +308,10 @@ extern char _Level6InterruptVector_text_end;
 extern char _SecondaryResetVector_text_start;
 extern char _SecondaryResetVector_text_end;
 #endif
+#ifdef CONFIG_XIP_KERNEL
+extern char _xip_start[];
+extern char _xip_end[];
+#endif
 static inline int __init_memblock mem_reserve(unsigned long start,
 					      unsigned long end)

@@ -339,6 +343,9 @@ void __init setup_arch(char **cmdline_p)
 #endif
 	mem_reserve(__pa(_stext), __pa(_end));
+#ifdef CONFIG_XIP_KERNEL
+	mem_reserve(__pa(_xip_start), __pa(_xip_end));
+#endif
 #ifdef CONFIG_VECTORS_OFFSET
 	mem_reserve(__pa(&_WindowVectors_text_start),
...
@@ -236,9 +236,9 @@ restore_sigcontext(struct pt_regs *regs, struct rt_sigframe __user *frame)
  * Do a signal return; undo the signal stack.
  */
-asmlinkage long xtensa_rt_sigreturn(long a0, long a1, long a2, long a3,
-				    long a4, long a5, struct pt_regs *regs)
+asmlinkage long xtensa_rt_sigreturn(void)
 {
+	struct pt_regs *regs = current_pt_regs();
 	struct rt_sigframe __user *frame;
 	sigset_t set;
 	int ret;
...
@@ -491,32 +491,27 @@ void show_trace(struct task_struct *task, unsigned long *sp)
 	pr_info("Call Trace:\n");
 	walk_stackframe(sp, show_trace_cb, NULL);
-#ifndef CONFIG_KALLSYMS
-	pr_cont("\n");
-#endif
 }
-static int kstack_depth_to_print = 24;
+#define STACK_DUMP_ENTRY_SIZE 4
+#define STACK_DUMP_LINE_SIZE 32
+static size_t kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
 void show_stack(struct task_struct *task, unsigned long *sp)
 {
-	int i = 0;
-	unsigned long *stack;
+	size_t len;
 	if (!sp)
 		sp = stack_pointer(task);
-	stack = sp;
-	pr_info("Stack:\n");
+	len = min((-(size_t)sp) & (THREAD_SIZE - STACK_DUMP_ENTRY_SIZE),
+		  kstack_depth_to_print * STACK_DUMP_ENTRY_SIZE);
-	for (i = 0; i < kstack_depth_to_print; i++) {
-		if (kstack_end(sp))
-			break;
-		pr_cont(" %08lx", *sp++);
-		if (i % 8 == 7)
-			pr_cont("\n");
-	}
-	show_trace(task, stack);
+	pr_info("Stack:\n");
+	print_hex_dump(KERN_INFO, " ", DUMP_PREFIX_NONE,
+		       STACK_DUMP_LINE_SIZE, STACK_DUMP_ENTRY_SIZE,
+		       sp, len, false);
+	show_trace(task, sp);
 }
 DEFINE_SPINLOCK(die_lock);
...
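The rewritten show_stack() above clamps the dump length so print_hex_dump() never reads past the THREAD_SIZE-aligned top of the current kernel stack. A minimal userspace sketch of that arithmetic follows; the THREAD_SIZE, depth and sample stack pointer values are made-up illustration values, not taken from this patch:

#include <stdio.h>
#include <stddef.h>

#define THREAD_SIZE            8192	/* assumed kernel stack size for the example */
#define STACK_DUMP_ENTRY_SIZE  4
#define DEPTH_TO_PRINT         24	/* stands in for CONFIG_PRINT_STACK_DEPTH */

static size_t min_size(size_t a, size_t b) { return a < b ? a : b; }

int main(void)
{
	/* Hypothetical word-aligned stack pointer, 40 bytes below a THREAD_SIZE boundary. */
	size_t sp = (size_t)0xd0004000u - 40;

	/*
	 * (-sp) & (THREAD_SIZE - STACK_DUMP_ENTRY_SIZE) is the number of bytes
	 * between sp and the next THREAD_SIZE-aligned boundary, so the dump
	 * stops at the end of the current stack region even when the
	 * configured depth would reach further.
	 */
	size_t to_boundary = (-sp) & (THREAD_SIZE - STACK_DUMP_ENTRY_SIZE);
	size_t len = min_size(to_boundary,
			      DEPTH_TO_PRINT * STACK_DUMP_ENTRY_SIZE);

	printf("bytes to stack boundary: %zu, dump length: %zu\n",
	       to_boundary, len);
	return 0;
}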
@@ -119,7 +119,7 @@ SECTIONS
     SCHED_TEXT
     CPUIDLE_TEXT
     LOCK_TEXT
+    *(.fixup)
   }
   _etext = .;
   PROVIDE (etext = .);

@@ -128,12 +128,11 @@ SECTIONS
   RO_DATA(4096)
-  /* Relocation table */
-
-  .fixup   : { *(.fixup) }
   /* Data section */
+#ifdef CONFIG_XIP_KERNEL
+  INIT_TEXT_SECTION(PAGE_SIZE)
+#else
   _sdata = .;
   RW_DATA(XCHAL_ICACHE_LINESIZE, PAGE_SIZE, THREAD_SIZE)
   _edata = .;

@@ -147,6 +146,11 @@ SECTIONS
   .init.data :
   {
     INIT_DATA
+  }
+#endif
+
+  .init.rodata :
+  {
     . = ALIGN(0x4);
     __tagtable_begin = .;
     *(.taglist)

@@ -187,11 +191,15 @@ SECTIONS
     RELOCATE_ENTRY(_DebugInterruptVector_text,
 		   .DebugInterruptVector.text);
 #endif
+#ifdef CONFIG_XIP_KERNEL
+    RELOCATE_ENTRY(_xip_data, .data);
+    RELOCATE_ENTRY(_xip_init_data, .init.data);
+#else
 #if defined(CONFIG_SMP)
     RELOCATE_ENTRY(_SecondaryResetVector_text,
 		   .SecondaryResetVector.text);
 #endif
+#endif
     __boot_reloc_table_end = ABSOLUTE(.) ;

@@ -278,7 +286,7 @@ SECTIONS
   . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;
 #endif
-#if defined(CONFIG_SMP)
+#if !defined(CONFIG_XIP_KERNEL) && defined(CONFIG_SMP)
   SECTION_VECTOR (_SecondaryResetVector_text,
 		  .SecondaryResetVector.text,

@@ -291,12 +299,48 @@ SECTIONS
   . = ALIGN(PAGE_SIZE);
+#ifndef CONFIG_XIP_KERNEL
   __init_end = .;
   BSS_SECTION(0, 8192, 0)
+#endif
   _end = .;
+
+#ifdef CONFIG_XIP_KERNEL
+  . = CONFIG_XIP_DATA_ADDR;
+
+  _xip_start = .;
+
+#undef LOAD_OFFSET
+#define LOAD_OFFSET \
+  (CONFIG_XIP_DATA_ADDR - (LOADADDR(.dummy) + SIZEOF(.dummy) + 3) & ~ 3)
+
+  _xip_data_start = .;
+  _sdata = .;
+  RW_DATA(XCHAL_ICACHE_LINESIZE, PAGE_SIZE, THREAD_SIZE)
+  _edata = .;
+  _xip_data_end = .;
+
+  /* Initialization data: */
+
+  STRUCT_ALIGN();
+  _xip_init_data_start = .;
+  __init_begin = .;
+  .init.data :
+  {
+    INIT_DATA
+  }
+  _xip_init_data_end = .;
+  __init_end = .;
+
+  BSS_SECTION(0, 8192, 0)
+
+  _xip_end = .;
+#undef LOAD_OFFSET
+#endif
   DWARF_DEBUG
   .xt.prop 0 : { KEEP(*(.xt.prop .xt.prop.* .gnu.linkonce.prop.*)) }
...
@@ -197,6 +197,8 @@ void do_page_fault(struct pt_regs *regs)
 		struct mm_struct *act_mm = current->active_mm;
 		int index = pgd_index(address);
 		pgd_t *pgd, *pgd_k;
+		p4d_t *p4d, *p4d_k;
+		pud_t *pud, *pud_k;
 		pmd_t *pmd, *pmd_k;
 		pte_t *pte_k;

@@ -211,8 +213,18 @@ void do_page_fault(struct pt_regs *regs)
 		pgd_val(*pgd) = pgd_val(*pgd_k);
-		pmd = pmd_offset(pgd, address);
-		pmd_k = pmd_offset(pgd_k, address);
+		p4d = p4d_offset(pgd, address);
+		p4d_k = p4d_offset(pgd_k, address);
+		if (!p4d_present(*p4d) || !p4d_present(*p4d_k))
+			goto bad_page_fault;
+
+		pud = pud_offset(p4d, address);
+		pud_k = pud_offset(p4d_k, address);
+		if (!pud_present(*pud) || !pud_present(*pud_k))
+			goto bad_page_fault;
+
+		pmd = pmd_offset(pud, address);
+		pmd_k = pmd_offset(pud_k, address);
 		if (!pmd_present(*pmd) || !pmd_present(*pmd_k))
 			goto bad_page_fault;
...
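On xtensa the MMU has only two hardware levels, so the p4d and pud steps added above (and in the mm hunks below) are folded: each *_offset() helper just hands back the entry it was given, and the extra *_present() checks collapse onto the pgd entry. The following is a rough, self-contained userspace model of that folding; the types, table sizes, and walk helpers are simplified illustrations, not the kernel's definitions:

#include <stdio.h>
#include <stdint.h>

#define PTRS_PER_PGD 4
#define PTRS_PER_PTE 4
#define PAGE_SHIFT   12

typedef struct { uintptr_t val; } pgd_t;	/* top level: points to a pte page */
typedef pgd_t p4d_t;				/* folded levels reuse the layout  */
typedef pgd_t pud_t;
typedef pgd_t pmd_t;
typedef struct { uintptr_t val; } pte_t;

static pgd_t *pgd_offset(pgd_t *table, unsigned long addr)
{
	return table + ((addr >> (PAGE_SHIFT + 2)) & (PTRS_PER_PGD - 1));
}

/* Folded levels: the "offset" is simply the enclosing entry itself. */
static p4d_t *p4d_offset(pgd_t *pgd, unsigned long addr) { (void)addr; return pgd; }
static pud_t *pud_offset(p4d_t *p4d, unsigned long addr) { (void)addr; return p4d; }
static pmd_t *pmd_offset(pud_t *pud, unsigned long addr) { (void)addr; return pud; }

int main(void)
{
	pte_t ptes[PTRS_PER_PTE] = { { 0x1111 }, { 0x2222 }, { 0x3333 }, { 0x4444 } };
	pgd_t pgd_table[PTRS_PER_PGD] = { { (uintptr_t)ptes } };
	unsigned long addr = 0x2000;	/* pgd slot 0, pte slot 2 */

	pgd_t *pgd = pgd_offset(pgd_table, addr);
	p4d_t *p4d = p4d_offset(pgd, addr);	/* same slot as pgd */
	pud_t *pud = pud_offset(p4d, addr);	/* same slot again  */
	pmd_t *pmd = pmd_offset(pud, addr);	/* and again        */
	pte_t *pte = (pte_t *)pmd->val + ((addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));

	printf("pte value: 0x%lx\n", (unsigned long)pte->val);	/* prints 0x3333 */
	return 0;
}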
@@ -193,8 +193,8 @@ void __init mem_init(void)
 		((max_low_pfn - min_low_pfn) * PAGE_SIZE) >> 20,
 		(unsigned long)_text, (unsigned long)_etext,
 		(unsigned long)(_etext - _text) >> 10,
-		(unsigned long)__start_rodata, (unsigned long)_sdata,
-		(unsigned long)(_sdata - __start_rodata) >> 10,
+		(unsigned long)__start_rodata, (unsigned long)__end_rodata,
+		(unsigned long)(__end_rodata - __start_rodata) >> 10,
 		(unsigned long)_sdata, (unsigned long)_edata,
 		(unsigned long)(_edata - _sdata) >> 10,
 		(unsigned long)__init_begin, (unsigned long)__init_end,
...
@@ -20,7 +20,9 @@ void __init kasan_early_init(void)
 {
 	unsigned long vaddr = KASAN_SHADOW_START;
 	pgd_t *pgd = pgd_offset_k(vaddr);
-	pmd_t *pmd = pmd_offset(pgd, vaddr);
+	p4d_t *p4d = p4d_offset(pgd, vaddr);
+	pud_t *pud = pud_offset(p4d, vaddr);
+	pmd_t *pmd = pmd_offset(pud, vaddr);
 	int i;
 	for (i = 0; i < PTRS_PER_PTE; ++i)

@@ -42,7 +44,9 @@ static void __init populate(void *start, void *end)
 	unsigned long i, j;
 	unsigned long vaddr = (unsigned long)start;
 	pgd_t *pgd = pgd_offset_k(vaddr);
-	pmd_t *pmd = pmd_offset(pgd, vaddr);
+	p4d_t *p4d = p4d_offset(pgd, vaddr);
+	pud_t *pud = pud_offset(p4d, vaddr);
+	pmd_t *pmd = pmd_offset(pud, vaddr);
 	pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);
 	if (!pte)

@@ -56,7 +60,9 @@ static void __init populate(void *start, void *end)
 		for (k = 0; k < PTRS_PER_PTE; ++k, ++j) {
 			phys_addr_t phys =
-				memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
+				memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE,
+							  0,
+							  MEMBLOCK_ALLOC_ANYWHERE);
 			if (!phys)
 				panic("Failed to allocate page table page\n");
...
@@ -22,7 +22,9 @@
 static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages)
 {
 	pgd_t *pgd = pgd_offset_k(vaddr);
-	pmd_t *pmd = pmd_offset(pgd, vaddr);
+	p4d_t *p4d = p4d_offset(pgd, vaddr);
+	pud_t *pud = pud_offset(p4d, vaddr);
+	pmd_t *pmd = pmd_offset(pud, vaddr);
 	pte_t *pte;
 	unsigned long i;
...
@@ -169,6 +169,8 @@ static unsigned get_pte_for_vaddr(unsigned vaddr)
 	struct task_struct *task = get_current();
 	struct mm_struct *mm = task->mm;
 	pgd_t *pgd;
+	p4d_t *p4d;
+	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;

@@ -177,7 +179,13 @@ static unsigned get_pte_for_vaddr(unsigned vaddr)
 	pgd = pgd_offset(mm, vaddr);
 	if (pgd_none_or_clear_bad(pgd))
 		return 0;
-	pmd = pmd_offset(pgd, vaddr);
+	p4d = p4d_offset(pgd, vaddr);
+	if (p4d_none_or_clear_bad(p4d))
+		return 0;
+	pud = pud_offset(p4d, vaddr);
+	if (pud_none_or_clear_bad(pud))
+		return 0;
+	pmd = pmd_offset(pud, vaddr);
 	if (pmd_none_or_clear_bad(pmd))
 		return 0;
 	pte = pte_offset_map(pmd, vaddr);

@@ -216,6 +224,8 @@ static int check_tlb_entry(unsigned w, unsigned e, bool dtlb)
 	unsigned tlbidx = w | (e << PAGE_SHIFT);
 	unsigned r0 = dtlb ?
 		read_dtlb_virtual(tlbidx) : read_itlb_virtual(tlbidx);
+	unsigned r1 = dtlb ?
+		read_dtlb_translation(tlbidx) : read_itlb_translation(tlbidx);
 	unsigned vpn = (r0 & PAGE_MASK) | (e << PAGE_SHIFT);
 	unsigned pte = get_pte_for_vaddr(vpn);
 	unsigned mm_asid = (get_rasid_register() >> 8) & ASID_MASK;

@@ -231,8 +241,6 @@ static int check_tlb_entry(unsigned w, unsigned e, bool dtlb)
 	}
 	if (tlb_asid == mm_asid) {
-		unsigned r1 = dtlb ? read_dtlb_translation(tlbidx) :
-			read_itlb_translation(tlbidx);
 		if ((pte ^ r1) & PAGE_MASK) {
 			pr_err("%cTLB: way: %u, entry: %u, mapping: %08x->%08x, PTE: %08x\n",
 			       dtlb ? 'D' : 'I', w, e, r0, r1, pte);
...