Commit 4de9ad9b authored by Linus Torvalds's avatar Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile

Pull Tile arch updates from Chris Metcalf:
 "These changes bring in a bunch of new functionality that has been
  maintained internally at Tilera over the last year, plus other stray
  bits of work that I've taken into the tile tree from other folks.

  The changes include some PCI root complex work, interrupt-driven
  console support, support for performing fast-path unaligned data
  fixups by kernel-based JIT code generation, CONFIG_PREEMPT support,
  vDSO support for gettimeofday(), a serial driver for the tilegx
  on-chip UART, KGDB support, more optimized string routines, support
  for ftrace and kprobes, improved ASLR, and many bug fixes.

  We also remove support for the old TILE64 chip, which is no longer
  buildable"

* git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile: (85 commits)
  tile: refresh tile defconfig files
  tile: rework <asm/cmpxchg.h>
  tile PCI RC: make default consistent DMA mask 32-bit
  tile: add null check for kzalloc in tile/kernel/setup.c
  tile: make __write_once a synonym for __read_mostly
  tile: remove support for TILE64
  tile: use asm-generic/bitops/builtin-*.h
  tile: eliminate no-op "noatomichash" boot argument
  tile: use standard tile_bundle_bits type in traps.c
  tile: simplify code referencing hypervisor API addresses
  tile: change <asm/system.h> to <asm/switch_to.h> in comments
  tile: mark pcibios_init() as __init
  tile: check for correct compiler earlier in asm-offsets.c
  tile: use standard 'generic-y' model for <asm/hw_irq.h>
  tile: use asm-generic version of <asm/local64.h>
  tile PCI RC: add comment about "PCI hole" problem
  tile: remove DEBUG_EXTRA_FLAGS kernel config option
  tile: add virt_to_kpte() API and clean up and document behavior
  tile: support FRAME_POINTER
  tile: support reporting Tilera hypervisor statistics
  ...
parents 576c25eb 06da6629
...@@ -8373,9 +8373,14 @@ M: Chris Metcalf <cmetcalf@tilera.com> ...@@ -8373,9 +8373,14 @@ M: Chris Metcalf <cmetcalf@tilera.com>
W: http://www.tilera.com/scm/ W: http://www.tilera.com/scm/
S: Supported S: Supported
F: arch/tile/ F: arch/tile/
F: drivers/tty/hvc/hvc_tile.c F: drivers/char/tile-srom.c
F: drivers/net/ethernet/tile/
F: drivers/edac/tile_edac.c F: drivers/edac/tile_edac.c
F: drivers/net/ethernet/tile/
F: drivers/rtc/rtc-tile.c
F: drivers/tty/hvc/hvc_tile.c
F: drivers/tty/serial/tilegx.c
F: drivers/usb/host/*-tilegx.c
F: include/linux/usb/tilegx.h
TLAN NETWORK DRIVER TLAN NETWORK DRIVER
M: Samuel Chessman <chessman@tux.org> M: Samuel Chessman <chessman@tux.org>
......
...@@ -26,6 +26,7 @@ config TILE ...@@ -26,6 +26,7 @@ config TILE
select HAVE_SYSCALL_TRACEPOINTS select HAVE_SYSCALL_TRACEPOINTS
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
select HAVE_DEBUG_STACKOVERFLOW select HAVE_DEBUG_STACKOVERFLOW
select ARCH_WANT_FRAME_POINTERS
# FIXME: investigate whether we need/want these options. # FIXME: investigate whether we need/want these options.
# select HAVE_IOREMAP_PROT # select HAVE_IOREMAP_PROT
...@@ -64,6 +65,9 @@ config HUGETLB_SUPER_PAGES ...@@ -64,6 +65,9 @@ config HUGETLB_SUPER_PAGES
depends on HUGETLB_PAGE && TILEGX depends on HUGETLB_PAGE && TILEGX
def_bool y def_bool y
config GENERIC_TIME_VSYSCALL
def_bool y
# FIXME: tilegx can implement a more efficient rwsem. # FIXME: tilegx can implement a more efficient rwsem.
config RWSEM_GENERIC_SPINLOCK config RWSEM_GENERIC_SPINLOCK
def_bool y def_bool y
...@@ -112,10 +116,19 @@ config SMP ...@@ -112,10 +116,19 @@ config SMP
config HVC_TILE config HVC_TILE
depends on TTY depends on TTY
select HVC_DRIVER select HVC_DRIVER
select HVC_IRQ if TILEGX
def_bool y def_bool y
config TILEGX config TILEGX
bool "Building with TILE-Gx (64-bit) compiler and toolchain" bool "Building for TILE-Gx (64-bit) processor"
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_TRACE_MCOUNT_TEST
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_DYNAMIC_FTRACE
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_KPROBES
select HAVE_KRETPROBES
select HAVE_ARCH_KGDB
config TILEPRO config TILEPRO
def_bool !TILEGX def_bool !TILEGX
...@@ -194,7 +207,7 @@ config SYSVIPC_COMPAT ...@@ -194,7 +207,7 @@ config SYSVIPC_COMPAT
def_bool y def_bool y
depends on COMPAT && SYSVIPC depends on COMPAT && SYSVIPC
# We do not currently support disabling HIGHMEM on tile64 and tilepro. # We do not currently support disabling HIGHMEM on tilepro.
config HIGHMEM config HIGHMEM
bool # "Support for more than 512 MB of RAM" bool # "Support for more than 512 MB of RAM"
default !TILEGX default !TILEGX
...@@ -300,6 +313,8 @@ config PAGE_OFFSET ...@@ -300,6 +313,8 @@ config PAGE_OFFSET
source "mm/Kconfig" source "mm/Kconfig"
source "kernel/Kconfig.preempt"
config CMDLINE_BOOL config CMDLINE_BOOL
bool "Built-in kernel command line" bool "Built-in kernel command line"
default n default n
...@@ -396,8 +411,20 @@ config NO_IOMEM ...@@ -396,8 +411,20 @@ config NO_IOMEM
config NO_IOPORT config NO_IOPORT
def_bool !PCI def_bool !PCI
config TILE_PCI_IO
bool "PCI I/O space support"
default n
depends on PCI
depends on TILEGX
---help---
Enable PCI I/O space support on TILEGx. Since the PCI I/O space
is used by few modern PCIe endpoint devices, its support is disabled
by default to save the TRIO PIO Region resource for other purposes.
source "drivers/pci/Kconfig" source "drivers/pci/Kconfig"
source "drivers/pci/pcie/Kconfig"
config TILE_USB config TILE_USB
tristate "Tilera USB host adapter support" tristate "Tilera USB host adapter support"
default y default y
......
...@@ -14,14 +14,12 @@ config EARLY_PRINTK ...@@ -14,14 +14,12 @@ config EARLY_PRINTK
with klogd/syslogd. You should normally N here, with klogd/syslogd. You should normally N here,
unless you want to debug such a crash. unless you want to debug such a crash.
config DEBUG_EXTRA_FLAGS config TILE_HVGLUE_TRACE
string "Additional compiler arguments when building with '-g'" bool "Provide wrapper functions for hypervisor ABI calls"
depends on DEBUG_INFO default n
default ""
help help
Debug info can be large, and flags like Provide wrapper functions for the hypervisor ABI calls
`-femit-struct-debug-baseonly' can reduce the kernel file defined in arch/tile/kernel/hvglue.S. This allows tracing
size and build time noticeably. Such flags are often mechanisms, etc., to have visibility into those calls.
helpful if the main use of debug info is line number info.
endmenu endmenu
...@@ -30,10 +30,6 @@ endif ...@@ -30,10 +30,6 @@ endif
# In kernel modules, this causes load failures due to unsupported relocations. # In kernel modules, this causes load failures due to unsupported relocations.
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
ifneq ($(CONFIG_DEBUG_EXTRA_FLAGS),"")
KBUILD_CFLAGS += $(CONFIG_DEBUG_EXTRA_FLAGS)
endif
LIBGCC_PATH := \ LIBGCC_PATH := \
$(shell $(CC) $(KBUILD_CFLAGS) $(KCFLAGS) -print-libgcc-file-name) $(shell $(CC) $(KBUILD_CFLAGS) $(KCFLAGS) -print-libgcc-file-name)
......
This diff is collapsed.
CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y CONFIG_POSIX_MQUEUE=y
CONFIG_AUDIT=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BSD_PROCESS_ACCT=y CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_FHANDLE=y
CONFIG_TASKSTATS=y CONFIG_TASKSTATS=y
CONFIG_TASK_DELAY_ACCT=y CONFIG_TASK_DELAY_ACCT=y
CONFIG_TASK_XACCT=y CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_AUDIT=y
CONFIG_LOG_BUF_SHIFT=19 CONFIG_LOG_BUF_SHIFT=19
CONFIG_CGROUPS=y CONFIG_CGROUPS=y
CONFIG_CGROUP_DEBUG=y CONFIG_CGROUP_DEBUG=y
...@@ -17,14 +16,13 @@ CONFIG_CGROUP_DEVICE=y ...@@ -17,14 +16,13 @@ CONFIG_CGROUP_DEVICE=y
CONFIG_CPUSETS=y CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y CONFIG_CGROUP_CPUACCT=y
CONFIG_RESOURCE_COUNTERS=y CONFIG_RESOURCE_COUNTERS=y
CONFIG_CGROUP_MEMCG=y
CONFIG_CGROUP_MEMCG_SWAP=y
CONFIG_CGROUP_SCHED=y CONFIG_CGROUP_SCHED=y
CONFIG_RT_GROUP_SCHED=y CONFIG_RT_GROUP_SCHED=y
CONFIG_BLK_CGROUP=y CONFIG_BLK_CGROUP=y
CONFIG_NAMESPACES=y CONFIG_NAMESPACES=y
CONFIG_RELAY=y CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y CONFIG_BLK_DEV_INITRD=y
CONFIG_RD_XZ=y
CONFIG_SYSCTL_SYSCALL=y CONFIG_SYSCTL_SYSCALL=y
CONFIG_EMBEDDED=y CONFIG_EMBEDDED=y
# CONFIG_COMPAT_BRK is not set # CONFIG_COMPAT_BRK is not set
...@@ -44,11 +42,10 @@ CONFIG_UNIXWARE_DISKLABEL=y ...@@ -44,11 +42,10 @@ CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_SGI_PARTITION=y CONFIG_SGI_PARTITION=y
CONFIG_SUN_PARTITION=y CONFIG_SUN_PARTITION=y
CONFIG_KARMA_PARTITION=y CONFIG_KARMA_PARTITION=y
CONFIG_EFI_PARTITION=y
CONFIG_CFQ_GROUP_IOSCHED=y CONFIG_CFQ_GROUP_IOSCHED=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_HZ_100=y CONFIG_HZ_100=y
# CONFIG_COMPACTION is not set
CONFIG_PREEMPT_VOLUNTARY=y
CONFIG_PCI_DEBUG=y CONFIG_PCI_DEBUG=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_MISC=y CONFIG_BINFMT_MISC=y
...@@ -122,16 +119,15 @@ CONFIG_NF_CONNTRACK_PPTP=m ...@@ -122,16 +119,15 @@ CONFIG_NF_CONNTRACK_PPTP=m
CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NETFILTER_TPROXY=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
CONFIG_NETFILTER_XT_TARGET_CONNMARK=m CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
CONFIG_NETFILTER_XT_TARGET_CT=m
CONFIG_NETFILTER_XT_TARGET_DSCP=m CONFIG_NETFILTER_XT_TARGET_DSCP=m
CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
CONFIG_NETFILTER_XT_TARGET_MARK=m CONFIG_NETFILTER_XT_TARGET_MARK=m
CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFLOG=m
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
CONFIG_NETFILTER_XT_TARGET_TEE=m CONFIG_NETFILTER_XT_TARGET_TEE=m
CONFIG_NETFILTER_XT_TARGET_TPROXY=m CONFIG_NETFILTER_XT_TARGET_TPROXY=m
CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_TRACE=m
...@@ -189,14 +185,12 @@ CONFIG_IP_VS_SED=m ...@@ -189,14 +185,12 @@ CONFIG_IP_VS_SED=m
CONFIG_IP_VS_NQ=m CONFIG_IP_VS_NQ=m
CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NF_CONNTRACK_IPV4=m
# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set # CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
CONFIG_IP_NF_QUEUE=m
CONFIG_IP_NF_IPTABLES=y CONFIG_IP_NF_IPTABLES=y
CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m CONFIG_IP_NF_MATCH_ECN=m
CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=y CONFIG_IP_NF_FILTER=y
CONFIG_IP_NF_TARGET_REJECT=y CONFIG_IP_NF_TARGET_REJECT=y
CONFIG_IP_NF_TARGET_LOG=m
CONFIG_IP_NF_TARGET_ULOG=m CONFIG_IP_NF_TARGET_ULOG=m
CONFIG_IP_NF_MANGLE=m CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_ECN=m CONFIG_IP_NF_TARGET_ECN=m
...@@ -207,8 +201,6 @@ CONFIG_IP_NF_ARPTABLES=m ...@@ -207,8 +201,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_IP6_NF_QUEUE=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m CONFIG_IP6_NF_MATCH_EUI64=m
CONFIG_IP6_NF_MATCH_FRAG=m CONFIG_IP6_NF_MATCH_FRAG=m
...@@ -218,7 +210,6 @@ CONFIG_IP6_NF_MATCH_IPV6HEADER=m ...@@ -218,7 +210,6 @@ CONFIG_IP6_NF_MATCH_IPV6HEADER=m
CONFIG_IP6_NF_MATCH_MH=m CONFIG_IP6_NF_MATCH_MH=m
CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_MATCH_RT=m
CONFIG_IP6_NF_TARGET_HL=m CONFIG_IP6_NF_TARGET_HL=m
CONFIG_IP6_NF_TARGET_LOG=m
CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_MANGLE=m
...@@ -249,7 +240,6 @@ CONFIG_BRIDGE_EBT_NFLOG=m ...@@ -249,7 +240,6 @@ CONFIG_BRIDGE_EBT_NFLOG=m
CONFIG_RDS=m CONFIG_RDS=m
CONFIG_RDS_TCP=m CONFIG_RDS_TCP=m
CONFIG_BRIDGE=m CONFIG_BRIDGE=m
CONFIG_NET_DSA=y
CONFIG_VLAN_8021Q=m CONFIG_VLAN_8021Q=m
CONFIG_VLAN_8021Q_GVRP=y CONFIG_VLAN_8021Q_GVRP=y
CONFIG_PHONET=m CONFIG_PHONET=m
...@@ -297,6 +287,7 @@ CONFIG_NET_ACT_SIMP=m ...@@ -297,6 +287,7 @@ CONFIG_NET_ACT_SIMP=m
CONFIG_NET_ACT_SKBEDIT=m CONFIG_NET_ACT_SKBEDIT=m
CONFIG_NET_CLS_IND=y CONFIG_NET_CLS_IND=y
CONFIG_DCB=y CONFIG_DCB=y
CONFIG_DNS_RESOLVER=y
# CONFIG_WIRELESS is not set # CONFIG_WIRELESS is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS=y
...@@ -354,40 +345,7 @@ CONFIG_NET_DSA_MV88E6060=y ...@@ -354,40 +345,7 @@ CONFIG_NET_DSA_MV88E6060=y
CONFIG_NET_DSA_MV88E6131=y CONFIG_NET_DSA_MV88E6131=y
CONFIG_NET_DSA_MV88E6123_61_65=y CONFIG_NET_DSA_MV88E6123_61_65=y
# CONFIG_NET_VENDOR_3COM is not set # CONFIG_NET_VENDOR_3COM is not set
# CONFIG_NET_VENDOR_ADAPTEC is not set CONFIG_E1000E=y
# CONFIG_NET_VENDOR_ALTEON is not set
# CONFIG_NET_VENDOR_AMD is not set
# CONFIG_NET_VENDOR_ATHEROS is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_BROCADE is not set
# CONFIG_NET_VENDOR_CHELSIO is not set
# CONFIG_NET_VENDOR_CISCO is not set
# CONFIG_NET_VENDOR_DEC is not set
# CONFIG_NET_VENDOR_DLINK is not set
# CONFIG_NET_VENDOR_EMULEX is not set
# CONFIG_NET_VENDOR_EXAR is not set
# CONFIG_NET_VENDOR_HP is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MELLANOX is not set
# CONFIG_NET_VENDOR_MICREL is not set
# CONFIG_NET_VENDOR_MYRI is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_NVIDIA is not set
# CONFIG_NET_VENDOR_OKI is not set
# CONFIG_NET_PACKET_ENGINE is not set
# CONFIG_NET_VENDOR_QLOGIC is not set
# CONFIG_NET_VENDOR_REALTEK is not set
# CONFIG_NET_VENDOR_RDC is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SILAN is not set
# CONFIG_NET_VENDOR_SIS is not set
# CONFIG_NET_VENDOR_SMSC is not set
# CONFIG_NET_VENDOR_STMICRO is not set
# CONFIG_NET_VENDOR_SUN is not set
# CONFIG_NET_VENDOR_TEHUTI is not set
# CONFIG_NET_VENDOR_TI is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_WLAN is not set # CONFIG_WLAN is not set
# CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_KEYBOARD is not set
...@@ -403,7 +361,6 @@ CONFIG_I2C_CHARDEV=y ...@@ -403,7 +361,6 @@ CONFIG_I2C_CHARDEV=y
CONFIG_WATCHDOG=y CONFIG_WATCHDOG=y
CONFIG_WATCHDOG_NOWAYOUT=y CONFIG_WATCHDOG_NOWAYOUT=y
# CONFIG_VGA_ARB is not set # CONFIG_VGA_ARB is not set
# CONFIG_HID_SUPPORT is not set
# CONFIG_USB_SUPPORT is not set # CONFIG_USB_SUPPORT is not set
CONFIG_EDAC=y CONFIG_EDAC=y
CONFIG_EDAC_MM_EDAC=y CONFIG_EDAC_MM_EDAC=y
...@@ -448,13 +405,13 @@ CONFIG_PROC_KCORE=y ...@@ -448,13 +405,13 @@ CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y CONFIG_TMPFS_POSIX_ACL=y
CONFIG_HUGETLBFS=y CONFIG_HUGETLBFS=y
CONFIG_CONFIGFS_FS=m
CONFIG_ECRYPT_FS=m CONFIG_ECRYPT_FS=m
CONFIG_CRAMFS=m CONFIG_CRAMFS=m
CONFIG_SQUASHFS=m CONFIG_SQUASHFS=m
CONFIG_NFS_FS=m CONFIG_NFS_FS=m
CONFIG_NFS_V3=y
CONFIG_NFS_V3_ACL=y CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=y CONFIG_NFS_V4=m
CONFIG_NFS_V4_1=y CONFIG_NFS_V4_1=y
CONFIG_NFS_FSCACHE=y CONFIG_NFS_FSCACHE=y
CONFIG_NFSD=m CONFIG_NFSD=m
...@@ -508,26 +465,29 @@ CONFIG_NLS_ISO8859_15=m ...@@ -508,26 +465,29 @@ CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_KOI8_R=m CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m CONFIG_NLS_KOI8_U=m
CONFIG_NLS_UTF8=m CONFIG_NLS_UTF8=m
CONFIG_DLM=m
CONFIG_DLM_DEBUG=y CONFIG_DLM_DEBUG=y
CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_INFO_REDUCED=y
# CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_WARN_DEPRECATED is not set
CONFIG_FRAME_WARN=2048 CONFIG_FRAME_WARN=2048
CONFIG_MAGIC_SYSRQ=y
CONFIG_STRIP_ASM_SYMS=y CONFIG_STRIP_ASM_SYMS=y
CONFIG_DEBUG_FS=y CONFIG_DEBUG_FS=y
CONFIG_HEADERS_CHECK=y CONFIG_HEADERS_CHECK=y
# CONFIG_FRAME_POINTER is not set
CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_VM=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_DEBUG_STACKOVERFLOW=y
CONFIG_LOCKUP_DETECTOR=y CONFIG_LOCKUP_DETECTOR=y
CONFIG_SCHEDSTATS=y CONFIG_SCHEDSTATS=y
CONFIG_TIMER_STATS=y CONFIG_TIMER_STATS=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_INFO_REDUCED=y
CONFIG_DEBUG_VM=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_DEBUG_LIST=y CONFIG_DEBUG_LIST=y
CONFIG_DEBUG_CREDENTIALS=y CONFIG_DEBUG_CREDENTIALS=y
CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y CONFIG_RCU_CPU_STALL_TIMEOUT=60
CONFIG_DYNAMIC_DEBUG=y
CONFIG_ASYNC_RAID6_TEST=m CONFIG_ASYNC_RAID6_TEST=m
CONFIG_DEBUG_STACKOVERFLOW=y
CONFIG_KEYS_DEBUG_PROC_KEYS=y CONFIG_KEYS_DEBUG_PROC_KEYS=y
CONFIG_SECURITY=y CONFIG_SECURITY=y
CONFIG_SECURITYFS=y CONFIG_SECURITYFS=y
...@@ -536,7 +496,6 @@ CONFIG_SECURITY_NETWORK_XFRM=y ...@@ -536,7 +496,6 @@ CONFIG_SECURITY_NETWORK_XFRM=y
CONFIG_SECURITY_SELINUX=y CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y CONFIG_SECURITY_SELINUX_BOOTPARAM=y
CONFIG_SECURITY_SELINUX_DISABLE=y CONFIG_SECURITY_SELINUX_DISABLE=y
CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_PCRYPT=m CONFIG_CRYPTO_PCRYPT=m
CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_TEST=m
...@@ -549,14 +508,12 @@ CONFIG_CRYPTO_XTS=m ...@@ -549,14 +508,12 @@ CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32C=y
CONFIG_CRYPTO_MICHAEL_MIC=m CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD128=m CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA1=y CONFIG_CRYPTO_SHA1=y
CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_WP512=m
......
...@@ -26,3 +26,8 @@ config TILE_GXIO_TRIO ...@@ -26,3 +26,8 @@ config TILE_GXIO_TRIO
config TILE_GXIO_USB_HOST config TILE_GXIO_USB_HOST
bool bool
select TILE_GXIO select TILE_GXIO
# Support direct access to the TILE-Gx UART hardware from kernel space.
config TILE_GXIO_UART
bool
select TILE_GXIO
...@@ -6,4 +6,5 @@ obj-$(CONFIG_TILE_GXIO) += iorpc_globals.o kiorpc.o ...@@ -6,4 +6,5 @@ obj-$(CONFIG_TILE_GXIO) += iorpc_globals.o kiorpc.o
obj-$(CONFIG_TILE_GXIO_DMA) += dma_queue.o obj-$(CONFIG_TILE_GXIO_DMA) += dma_queue.o
obj-$(CONFIG_TILE_GXIO_MPIPE) += mpipe.o iorpc_mpipe.o iorpc_mpipe_info.o obj-$(CONFIG_TILE_GXIO_MPIPE) += mpipe.o iorpc_mpipe.o iorpc_mpipe_info.o
obj-$(CONFIG_TILE_GXIO_TRIO) += trio.o iorpc_trio.o obj-$(CONFIG_TILE_GXIO_TRIO) += trio.o iorpc_trio.o
obj-$(CONFIG_TILE_GXIO_UART) += uart.o iorpc_uart.o
obj-$(CONFIG_TILE_GXIO_USB_HOST) += usb_host.o iorpc_usb_host.o obj-$(CONFIG_TILE_GXIO_USB_HOST) += usb_host.o iorpc_usb_host.o
...@@ -61,6 +61,29 @@ int gxio_trio_alloc_memory_maps(gxio_trio_context_t * context, ...@@ -61,6 +61,29 @@ int gxio_trio_alloc_memory_maps(gxio_trio_context_t * context,
EXPORT_SYMBOL(gxio_trio_alloc_memory_maps); EXPORT_SYMBOL(gxio_trio_alloc_memory_maps);
/* Marshaling buffer for the ALLOC_SCATTER_QUEUES hypervisor RPC. */
struct alloc_scatter_queues_param {
unsigned int count;
unsigned int first;
unsigned int flags;
};
/*
 * Allocate TRIO scatter queues by issuing the GXIO_TRIO_OP_ALLOC_SCATTER_QUEUES
 * RPC over the context's hypervisor device file descriptor.
 *
 * @context: TRIO gxio context holding the open hv device fd.
 * @count: number of scatter queues requested.
 * @first: index of the first queue requested.
 * @flags: allocation flags (passed through to the hypervisor).
 *
 * Returns the value of hv_dev_pwrite(); presumably the allocated queue
 * index on success or a negative error code — TODO confirm against the
 * hypervisor ABI, as SOURCE does not show the contract.
 */
int gxio_trio_alloc_scatter_queues(gxio_trio_context_t * context,
unsigned int count, unsigned int first,
unsigned int flags)
{
/* Pack the arguments into the on-stack RPC parameter block. */
struct alloc_scatter_queues_param temp;
struct alloc_scatter_queues_param *params = &temp;
params->count = count;
params->first = first;
params->flags = flags;
return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
sizeof(*params),
GXIO_TRIO_OP_ALLOC_SCATTER_QUEUES);
}
EXPORT_SYMBOL(gxio_trio_alloc_scatter_queues);
struct alloc_pio_regions_param { struct alloc_pio_regions_param {
unsigned int count; unsigned int count;
......
/*
* Copyright 2013 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
/* This file is machine-generated; DO NOT EDIT! */
#include "gxio/iorpc_uart.h"
/* Marshaling buffer for the CFG_INTERRUPT hypervisor RPC. */
struct cfg_interrupt_param {
union iorpc_interrupt interrupt;
};
/*
 * Configure where the UART's interrupt is delivered, via the
 * GXIO_UART_OP_CFG_INTERRUPT RPC on the context's hv device fd.
 *
 * @context: UART gxio context holding the open hv device fd.
 * @inter_x, @inter_y: target tile coordinates for the interrupt.
 * @inter_ipi, @inter_event: IPI number and event bit to raise.
 *
 * Returns the value of hv_dev_pwrite() (0 or a negative error code —
 * TODO confirm against the hypervisor ABI).
 */
int gxio_uart_cfg_interrupt(gxio_uart_context_t *context, int inter_x,
int inter_y, int inter_ipi, int inter_event)
{
/* Fill the kernel-interrupt form of the iorpc interrupt union. */
struct cfg_interrupt_param temp;
struct cfg_interrupt_param *params = &temp;
params->interrupt.kernel.x = inter_x;
params->interrupt.kernel.y = inter_y;
params->interrupt.kernel.ipi = inter_ipi;
params->interrupt.kernel.event = inter_event;
return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
sizeof(*params), GXIO_UART_OP_CFG_INTERRUPT);
}
EXPORT_SYMBOL(gxio_uart_cfg_interrupt);
/* Marshaling buffer for the GET_MMIO_BASE hypervisor RPC. */
struct get_mmio_base_param {
HV_PTE base;
};
/*
 * Fetch the UART's MMIO base PTE from the hypervisor via the
 * GXIO_UART_OP_GET_MMIO_BASE read RPC.
 *
 * @context: UART gxio context holding the open hv device fd.
 * @base: out-parameter receiving the MMIO base PTE.
 *
 * Returns the hv_dev_pread() result.  NOTE(review): *base is written
 * unconditionally, even when the RPC returned an error, so on failure it
 * holds uninitialized stack data — callers must check the return value
 * before using *base.  (Left as-is: this file is machine-generated.)
 */
int gxio_uart_get_mmio_base(gxio_uart_context_t *context, HV_PTE *base)
{
int __result;
struct get_mmio_base_param temp;
struct get_mmio_base_param *params = &temp;
__result =
hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params),
GXIO_UART_OP_GET_MMIO_BASE);
*base = params->base;
return __result;
}
EXPORT_SYMBOL(gxio_uart_get_mmio_base);
/* Marshaling buffer for the CHECK_MMIO_OFFSET hypervisor RPC. */
struct check_mmio_offset_param {
unsigned long offset;
unsigned long size;
};
/*
 * Ask the hypervisor whether an MMIO region [offset, offset+size) is
 * valid for this UART, via the GXIO_UART_OP_CHECK_MMIO_OFFSET RPC.
 *
 * @context: UART gxio context holding the open hv device fd.
 * @offset: byte offset into the device's MMIO space.
 * @size: length in bytes of the region to validate.
 *
 * Returns the hv_dev_pwrite() result (presumably 0 if the range is
 * acceptable, negative otherwise — TODO confirm against the hv ABI).
 */
int gxio_uart_check_mmio_offset(gxio_uart_context_t *context,
unsigned long offset, unsigned long size)
{
struct check_mmio_offset_param temp;
struct check_mmio_offset_param *params = &temp;
params->offset = offset;
params->size = size;
return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
sizeof(*params), GXIO_UART_OP_CHECK_MMIO_OFFSET);
}
EXPORT_SYMBOL(gxio_uart_check_mmio_offset);
/*
* Copyright 2013 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
/*
* Implementation of UART gxio calls.
*/
#include <linux/io.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <gxio/uart.h>
#include <gxio/iorpc_globals.h>
#include <gxio/iorpc_uart.h>
#include <gxio/kiorpc.h>
/*
 * Initialize a UART gxio context: open the hypervisor iorpc device for
 * the given UART and map its MMIO window into kernel space.
 *
 * @context: context to initialize; on success its fd and mmio_base are set.
 * @uart_index: which UART instance to open ("uart/%d/iorpc").
 *
 * Returns 0 on success; on failure, a GXIO error code from hv_dev_open()
 * if it falls in [GXIO_ERR_MIN, GXIO_ERR_MAX], otherwise -ENODEV.
 */
int gxio_uart_init(gxio_uart_context_t *context, int uart_index)
{
char file[32];
int fd;
snprintf(file, sizeof(file), "uart/%d/iorpc", uart_index);
fd = hv_dev_open((HV_VirtAddr) file, 0);
if (fd < 0) {
/* Pass through recognized gxio errors; fold anything else to -ENODEV. */
if (fd >= GXIO_ERR_MIN && fd <= GXIO_ERR_MAX)
return fd;
else
return -ENODEV;
}
context->fd = fd;
/* Map in the MMIO space. */
context->mmio_base = (void __force *)
iorpc_ioremap(fd, HV_UART_MMIO_OFFSET, HV_UART_MMIO_SIZE);
if (context->mmio_base == NULL) {
/* Mapping failed: release the hv fd and mark the context unused. */
hv_dev_close(context->fd);
context->fd = -1;
return -ENODEV;
}
return 0;
}
EXPORT_SYMBOL_GPL(gxio_uart_init);
/*
 * Tear down a UART gxio context created by gxio_uart_init(): unmap the
 * MMIO window, close the hypervisor fd, and reset the context fields so
 * the context is safely reusable.  Always returns 0.
 */
int gxio_uart_destroy(gxio_uart_context_t *context)
{
iounmap((void __force __iomem *)(context->mmio_base));
hv_dev_close(context->fd);
/* Mark the context as uninitialized. */
context->mmio_base = NULL;
context->fd = -1;
return 0;
}
EXPORT_SYMBOL_GPL(gxio_uart_destroy);
/*
 * UART register write wrapper: store @word to the UART register at byte
 * @offset from the context's mapped MMIO base via __gxio_mmio_write().
 */
void gxio_uart_write(gxio_uart_context_t *context, uint64_t offset,
uint64_t word)
{
__gxio_mmio_write(context->mmio_base + offset, word);
}
EXPORT_SYMBOL_GPL(gxio_uart_write);
/*
 * UART register read wrapper: load and return the UART register at byte
 * @offset from the context's mapped MMIO base via __gxio_mmio_read().
 */
uint64_t gxio_uart_read(gxio_uart_context_t *context, uint64_t offset)
{
return __gxio_mmio_read(context->mmio_base + offset);
}
EXPORT_SYMBOL_GPL(gxio_uart_read);
...@@ -22,6 +22,45 @@ ...@@ -22,6 +22,45 @@
#ifndef __ASSEMBLER__ #ifndef __ASSEMBLER__
/*
* Map SQ Doorbell Format.
* This describes the format of the write-only doorbell register that exists
* in the last 8-bytes of the MAP_SQ_BASE/LIM range. This register is only
* writable from PCIe space. Writes to this register will not be written to
* Tile memory space and thus no IO VA translation is required if the last
* page of the BASE/LIM range is not otherwise written.
*/
__extension__
typedef union
{
struct
{
#ifndef __BIG_ENDIAN__
/*
* When written with a 1, the associated MAP_SQ region's doorbell
* interrupt will be triggered once all previous writes are visible to
* Tile software.
*/
uint_reg_t doorbell : 1;
/*
* When written with a 1, the descriptor at the head of the associated
* MAP_SQ's FIFO will be dequeued.
*/
uint_reg_t pop : 1;
/* Reserved. */
uint_reg_t __reserved : 62;
#else /* __BIG_ENDIAN__ */
uint_reg_t __reserved : 62;
uint_reg_t pop : 1;
uint_reg_t doorbell : 1;
#endif
};
uint_reg_t word;
} TRIO_MAP_SQ_DOORBELL_FMT_t;
/* /*
* Tile PIO Region Configuration - CFG Address Format. * Tile PIO Region Configuration - CFG Address Format.
* This register describes the address format for PIO accesses when the * This register describes the address format for PIO accesses when the
......
/*
* Copyright 2013 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
/* Machine-generated file; do not edit. */
#ifndef __ARCH_UART_H__
#define __ARCH_UART_H__
#include <arch/abi.h>
#include <arch/uart_def.h>
#ifndef __ASSEMBLER__
/* Divisor. */
__extension__
typedef union
{
struct
{
#ifndef __BIG_ENDIAN__
/*
* Baud Rate Divisor. Desired_baud_rate = REF_CLK frequency / (baud *
* 16).
* Note: REF_CLK is always 125 MHz, the default
* divisor = 68, baud rate = 125M/(68*16) = 115200 baud.
*/
uint_reg_t divisor : 12;
/* Reserved. */
uint_reg_t __reserved : 52;
#else /* __BIG_ENDIAN__ */
uint_reg_t __reserved : 52;
uint_reg_t divisor : 12;
#endif
};
uint_reg_t word;
} UART_DIVISOR_t;
/* FIFO Count. */
__extension__
typedef union
{
struct
{
#ifndef __BIG_ENDIAN__
/*
* n: n active entries in the receive FIFO (max is 2**8). Each entry has
* 8 bits.
* 0: no active entry in the receive FIFO (that is empty).
*/
uint_reg_t rfifo_count : 9;
/* Reserved. */
uint_reg_t __reserved_0 : 7;
/*
* n: n active entries in the transmit FIFO (max is 2**8). Each entry has
* 8 bits.
* 0: no active entry in the transmit FIFO (that is empty).
*/
uint_reg_t tfifo_count : 9;
/* Reserved. */
uint_reg_t __reserved_1 : 7;
/*
* n: n active entries in the write FIFO (max is 2**2). Each entry has 8
* bits.
* 0: no active entry in the write FIFO (that is empty).
*/
uint_reg_t wfifo_count : 3;
/* Reserved. */
uint_reg_t __reserved_2 : 29;
#else /* __BIG_ENDIAN__ */
uint_reg_t __reserved_2 : 29;
uint_reg_t wfifo_count : 3;
uint_reg_t __reserved_1 : 7;
uint_reg_t tfifo_count : 9;
uint_reg_t __reserved_0 : 7;
uint_reg_t rfifo_count : 9;
#endif
};
uint_reg_t word;
} UART_FIFO_COUNT_t;
/* FLAG. */
__extension__
typedef union
{
struct
{
#ifndef __BIG_ENDIAN__
/* Reserved. */
uint_reg_t __reserved_0 : 1;
/* 1: receive FIFO is empty */
uint_reg_t rfifo_empty : 1;
/* 1: write FIFO is empty. */
uint_reg_t wfifo_empty : 1;
/* 1: transmit FIFO is empty. */
uint_reg_t tfifo_empty : 1;
/* 1: receive FIFO is full. */
uint_reg_t rfifo_full : 1;
/* 1: write FIFO is full. */
uint_reg_t wfifo_full : 1;
/* 1: transmit FIFO is full. */
uint_reg_t tfifo_full : 1;
/* Reserved. */
uint_reg_t __reserved_1 : 57;
#else /* __BIG_ENDIAN__ */
uint_reg_t __reserved_1 : 57;
uint_reg_t tfifo_full : 1;
uint_reg_t wfifo_full : 1;
uint_reg_t rfifo_full : 1;
uint_reg_t tfifo_empty : 1;
uint_reg_t wfifo_empty : 1;
uint_reg_t rfifo_empty : 1;
uint_reg_t __reserved_0 : 1;
#endif
};
uint_reg_t word;
} UART_FLAG_t;
/*
* Interrupt Vector Mask.
* Each bit in this register corresponds to a specific interrupt. When set,
* the associated interrupt will not be dispatched.
*/
__extension__
typedef union
{
struct
{
#ifndef __BIG_ENDIAN__
/* Read data FIFO read and no data available */
uint_reg_t rdat_err : 1;
/* Write FIFO was written but it was full */
uint_reg_t wdat_err : 1;
/* Stop bit not found when current data was received */
uint_reg_t frame_err : 1;
/* Parity error was detected when current data was received */
uint_reg_t parity_err : 1;
/* Data was received but the receive FIFO was full */
uint_reg_t rfifo_overflow : 1;
/*
* An almost full event is reached when data is to be written to the
* receive FIFO, and the receive FIFO has more than or equal to
* BUFFER_THRESHOLD.RFIFO_AFULL bytes.
*/
uint_reg_t rfifo_afull : 1;
/* Reserved. */
uint_reg_t __reserved_0 : 1;
/* An entry in the transmit FIFO was popped */
uint_reg_t tfifo_re : 1;
/* An entry has been pushed into the receive FIFO */
uint_reg_t rfifo_we : 1;
/* An entry of the write FIFO has been popped */
uint_reg_t wfifo_re : 1;
/* Rshim read receive FIFO in protocol mode */
uint_reg_t rfifo_err : 1;
/*
* An almost empty event is reached when data is to be read from the
* transmit FIFO, and the transmit FIFO has less than or equal to
* BUFFER_THRESHOLD.TFIFO_AEMPTY bytes.
*/
uint_reg_t tfifo_aempty : 1;
/* Reserved. */
uint_reg_t __reserved_1 : 52;
#else /* __BIG_ENDIAN__ */
uint_reg_t __reserved_1 : 52;
uint_reg_t tfifo_aempty : 1;
uint_reg_t rfifo_err : 1;
uint_reg_t wfifo_re : 1;
uint_reg_t rfifo_we : 1;
uint_reg_t tfifo_re : 1;
uint_reg_t __reserved_0 : 1;
uint_reg_t rfifo_afull : 1;
uint_reg_t rfifo_overflow : 1;
uint_reg_t parity_err : 1;
uint_reg_t frame_err : 1;
uint_reg_t wdat_err : 1;
uint_reg_t rdat_err : 1;
#endif
};
uint_reg_t word;
} UART_INTERRUPT_MASK_t;
/*
* Interrupt vector, write-one-to-clear.
* Each bit in this register corresponds to a specific interrupt. Hardware
* sets the bit when the associated condition has occurred. Writing a 1
* clears the status bit.
*/
__extension__
typedef union
{
struct
{
#ifndef __BIG_ENDIAN__
/* Read data FIFO read and no data available */
uint_reg_t rdat_err : 1;
/* Write FIFO was written but it was full */
uint_reg_t wdat_err : 1;
/* Stop bit not found when current data was received */
uint_reg_t frame_err : 1;
/* Parity error was detected when current data was received */
uint_reg_t parity_err : 1;
/* Data was received but the receive FIFO was full */
uint_reg_t rfifo_overflow : 1;
/*
* Data was received and the receive FIFO is now almost full (more than
* BUFFER_THRESHOLD.RFIFO_AFULL bytes in it)
*/
uint_reg_t rfifo_afull : 1;
/* Reserved. */
uint_reg_t __reserved_0 : 1;
/* An entry in the transmit FIFO was popped */
uint_reg_t tfifo_re : 1;
/* An entry has been pushed into the receive FIFO */
uint_reg_t rfifo_we : 1;
/* An entry of the write FIFO has been popped */
uint_reg_t wfifo_re : 1;
/* Rshim read receive FIFO in protocol mode */
uint_reg_t rfifo_err : 1;
/*
* Data was read from the transmit FIFO and now it is almost empty (less
* than or equal to BUFFER_THRESHOLD.TFIFO_AEMPTY bytes in it).
*/
uint_reg_t tfifo_aempty : 1;
/* Reserved. */
uint_reg_t __reserved_1 : 52;
#else /* __BIG_ENDIAN__ */
uint_reg_t __reserved_1 : 52;
uint_reg_t tfifo_aempty : 1;
uint_reg_t rfifo_err : 1;
uint_reg_t wfifo_re : 1;
uint_reg_t rfifo_we : 1;
uint_reg_t tfifo_re : 1;
uint_reg_t __reserved_0 : 1;
uint_reg_t rfifo_afull : 1;
uint_reg_t rfifo_overflow : 1;
uint_reg_t parity_err : 1;
uint_reg_t frame_err : 1;
uint_reg_t wdat_err : 1;
uint_reg_t rdat_err : 1;
#endif
};
uint_reg_t word;
} UART_INTERRUPT_STATUS_t;
/* Type. */
__extension__
typedef union
{
struct
{
#ifndef __BIG_ENDIAN__
/* Number of stop bits, rx and tx */
uint_reg_t sbits : 1;
/* Reserved. */
uint_reg_t __reserved_0 : 1;
/* Data word size, rx and tx */
uint_reg_t dbits : 1;
/* Reserved. */
uint_reg_t __reserved_1 : 1;
/* Parity selection, rx and tx */
uint_reg_t ptype : 3;
/* Reserved. */
uint_reg_t __reserved_2 : 57;
#else /* __BIG_ENDIAN__ */
uint_reg_t __reserved_2 : 57;
uint_reg_t ptype : 3;
uint_reg_t __reserved_1 : 1;
uint_reg_t dbits : 1;
uint_reg_t __reserved_0 : 1;
uint_reg_t sbits : 1;
#endif
};
uint_reg_t word;
} UART_TYPE_t;
#endif /* !defined(__ASSEMBLER__) */
#endif /* !defined(__ARCH_UART_H__) */
/*
* Copyright 2013 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
/* Machine-generated file; do not edit. */
#ifndef __ARCH_UART_DEF_H__
#define __ARCH_UART_DEF_H__
#define UART_DIVISOR 0x0158
#define UART_FIFO_COUNT 0x0110
#define UART_FLAG 0x0108
#define UART_INTERRUPT_MASK 0x0208
#define UART_INTERRUPT_MASK__RDAT_ERR_SHIFT 0
#define UART_INTERRUPT_MASK__RDAT_ERR_WIDTH 1
#define UART_INTERRUPT_MASK__RDAT_ERR_RESET_VAL 1
#define UART_INTERRUPT_MASK__RDAT_ERR_RMASK 0x1
#define UART_INTERRUPT_MASK__RDAT_ERR_MASK 0x1
#define UART_INTERRUPT_MASK__RDAT_ERR_FIELD 0,0
#define UART_INTERRUPT_MASK__WDAT_ERR_SHIFT 1
#define UART_INTERRUPT_MASK__WDAT_ERR_WIDTH 1
#define UART_INTERRUPT_MASK__WDAT_ERR_RESET_VAL 1
#define UART_INTERRUPT_MASK__WDAT_ERR_RMASK 0x1
#define UART_INTERRUPT_MASK__WDAT_ERR_MASK 0x2
#define UART_INTERRUPT_MASK__WDAT_ERR_FIELD 1,1
#define UART_INTERRUPT_MASK__FRAME_ERR_SHIFT 2
#define UART_INTERRUPT_MASK__FRAME_ERR_WIDTH 1
#define UART_INTERRUPT_MASK__FRAME_ERR_RESET_VAL 1
#define UART_INTERRUPT_MASK__FRAME_ERR_RMASK 0x1
#define UART_INTERRUPT_MASK__FRAME_ERR_MASK 0x4
#define UART_INTERRUPT_MASK__FRAME_ERR_FIELD 2,2
#define UART_INTERRUPT_MASK__PARITY_ERR_SHIFT 3
#define UART_INTERRUPT_MASK__PARITY_ERR_WIDTH 1
#define UART_INTERRUPT_MASK__PARITY_ERR_RESET_VAL 1
#define UART_INTERRUPT_MASK__PARITY_ERR_RMASK 0x1
#define UART_INTERRUPT_MASK__PARITY_ERR_MASK 0x8
#define UART_INTERRUPT_MASK__PARITY_ERR_FIELD 3,3
#define UART_INTERRUPT_MASK__RFIFO_OVERFLOW_SHIFT 4
#define UART_INTERRUPT_MASK__RFIFO_OVERFLOW_WIDTH 1
#define UART_INTERRUPT_MASK__RFIFO_OVERFLOW_RESET_VAL 1
#define UART_INTERRUPT_MASK__RFIFO_OVERFLOW_RMASK 0x1
#define UART_INTERRUPT_MASK__RFIFO_OVERFLOW_MASK 0x10
#define UART_INTERRUPT_MASK__RFIFO_OVERFLOW_FIELD 4,4
#define UART_INTERRUPT_MASK__RFIFO_AFULL_SHIFT 5
#define UART_INTERRUPT_MASK__RFIFO_AFULL_WIDTH 1
#define UART_INTERRUPT_MASK__RFIFO_AFULL_RESET_VAL 1
#define UART_INTERRUPT_MASK__RFIFO_AFULL_RMASK 0x1
#define UART_INTERRUPT_MASK__RFIFO_AFULL_MASK 0x20
#define UART_INTERRUPT_MASK__RFIFO_AFULL_FIELD 5,5
#define UART_INTERRUPT_MASK__TFIFO_RE_SHIFT 7
#define UART_INTERRUPT_MASK__TFIFO_RE_WIDTH 1
#define UART_INTERRUPT_MASK__TFIFO_RE_RESET_VAL 1
#define UART_INTERRUPT_MASK__TFIFO_RE_RMASK 0x1
#define UART_INTERRUPT_MASK__TFIFO_RE_MASK 0x80
#define UART_INTERRUPT_MASK__TFIFO_RE_FIELD 7,7
#define UART_INTERRUPT_MASK__RFIFO_WE_SHIFT 8
#define UART_INTERRUPT_MASK__RFIFO_WE_WIDTH 1
#define UART_INTERRUPT_MASK__RFIFO_WE_RESET_VAL 1
#define UART_INTERRUPT_MASK__RFIFO_WE_RMASK 0x1
#define UART_INTERRUPT_MASK__RFIFO_WE_MASK 0x100
#define UART_INTERRUPT_MASK__RFIFO_WE_FIELD 8,8
#define UART_INTERRUPT_MASK__WFIFO_RE_SHIFT 9
#define UART_INTERRUPT_MASK__WFIFO_RE_WIDTH 1
#define UART_INTERRUPT_MASK__WFIFO_RE_RESET_VAL 1
#define UART_INTERRUPT_MASK__WFIFO_RE_RMASK 0x1
#define UART_INTERRUPT_MASK__WFIFO_RE_MASK 0x200
#define UART_INTERRUPT_MASK__WFIFO_RE_FIELD 9,9
#define UART_INTERRUPT_MASK__RFIFO_ERR_SHIFT 10
#define UART_INTERRUPT_MASK__RFIFO_ERR_WIDTH 1
#define UART_INTERRUPT_MASK__RFIFO_ERR_RESET_VAL 1
#define UART_INTERRUPT_MASK__RFIFO_ERR_RMASK 0x1
#define UART_INTERRUPT_MASK__RFIFO_ERR_MASK 0x400
#define UART_INTERRUPT_MASK__RFIFO_ERR_FIELD 10,10
#define UART_INTERRUPT_MASK__TFIFO_AEMPTY_SHIFT 11
#define UART_INTERRUPT_MASK__TFIFO_AEMPTY_WIDTH 1
#define UART_INTERRUPT_MASK__TFIFO_AEMPTY_RESET_VAL 1
#define UART_INTERRUPT_MASK__TFIFO_AEMPTY_RMASK 0x1
#define UART_INTERRUPT_MASK__TFIFO_AEMPTY_MASK 0x800
#define UART_INTERRUPT_MASK__TFIFO_AEMPTY_FIELD 11,11
#define UART_INTERRUPT_STATUS 0x0200
#define UART_RECEIVE_DATA 0x0148
#define UART_TRANSMIT_DATA 0x0140
#define UART_TYPE 0x0160
#define UART_TYPE__SBITS_SHIFT 0
#define UART_TYPE__SBITS_WIDTH 1
#define UART_TYPE__SBITS_RESET_VAL 1
#define UART_TYPE__SBITS_RMASK 0x1
#define UART_TYPE__SBITS_MASK 0x1
#define UART_TYPE__SBITS_FIELD 0,0
#define UART_TYPE__SBITS_VAL_ONE_SBITS 0x0
#define UART_TYPE__SBITS_VAL_TWO_SBITS 0x1
#define UART_TYPE__DBITS_SHIFT 2
#define UART_TYPE__DBITS_WIDTH 1
#define UART_TYPE__DBITS_RESET_VAL 0
#define UART_TYPE__DBITS_RMASK 0x1
#define UART_TYPE__DBITS_MASK 0x4
#define UART_TYPE__DBITS_FIELD 2,2
#define UART_TYPE__DBITS_VAL_EIGHT_DBITS 0x0
#define UART_TYPE__DBITS_VAL_SEVEN_DBITS 0x1
#define UART_TYPE__PTYPE_SHIFT 4
#define UART_TYPE__PTYPE_WIDTH 3
#define UART_TYPE__PTYPE_RESET_VAL 3
#define UART_TYPE__PTYPE_RMASK 0x7
#define UART_TYPE__PTYPE_MASK 0x70
#define UART_TYPE__PTYPE_FIELD 4,6
#define UART_TYPE__PTYPE_VAL_NONE 0x0
#define UART_TYPE__PTYPE_VAL_MARK 0x1
#define UART_TYPE__PTYPE_VAL_SPACE 0x2
#define UART_TYPE__PTYPE_VAL_EVEN 0x3
#define UART_TYPE__PTYPE_VAL_ODD 0x4
#endif /* !defined(__ARCH_UART_DEF_H__) */
...@@ -11,12 +11,13 @@ generic-y += errno.h ...@@ -11,12 +11,13 @@ generic-y += errno.h
generic-y += exec.h generic-y += exec.h
generic-y += fb.h generic-y += fb.h
generic-y += fcntl.h generic-y += fcntl.h
generic-y += hw_irq.h
generic-y += ioctl.h generic-y += ioctl.h
generic-y += ioctls.h generic-y += ioctls.h
generic-y += ipcbuf.h generic-y += ipcbuf.h
generic-y += irq_regs.h generic-y += irq_regs.h
generic-y += kdebug.h
generic-y += local.h generic-y += local.h
generic-y += local64.h
generic-y += msgbuf.h generic-y += msgbuf.h
generic-y += mutex.h generic-y += mutex.h
generic-y += param.h generic-y += param.h
......
...@@ -113,6 +113,32 @@ static inline int atomic_read(const atomic_t *v) ...@@ -113,6 +113,32 @@ static inline int atomic_read(const atomic_t *v)
*/ */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
/**
* atomic_xchg - atomically exchange contents of memory with a new value
* @v: pointer of type atomic_t
* @i: integer value to store in memory
*
* Atomically sets @v to @i and returns old @v
*/
static inline int atomic_xchg(atomic_t *v, int n)
{
return xchg(&v->counter, n);
}
/**
* atomic_cmpxchg - atomically exchange contents of memory if it matches
* @v: pointer of type atomic_t
* @o: old value that memory should have
* @n: new value to write to memory if it matches
*
* Atomically checks if @v holds @o and replaces it with @n if so.
* Returns the old value at @v.
*/
static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
{
return cmpxchg(&v->counter, o, n);
}
/** /**
* atomic_add_negative - add and test if negative * atomic_add_negative - add and test if negative
* @v: pointer of type atomic_t * @v: pointer of type atomic_t
...@@ -133,6 +159,32 @@ static inline int atomic_read(const atomic_t *v) ...@@ -133,6 +159,32 @@ static inline int atomic_read(const atomic_t *v)
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
/**
* atomic64_xchg - atomically exchange contents of memory with a new value
* @v: pointer of type atomic64_t
* @i: integer value to store in memory
*
* Atomically sets @v to @i and returns old @v
*/
static inline u64 atomic64_xchg(atomic64_t *v, u64 n)
{
return xchg64(&v->counter, n);
}
/**
* atomic64_cmpxchg - atomically exchange contents of memory if it matches
* @v: pointer of type atomic64_t
* @o: old value that memory should have
* @n: new value to write to memory if it matches
*
* Atomically checks if @v holds @o and replaces it with @n if so.
* Returns the old value at @v.
*/
static inline u64 atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
{
return cmpxchg64(&v->counter, o, n);
}
static inline long long atomic64_dec_if_positive(atomic64_t *v) static inline long long atomic64_dec_if_positive(atomic64_t *v)
{ {
long long c, old, dec; long long c, old, dec;
......
...@@ -22,40 +22,6 @@ ...@@ -22,40 +22,6 @@
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
/* Tile-specific routines to support <linux/atomic.h>. */
int _atomic_xchg(atomic_t *v, int n);
int _atomic_xchg_add(atomic_t *v, int i);
int _atomic_xchg_add_unless(atomic_t *v, int a, int u);
int _atomic_cmpxchg(atomic_t *v, int o, int n);
/**
* atomic_xchg - atomically exchange contents of memory with a new value
* @v: pointer of type atomic_t
* @i: integer value to store in memory
*
* Atomically sets @v to @i and returns old @v
*/
static inline int atomic_xchg(atomic_t *v, int n)
{
smp_mb(); /* barrier for proper semantics */
return _atomic_xchg(v, n);
}
/**
* atomic_cmpxchg - atomically exchange contents of memory if it matches
* @v: pointer of type atomic_t
* @o: old value that memory should have
* @n: new value to write to memory if it matches
*
* Atomically checks if @v holds @o and replaces it with @n if so.
* Returns the old value at @v.
*/
static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
{
smp_mb(); /* barrier for proper semantics */
return _atomic_cmpxchg(v, o, n);
}
/** /**
* atomic_add - add integer to atomic variable * atomic_add - add integer to atomic variable
* @i: integer value to add * @i: integer value to add
...@@ -65,7 +31,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int o, int n) ...@@ -65,7 +31,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
*/ */
static inline void atomic_add(int i, atomic_t *v) static inline void atomic_add(int i, atomic_t *v)
{ {
_atomic_xchg_add(v, i); _atomic_xchg_add(&v->counter, i);
} }
/** /**
...@@ -78,7 +44,7 @@ static inline void atomic_add(int i, atomic_t *v) ...@@ -78,7 +44,7 @@ static inline void atomic_add(int i, atomic_t *v)
static inline int atomic_add_return(int i, atomic_t *v) static inline int atomic_add_return(int i, atomic_t *v)
{ {
smp_mb(); /* barrier for proper semantics */ smp_mb(); /* barrier for proper semantics */
return _atomic_xchg_add(v, i) + i; return _atomic_xchg_add(&v->counter, i) + i;
} }
/** /**
...@@ -93,7 +59,7 @@ static inline int atomic_add_return(int i, atomic_t *v) ...@@ -93,7 +59,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
static inline int __atomic_add_unless(atomic_t *v, int a, int u) static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{ {
smp_mb(); /* barrier for proper semantics */ smp_mb(); /* barrier for proper semantics */
return _atomic_xchg_add_unless(v, a, u); return _atomic_xchg_add_unless(&v->counter, a, u);
} }
/** /**
...@@ -108,7 +74,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) ...@@ -108,7 +74,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
*/ */
static inline void atomic_set(atomic_t *v, int n) static inline void atomic_set(atomic_t *v, int n)
{ {
_atomic_xchg(v, n); _atomic_xchg(&v->counter, n);
} }
/* A 64bit atomic type */ /* A 64bit atomic type */
...@@ -119,11 +85,6 @@ typedef struct { ...@@ -119,11 +85,6 @@ typedef struct {
#define ATOMIC64_INIT(val) { (val) } #define ATOMIC64_INIT(val) { (val) }
u64 _atomic64_xchg(atomic64_t *v, u64 n);
u64 _atomic64_xchg_add(atomic64_t *v, u64 i);
u64 _atomic64_xchg_add_unless(atomic64_t *v, u64 a, u64 u);
u64 _atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n);
/** /**
* atomic64_read - read atomic variable * atomic64_read - read atomic variable
* @v: pointer of type atomic64_t * @v: pointer of type atomic64_t
...@@ -137,35 +98,7 @@ static inline u64 atomic64_read(const atomic64_t *v) ...@@ -137,35 +98,7 @@ static inline u64 atomic64_read(const atomic64_t *v)
* Casting away const is safe since the atomic support routines * Casting away const is safe since the atomic support routines
* do not write to memory if the value has not been modified. * do not write to memory if the value has not been modified.
*/ */
return _atomic64_xchg_add((atomic64_t *)v, 0); return _atomic64_xchg_add((u64 *)&v->counter, 0);
}
/**
* atomic64_xchg - atomically exchange contents of memory with a new value
* @v: pointer of type atomic64_t
* @i: integer value to store in memory
*
* Atomically sets @v to @i and returns old @v
*/
static inline u64 atomic64_xchg(atomic64_t *v, u64 n)
{
smp_mb(); /* barrier for proper semantics */
return _atomic64_xchg(v, n);
}
/**
* atomic64_cmpxchg - atomically exchange contents of memory if it matches
* @v: pointer of type atomic64_t
* @o: old value that memory should have
* @n: new value to write to memory if it matches
*
* Atomically checks if @v holds @o and replaces it with @n if so.
* Returns the old value at @v.
*/
static inline u64 atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
{
smp_mb(); /* barrier for proper semantics */
return _atomic64_cmpxchg(v, o, n);
} }
/** /**
...@@ -177,7 +110,7 @@ static inline u64 atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n) ...@@ -177,7 +110,7 @@ static inline u64 atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
*/ */
static inline void atomic64_add(u64 i, atomic64_t *v) static inline void atomic64_add(u64 i, atomic64_t *v)
{ {
_atomic64_xchg_add(v, i); _atomic64_xchg_add(&v->counter, i);
} }
/** /**
...@@ -190,7 +123,7 @@ static inline void atomic64_add(u64 i, atomic64_t *v) ...@@ -190,7 +123,7 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
static inline u64 atomic64_add_return(u64 i, atomic64_t *v) static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
{ {
smp_mb(); /* barrier for proper semantics */ smp_mb(); /* barrier for proper semantics */
return _atomic64_xchg_add(v, i) + i; return _atomic64_xchg_add(&v->counter, i) + i;
} }
/** /**
...@@ -205,7 +138,7 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v) ...@@ -205,7 +138,7 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u) static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
{ {
smp_mb(); /* barrier for proper semantics */ smp_mb(); /* barrier for proper semantics */
return _atomic64_xchg_add_unless(v, a, u) != u; return _atomic64_xchg_add_unless(&v->counter, a, u) != u;
} }
/** /**
...@@ -220,7 +153,7 @@ static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u) ...@@ -220,7 +153,7 @@ static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
*/ */
static inline void atomic64_set(atomic64_t *v, u64 n) static inline void atomic64_set(atomic64_t *v, u64 n)
{ {
_atomic64_xchg(v, n); _atomic64_xchg(&v->counter, n);
} }
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0) #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
...@@ -252,21 +185,6 @@ static inline void atomic64_set(atomic64_t *v, u64 n) ...@@ -252,21 +185,6 @@ static inline void atomic64_set(atomic64_t *v, u64 n)
* Internal definitions only beyond this point. * Internal definitions only beyond this point.
*/ */
#define ATOMIC_LOCKS_FOUND_VIA_TABLE() \
(!CHIP_HAS_CBOX_HOME_MAP() && defined(CONFIG_SMP))
#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
/* Number of entries in atomic_lock_ptr[]. */
#define ATOMIC_HASH_L1_SHIFT 6
#define ATOMIC_HASH_L1_SIZE (1 << ATOMIC_HASH_L1_SHIFT)
/* Number of locks in each struct pointed to by atomic_lock_ptr[]. */
#define ATOMIC_HASH_L2_SHIFT (CHIP_L2_LOG_LINE_SIZE() - 2)
#define ATOMIC_HASH_L2_SIZE (1 << ATOMIC_HASH_L2_SHIFT)
#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
/* /*
* Number of atomic locks in atomic_locks[]. Must be a power of two. * Number of atomic locks in atomic_locks[]. Must be a power of two.
* There is no reason for more than PAGE_SIZE / 8 entries, since that * There is no reason for more than PAGE_SIZE / 8 entries, since that
...@@ -281,8 +199,6 @@ static inline void atomic64_set(atomic64_t *v, u64 n) ...@@ -281,8 +199,6 @@ static inline void atomic64_set(atomic64_t *v, u64 n)
extern int atomic_locks[]; extern int atomic_locks[];
#endif #endif
#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
/* /*
* All the code that may fault while holding an atomic lock must * All the code that may fault while holding an atomic lock must
* place the pointer to the lock in ATOMIC_LOCK_REG so the fault code * place the pointer to the lock in ATOMIC_LOCK_REG so the fault code
......
...@@ -32,25 +32,6 @@ ...@@ -32,25 +32,6 @@
* on any routine which updates memory and returns a value. * on any routine which updates memory and returns a value.
*/ */
static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
{
int val;
__insn_mtspr(SPR_CMPEXCH_VALUE, o);
smp_mb(); /* barrier for proper semantics */
val = __insn_cmpexch4((void *)&v->counter, n);
smp_mb(); /* barrier for proper semantics */
return val;
}
static inline int atomic_xchg(atomic_t *v, int n)
{
int val;
smp_mb(); /* barrier for proper semantics */
val = __insn_exch4((void *)&v->counter, n);
smp_mb(); /* barrier for proper semantics */
return val;
}
static inline void atomic_add(int i, atomic_t *v) static inline void atomic_add(int i, atomic_t *v)
{ {
__insn_fetchadd4((void *)&v->counter, i); __insn_fetchadd4((void *)&v->counter, i);
...@@ -72,7 +53,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) ...@@ -72,7 +53,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
if (oldval == u) if (oldval == u)
break; break;
guess = oldval; guess = oldval;
oldval = atomic_cmpxchg(v, guess, guess + a); oldval = cmpxchg(&v->counter, guess, guess + a);
} while (guess != oldval); } while (guess != oldval);
return oldval; return oldval;
} }
...@@ -84,25 +65,6 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) ...@@ -84,25 +65,6 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
#define atomic64_read(v) ((v)->counter) #define atomic64_read(v) ((v)->counter)
#define atomic64_set(v, i) ((v)->counter = (i)) #define atomic64_set(v, i) ((v)->counter = (i))
static inline long atomic64_cmpxchg(atomic64_t *v, long o, long n)
{
long val;
smp_mb(); /* barrier for proper semantics */
__insn_mtspr(SPR_CMPEXCH_VALUE, o);
val = __insn_cmpexch((void *)&v->counter, n);
smp_mb(); /* barrier for proper semantics */
return val;
}
static inline long atomic64_xchg(atomic64_t *v, long n)
{
long val;
smp_mb(); /* barrier for proper semantics */
val = __insn_exch((void *)&v->counter, n);
smp_mb(); /* barrier for proper semantics */
return val;
}
static inline void atomic64_add(long i, atomic64_t *v) static inline void atomic64_add(long i, atomic64_t *v)
{ {
__insn_fetchadd((void *)&v->counter, i); __insn_fetchadd((void *)&v->counter, i);
...@@ -124,7 +86,7 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u) ...@@ -124,7 +86,7 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
if (oldval == u) if (oldval == u)
break; break;
guess = oldval; guess = oldval;
oldval = atomic64_cmpxchg(v, guess, guess + a); oldval = cmpxchg(&v->counter, guess, guess + a);
} while (guess != oldval); } while (guess != oldval);
return oldval != u; return oldval != u;
} }
......
...@@ -77,7 +77,6 @@ ...@@ -77,7 +77,6 @@
#define __sync() __insn_mf() #define __sync() __insn_mf()
#if !CHIP_HAS_MF_WAITS_FOR_VICTIMS()
#include <hv/syscall_public.h> #include <hv/syscall_public.h>
/* /*
* Issue an uncacheable load to each memory controller, then * Issue an uncacheable load to each memory controller, then
...@@ -96,7 +95,6 @@ static inline void __mb_incoherent(void) ...@@ -96,7 +95,6 @@ static inline void __mb_incoherent(void)
"r20", "r21", "r22", "r23", "r24", "r20", "r21", "r22", "r23", "r24",
"r25", "r26", "r27", "r28", "r29"); "r25", "r26", "r27", "r28", "r29");
} }
#endif
/* Fence to guarantee visibility of stores to incoherent memory. */ /* Fence to guarantee visibility of stores to incoherent memory. */
static inline void static inline void
...@@ -104,7 +102,6 @@ mb_incoherent(void) ...@@ -104,7 +102,6 @@ mb_incoherent(void)
{ {
__insn_mf(); __insn_mf();
#if !CHIP_HAS_MF_WAITS_FOR_VICTIMS()
{ {
#if CHIP_HAS_TILE_WRITE_PENDING() #if CHIP_HAS_TILE_WRITE_PENDING()
const unsigned long WRITE_TIMEOUT_CYCLES = 400; const unsigned long WRITE_TIMEOUT_CYCLES = 400;
...@@ -116,7 +113,6 @@ mb_incoherent(void) ...@@ -116,7 +113,6 @@ mb_incoherent(void)
#endif /* CHIP_HAS_TILE_WRITE_PENDING() */ #endif /* CHIP_HAS_TILE_WRITE_PENDING() */
(void) __mb_incoherent(); (void) __mb_incoherent();
} }
#endif /* CHIP_HAS_MF_WAITS_FOR_VICTIMS() */
} }
#define fast_wmb() __sync() #define fast_wmb() __sync()
......
...@@ -28,17 +28,6 @@ ...@@ -28,17 +28,6 @@
#include <asm/bitops_32.h> #include <asm/bitops_32.h>
#endif #endif
/**
* __ffs - find first set bit in word
* @word: The word to search
*
* Undefined if no set bit exists, so code should check against 0 first.
*/
static inline unsigned long __ffs(unsigned long word)
{
return __builtin_ctzl(word);
}
/** /**
* ffz - find first zero bit in word * ffz - find first zero bit in word
* @word: The word to search * @word: The word to search
...@@ -50,33 +39,6 @@ static inline unsigned long ffz(unsigned long word) ...@@ -50,33 +39,6 @@ static inline unsigned long ffz(unsigned long word)
return __builtin_ctzl(~word); return __builtin_ctzl(~word);
} }
/**
* __fls - find last set bit in word
* @word: The word to search
*
* Undefined if no set bit exists, so code should check against 0 first.
*/
static inline unsigned long __fls(unsigned long word)
{
return (sizeof(word) * 8) - 1 - __builtin_clzl(word);
}
/**
* ffs - find first set bit in word
* @x: the word to search
*
* This is defined the same way as the libc and compiler builtin ffs
* routines, therefore differs in spirit from the other bitops.
*
* ffs(value) returns 0 if value is 0 or the position of the first
* set bit if value is nonzero. The first (least significant) bit
* is at position 1.
*/
static inline int ffs(int x)
{
return __builtin_ffs(x);
}
static inline int fls64(__u64 w) static inline int fls64(__u64 w)
{ {
return (sizeof(__u64) * 8) - __builtin_clzll(w); return (sizeof(__u64) * 8) - __builtin_clzll(w);
...@@ -118,6 +80,9 @@ static inline unsigned long __arch_hweight64(__u64 w) ...@@ -118,6 +80,9 @@ static inline unsigned long __arch_hweight64(__u64 w)
return __builtin_popcountll(w); return __builtin_popcountll(w);
} }
#include <asm-generic/bitops/builtin-__ffs.h>
#include <asm-generic/bitops/builtin-__fls.h>
#include <asm-generic/bitops/builtin-ffs.h>
#include <asm-generic/bitops/const_hweight.h> #include <asm-generic/bitops/const_hweight.h>
#include <asm-generic/bitops/lock.h> #include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/find.h> #include <asm-generic/bitops/find.h>
......
...@@ -16,7 +16,7 @@ ...@@ -16,7 +16,7 @@
#define _ASM_TILE_BITOPS_32_H #define _ASM_TILE_BITOPS_32_H
#include <linux/compiler.h> #include <linux/compiler.h>
#include <linux/atomic.h> #include <asm/barrier.h>
/* Tile-specific routines to support <asm/bitops.h>. */ /* Tile-specific routines to support <asm/bitops.h>. */
unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask); unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask);
......
...@@ -16,7 +16,7 @@ ...@@ -16,7 +16,7 @@
#define _ASM_TILE_BITOPS_64_H #define _ASM_TILE_BITOPS_64_H
#include <linux/compiler.h> #include <linux/compiler.h>
#include <linux/atomic.h> #include <asm/cmpxchg.h>
/* See <asm/bitops.h> for API comments. */ /* See <asm/bitops.h> for API comments. */
...@@ -44,8 +44,7 @@ static inline void change_bit(unsigned nr, volatile unsigned long *addr) ...@@ -44,8 +44,7 @@ static inline void change_bit(unsigned nr, volatile unsigned long *addr)
oldval = *addr; oldval = *addr;
do { do {
guess = oldval; guess = oldval;
oldval = atomic64_cmpxchg((atomic64_t *)addr, oldval = cmpxchg(addr, guess, guess ^ mask);
guess, guess ^ mask);
} while (guess != oldval); } while (guess != oldval);
} }
...@@ -90,8 +89,7 @@ static inline int test_and_change_bit(unsigned nr, ...@@ -90,8 +89,7 @@ static inline int test_and_change_bit(unsigned nr,
oldval = *addr; oldval = *addr;
do { do {
guess = oldval; guess = oldval;
oldval = atomic64_cmpxchg((atomic64_t *)addr, oldval = cmpxchg(addr, guess, guess ^ mask);
guess, guess ^ mask);
} while (guess != oldval); } while (guess != oldval);
return (oldval & mask) != 0; return (oldval & mask) != 0;
} }
......
...@@ -49,9 +49,16 @@ ...@@ -49,9 +49,16 @@
#define __read_mostly __attribute__((__section__(".data..read_mostly"))) #define __read_mostly __attribute__((__section__(".data..read_mostly")))
/* /*
* Attribute for data that is kept read/write coherent until the end of * Originally we used small TLB pages for kernel data and grouped some
* initialization, then bumped to read/only incoherent for performance. * things together as "write once", enforcing the property at the end
* of initialization by making those pages read-only and non-coherent.
* This allowed better cache utilization since cache inclusion did not
* need to be maintained. However, to do this requires an extra TLB
* entry, which on balance is more of a performance hit than the
* non-coherence is a performance gain, so we now just make "read
* mostly" and "write once" be synonyms. We keep the attribute
* separate in case we change our minds at a future date.
*/ */
#define __write_once __attribute__((__section__(".w1data"))) #define __write_once __read_mostly
#endif /* _ASM_TILE_CACHE_H */ #endif /* _ASM_TILE_CACHE_H */
...@@ -75,23 +75,6 @@ static inline void copy_to_user_page(struct vm_area_struct *vma, ...@@ -75,23 +75,6 @@ static inline void copy_to_user_page(struct vm_area_struct *vma,
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
memcpy((dst), (src), (len)) memcpy((dst), (src), (len))
/*
* Invalidate a VA range; pads to L2 cacheline boundaries.
*
* Note that on TILE64, __inv_buffer() actually flushes modified
* cache lines in addition to invalidating them, i.e., it's the
* same as __finv_buffer().
*/
static inline void __inv_buffer(void *buffer, size_t size)
{
char *next = (char *)((long)buffer & -L2_CACHE_BYTES);
char *finish = (char *)L2_CACHE_ALIGN((long)buffer + size);
while (next < finish) {
__insn_inv(next);
next += CHIP_INV_STRIDE();
}
}
/* Flush a VA range; pads to L2 cacheline boundaries. */ /* Flush a VA range; pads to L2 cacheline boundaries. */
static inline void __flush_buffer(void *buffer, size_t size) static inline void __flush_buffer(void *buffer, size_t size)
{ {
...@@ -115,13 +98,6 @@ static inline void __finv_buffer(void *buffer, size_t size) ...@@ -115,13 +98,6 @@ static inline void __finv_buffer(void *buffer, size_t size)
} }
/* Invalidate a VA range and wait for it to be complete. */
static inline void inv_buffer(void *buffer, size_t size)
{
__inv_buffer(buffer, size);
mb();
}
/* /*
* Flush a locally-homecached VA range and wait for the evicted * Flush a locally-homecached VA range and wait for the evicted
* cachelines to hit memory. * cachelines to hit memory.
...@@ -142,6 +118,26 @@ static inline void finv_buffer_local(void *buffer, size_t size) ...@@ -142,6 +118,26 @@ static inline void finv_buffer_local(void *buffer, size_t size)
mb_incoherent(); mb_incoherent();
} }
#ifdef __tilepro__
/* Invalidate a VA range; pads to L2 cacheline boundaries. */
static inline void __inv_buffer(void *buffer, size_t size)
{
char *next = (char *)((long)buffer & -L2_CACHE_BYTES);
char *finish = (char *)L2_CACHE_ALIGN((long)buffer + size);
while (next < finish) {
__insn_inv(next);
next += CHIP_INV_STRIDE();
}
}
/* Invalidate a VA range and wait for it to be complete. */
static inline void inv_buffer(void *buffer, size_t size)
{
__inv_buffer(buffer, size);
mb();
}
#endif
/* /*
* Flush and invalidate a VA range that is homed remotely, waiting * Flush and invalidate a VA range that is homed remotely, waiting
* until the memory controller holds the flushed values. If "hfh" is * until the memory controller holds the flushed values. If "hfh" is
......
...@@ -20,53 +20,108 @@ ...@@ -20,53 +20,108 @@
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
/* Nonexistent functions intended to cause link errors. */ #include <asm/barrier.h>
extern unsigned long __xchg_called_with_bad_pointer(void);
extern unsigned long __cmpxchg_called_with_bad_pointer(void);
#define xchg(ptr, x) \ /* Nonexistent functions intended to cause compile errors. */
extern void __xchg_called_with_bad_pointer(void)
__compiletime_error("Bad argument size for xchg");
extern void __cmpxchg_called_with_bad_pointer(void)
__compiletime_error("Bad argument size for cmpxchg");
#ifndef __tilegx__
/* Note the _atomic_xxx() routines include a final mb(). */
int _atomic_xchg(int *ptr, int n);
int _atomic_xchg_add(int *v, int i);
int _atomic_xchg_add_unless(int *v, int a, int u);
int _atomic_cmpxchg(int *ptr, int o, int n);
u64 _atomic64_xchg(u64 *v, u64 n);
u64 _atomic64_xchg_add(u64 *v, u64 i);
u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u);
u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
#define xchg(ptr, n) \
({ \
if (sizeof(*(ptr)) != 4) \
__xchg_called_with_bad_pointer(); \
smp_mb(); \
(typeof(*(ptr)))_atomic_xchg((int *)(ptr), (int)(n)); \
})
#define cmpxchg(ptr, o, n) \
({ \
if (sizeof(*(ptr)) != 4) \
__cmpxchg_called_with_bad_pointer(); \
smp_mb(); \
(typeof(*(ptr)))_atomic_cmpxchg((int *)ptr, (int)o, (int)n); \
})
#define xchg64(ptr, n) \
({ \
if (sizeof(*(ptr)) != 8) \
__xchg_called_with_bad_pointer(); \
smp_mb(); \
(typeof(*(ptr)))_atomic64_xchg((u64 *)(ptr), (u64)(n)); \
})
#define cmpxchg64(ptr, o, n) \
({ \
if (sizeof(*(ptr)) != 8) \
__cmpxchg_called_with_bad_pointer(); \
smp_mb(); \
(typeof(*(ptr)))_atomic64_cmpxchg((u64 *)ptr, (u64)o, (u64)n); \
})
#else
#define xchg(ptr, n) \
({ \ ({ \
typeof(*(ptr)) __x; \ typeof(*(ptr)) __x; \
smp_mb(); \
switch (sizeof(*(ptr))) { \ switch (sizeof(*(ptr))) { \
case 4: \ case 4: \
__x = (typeof(__x))(typeof(__x-__x))atomic_xchg( \ __x = (typeof(__x))(unsigned long) \
(atomic_t *)(ptr), \ __insn_exch4((ptr), (u32)(unsigned long)(n)); \
(u32)(typeof((x)-(x)))(x)); \
break; \ break; \
case 8: \ case 8: \
__x = (typeof(__x))(typeof(__x-__x))atomic64_xchg( \ __x = (typeof(__x)) \
(atomic64_t *)(ptr), \ __insn_exch((ptr), (unsigned long)(n)); \
(u64)(typeof((x)-(x)))(x)); \
break; \ break; \
default: \ default: \
__xchg_called_with_bad_pointer(); \ __xchg_called_with_bad_pointer(); \
break; \
} \ } \
smp_mb(); \
__x; \ __x; \
}) })
#define cmpxchg(ptr, o, n) \ #define cmpxchg(ptr, o, n) \
({ \ ({ \
typeof(*(ptr)) __x; \ typeof(*(ptr)) __x; \
__insn_mtspr(SPR_CMPEXCH_VALUE, (unsigned long)(o)); \
smp_mb(); \
switch (sizeof(*(ptr))) { \ switch (sizeof(*(ptr))) { \
case 4: \ case 4: \
__x = (typeof(__x))(typeof(__x-__x))atomic_cmpxchg( \ __x = (typeof(__x))(unsigned long) \
(atomic_t *)(ptr), \ __insn_cmpexch4((ptr), (u32)(unsigned long)(n)); \
(u32)(typeof((o)-(o)))(o), \
(u32)(typeof((n)-(n)))(n)); \
break; \ break; \
case 8: \ case 8: \
__x = (typeof(__x))(typeof(__x-__x))atomic64_cmpxchg( \ __x = (typeof(__x))__insn_cmpexch((ptr), (u64)(n)); \
(atomic64_t *)(ptr), \
(u64)(typeof((o)-(o)))(o), \
(u64)(typeof((n)-(n)))(n)); \
break; \ break; \
default: \ default: \
__cmpxchg_called_with_bad_pointer(); \ __cmpxchg_called_with_bad_pointer(); \
break; \
} \ } \
smp_mb(); \
__x; \ __x; \
}) })
#define tas(ptr) (xchg((ptr), 1)) #define xchg64 xchg
#define cmpxchg64 cmpxchg
#endif
#define tas(ptr) xchg((ptr), 1)
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
......
...@@ -23,7 +23,10 @@ struct dev_archdata { ...@@ -23,7 +23,10 @@ struct dev_archdata {
/* Offset of the DMA address from the PA. */ /* Offset of the DMA address from the PA. */
dma_addr_t dma_offset; dma_addr_t dma_offset;
/* Highest DMA address that can be generated by this device. */ /*
* Highest DMA address that can be generated by devices that
* have limited DMA capability, i.e. non 64-bit capable.
*/
dma_addr_t max_direct_dma_addr; dma_addr_t max_direct_dma_addr;
}; };
......
...@@ -20,9 +20,14 @@ ...@@ -20,9 +20,14 @@
#include <linux/cache.h> #include <linux/cache.h>
#include <linux/io.h> #include <linux/io.h>
#ifdef __tilegx__
#define ARCH_HAS_DMA_GET_REQUIRED_MASK
#endif
extern struct dma_map_ops *tile_dma_map_ops; extern struct dma_map_ops *tile_dma_map_ops;
extern struct dma_map_ops *gx_pci_dma_map_ops; extern struct dma_map_ops *gx_pci_dma_map_ops;
extern struct dma_map_ops *gx_legacy_pci_dma_map_ops; extern struct dma_map_ops *gx_legacy_pci_dma_map_ops;
extern struct dma_map_ops *gx_hybrid_pci_dma_map_ops;
static inline struct dma_map_ops *get_dma_ops(struct device *dev) static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{ {
...@@ -44,12 +49,12 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off) ...@@ -44,12 +49,12 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off)
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{ {
return paddr + get_dma_offset(dev); return paddr;
} }
static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{ {
return daddr - get_dma_offset(dev); return daddr;
} }
static inline void dma_mark_clean(void *addr, size_t size) {} static inline void dma_mark_clean(void *addr, size_t size) {}
...@@ -87,11 +92,19 @@ dma_set_mask(struct device *dev, u64 mask) ...@@ -87,11 +92,19 @@ dma_set_mask(struct device *dev, u64 mask)
{ {
struct dma_map_ops *dma_ops = get_dma_ops(dev); struct dma_map_ops *dma_ops = get_dma_ops(dev);
/* Handle legacy PCI devices with limited memory addressability. */ /*
if ((dma_ops == gx_pci_dma_map_ops) && (mask <= DMA_BIT_MASK(32))) { * For PCI devices with 64-bit DMA addressing capability, promote
set_dma_ops(dev, gx_legacy_pci_dma_map_ops); * the dma_ops to hybrid, with the consistent memory DMA space limited
set_dma_offset(dev, 0); * to 32-bit. For 32-bit capable devices, limit the streaming DMA
if (mask > dev->archdata.max_direct_dma_addr) * address range to max_direct_dma_addr.
*/
if (dma_ops == gx_pci_dma_map_ops ||
dma_ops == gx_hybrid_pci_dma_map_ops ||
dma_ops == gx_legacy_pci_dma_map_ops) {
if (mask == DMA_BIT_MASK(64) &&
dma_ops == gx_legacy_pci_dma_map_ops)
set_dma_ops(dev, gx_hybrid_pci_dma_map_ops);
else if (mask > dev->archdata.max_direct_dma_addr)
mask = dev->archdata.max_direct_dma_addr; mask = dev->archdata.max_direct_dma_addr;
} }
......
...@@ -30,7 +30,6 @@ typedef unsigned long elf_greg_t; ...@@ -30,7 +30,6 @@ typedef unsigned long elf_greg_t;
#define ELF_NGREG (sizeof(struct pt_regs) / sizeof(elf_greg_t)) #define ELF_NGREG (sizeof(struct pt_regs) / sizeof(elf_greg_t))
typedef elf_greg_t elf_gregset_t[ELF_NGREG]; typedef elf_greg_t elf_gregset_t[ELF_NGREG];
#define EM_TILE64 187
#define EM_TILEPRO 188 #define EM_TILEPRO 188
#define EM_TILEGX 191 #define EM_TILEGX 191
...@@ -132,6 +131,15 @@ extern int dump_task_regs(struct task_struct *, elf_gregset_t *); ...@@ -132,6 +131,15 @@ extern int dump_task_regs(struct task_struct *, elf_gregset_t *);
struct linux_binprm; struct linux_binprm;
extern int arch_setup_additional_pages(struct linux_binprm *bprm, extern int arch_setup_additional_pages(struct linux_binprm *bprm,
int executable_stack); int executable_stack);
#define ARCH_DLINFO \
do { \
NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE); \
} while (0)
struct mm_struct;
extern unsigned long arch_randomize_brk(struct mm_struct *mm);
#define arch_randomize_brk arch_randomize_brk
#ifdef CONFIG_COMPAT #ifdef CONFIG_COMPAT
#define COMPAT_ELF_PLATFORM "tilegx-m32" #define COMPAT_ELF_PLATFORM "tilegx-m32"
......
...@@ -78,14 +78,6 @@ enum fixed_addresses { ...@@ -78,14 +78,6 @@ enum fixed_addresses {
#endif #endif
}; };
extern void __set_fixmap(enum fixed_addresses idx,
unsigned long phys, pgprot_t flags);
#define set_fixmap(idx, phys) \
__set_fixmap(idx, phys, PAGE_KERNEL)
#define clear_fixmap(idx) \
__set_fixmap(idx, 0, __pgprot(0))
#define __FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT) #define __FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
#define __FIXADDR_BOOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) #define __FIXADDR_BOOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START (FIXADDR_TOP + PAGE_SIZE - __FIXADDR_SIZE) #define FIXADDR_START (FIXADDR_TOP + PAGE_SIZE - __FIXADDR_SIZE)
......
...@@ -15,6 +15,26 @@ ...@@ -15,6 +15,26 @@
#ifndef _ASM_TILE_FTRACE_H #ifndef _ASM_TILE_FTRACE_H
#define _ASM_TILE_FTRACE_H #define _ASM_TILE_FTRACE_H
/* empty */ #ifdef CONFIG_FUNCTION_TRACER
#define MCOUNT_ADDR ((unsigned long)(__mcount))
#define MCOUNT_INSN_SIZE 8 /* sizeof mcount call */
#ifndef __ASSEMBLY__
extern void __mcount(void);
#ifdef CONFIG_DYNAMIC_FTRACE
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
return addr;
}
struct dyn_arch_ftrace {
};
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* _ASM_TILE_FTRACE_H */ #endif /* _ASM_TILE_FTRACE_H */
...@@ -43,6 +43,7 @@ ...@@ -43,6 +43,7 @@
".pushsection .fixup,\"ax\"\n" \ ".pushsection .fixup,\"ax\"\n" \
"0: { movei %0, %5; j 9f }\n" \ "0: { movei %0, %5; j 9f }\n" \
".section __ex_table,\"a\"\n" \ ".section __ex_table,\"a\"\n" \
".align 8\n" \
".quad 1b, 0b\n" \ ".quad 1b, 0b\n" \
".popsection\n" \ ".popsection\n" \
"9:" \ "9:" \
......
...@@ -33,8 +33,7 @@ struct zone; ...@@ -33,8 +33,7 @@ struct zone;
/* /*
* Is this page immutable (unwritable) and thus able to be cached more * Is this page immutable (unwritable) and thus able to be cached more
* widely than would otherwise be possible? On tile64 this means we * widely than would otherwise be possible? This means we have "nc" set.
* mark the PTE to cache locally; on tilepro it means we have "nc" set.
*/ */
#define PAGE_HOME_IMMUTABLE -2 #define PAGE_HOME_IMMUTABLE -2
...@@ -44,16 +43,8 @@ struct zone; ...@@ -44,16 +43,8 @@ struct zone;
*/ */
#define PAGE_HOME_INCOHERENT -3 #define PAGE_HOME_INCOHERENT -3
#if CHIP_HAS_CBOX_HOME_MAP()
/* Home for the page is distributed via hash-for-home. */ /* Home for the page is distributed via hash-for-home. */
#define PAGE_HOME_HASH -4 #define PAGE_HOME_HASH -4
#endif
/* Homing is unknown or unspecified. Not valid for page_home(). */
#define PAGE_HOME_UNKNOWN -5
/* Home on the current cpu. Not valid for page_home(). */
#define PAGE_HOME_HERE -6
/* Support wrapper to use instead of explicit hv_flush_remote(). */ /* Support wrapper to use instead of explicit hv_flush_remote(). */
extern void flush_remote(unsigned long cache_pfn, unsigned long cache_length, extern void flush_remote(unsigned long cache_pfn, unsigned long cache_length,
......
...@@ -19,7 +19,8 @@ ...@@ -19,7 +19,8 @@
#include <linux/bug.h> #include <linux/bug.h>
#include <asm/page.h> #include <asm/page.h>
#define IO_SPACE_LIMIT 0xfffffffful /* Maximum PCI I/O space address supported. */
#define IO_SPACE_LIMIT 0xffffffff
/* /*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem * Convert a physical pointer to a virtual kernel pointer for /dev/mem
...@@ -254,7 +255,7 @@ static inline void writeq(u64 val, unsigned long addr) ...@@ -254,7 +255,7 @@ static inline void writeq(u64 val, unsigned long addr)
static inline void memset_io(volatile void *dst, int val, size_t len) static inline void memset_io(volatile void *dst, int val, size_t len)
{ {
int x; size_t x;
BUG_ON((unsigned long)dst & 0x3); BUG_ON((unsigned long)dst & 0x3);
val = (val & 0xff) * 0x01010101; val = (val & 0xff) * 0x01010101;
for (x = 0; x < len; x += 4) for (x = 0; x < len; x += 4)
...@@ -264,7 +265,7 @@ static inline void memset_io(volatile void *dst, int val, size_t len) ...@@ -264,7 +265,7 @@ static inline void memset_io(volatile void *dst, int val, size_t len)
static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, static inline void memcpy_fromio(void *dst, const volatile void __iomem *src,
size_t len) size_t len)
{ {
int x; size_t x;
BUG_ON((unsigned long)src & 0x3); BUG_ON((unsigned long)src & 0x3);
for (x = 0; x < len; x += 4) for (x = 0; x < len; x += 4)
*(u32 *)(dst + x) = readl(src + x); *(u32 *)(dst + x) = readl(src + x);
...@@ -273,7 +274,7 @@ static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, ...@@ -273,7 +274,7 @@ static inline void memcpy_fromio(void *dst, const volatile void __iomem *src,
static inline void memcpy_toio(volatile void __iomem *dst, const void *src, static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
size_t len) size_t len)
{ {
int x; size_t x;
BUG_ON((unsigned long)dst & 0x3); BUG_ON((unsigned long)dst & 0x3);
for (x = 0; x < len; x += 4) for (x = 0; x < len; x += 4)
writel(*(u32 *)(src + x), dst + x); writel(*(u32 *)(src + x), dst + x);
...@@ -281,8 +282,108 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src, ...@@ -281,8 +282,108 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
#endif #endif
#if CHIP_HAS_MMIO() && defined(CONFIG_TILE_PCI_IO)
static inline u8 inb(unsigned long addr)
{
return readb((volatile void __iomem *) addr);
}
static inline u16 inw(unsigned long addr)
{
return readw((volatile void __iomem *) addr);
}
static inline u32 inl(unsigned long addr)
{
return readl((volatile void __iomem *) addr);
}
static inline void outb(u8 b, unsigned long addr)
{
writeb(b, (volatile void __iomem *) addr);
}
static inline void outw(u16 b, unsigned long addr)
{
writew(b, (volatile void __iomem *) addr);
}
static inline void outl(u32 b, unsigned long addr)
{
writel(b, (volatile void __iomem *) addr);
}
static inline void insb(unsigned long addr, void *buffer, int count)
{
if (count) {
u8 *buf = buffer;
do {
u8 x = inb(addr);
*buf++ = x;
} while (--count);
}
}
static inline void insw(unsigned long addr, void *buffer, int count)
{
if (count) {
u16 *buf = buffer;
do {
u16 x = inw(addr);
*buf++ = x;
} while (--count);
}
}
static inline void insl(unsigned long addr, void *buffer, int count)
{
if (count) {
u32 *buf = buffer;
do {
u32 x = inl(addr);
*buf++ = x;
} while (--count);
}
}
static inline void outsb(unsigned long addr, const void *buffer, int count)
{
if (count) {
const u8 *buf = buffer;
do {
outb(*buf++, addr);
} while (--count);
}
}
static inline void outsw(unsigned long addr, const void *buffer, int count)
{
if (count) {
const u16 *buf = buffer;
do {
outw(*buf++, addr);
} while (--count);
}
}
static inline void outsl(unsigned long addr, const void *buffer, int count)
{
if (count) {
const u32 *buf = buffer;
do {
outl(*buf++, addr);
} while (--count);
}
}
extern void __iomem *ioport_map(unsigned long port, unsigned int len);
extern void ioport_unmap(void __iomem *addr);
#else
/* /*
* The Tile architecture does not support IOPORT, even with PCI. * The TilePro architecture does not support IOPORT, even with PCI.
* Unfortunately we can't yet simply not declare these methods, * Unfortunately we can't yet simply not declare these methods,
* since some generic code that compiles into the kernel, but * since some generic code that compiles into the kernel, but
* we never run, uses them unconditionally. * we never run, uses them unconditionally.
...@@ -290,7 +391,12 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src, ...@@ -290,7 +391,12 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
static inline long ioport_panic(void) static inline long ioport_panic(void)
{ {
#ifdef __tilegx__
panic("PCI IO space support is disabled. Configure the kernel with"
" CONFIG_TILE_PCI_IO to enable it");
#else
panic("inb/outb and friends do not exist on tile"); panic("inb/outb and friends do not exist on tile");
#endif
return 0; return 0;
} }
...@@ -335,13 +441,6 @@ static inline void outl(u32 b, unsigned long addr) ...@@ -335,13 +441,6 @@ static inline void outl(u32 b, unsigned long addr)
ioport_panic(); ioport_panic();
} }
#define inb_p(addr) inb(addr)
#define inw_p(addr) inw(addr)
#define inl_p(addr) inl(addr)
#define outb_p(x, addr) outb((x), (addr))
#define outw_p(x, addr) outw((x), (addr))
#define outl_p(x, addr) outl((x), (addr))
static inline void insb(unsigned long addr, void *buffer, int count) static inline void insb(unsigned long addr, void *buffer, int count)
{ {
ioport_panic(); ioport_panic();
...@@ -372,6 +471,15 @@ static inline void outsl(unsigned long addr, const void *buffer, int count) ...@@ -372,6 +471,15 @@ static inline void outsl(unsigned long addr, const void *buffer, int count)
ioport_panic(); ioport_panic();
} }
#endif /* CHIP_HAS_MMIO() && defined(CONFIG_TILE_PCI_IO) */
#define inb_p(addr) inb(addr)
#define inw_p(addr) inw(addr)
#define inl_p(addr) inl(addr)
#define outb_p(x, addr) outb((x), (addr))
#define outw_p(x, addr) outw((x), (addr))
#define outl_p(x, addr) outl((x), (addr))
#define ioread16be(addr) be16_to_cpu(ioread16(addr)) #define ioread16be(addr) be16_to_cpu(ioread16(addr))
#define ioread32be(addr) be32_to_cpu(ioread32(addr)) #define ioread32be(addr) be32_to_cpu(ioread32(addr))
#define iowrite16be(v, addr) iowrite16(be16_to_cpu(v), (addr)) #define iowrite16be(v, addr) iowrite16(be16_to_cpu(v), (addr))
......
...@@ -124,6 +124,12 @@ ...@@ -124,6 +124,12 @@
DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask); DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
#define INITIAL_INTERRUPTS_ENABLED (1ULL << INT_MEM_ERROR) #define INITIAL_INTERRUPTS_ENABLED (1ULL << INT_MEM_ERROR)
#ifdef CONFIG_DEBUG_PREEMPT
/* Due to inclusion issues, we can't rely on <linux/smp.h> here. */
extern unsigned int debug_smp_processor_id(void);
# define smp_processor_id() debug_smp_processor_id()
#endif
/* Disable interrupts. */ /* Disable interrupts. */
#define arch_local_irq_disable() \ #define arch_local_irq_disable() \
interrupt_mask_set_mask(LINUX_MASKABLE_INTERRUPTS) interrupt_mask_set_mask(LINUX_MASKABLE_INTERRUPTS)
...@@ -132,9 +138,18 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask); ...@@ -132,9 +138,18 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
#define arch_local_irq_disable_all() \ #define arch_local_irq_disable_all() \
interrupt_mask_set_mask(-1ULL) interrupt_mask_set_mask(-1ULL)
/*
* Read the set of maskable interrupts.
* We avoid the preemption warning here via __this_cpu_ptr since even
* if irqs are already enabled, it's harmless to read the wrong cpu's
* enabled mask.
*/
#define arch_local_irqs_enabled() \
(*__this_cpu_ptr(&interrupts_enabled_mask))
/* Re-enable all maskable interrupts. */ /* Re-enable all maskable interrupts. */
#define arch_local_irq_enable() \ #define arch_local_irq_enable() \
interrupt_mask_reset_mask(__get_cpu_var(interrupts_enabled_mask)) interrupt_mask_reset_mask(arch_local_irqs_enabled())
/* Disable or enable interrupts based on flag argument. */ /* Disable or enable interrupts based on flag argument. */
#define arch_local_irq_restore(disabled) do { \ #define arch_local_irq_restore(disabled) do { \
...@@ -161,7 +176,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask); ...@@ -161,7 +176,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
/* Prevent the given interrupt from being enabled next time we enable irqs. */ /* Prevent the given interrupt from being enabled next time we enable irqs. */
#define arch_local_irq_mask(interrupt) \ #define arch_local_irq_mask(interrupt) \
(__get_cpu_var(interrupts_enabled_mask) &= ~(1ULL << (interrupt))) this_cpu_and(interrupts_enabled_mask, ~(1ULL << (interrupt)))
/* Prevent the given interrupt from being enabled immediately. */ /* Prevent the given interrupt from being enabled immediately. */
#define arch_local_irq_mask_now(interrupt) do { \ #define arch_local_irq_mask_now(interrupt) do { \
...@@ -171,7 +186,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask); ...@@ -171,7 +186,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
/* Allow the given interrupt to be enabled next time we enable irqs. */ /* Allow the given interrupt to be enabled next time we enable irqs. */
#define arch_local_irq_unmask(interrupt) \ #define arch_local_irq_unmask(interrupt) \
(__get_cpu_var(interrupts_enabled_mask) |= (1ULL << (interrupt))) this_cpu_or(interrupts_enabled_mask, (1ULL << (interrupt)))
/* Allow the given interrupt to be enabled immediately, if !irqs_disabled. */ /* Allow the given interrupt to be enabled immediately, if !irqs_disabled. */
#define arch_local_irq_unmask_now(interrupt) do { \ #define arch_local_irq_unmask_now(interrupt) do { \
......
/* /*
* Copyright 2010 Tilera Corporation. All Rights Reserved. * Copyright 2012 Tilera Corporation. All Rights Reserved.
* *
* This program is free software; you can redistribute it and/or * This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License * modify it under the terms of the GNU General Public License
...@@ -12,7 +12,17 @@ ...@@ -12,7 +12,17 @@
* more details. * more details.
*/ */
#ifndef _ASM_TILE_HW_IRQ_H #ifndef _ASM_TILE_KDEBUG_H
#define _ASM_TILE_HW_IRQ_H #define _ASM_TILE_KDEBUG_H
#endif /* _ASM_TILE_HW_IRQ_H */ #include <linux/notifier.h>
enum die_val {
DIE_OOPS = 1,
DIE_BREAK,
DIE_SSTEPBP,
DIE_PAGE_FAULT,
DIE_COMPILED_BPT
};
#endif /* _ASM_TILE_KDEBUG_H */
/*
* Copyright 2013 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*
* TILE-Gx KGDB support.
*/
#ifndef __TILE_KGDB_H__
#define __TILE_KGDB_H__
#include <linux/kdebug.h>
#include <arch/opcode.h>
#define GDB_SIZEOF_REG sizeof(unsigned long)
/*
* TILE-Gx gdb is expecting the following register layout:
* 56 GPRs(R0 - R52, TP, SP, LR), 8 special GPRs(networks and ZERO),
* plus the PC and the faultnum.
*
* Even though kernel not use the 8 special GPRs, they need to be present
* in the registers sent for correct processing in the host-side gdb.
*
*/
#define DBG_MAX_REG_NUM (56+8+2)
#define NUMREGBYTES (DBG_MAX_REG_NUM * GDB_SIZEOF_REG)
/*
* BUFMAX defines the maximum number of characters in inbound/outbound
* buffers at least NUMREGBYTES*2 are needed for register packets,
* Longer buffer is needed to list all threads.
*/
#define BUFMAX 2048
#define BREAK_INSTR_SIZE TILEGX_BUNDLE_SIZE_IN_BYTES
/*
* Require cache flush for set/clear a software breakpoint or write memory.
*/
#define CACHE_FLUSH_IS_SAFE 1
/*
* The compiled-in breakpoint instruction can be used to "break" into
* the debugger via magic system request key (sysrq-G).
*/
static tile_bundle_bits compiled_bpt = TILEGX_BPT_BUNDLE | DIE_COMPILED_BPT;
enum tilegx_regnum {
TILEGX_PC_REGNUM = TREG_LAST_GPR + 9,
TILEGX_FAULTNUM_REGNUM,
};
/*
* Generate a breakpoint exception to "break" into the debugger.
*/
static inline void arch_kgdb_breakpoint(void)
{
asm volatile (".quad %0\n\t"
::""(compiled_bpt));
}
#endif /* __TILE_KGDB_H__ */
/*
* arch/tile/include/asm/kprobes.h
*
* Copyright 2012 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_TILE_KPROBES_H
#define _ASM_TILE_KPROBES_H
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>
#include <arch/opcode.h>
#define __ARCH_WANT_KPROBES_INSN_SLOT
#define MAX_INSN_SIZE 2
#define kretprobe_blacklist_size 0
typedef tile_bundle_bits kprobe_opcode_t;
#define flush_insn_slot(p) \
flush_icache_range((unsigned long)p->addr, \
(unsigned long)p->addr + \
(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
struct kprobe;
/* Architecture specific copy of original instruction. */
struct arch_specific_insn {
kprobe_opcode_t *insn;
};
struct prev_kprobe {
struct kprobe *kp;
unsigned long status;
unsigned long saved_pc;
};
#define MAX_JPROBES_STACK_SIZE 128
#define MAX_JPROBES_STACK_ADDR \
(((unsigned long)current_thread_info()) + THREAD_SIZE - 32 \
- sizeof(struct pt_regs))
#define MIN_JPROBES_STACK_SIZE(ADDR) \
((((ADDR) + MAX_JPROBES_STACK_SIZE) > MAX_JPROBES_STACK_ADDR) \
? MAX_JPROBES_STACK_ADDR - (ADDR) \
: MAX_JPROBES_STACK_SIZE)
/* per-cpu kprobe control block. */
struct kprobe_ctlblk {
unsigned long kprobe_status;
unsigned long kprobe_saved_pc;
unsigned long jprobe_saved_sp;
struct prev_kprobe prev_kprobe;
struct pt_regs jprobe_saved_regs;
char jprobes_stack[MAX_JPROBES_STACK_SIZE];
};
extern tile_bundle_bits breakpoint2_insn;
extern tile_bundle_bits breakpoint_insn;
void arch_remove_kprobe(struct kprobe *);
extern int kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data);
#endif /* _ASM_TILE_KPROBES_H */
...@@ -22,6 +22,7 @@ struct mm_context { ...@@ -22,6 +22,7 @@ struct mm_context {
* semaphore but atomically, but it is conservatively set. * semaphore but atomically, but it is conservatively set.
*/ */
unsigned long priority_cached; unsigned long priority_cached;
unsigned long vdso_base;
}; };
typedef struct mm_context mm_context_t; typedef struct mm_context mm_context_t;
......
...@@ -45,7 +45,7 @@ static inline void __install_page_table(pgd_t *pgdir, int asid, pgprot_t prot) ...@@ -45,7 +45,7 @@ static inline void __install_page_table(pgd_t *pgdir, int asid, pgprot_t prot)
static inline void install_page_table(pgd_t *pgdir, int asid) static inline void install_page_table(pgd_t *pgdir, int asid)
{ {
pte_t *ptep = virt_to_pte(NULL, (unsigned long)pgdir); pte_t *ptep = virt_to_kpte((unsigned long)pgdir);
__install_page_table(pgdir, asid, *ptep); __install_page_table(pgdir, asid, *ptep);
} }
......
...@@ -42,7 +42,7 @@ static inline int pfn_to_nid(unsigned long pfn) ...@@ -42,7 +42,7 @@ static inline int pfn_to_nid(unsigned long pfn)
#define kern_addr_valid(kaddr) virt_addr_valid((void *)kaddr) #define kern_addr_valid(kaddr) virt_addr_valid((void *)kaddr)
static inline int pfn_valid(int pfn) static inline int pfn_valid(unsigned long pfn)
{ {
int nid = pfn_to_nid(pfn); int nid = pfn_to_nid(pfn);
......
...@@ -38,6 +38,12 @@ ...@@ -38,6 +38,12 @@
#define PAGE_MASK (~(PAGE_SIZE - 1)) #define PAGE_MASK (~(PAGE_SIZE - 1))
#define HPAGE_MASK (~(HPAGE_SIZE - 1)) #define HPAGE_MASK (~(HPAGE_SIZE - 1))
/*
* We do define AT_SYSINFO_EHDR to support vDSO,
* but don't use the gate mechanism.
*/
#define __HAVE_ARCH_GATE_AREA 1
/* /*
* If the Kconfig doesn't specify, set a maximum zone order that * If the Kconfig doesn't specify, set a maximum zone order that
* is enough so that we can create huge pages from small pages given * is enough so that we can create huge pages from small pages given
...@@ -142,8 +148,12 @@ static inline __attribute_const__ int get_order(unsigned long size) ...@@ -142,8 +148,12 @@ static inline __attribute_const__ int get_order(unsigned long size)
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#endif #endif
/* Allow overriding how much VA or PA the kernel will use. */
#define MAX_PA_WIDTH CHIP_PA_WIDTH()
#define MAX_VA_WIDTH CHIP_VA_WIDTH()
/* Each memory controller has PAs distinct in their high bits. */ /* Each memory controller has PAs distinct in their high bits. */
#define NR_PA_HIGHBIT_SHIFT (CHIP_PA_WIDTH() - CHIP_LOG_NUM_MSHIMS()) #define NR_PA_HIGHBIT_SHIFT (MAX_PA_WIDTH - CHIP_LOG_NUM_MSHIMS())
#define NR_PA_HIGHBIT_VALUES (1 << CHIP_LOG_NUM_MSHIMS()) #define NR_PA_HIGHBIT_VALUES (1 << CHIP_LOG_NUM_MSHIMS())
#define __pa_to_highbits(pa) ((phys_addr_t)(pa) >> NR_PA_HIGHBIT_SHIFT) #define __pa_to_highbits(pa) ((phys_addr_t)(pa) >> NR_PA_HIGHBIT_SHIFT)
#define __pfn_to_highbits(pfn) ((pfn) >> (NR_PA_HIGHBIT_SHIFT - PAGE_SHIFT)) #define __pfn_to_highbits(pfn) ((pfn) >> (NR_PA_HIGHBIT_SHIFT - PAGE_SHIFT))
...@@ -154,7 +164,7 @@ static inline __attribute_const__ int get_order(unsigned long size) ...@@ -154,7 +164,7 @@ static inline __attribute_const__ int get_order(unsigned long size)
* We reserve the lower half of memory for user-space programs, and the * We reserve the lower half of memory for user-space programs, and the
* upper half for system code. We re-map all of physical memory in the * upper half for system code. We re-map all of physical memory in the
* upper half, which takes a quarter of our VA space. Then we have * upper half, which takes a quarter of our VA space. Then we have
* the vmalloc regions. The supervisor code lives at 0xfffffff700000000, * the vmalloc regions. The supervisor code lives at the highest address,
* with the hypervisor above that. * with the hypervisor above that.
* *
* Loadable kernel modules are placed immediately after the static * Loadable kernel modules are placed immediately after the static
...@@ -166,26 +176,19 @@ static inline __attribute_const__ int get_order(unsigned long size) ...@@ -166,26 +176,19 @@ static inline __attribute_const__ int get_order(unsigned long size)
* Similarly, for now we don't play any struct page mapping games. * Similarly, for now we don't play any struct page mapping games.
*/ */
#if CHIP_PA_WIDTH() + 2 > CHIP_VA_WIDTH() #if MAX_PA_WIDTH + 2 > MAX_VA_WIDTH
# error Too much PA to map with the VA available! # error Too much PA to map with the VA available!
#endif #endif
#define HALF_VA_SPACE (_AC(1, UL) << (CHIP_VA_WIDTH() - 1))
#define MEM_LOW_END (HALF_VA_SPACE - 1) /* low half */ #define PAGE_OFFSET (-(_AC(1, UL) << (MAX_VA_WIDTH - 1)))
#define MEM_HIGH_START (-HALF_VA_SPACE) /* high half */ #define KERNEL_HIGH_VADDR _AC(0xfffffff800000000, UL) /* high 32GB */
#define PAGE_OFFSET MEM_HIGH_START #define FIXADDR_BASE (KERNEL_HIGH_VADDR - 0x400000000) /* 4 GB */
#define FIXADDR_BASE _AC(0xfffffff400000000, UL) /* 4 GB */ #define FIXADDR_TOP (KERNEL_HIGH_VADDR - 0x300000000) /* 4 GB */
#define FIXADDR_TOP _AC(0xfffffff500000000, UL) /* 4 GB */
#define _VMALLOC_START FIXADDR_TOP #define _VMALLOC_START FIXADDR_TOP
#define HUGE_VMAP_BASE _AC(0xfffffff600000000, UL) /* 4 GB */ #define HUGE_VMAP_BASE (KERNEL_HIGH_VADDR - 0x200000000) /* 4 GB */
#define MEM_SV_START _AC(0xfffffff700000000, UL) /* 256 MB */ #define MEM_SV_START (KERNEL_HIGH_VADDR - 0x100000000) /* 256 MB */
#define MEM_SV_INTRPT MEM_SV_START #define MEM_MODULE_START (MEM_SV_START + (256*1024*1024)) /* 256 MB */
#define MEM_MODULE_START _AC(0xfffffff710000000, UL) /* 256 MB */
#define MEM_MODULE_END (MEM_MODULE_START + (256*1024*1024)) #define MEM_MODULE_END (MEM_MODULE_START + (256*1024*1024))
#define MEM_HV_START _AC(0xfffffff800000000, UL) /* 32 GB */
/* Highest DTLB address we will use */
#define KERNEL_HIGH_VADDR MEM_SV_START
#else /* !__tilegx__ */ #else /* !__tilegx__ */
...@@ -207,25 +210,18 @@ static inline __attribute_const__ int get_order(unsigned long size) ...@@ -207,25 +210,18 @@ static inline __attribute_const__ int get_order(unsigned long size)
* values, and after that, we show "typical" values, since the actual * values, and after that, we show "typical" values, since the actual
* addresses depend on kernel #defines. * addresses depend on kernel #defines.
* *
* MEM_HV_INTRPT 0xfe000000 * MEM_HV_START 0xfe000000
* MEM_SV_INTRPT (kernel code) 0xfd000000 * MEM_SV_START (kernel code) 0xfd000000
* MEM_USER_INTRPT (user vector) 0xfc000000 * MEM_USER_INTRPT (user vector) 0xfc000000
* FIX_KMAP_xxx 0xf8000000 (via NR_CPUS * KM_TYPE_NR) * FIX_KMAP_xxx 0xfa000000 (via NR_CPUS * KM_TYPE_NR)
* PKMAP_BASE 0xf7000000 (via LAST_PKMAP) * PKMAP_BASE 0xf9000000 (via LAST_PKMAP)
* HUGE_VMAP 0xf3000000 (via CONFIG_NR_HUGE_VMAPS) * VMALLOC_START 0xf7000000 (via VMALLOC_RESERVE)
* VMALLOC_START 0xf0000000 (via __VMALLOC_RESERVE)
* mapped LOWMEM 0xc0000000 * mapped LOWMEM 0xc0000000
*/ */
#define MEM_USER_INTRPT _AC(0xfc000000, UL) #define MEM_USER_INTRPT _AC(0xfc000000, UL)
#if CONFIG_KERNEL_PL == 1 #define MEM_SV_START _AC(0xfd000000, UL)
#define MEM_SV_INTRPT _AC(0xfd000000, UL) #define MEM_HV_START _AC(0xfe000000, UL)
#define MEM_HV_INTRPT _AC(0xfe000000, UL)
#else
#define MEM_GUEST_INTRPT _AC(0xfd000000, UL)
#define MEM_SV_INTRPT _AC(0xfe000000, UL)
#define MEM_HV_INTRPT _AC(0xff000000, UL)
#endif
#define INTRPT_SIZE 0x4000 #define INTRPT_SIZE 0x4000
...@@ -246,7 +242,7 @@ static inline __attribute_const__ int get_order(unsigned long size) ...@@ -246,7 +242,7 @@ static inline __attribute_const__ int get_order(unsigned long size)
#endif /* __tilegx__ */ #endif /* __tilegx__ */
#ifndef __ASSEMBLY__ #if !defined(__ASSEMBLY__) && !defined(VDSO_BUILD)
#ifdef CONFIG_HIGHMEM #ifdef CONFIG_HIGHMEM
...@@ -332,6 +328,7 @@ static inline int pfn_valid(unsigned long pfn) ...@@ -332,6 +328,7 @@ static inline int pfn_valid(unsigned long pfn)
struct mm_struct; struct mm_struct;
extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr); extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
extern pte_t *virt_to_kpte(unsigned long kaddr);
#endif /* !__ASSEMBLY__ */ #endif /* !__ASSEMBLY__ */
......
...@@ -17,7 +17,6 @@ ...@@ -17,7 +17,6 @@
#include <linux/dma-mapping.h> #include <linux/dma-mapping.h>
#include <linux/pci.h> #include <linux/pci.h>
#include <linux/numa.h>
#include <asm-generic/pci_iomap.h> #include <asm-generic/pci_iomap.h>
#ifndef __tilegx__ #ifndef __tilegx__
...@@ -29,7 +28,6 @@ struct pci_controller { ...@@ -29,7 +28,6 @@ struct pci_controller {
int index; /* PCI domain number */ int index; /* PCI domain number */
struct pci_bus *root_bus; struct pci_bus *root_bus;
int first_busno;
int last_busno; int last_busno;
int hv_cfg_fd[2]; /* config{0,1} fds for this PCIe controller */ int hv_cfg_fd[2]; /* config{0,1} fds for this PCIe controller */
...@@ -124,6 +122,11 @@ static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) {} ...@@ -124,6 +122,11 @@ static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) {}
* the CPA plus TILE_PCI_MEM_MAP_BASE_OFFSET. To support 32-bit * the CPA plus TILE_PCI_MEM_MAP_BASE_OFFSET. To support 32-bit
* devices, we create a separate map region that handles the low * devices, we create a separate map region that handles the low
* 4GB. * 4GB.
*
* This design lets us avoid the "PCI hole" problem where the host bridge
* won't pass DMA traffic with target addresses that happen to fall within the
* BAR space. This enables us to use all the physical memory for DMA, instead
* of wasting the same amount of physical memory as the BAR window size.
*/ */
#define TILE_PCI_MEM_MAP_BASE_OFFSET (1ULL << CHIP_PA_WIDTH()) #define TILE_PCI_MEM_MAP_BASE_OFFSET (1ULL << CHIP_PA_WIDTH())
...@@ -145,6 +148,10 @@ struct pci_controller { ...@@ -145,6 +148,10 @@ struct pci_controller {
int pio_mem_index; /* PIO region index for memory access */ int pio_mem_index; /* PIO region index for memory access */
#ifdef CONFIG_TILE_PCI_IO
int pio_io_index; /* PIO region index for I/O space access */
#endif
/* /*
* Mem-Map regions for all the memory controllers so that Linux can * Mem-Map regions for all the memory controllers so that Linux can
* map all of its physical memory space to the PCI bus. * map all of its physical memory space to the PCI bus.
...@@ -154,6 +161,10 @@ struct pci_controller { ...@@ -154,6 +161,10 @@ struct pci_controller {
int index; /* PCI domain number */ int index; /* PCI domain number */
struct pci_bus *root_bus; struct pci_bus *root_bus;
/* PCI I/O space resource for this controller. */
struct resource io_space;
char io_space_name[32];
/* PCI memory space resource for this controller. */ /* PCI memory space resource for this controller. */
struct resource mem_space; struct resource mem_space;
char mem_space_name[32]; char mem_space_name[32];
...@@ -166,13 +177,11 @@ struct pci_controller { ...@@ -166,13 +177,11 @@ struct pci_controller {
/* Table that maps the INTx numbers to Linux irq numbers. */ /* Table that maps the INTx numbers to Linux irq numbers. */
int irq_intx_table[4]; int irq_intx_table[4];
/* Address ranges that are routed to this controller/bridge. */
struct resource mem_resources[3];
}; };
extern struct pci_controller pci_controllers[TILEGX_NUM_TRIO * TILEGX_TRIO_PCIES]; extern struct pci_controller pci_controllers[TILEGX_NUM_TRIO * TILEGX_TRIO_PCIES];
extern gxio_trio_context_t trio_contexts[TILEGX_NUM_TRIO]; extern gxio_trio_context_t trio_contexts[TILEGX_NUM_TRIO];
extern int num_trio_shims;
extern void pci_iounmap(struct pci_dev *dev, void __iomem *); extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
...@@ -211,7 +220,8 @@ static inline int pcibios_assign_all_busses(void) ...@@ -211,7 +220,8 @@ static inline int pcibios_assign_all_busses(void)
} }
#define PCIBIOS_MIN_MEM 0 #define PCIBIOS_MIN_MEM 0
#define PCIBIOS_MIN_IO 0 /* Minimum PCI I/O address, starting at the page boundary. */
#define PCIBIOS_MIN_IO PAGE_SIZE
/* Use any cpu for PCI. */ /* Use any cpu for PCI. */
#define cpumask_of_pcibus(bus) cpu_online_mask #define cpumask_of_pcibus(bus) cpu_online_mask
......
...@@ -84,10 +84,12 @@ extern unsigned long VMALLOC_RESERVE /* = CONFIG_VMALLOC_RESERVE */; ...@@ -84,10 +84,12 @@ extern unsigned long VMALLOC_RESERVE /* = CONFIG_VMALLOC_RESERVE */;
/* We have no pmd or pud since we are strictly a two-level page table */ /* We have no pmd or pud since we are strictly a two-level page table */
#include <asm-generic/pgtable-nopmd.h> #include <asm-generic/pgtable-nopmd.h>
static inline int pud_huge_page(pud_t pud) { return 0; }
/* We don't define any pgds for these addresses. */ /* We don't define any pgds for these addresses. */
static inline int pgd_addr_invalid(unsigned long addr) static inline int pgd_addr_invalid(unsigned long addr)
{ {
return addr >= MEM_HV_INTRPT; return addr >= MEM_HV_START;
} }
/* /*
......
...@@ -63,6 +63,15 @@ ...@@ -63,6 +63,15 @@
/* We have no pud since we are a three-level page table. */ /* We have no pud since we are a three-level page table. */
#include <asm-generic/pgtable-nopud.h> #include <asm-generic/pgtable-nopud.h>
/*
* pmds are the same as pgds and ptes, so converting is a no-op.
*/
#define pmd_pte(pmd) (pmd)
#define pmdp_ptep(pmdp) (pmdp)
#define pte_pmd(pte) (pte)
#define pud_pte(pud) ((pud).pgd)
static inline int pud_none(pud_t pud) static inline int pud_none(pud_t pud)
{ {
return pud_val(pud) == 0; return pud_val(pud) == 0;
...@@ -73,6 +82,11 @@ static inline int pud_present(pud_t pud) ...@@ -73,6 +82,11 @@ static inline int pud_present(pud_t pud)
return pud_val(pud) & _PAGE_PRESENT; return pud_val(pud) & _PAGE_PRESENT;
} }
static inline int pud_huge_page(pud_t pud)
{
return pud_val(pud) & _PAGE_HUGE_PAGE;
}
#define pmd_ERROR(e) \ #define pmd_ERROR(e) \
pr_err("%s:%d: bad pmd 0x%016llx.\n", __FILE__, __LINE__, pmd_val(e)) pr_err("%s:%d: bad pmd 0x%016llx.\n", __FILE__, __LINE__, pmd_val(e))
...@@ -89,6 +103,9 @@ static inline int pud_bad(pud_t pud) ...@@ -89,6 +103,9 @@ static inline int pud_bad(pud_t pud)
/* Return the page-table frame number (ptfn) that a pud_t points at. */ /* Return the page-table frame number (ptfn) that a pud_t points at. */
#define pud_ptfn(pud) hv_pte_get_ptfn((pud).pgd) #define pud_ptfn(pud) hv_pte_get_ptfn((pud).pgd)
/* Return the page frame number (pfn) that a pud_t points at. */
#define pud_pfn(pud) pte_pfn(pud_pte(pud))
/* /*
* A given kernel pud_t maps to a kernel pmd_t table at a specific * A given kernel pud_t maps to a kernel pmd_t table at a specific
* virtual address. Since kernel pmd_t tables can be aligned at * virtual address. Since kernel pmd_t tables can be aligned at
...@@ -123,8 +140,7 @@ static inline unsigned long pgd_addr_normalize(unsigned long addr) ...@@ -123,8 +140,7 @@ static inline unsigned long pgd_addr_normalize(unsigned long addr)
/* We don't define any pgds for these addresses. */ /* We don't define any pgds for these addresses. */
static inline int pgd_addr_invalid(unsigned long addr) static inline int pgd_addr_invalid(unsigned long addr)
{ {
return addr >= MEM_HV_START || return addr >= KERNEL_HIGH_VADDR || addr != pgd_addr_normalize(addr);
(addr > MEM_LOW_END && addr < MEM_HIGH_START);
} }
/* /*
...@@ -152,13 +168,6 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, ...@@ -152,13 +168,6 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
return hv_pte(__insn_exch(&ptep->val, 0UL)); return hv_pte(__insn_exch(&ptep->val, 0UL));
} }
/*
* pmds are the same as pgds and ptes, so converting is a no-op.
*/
#define pmd_pte(pmd) (pmd)
#define pmdp_ptep(pmdp) (pmdp)
#define pte_pmd(pte) (pte)
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
#endif /* _ASM_TILE_PGTABLE_64_H */ #endif /* _ASM_TILE_PGTABLE_64_H */
...@@ -15,6 +15,8 @@ ...@@ -15,6 +15,8 @@
#ifndef _ASM_TILE_PROCESSOR_H #ifndef _ASM_TILE_PROCESSOR_H
#define _ASM_TILE_PROCESSOR_H #define _ASM_TILE_PROCESSOR_H
#include <arch/chip.h>
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
/* /*
...@@ -25,7 +27,6 @@ ...@@ -25,7 +27,6 @@
#include <asm/ptrace.h> #include <asm/ptrace.h>
#include <asm/percpu.h> #include <asm/percpu.h>
#include <arch/chip.h>
#include <arch/spr_def.h> #include <arch/spr_def.h>
struct task_struct; struct task_struct;
...@@ -110,18 +111,16 @@ struct thread_struct { ...@@ -110,18 +111,16 @@ struct thread_struct {
unsigned long long interrupt_mask; unsigned long long interrupt_mask;
/* User interrupt-control 0 state */ /* User interrupt-control 0 state */
unsigned long intctrl_0; unsigned long intctrl_0;
#if CHIP_HAS_PROC_STATUS_SPR() /* Is this task currently doing a backtrace? */
bool in_backtrace;
/* Any other miscellaneous processor state bits */ /* Any other miscellaneous processor state bits */
unsigned long proc_status; unsigned long proc_status;
#endif
#if !CHIP_HAS_FIXED_INTVEC_BASE() #if !CHIP_HAS_FIXED_INTVEC_BASE()
/* Interrupt base for PL0 interrupts */ /* Interrupt base for PL0 interrupts */
unsigned long interrupt_vector_base; unsigned long interrupt_vector_base;
#endif #endif
#if CHIP_HAS_TILE_RTF_HWM()
/* Tile cache retry fifo high-water mark */ /* Tile cache retry fifo high-water mark */
unsigned long tile_rtf_hwm; unsigned long tile_rtf_hwm;
#endif
#if CHIP_HAS_DSTREAM_PF() #if CHIP_HAS_DSTREAM_PF()
/* Data stream prefetch control */ /* Data stream prefetch control */
unsigned long dstream_pf; unsigned long dstream_pf;
...@@ -134,21 +133,16 @@ struct thread_struct { ...@@ -134,21 +133,16 @@ struct thread_struct {
/* Async DMA TLB fault information */ /* Async DMA TLB fault information */
struct async_tlb dma_async_tlb; struct async_tlb dma_async_tlb;
#endif #endif
#if CHIP_HAS_SN_PROC()
/* Was static network processor when we were switched out? */
int sn_proc_running;
/* Async SNI TLB fault information */
struct async_tlb sn_async_tlb;
#endif
}; };
#endif /* !__ASSEMBLY__ */ #endif /* !__ASSEMBLY__ */
/* /*
* Start with "sp" this many bytes below the top of the kernel stack. * Start with "sp" this many bytes below the top of the kernel stack.
* This preserves the invariant that a called function may write to *sp. * This allows us to be cache-aware when handling the initial save
* of the pt_regs value to the stack.
*/ */
#define STACK_TOP_DELTA 8 #define STACK_TOP_DELTA 64
/* /*
* When entering the kernel via a fault, start with the top of the * When entering the kernel via a fault, start with the top of the
...@@ -164,7 +158,7 @@ struct thread_struct { ...@@ -164,7 +158,7 @@ struct thread_struct {
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#ifdef __tilegx__ #ifdef __tilegx__
#define TASK_SIZE_MAX (MEM_LOW_END + 1) #define TASK_SIZE_MAX (_AC(1, UL) << (MAX_VA_WIDTH - 1))
#else #else
#define TASK_SIZE_MAX PAGE_OFFSET #define TASK_SIZE_MAX PAGE_OFFSET
#endif #endif
...@@ -178,10 +172,10 @@ struct thread_struct { ...@@ -178,10 +172,10 @@ struct thread_struct {
#define TASK_SIZE TASK_SIZE_MAX #define TASK_SIZE TASK_SIZE_MAX
#endif #endif
/* We provide a minimal "vdso" a la x86; just the sigreturn code for now. */ #define VDSO_BASE ((unsigned long)current->active_mm->context.vdso_base)
#define VDSO_BASE (TASK_SIZE - PAGE_SIZE) #define VDSO_SYM(x) (VDSO_BASE + (unsigned long)(x))
#define STACK_TOP VDSO_BASE #define STACK_TOP TASK_SIZE
/* STACK_TOP_MAX is used temporarily in execve and should not check COMPAT. */ /* STACK_TOP_MAX is used temporarily in execve and should not check COMPAT. */
#define STACK_TOP_MAX TASK_SIZE_MAX #define STACK_TOP_MAX TASK_SIZE_MAX
...@@ -232,21 +226,28 @@ extern int do_work_pending(struct pt_regs *regs, u32 flags); ...@@ -232,21 +226,28 @@ extern int do_work_pending(struct pt_regs *regs, u32 flags);
unsigned long get_wchan(struct task_struct *p); unsigned long get_wchan(struct task_struct *p);
/* Return initial ksp value for given task. */ /* Return initial ksp value for given task. */
#define task_ksp0(task) ((unsigned long)(task)->stack + THREAD_SIZE) #define task_ksp0(task) \
((unsigned long)(task)->stack + THREAD_SIZE - STACK_TOP_DELTA)
/* Return some info about the user process TASK. */ /* Return some info about the user process TASK. */
#define KSTK_TOP(task) (task_ksp0(task) - STACK_TOP_DELTA)
#define task_pt_regs(task) \ #define task_pt_regs(task) \
((struct pt_regs *)(task_ksp0(task) - KSTK_PTREGS_GAP) - 1) ((struct pt_regs *)(task_ksp0(task) - KSTK_PTREGS_GAP) - 1)
#define current_pt_regs() \ #define current_pt_regs() \
((struct pt_regs *)((stack_pointer | (THREAD_SIZE - 1)) - \ ((struct pt_regs *)((stack_pointer | (THREAD_SIZE - 1)) - \
(KSTK_PTREGS_GAP - 1)) - 1) STACK_TOP_DELTA - (KSTK_PTREGS_GAP - 1)) - 1)
#define task_sp(task) (task_pt_regs(task)->sp) #define task_sp(task) (task_pt_regs(task)->sp)
#define task_pc(task) (task_pt_regs(task)->pc) #define task_pc(task) (task_pt_regs(task)->pc)
/* Aliases for pc and sp (used in fs/proc/array.c) */ /* Aliases for pc and sp (used in fs/proc/array.c) */
#define KSTK_EIP(task) task_pc(task) #define KSTK_EIP(task) task_pc(task)
#define KSTK_ESP(task) task_sp(task) #define KSTK_ESP(task) task_sp(task)
/* Fine-grained unaligned JIT support */
#define GET_UNALIGN_CTL(tsk, adr) get_unalign_ctl((tsk), (adr))
#define SET_UNALIGN_CTL(tsk, val) set_unalign_ctl((tsk), (val))
extern int get_unalign_ctl(struct task_struct *tsk, unsigned long adr);
extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val);
/* Standard format for printing registers and other word-size data. */ /* Standard format for printing registers and other word-size data. */
#ifdef __tilegx__ #ifdef __tilegx__
# define REGFMT "0x%016lx" # define REGFMT "0x%016lx"
...@@ -275,7 +276,6 @@ extern char chip_model[64]; ...@@ -275,7 +276,6 @@ extern char chip_model[64];
/* Data on which physical memory controller corresponds to which NUMA node. */ /* Data on which physical memory controller corresponds to which NUMA node. */
extern int node_controller[]; extern int node_controller[];
#if CHIP_HAS_CBOX_HOME_MAP()
/* Does the heap allocator return hash-for-home pages by default? */ /* Does the heap allocator return hash-for-home pages by default? */
extern int hash_default; extern int hash_default;
...@@ -285,11 +285,6 @@ extern int kstack_hash; ...@@ -285,11 +285,6 @@ extern int kstack_hash;
/* Does MAP_ANONYMOUS return hash-for-home pages by default? */ /* Does MAP_ANONYMOUS return hash-for-home pages by default? */
#define uheap_hash hash_default #define uheap_hash hash_default
#else
#define hash_default 0
#define kstack_hash 0
#define uheap_hash 0
#endif
/* Are we using huge pages in the TLB for kernel data? */ /* Are we using huge pages in the TLB for kernel data? */
extern int kdata_huge; extern int kdata_huge;
...@@ -337,7 +332,6 @@ extern int kdata_huge; ...@@ -337,7 +332,6 @@ extern int kdata_huge;
/* /*
* Provide symbolic constants for PLs. * Provide symbolic constants for PLs.
* Note that assembly code assumes that USER_PL is zero.
*/ */
#define USER_PL 0 #define USER_PL 0
#if CONFIG_KERNEL_PL == 2 #if CONFIG_KERNEL_PL == 2
...@@ -346,20 +340,38 @@ extern int kdata_huge; ...@@ -346,20 +340,38 @@ extern int kdata_huge;
#define KERNEL_PL CONFIG_KERNEL_PL #define KERNEL_PL CONFIG_KERNEL_PL
/* SYSTEM_SAVE_K_0 holds the current cpu number ORed with ksp0. */ /* SYSTEM_SAVE_K_0 holds the current cpu number ORed with ksp0. */
#define CPU_LOG_MASK_VALUE 12 #ifdef __tilegx__
#define CPU_MASK_VALUE ((1 << CPU_LOG_MASK_VALUE) - 1) #define CPU_SHIFT 48
#if CONFIG_NR_CPUS > CPU_MASK_VALUE #if CHIP_VA_WIDTH() > CPU_SHIFT
# error Too many cpus! # error Too many VA bits!
#endif #endif
#define MAX_CPU_ID ((1 << (64 - CPU_SHIFT)) - 1)
#define raw_smp_processor_id() \
((int)(__insn_mfspr(SPR_SYSTEM_SAVE_K_0) >> CPU_SHIFT))
#define get_current_ksp0() \
((unsigned long)(((long)__insn_mfspr(SPR_SYSTEM_SAVE_K_0) << \
(64 - CPU_SHIFT)) >> (64 - CPU_SHIFT)))
#define next_current_ksp0(task) ({ \
unsigned long __ksp0 = task_ksp0(task) & ((1UL << CPU_SHIFT) - 1); \
unsigned long __cpu = (long)raw_smp_processor_id() << CPU_SHIFT; \
__ksp0 | __cpu; \
})
#else
#define LOG2_NR_CPU_IDS 6
#define MAX_CPU_ID ((1 << LOG2_NR_CPU_IDS) - 1)
#define raw_smp_processor_id() \ #define raw_smp_processor_id() \
((int)__insn_mfspr(SPR_SYSTEM_SAVE_K_0) & CPU_MASK_VALUE) ((int)__insn_mfspr(SPR_SYSTEM_SAVE_K_0) & MAX_CPU_ID)
#define get_current_ksp0() \ #define get_current_ksp0() \
(__insn_mfspr(SPR_SYSTEM_SAVE_K_0) & ~CPU_MASK_VALUE) (__insn_mfspr(SPR_SYSTEM_SAVE_K_0) & ~MAX_CPU_ID)
#define next_current_ksp0(task) ({ \ #define next_current_ksp0(task) ({ \
unsigned long __ksp0 = task_ksp0(task); \ unsigned long __ksp0 = task_ksp0(task); \
int __cpu = raw_smp_processor_id(); \ int __cpu = raw_smp_processor_id(); \
BUG_ON(__ksp0 & CPU_MASK_VALUE); \ BUG_ON(__ksp0 & MAX_CPU_ID); \
__ksp0 | __cpu; \ __ksp0 | __cpu; \
}) })
#endif
#if CONFIG_NR_CPUS > (MAX_CPU_ID + 1)
# error Too many cpus!
#endif
#endif /* _ASM_TILE_PROCESSOR_H */ #endif /* _ASM_TILE_PROCESSOR_H */
...@@ -33,12 +33,13 @@ typedef unsigned long pt_reg_t; ...@@ -33,12 +33,13 @@ typedef unsigned long pt_reg_t;
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#define regs_return_value(regs) ((regs)->regs[0])
#define instruction_pointer(regs) ((regs)->pc) #define instruction_pointer(regs) ((regs)->pc)
#define profile_pc(regs) instruction_pointer(regs) #define profile_pc(regs) instruction_pointer(regs)
#define user_stack_pointer(regs) ((regs)->sp) #define user_stack_pointer(regs) ((regs)->sp)
/* Does the process account for user or for system time? */ /* Does the process account for user or for system time? */
#define user_mode(regs) (EX1_PL((regs)->ex1) == USER_PL) #define user_mode(regs) (EX1_PL((regs)->ex1) < KERNEL_PL)
/* Fill in a struct pt_regs with the current kernel registers. */ /* Fill in a struct pt_regs with the current kernel registers. */
struct pt_regs *get_pt_regs(struct pt_regs *); struct pt_regs *get_pt_regs(struct pt_regs *);
...@@ -79,8 +80,7 @@ extern void single_step_execve(void); ...@@ -79,8 +80,7 @@ extern void single_step_execve(void);
struct task_struct; struct task_struct;
extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs);
int error_code);
#ifdef __tilegx__ #ifdef __tilegx__
/* We need this since sigval_t has a user pointer in it, for GETSIGINFO etc. */ /* We need this since sigval_t has a user pointer in it, for GETSIGINFO etc. */
......
...@@ -25,10 +25,16 @@ extern char _sinitdata[], _einitdata[]; ...@@ -25,10 +25,16 @@ extern char _sinitdata[], _einitdata[];
/* Write-once data is writable only till the end of initialization. */ /* Write-once data is writable only till the end of initialization. */
extern char __w1data_begin[], __w1data_end[]; extern char __w1data_begin[], __w1data_end[];
extern char vdso_start[], vdso_end[];
#ifdef CONFIG_COMPAT
extern char vdso32_start[], vdso32_end[];
#endif
/* Not exactly sections, but PC comparison points in the code. */ /* Not exactly sections, but PC comparison points in the code. */
extern char __rt_sigreturn[], __rt_sigreturn_end[]; extern char __rt_sigreturn[], __rt_sigreturn_end[];
#ifndef __tilegx__ #ifdef __tilegx__
extern char __start_unalign_asm_code[], __end_unalign_asm_code[];
#else
extern char sys_cmpxchg[], __sys_cmpxchg_end[]; extern char sys_cmpxchg[], __sys_cmpxchg_end[];
extern char __sys_cmpxchg_grab_lock[]; extern char __sys_cmpxchg_grab_lock[];
extern char __start_atomic_asm_code[], __end_atomic_asm_code[]; extern char __start_atomic_asm_code[], __end_atomic_asm_code[];
......
...@@ -24,9 +24,8 @@ ...@@ -24,9 +24,8 @@
*/ */
#define MAXMEM_PFN PFN_DOWN(MAXMEM) #define MAXMEM_PFN PFN_DOWN(MAXMEM)
int tile_console_write(const char *buf, int count);
void early_panic(const char *fmt, ...); void early_panic(const char *fmt, ...);
void warn_early_printk(void);
void __init disable_early_printk(void);
/* Init-time routine to do tile-specific per-cpu setup. */ /* Init-time routine to do tile-specific per-cpu setup. */
void setup_cpu(int boot); void setup_cpu(int boot);
......
...@@ -101,10 +101,8 @@ void print_disabled_cpus(void); ...@@ -101,10 +101,8 @@ void print_disabled_cpus(void);
extern struct cpumask cpu_lotar_map; extern struct cpumask cpu_lotar_map;
#define cpu_is_valid_lotar(cpu) cpumask_test_cpu((cpu), &cpu_lotar_map) #define cpu_is_valid_lotar(cpu) cpumask_test_cpu((cpu), &cpu_lotar_map)
#if CHIP_HAS_CBOX_HOME_MAP()
/* Which processors are used for hash-for-home mapping */ /* Which processors are used for hash-for-home mapping */
extern struct cpumask hash_for_home_map; extern struct cpumask hash_for_home_map;
#endif
/* Which cpus can have their cache flushed by hv_flush_remote(). */ /* Which cpus can have their cache flushed by hv_flush_remote(). */
extern struct cpumask cpu_cacheable_map; extern struct cpumask cpu_cacheable_map;
......
...@@ -27,7 +27,7 @@ ...@@ -27,7 +27,7 @@
* Return the "current" portion of a ticket lock value, * Return the "current" portion of a ticket lock value,
* i.e. the number that currently owns the lock. * i.e. the number that currently owns the lock.
*/ */
static inline int arch_spin_current(u32 val) static inline u32 arch_spin_current(u32 val)
{ {
return val >> __ARCH_SPIN_CURRENT_SHIFT; return val >> __ARCH_SPIN_CURRENT_SHIFT;
} }
...@@ -36,7 +36,7 @@ static inline int arch_spin_current(u32 val) ...@@ -36,7 +36,7 @@ static inline int arch_spin_current(u32 val)
* Return the "next" portion of a ticket lock value, * Return the "next" portion of a ticket lock value,
* i.e. the number that the next task to try to acquire the lock will get. * i.e. the number that the next task to try to acquire the lock will get.
*/ */
static inline int arch_spin_next(u32 val) static inline u32 arch_spin_next(u32 val)
{ {
return val & __ARCH_SPIN_NEXT_MASK; return val & __ARCH_SPIN_NEXT_MASK;
} }
......
...@@ -21,8 +21,10 @@ ...@@ -21,8 +21,10 @@
#define __HAVE_ARCH_MEMMOVE #define __HAVE_ARCH_MEMMOVE
#define __HAVE_ARCH_STRCHR #define __HAVE_ARCH_STRCHR
#define __HAVE_ARCH_STRLEN #define __HAVE_ARCH_STRLEN
#define __HAVE_ARCH_STRNLEN
extern __kernel_size_t strlen(const char *); extern __kernel_size_t strlen(const char *);
extern __kernel_size_t strnlen(const char *, __kernel_size_t);
extern char *strchr(const char *s, int c); extern char *strchr(const char *s, int c);
extern void *memchr(const void *s, int c, size_t n); extern void *memchr(const void *s, int c, size_t n);
extern void *memset(void *, int, __kernel_size_t); extern void *memset(void *, int, __kernel_size_t);
......
...@@ -39,6 +39,11 @@ struct thread_info { ...@@ -39,6 +39,11 @@ struct thread_info {
struct restart_block restart_block; struct restart_block restart_block;
struct single_step_state *step_state; /* single step state struct single_step_state *step_state; /* single step state
(if non-zero) */ (if non-zero) */
int align_ctl; /* controls unaligned access */
#ifdef __tilegx__
unsigned long unalign_jit_tmp[4]; /* temp r0..r3 storage */
void __user *unalign_jit_base; /* unalign fixup JIT base */
#endif
}; };
/* /*
...@@ -56,6 +61,7 @@ struct thread_info { ...@@ -56,6 +61,7 @@ struct thread_info {
.fn = do_no_restart_syscall, \ .fn = do_no_restart_syscall, \
}, \ }, \
.step_state = NULL, \ .step_state = NULL, \
.align_ctl = 0, \
} }
#define init_thread_info (init_thread_union.thread_info) #define init_thread_info (init_thread_union.thread_info)
......
...@@ -15,12 +15,13 @@ ...@@ -15,12 +15,13 @@
#ifndef _ASM_TILE_TRAPS_H #ifndef _ASM_TILE_TRAPS_H
#define _ASM_TILE_TRAPS_H #define _ASM_TILE_TRAPS_H
#ifndef __ASSEMBLY__
#include <arch/chip.h> #include <arch/chip.h>
/* mm/fault.c */ /* mm/fault.c */
void do_page_fault(struct pt_regs *, int fault_num, void do_page_fault(struct pt_regs *, int fault_num,
unsigned long address, unsigned long write); unsigned long address, unsigned long write);
#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC() #if CHIP_HAS_TILE_DMA()
void do_async_page_fault(struct pt_regs *); void do_async_page_fault(struct pt_regs *);
#endif #endif
...@@ -69,6 +70,16 @@ void gx_singlestep_handle(struct pt_regs *, int fault_num); ...@@ -69,6 +70,16 @@ void gx_singlestep_handle(struct pt_regs *, int fault_num);
/* kernel/intvec_64.S */ /* kernel/intvec_64.S */
void fill_ra_stack(void); void fill_ra_stack(void);
/* Handle unalign data fixup. */
extern void do_unaligned(struct pt_regs *regs, int vecnum);
#endif
#endif /* __ASSEMBLY__ */
#ifdef __tilegx__
/* 128 byte JIT per unalign fixup. */
#define UNALIGN_JIT_SHIFT 7
#endif #endif
#endif /* _ASM_TILE_TRAPS_H */ #endif /* _ASM_TILE_TRAPS_H */
...@@ -127,8 +127,10 @@ extern int fixup_exception(struct pt_regs *regs); ...@@ -127,8 +127,10 @@ extern int fixup_exception(struct pt_regs *regs);
#ifdef __LP64__ #ifdef __LP64__
#define _ASM_PTR ".quad" #define _ASM_PTR ".quad"
#define _ASM_ALIGN ".align 8"
#else #else
#define _ASM_PTR ".long" #define _ASM_PTR ".long"
#define _ASM_ALIGN ".align 4"
#endif #endif
#define __get_user_asm(OP, x, ptr, ret) \ #define __get_user_asm(OP, x, ptr, ret) \
...@@ -137,6 +139,7 @@ extern int fixup_exception(struct pt_regs *regs); ...@@ -137,6 +139,7 @@ extern int fixup_exception(struct pt_regs *regs);
"0: { movei %1, 0; movei %0, %3 }\n" \ "0: { movei %1, 0; movei %0, %3 }\n" \
"j 9f\n" \ "j 9f\n" \
".section __ex_table,\"a\"\n" \ ".section __ex_table,\"a\"\n" \
_ASM_ALIGN "\n" \
_ASM_PTR " 1b, 0b\n" \ _ASM_PTR " 1b, 0b\n" \
".popsection\n" \ ".popsection\n" \
"9:" \ "9:" \
...@@ -168,6 +171,7 @@ extern int fixup_exception(struct pt_regs *regs); ...@@ -168,6 +171,7 @@ extern int fixup_exception(struct pt_regs *regs);
"0: { movei %1, 0; movei %2, 0 }\n" \ "0: { movei %1, 0; movei %2, 0 }\n" \
"{ movei %0, %4; j 9f }\n" \ "{ movei %0, %4; j 9f }\n" \
".section __ex_table,\"a\"\n" \ ".section __ex_table,\"a\"\n" \
".align 4\n" \
".word 1b, 0b\n" \ ".word 1b, 0b\n" \
".word 2b, 0b\n" \ ".word 2b, 0b\n" \
".popsection\n" \ ".popsection\n" \
...@@ -224,6 +228,7 @@ extern int __get_user_bad(void) ...@@ -224,6 +228,7 @@ extern int __get_user_bad(void)
".pushsection .fixup,\"ax\"\n" \ ".pushsection .fixup,\"ax\"\n" \
"0: { movei %0, %3; j 9f }\n" \ "0: { movei %0, %3; j 9f }\n" \
".section __ex_table,\"a\"\n" \ ".section __ex_table,\"a\"\n" \
_ASM_ALIGN "\n" \
_ASM_PTR " 1b, 0b\n" \ _ASM_PTR " 1b, 0b\n" \
".popsection\n" \ ".popsection\n" \
"9:" \ "9:" \
...@@ -248,6 +253,7 @@ extern int __get_user_bad(void) ...@@ -248,6 +253,7 @@ extern int __get_user_bad(void)
".pushsection .fixup,\"ax\"\n" \ ".pushsection .fixup,\"ax\"\n" \
"0: { movei %0, %4; j 9f }\n" \ "0: { movei %0, %4; j 9f }\n" \
".section __ex_table,\"a\"\n" \ ".section __ex_table,\"a\"\n" \
".align 4\n" \
".word 1b, 0b\n" \ ".word 1b, 0b\n" \
".word 2b, 0b\n" \ ".word 2b, 0b\n" \
".popsection\n" \ ".popsection\n" \
...@@ -566,37 +572,6 @@ static inline unsigned long __must_check flush_user( ...@@ -566,37 +572,6 @@ static inline unsigned long __must_check flush_user(
return len; return len;
} }
/**
* inv_user: - Invalidate a block of memory in user space from cache.
* @mem: Destination address, in user space.
* @len: Number of bytes to invalidate.
*
* Returns number of bytes that could not be invalidated.
* On success, this will be zero.
*
* Note that on Tile64, the "inv" operation is in fact a
* "flush and invalidate", so cache write-backs will occur prior
* to the cache being marked invalid.
*/
extern unsigned long inv_user_asm(void __user *mem, unsigned long len);
static inline unsigned long __must_check __inv_user(
void __user *mem, unsigned long len)
{
int retval;
might_fault();
retval = inv_user_asm(mem, len);
mb_incoherent();
return retval;
}
static inline unsigned long __must_check inv_user(
void __user *mem, unsigned long len)
{
if (access_ok(VERIFY_WRITE, mem, len))
return __inv_user(mem, len);
return len;
}
/** /**
* finv_user: - Flush-inval a block of memory in user space from cache. * finv_user: - Flush-inval a block of memory in user space from cache.
* @mem: Destination address, in user space. * @mem: Destination address, in user space.
......
...@@ -15,11 +15,15 @@ ...@@ -15,11 +15,15 @@
#ifndef _ASM_TILE_UNALIGNED_H #ifndef _ASM_TILE_UNALIGNED_H
#define _ASM_TILE_UNALIGNED_H #define _ASM_TILE_UNALIGNED_H
#include <linux/unaligned/le_struct.h> /*
#include <linux/unaligned/be_byteshift.h> * We could implement faster get_unaligned_[be/le]64 using the ldna
#include <linux/unaligned/generic.h> * instruction on tilegx; however, we need to either copy all of the
#define get_unaligned __get_unaligned_le * other generic functions to here (which is pretty ugly) or else
#define put_unaligned __put_unaligned_le * modify both the generic code and other arch code to allow arch
* specific unaligned data access functions. Given these functions
* are not often called, we'll stick with the generic version.
*/
#include <asm-generic/unaligned.h>
/* /*
* Is the kernel doing fixups of unaligned accesses? If <0, no kernel * Is the kernel doing fixups of unaligned accesses? If <0, no kernel
......
/*
* Copyright 2012 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef __TILE_VDSO_H__
#define __TILE_VDSO_H__
#include <linux/types.h>
/*
* Note about the vdso_data structure:
*
* NEVER USE THEM IN USERSPACE CODE DIRECTLY. The layout of the
* structure is supposed to be known only to the function in the vdso
* itself and may change without notice.
*/
struct vdso_data {
__u64 tz_update_count; /* Timezone atomicity ctr */
__u64 tb_update_count; /* Timebase atomicity ctr */
__u64 xtime_tod_stamp; /* TOD clock for xtime */
__u64 xtime_clock_sec; /* Kernel time second */
__u64 xtime_clock_nsec; /* Kernel time nanosecond */
__u64 wtom_clock_sec; /* Wall to monotonic clock second */
__u64 wtom_clock_nsec; /* Wall to monotonic clock nanosecond */
__u32 mult; /* Cycle to nanosecond multiplier */
__u32 shift; /* Cycle to nanosecond divisor (power of two) */
__u32 tz_minuteswest; /* Minutes west of Greenwich */
__u32 tz_dsttime; /* Type of dst correction */
};
extern struct vdso_data *vdso_data;
/* __vdso_rt_sigreturn is defined with the addresses in the vdso page. */
extern void __vdso_rt_sigreturn(void);
extern int setup_vdso_pages(void);
#endif /* __TILE_VDSO_H__ */
...@@ -30,6 +30,7 @@ ...@@ -30,6 +30,7 @@
#define GXIO_TRIO_OP_ALLOC_MEMORY_MAPS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1404) #define GXIO_TRIO_OP_ALLOC_MEMORY_MAPS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1404)
#define GXIO_TRIO_OP_ALLOC_SCATTER_QUEUES IORPC_OPCODE(IORPC_FORMAT_NONE, 0x140e)
#define GXIO_TRIO_OP_ALLOC_PIO_REGIONS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1412) #define GXIO_TRIO_OP_ALLOC_PIO_REGIONS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1412)
#define GXIO_TRIO_OP_INIT_PIO_REGION_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1414) #define GXIO_TRIO_OP_INIT_PIO_REGION_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1414)
...@@ -54,6 +55,10 @@ int gxio_trio_alloc_memory_maps(gxio_trio_context_t * context, ...@@ -54,6 +55,10 @@ int gxio_trio_alloc_memory_maps(gxio_trio_context_t * context,
unsigned int flags); unsigned int flags);
int gxio_trio_alloc_scatter_queues(gxio_trio_context_t * context,
unsigned int count, unsigned int first,
unsigned int flags);
int gxio_trio_alloc_pio_regions(gxio_trio_context_t * context, int gxio_trio_alloc_pio_regions(gxio_trio_context_t * context,
unsigned int count, unsigned int first, unsigned int count, unsigned int first,
unsigned int flags); unsigned int flags);
......
/*
* Copyright 2013 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
/* This file is machine-generated; DO NOT EDIT! */
#ifndef __GXIO_UART_LINUX_RPC_H__
#define __GXIO_UART_LINUX_RPC_H__
#include <hv/iorpc.h>
#include <hv/drv_uart_intf.h>
#include <gxio/uart.h>
#include <gxio/kiorpc.h>
#include <linux/string.h>
#include <linux/module.h>
#include <asm/pgtable.h>
#define GXIO_UART_OP_CFG_INTERRUPT IORPC_OPCODE(IORPC_FORMAT_KERNEL_INTERRUPT, 0x1900)
#define GXIO_UART_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000)
#define GXIO_UART_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001)
int gxio_uart_cfg_interrupt(gxio_uart_context_t *context, int inter_x,
int inter_y, int inter_ipi, int inter_event);
int gxio_uart_get_mmio_base(gxio_uart_context_t *context, HV_PTE *base);
int gxio_uart_check_mmio_offset(gxio_uart_context_t *context,
unsigned long offset, unsigned long size);
#endif /* !__GXIO_UART_LINUX_RPC_H__ */
/*
* Copyright 2013 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _GXIO_UART_H_
#define _GXIO_UART_H_
#include "common.h"
#include <hv/drv_uart_intf.h>
#include <hv/iorpc.h>
/*
*
* An API for manipulating UART interface.
*/
/*
*
* The Rshim allows access to the processor's UART interface.
*/
/* A context object used to manage UART resources. */
typedef struct {
/* File descriptor for calling up to the hypervisor. */
int fd;
/* The VA at which our MMIO registers are mapped. */
char *mmio_base;
} gxio_uart_context_t;
/* Request UART interrupts.
*
* Request that interrupts be delivered to a tile when the UART's
* Receive FIFO is written, or the Write FIFO is read.
*
* @param context Pointer to a properly initialized gxio_uart_context_t.
* @param bind_cpu_x X coordinate of CPU to which interrupt will be delivered.
* @param bind_cpu_y Y coordinate of CPU to which interrupt will be delivered.
* @param bind_interrupt IPI interrupt number.
* @param bind_event Sub-interrupt event bit number; a negative value can
* disable the interrupt.
* @return Zero if all of the requested UART events were successfully
* configured to interrupt.
*/
extern int gxio_uart_cfg_interrupt(gxio_uart_context_t *context,
int bind_cpu_x,
int bind_cpu_y,
int bind_interrupt, int bind_event);
/* Initialize a UART context.
*
* A properly initialized context must be obtained before any of the other
* gxio_uart routines may be used.
*
* @param context Pointer to a gxio_uart_context_t, which will be initialized
* by this routine, if it succeeds.
* @param uart_index Index of the UART to use.
* @return Zero if the context was successfully initialized, else a
* GXIO_ERR_xxx error code.
*/
extern int gxio_uart_init(gxio_uart_context_t *context, int uart_index);
/* Destroy a UART context.
*
* Once destroyed, a context may not be used with any gxio_uart routines
* other than gxio_uart_init(). After this routine returns, no further
* interrupts requested on this context will be delivered. The state and
* configuration of the pins which had been attached to this context are
* unchanged by this operation.
*
* @param context Pointer to a gxio_uart_context_t.
* @return Zero if the context was successfully destroyed, else a
* GXIO_ERR_xxx error code.
*/
extern int gxio_uart_destroy(gxio_uart_context_t *context);
/* Write UART register.
* @param context Pointer to a gxio_uart_context_t.
* @param offset UART register offset.
 * @param word Data to be written to the UART register.
*/
extern void gxio_uart_write(gxio_uart_context_t *context, uint64_t offset,
uint64_t word);
/* Read UART register.
* @param context Pointer to a gxio_uart_context_t.
* @param offset UART register offset.
* @return Data read from UART register.
*/
extern uint64_t gxio_uart_read(gxio_uart_context_t *context, uint64_t offset);
#endif /* _GXIO_UART_H_ */
...@@ -64,8 +64,9 @@ struct pcie_port_property ...@@ -64,8 +64,9 @@ struct pcie_port_property
* will not consider it an error if the link comes up as a x8 link. */ * will not consider it an error if the link comes up as a x8 link. */
uint8_t allow_x8: 1; uint8_t allow_x8: 1;
/** Reserved. */ /** If true, this link is connected to a device which may or may not
uint8_t reserved: 1; * be present. */
uint8_t removable: 1;
}; };
...@@ -167,6 +168,9 @@ pcie_stream_intr_config_sel_t; ...@@ -167,6 +168,9 @@ pcie_stream_intr_config_sel_t;
struct pcie_trio_ports_property struct pcie_trio_ports_property
{ {
struct pcie_port_property ports[TILEGX_TRIO_PCIES]; struct pcie_port_property ports[TILEGX_TRIO_PCIES];
/** Set if this TRIO belongs to a Gx72 device. */
uint8_t is_gx72;
}; };
/* Flags indicating traffic class. */ /* Flags indicating traffic class. */
......
/*
* Copyright 2013 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
/**
* Interface definitions for the UART driver.
*/
#ifndef _SYS_HV_DRV_UART_INTF_H
#define _SYS_HV_DRV_UART_INTF_H
#include <arch/uart.h>
/** Number of UART ports supported. */
#define TILEGX_UART_NR 2
/** The mmap file offset (PA) of the UART MMIO region. */
#define HV_UART_MMIO_OFFSET 0
/** The maximum size of the UARTs MMIO region (64K Bytes). */
#define HV_UART_MMIO_SIZE (1UL << 16)
#endif /* _SYS_HV_DRV_UART_INTF_H */
...@@ -318,8 +318,11 @@ ...@@ -318,8 +318,11 @@
/** hv_set_pte_super_shift */ /** hv_set_pte_super_shift */
#define HV_DISPATCH_SET_PTE_SUPER_SHIFT 57 #define HV_DISPATCH_SET_PTE_SUPER_SHIFT 57
/** hv_console_set_ipi */
#define HV_DISPATCH_CONSOLE_SET_IPI 63
/** One more than the largest dispatch value */ /** One more than the largest dispatch value */
#define _HV_DISPATCH_END 58 #define _HV_DISPATCH_END 64
#ifndef __ASSEMBLER__ #ifndef __ASSEMBLER__
...@@ -541,14 +544,24 @@ typedef enum { ...@@ -541,14 +544,24 @@ typedef enum {
HV_CONFSTR_CPUMOD_REV = 18, HV_CONFSTR_CPUMOD_REV = 18,
/** Human-readable CPU module description. */ /** Human-readable CPU module description. */
HV_CONFSTR_CPUMOD_DESC = 19 HV_CONFSTR_CPUMOD_DESC = 19,
/** Per-tile hypervisor statistics. When this identifier is specified,
* the hv_confstr call takes two extra arguments. The first is the
* HV_XY_TO_LOTAR of the target tile's coordinates. The second is
* a flag word. The only current flag is the lowest bit, which means
* "zero out the stats instead of retrieving them"; in this case the
* buffer and buffer length are ignored. */
HV_CONFSTR_HV_STATS = 20
} HV_ConfstrQuery; } HV_ConfstrQuery;
/** Query a configuration string from the hypervisor. /** Query a configuration string from the hypervisor.
* *
* @param query Identifier for the specific string to be retrieved * @param query Identifier for the specific string to be retrieved
* (HV_CONFSTR_xxx). * (HV_CONFSTR_xxx). Some strings may require or permit extra
* arguments to be appended which select specific objects to be
* described; see the string descriptions above.
* @param buf Buffer in which to place the string. * @param buf Buffer in which to place the string.
* @param len Length of the buffer. * @param len Length of the buffer.
* @return If query is valid, then the length of the corresponding string, * @return If query is valid, then the length of the corresponding string,
...@@ -556,21 +569,16 @@ typedef enum { ...@@ -556,21 +569,16 @@ typedef enum {
* was truncated. If query is invalid, HV_EINVAL. If the specified * was truncated. If query is invalid, HV_EINVAL. If the specified
* buffer is not writable by the client, HV_EFAULT. * buffer is not writable by the client, HV_EFAULT.
*/ */
int hv_confstr(HV_ConfstrQuery query, HV_VirtAddr buf, int len); int hv_confstr(HV_ConfstrQuery query, HV_VirtAddr buf, int len, ...);
/** Tile coordinate */ /** Tile coordinate */
typedef struct typedef struct
{ {
#ifndef __BIG_ENDIAN__
/** X coordinate, relative to supervisor's top-left coordinate */ /** X coordinate, relative to supervisor's top-left coordinate */
int x; int x;
/** Y coordinate, relative to supervisor's top-left coordinate */ /** Y coordinate, relative to supervisor's top-left coordinate */
int y; int y;
#else
int y;
int x;
#endif
} HV_Coord; } HV_Coord;
...@@ -585,6 +593,30 @@ typedef struct ...@@ -585,6 +593,30 @@ typedef struct
*/ */
int hv_get_ipi_pte(HV_Coord tile, int pl, HV_PTE* pte); int hv_get_ipi_pte(HV_Coord tile, int pl, HV_PTE* pte);
/** Configure the console interrupt.
*
* When the console client interrupt is enabled, the hypervisor will
* deliver the specified IPI to the client in the following situations:
*
* - The console has at least one character available for input.
*
* - The console can accept new characters for output, and the last call
* to hv_console_write() did not write all of the characters requested
* by the client.
*
* Note that in some system configurations, console interrupt will not
* be available; clients should be prepared for this routine to fail and
* to fall back to periodic console polling in that case.
*
* @param ipi Index of the IPI register which will receive the interrupt.
* @param event IPI event number for console interrupt. If less than 0,
* disable the console IPI interrupt.
* @param coord Tile to be targeted for console interrupt.
* @return 0 on success, otherwise, HV_EINVAL if illegal parameter,
 * HV_ENOTSUP if console interrupts are not available.
*/
int hv_console_set_ipi(int ipi, int event, HV_Coord coord);
#else /* !CHIP_HAS_IPI() */ #else /* !CHIP_HAS_IPI() */
/** A set of interrupts. */ /** A set of interrupts. */
...@@ -1092,13 +1124,8 @@ HV_VirtAddrRange hv_inquire_virtual(int idx); ...@@ -1092,13 +1124,8 @@ HV_VirtAddrRange hv_inquire_virtual(int idx);
/** A range of ASID values. */ /** A range of ASID values. */
typedef struct typedef struct
{ {
#ifndef __BIG_ENDIAN__
HV_ASID start; /**< First ASID in the range. */ HV_ASID start; /**< First ASID in the range. */
unsigned int size; /**< Number of ASIDs. Zero for an invalid range. */ unsigned int size; /**< Number of ASIDs. Zero for an invalid range. */
#else
unsigned int size; /**< Number of ASIDs. Zero for an invalid range. */
HV_ASID start; /**< First ASID in the range. */
#endif
} HV_ASIDRange; } HV_ASIDRange;
/** Returns information about a range of ASIDs. /** Returns information about a range of ASIDs.
...@@ -1422,7 +1449,6 @@ typedef enum ...@@ -1422,7 +1449,6 @@ typedef enum
/** Message recipient. */ /** Message recipient. */
typedef struct typedef struct
{ {
#ifndef __BIG_ENDIAN__
/** X coordinate, relative to supervisor's top-left coordinate */ /** X coordinate, relative to supervisor's top-left coordinate */
unsigned int x:11; unsigned int x:11;
...@@ -1431,11 +1457,6 @@ typedef struct ...@@ -1431,11 +1457,6 @@ typedef struct
/** Status of this recipient */ /** Status of this recipient */
HV_Recip_State state:10; HV_Recip_State state:10;
#else //__BIG_ENDIAN__
HV_Recip_State state:10;
unsigned int y:11;
unsigned int x:11;
#endif
} HV_Recipient; } HV_Recipient;
/** Send a message to a set of recipients. /** Send a message to a set of recipients.
......
# UAPI Header export list # UAPI Header export list
header-y += abi.h header-y += abi.h
header-y += chip.h header-y += chip.h
header-y += chip_tile64.h
header-y += chip_tilegx.h header-y += chip_tilegx.h
header-y += chip_tilepro.h header-y += chip_tilepro.h
header-y += icache.h header-y += icache.h
......
...@@ -12,9 +12,7 @@ ...@@ -12,9 +12,7 @@
* more details. * more details.
*/ */
#if __tile_chip__ == 0 #if __tile_chip__ == 1
#include <arch/chip_tile64.h>
#elif __tile_chip__ == 1
#include <arch/chip_tilepro.h> #include <arch/chip_tilepro.h>
#elif defined(__tilegx__) #elif defined(__tilegx__)
#include <arch/chip_tilegx.h> #include <arch/chip_tilegx.h>
......
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
/*
* @file
* Global header file.
* This header file specifies defines for TILE64.
*/
#ifndef __ARCH_CHIP_H__
#define __ARCH_CHIP_H__
/** Specify chip version.
* When possible, prefer the CHIP_xxx symbols below for future-proofing.
* This is intended for cross-compiling; native compilation should
* use the predefined __tile_chip__ symbol.
*/
#define TILE_CHIP 0
/** Specify chip revision.
* This provides for the case of a respin of a particular chip type;
* the normal value for this symbol is "0".
* This is intended for cross-compiling; native compilation should
* use the predefined __tile_chip_rev__ symbol.
*/
#define TILE_CHIP_REV 0
/** The name of this architecture. */
#define CHIP_ARCH_NAME "tile64"
/** The ELF e_machine type for binaries for this chip. */
#define CHIP_ELF_TYPE() EM_TILE64
/** The alternate ELF e_machine type for binaries for this chip. */
#define CHIP_COMPAT_ELF_TYPE() 0x2506
/** What is the native word size of the machine? */
#define CHIP_WORD_SIZE() 32
/** How many bits of a virtual address are used. Extra bits must be
* the sign extension of the low bits.
*/
#define CHIP_VA_WIDTH() 32
/** How many bits are in a physical address? */
#define CHIP_PA_WIDTH() 36
/** Size of the L2 cache, in bytes. */
#define CHIP_L2_CACHE_SIZE() 65536
/** Log size of an L2 cache line in bytes. */
#define CHIP_L2_LOG_LINE_SIZE() 6
/** Size of an L2 cache line, in bytes. */
#define CHIP_L2_LINE_SIZE() (1 << CHIP_L2_LOG_LINE_SIZE())
/** Associativity of the L2 cache. */
#define CHIP_L2_ASSOC() 2
/** Size of the L1 data cache, in bytes. */
#define CHIP_L1D_CACHE_SIZE() 8192
/** Log size of an L1 data cache line in bytes. */
#define CHIP_L1D_LOG_LINE_SIZE() 4
/** Size of an L1 data cache line, in bytes. */
#define CHIP_L1D_LINE_SIZE() (1 << CHIP_L1D_LOG_LINE_SIZE())
/** Associativity of the L1 data cache. */
#define CHIP_L1D_ASSOC() 2
/** Size of the L1 instruction cache, in bytes. */
#define CHIP_L1I_CACHE_SIZE() 8192
/** Log size of an L1 instruction cache line in bytes. */
#define CHIP_L1I_LOG_LINE_SIZE() 6
/** Size of an L1 instruction cache line, in bytes. */
#define CHIP_L1I_LINE_SIZE() (1 << CHIP_L1I_LOG_LINE_SIZE())
/** Associativity of the L1 instruction cache. */
#define CHIP_L1I_ASSOC() 1
/** Stride with which flush instructions must be issued. */
#define CHIP_FLUSH_STRIDE() CHIP_L2_LINE_SIZE()
/** Stride with which inv instructions must be issued. */
#define CHIP_INV_STRIDE() CHIP_L1D_LINE_SIZE()
/** Stride with which finv instructions must be issued. */
#define CHIP_FINV_STRIDE() CHIP_L1D_LINE_SIZE()
/** Can the local cache coherently cache data that is homed elsewhere? */
#define CHIP_HAS_COHERENT_LOCAL_CACHE() 0
/** How many simultaneous outstanding victims can the L2 cache have? */
#define CHIP_MAX_OUTSTANDING_VICTIMS() 2
/** Does the TLB support the NC and NOALLOC bits? */
#define CHIP_HAS_NC_AND_NOALLOC_BITS() 0
/** Does the chip support hash-for-home caching? */
#define CHIP_HAS_CBOX_HOME_MAP() 0
/** Number of entries in the chip's home map tables. */
/* #define CHIP_CBOX_HOME_MAP_SIZE() -- does not apply to chip 0 */
/** Do uncacheable requests miss in the cache regardless of whether
* there is matching data? */
#define CHIP_HAS_ENFORCED_UNCACHEABLE_REQUESTS() 0
/** Does the mf instruction wait for victims? */
#define CHIP_HAS_MF_WAITS_FOR_VICTIMS() 1
/** Does the chip have an "inv" instruction that doesn't also flush? */
#define CHIP_HAS_INV() 0
/** Does the chip have a "wh64" instruction? */
#define CHIP_HAS_WH64() 0
/** Does this chip have a 'dword_align' instruction? */
#define CHIP_HAS_DWORD_ALIGN() 0
/** Number of performance counters. */
#define CHIP_PERFORMANCE_COUNTERS() 2
/** Does this chip have auxiliary performance counters? */
#define CHIP_HAS_AUX_PERF_COUNTERS() 0
/** Is the CBOX_MSR1 SPR supported? */
#define CHIP_HAS_CBOX_MSR1() 0
/** Is the TILE_RTF_HWM SPR supported? */
#define CHIP_HAS_TILE_RTF_HWM() 0
/** Is the TILE_WRITE_PENDING SPR supported? */
#define CHIP_HAS_TILE_WRITE_PENDING() 0
/** Is the PROC_STATUS SPR supported? */
#define CHIP_HAS_PROC_STATUS_SPR() 0
/** Is the DSTREAM_PF SPR supported? */
#define CHIP_HAS_DSTREAM_PF() 0
/** Log of the number of mshims we have. */
#define CHIP_LOG_NUM_MSHIMS() 2
/** Are the bases of the interrupt vector areas fixed? */
#define CHIP_HAS_FIXED_INTVEC_BASE() 1
/** Are the interrupt masks split up into 2 SPRs? */
#define CHIP_HAS_SPLIT_INTR_MASK() 1
/** Is the cycle count split up into 2 SPRs? */
#define CHIP_HAS_SPLIT_CYCLE() 1
/** Does the chip have a static network? */
#define CHIP_HAS_SN() 1
/** Does the chip have a static network processor? */
#define CHIP_HAS_SN_PROC() 1
/** Size of the L1 static network processor instruction cache, in bytes. */
#define CHIP_L1SNI_CACHE_SIZE() 2048
/** Does the chip have DMA support in each tile? */
#define CHIP_HAS_TILE_DMA() 1
/** Does the chip have the second revision of the directly accessible
* dynamic networks? This encapsulates a number of characteristics,
* including the absence of the catch-all, the absence of inline message
* tags, the absence of support for network context-switching, and so on.
*/
#define CHIP_HAS_REV1_XDN() 0
/** Does the chip have cmpexch and similar (fetchadd, exch, etc.)? */
#define CHIP_HAS_CMPEXCH() 0
/** Does the chip have memory-mapped I/O support? */
#define CHIP_HAS_MMIO() 0
/** Does the chip have post-completion interrupts? */
#define CHIP_HAS_POST_COMPLETION_INTERRUPTS() 0
/** Does the chip have native single step support? */
#define CHIP_HAS_SINGLE_STEP() 0
#ifndef __OPEN_SOURCE__ /* features only relevant to hypervisor-level code */
/** How many entries are present in the instruction TLB? */
#define CHIP_ITLB_ENTRIES() 8
/** How many entries are present in the data TLB? */
#define CHIP_DTLB_ENTRIES() 16
/** How many MAF entries does the XAUI shim have? */
#define CHIP_XAUI_MAF_ENTRIES() 16
/** Does the memory shim have a source-id table? */
#define CHIP_HAS_MSHIM_SRCID_TABLE() 1
/** Does the L1 instruction cache clear on reset? */
#define CHIP_HAS_L1I_CLEAR_ON_RESET() 0
/** Does the chip come out of reset with valid coordinates on all tiles?
* Note that if defined, this also implies that the upper left is 1,1.
*/
#define CHIP_HAS_VALID_TILE_COORD_RESET() 0
/** Does the chip have unified packet formats? */
#define CHIP_HAS_UNIFIED_PACKET_FORMATS() 0
/** Does the chip support write reordering? */
#define CHIP_HAS_WRITE_REORDERING() 0
/** Does the chip support Y-X routing as well as X-Y? */
#define CHIP_HAS_Y_X_ROUTING() 0
/** Is INTCTRL_3 managed with the correct MPL? */
#define CHIP_HAS_INTCTRL_3_STATUS_FIX() 0
/** Is it possible to configure the chip to be big-endian? */
#define CHIP_HAS_BIG_ENDIAN_CONFIG() 0
/** Is the CACHE_RED_WAY_OVERRIDDEN SPR supported? */
#define CHIP_HAS_CACHE_RED_WAY_OVERRIDDEN() 0
/** Is the DIAG_TRACE_WAY SPR supported? */
#define CHIP_HAS_DIAG_TRACE_WAY() 0
/** Is the MEM_STRIPE_CONFIG SPR supported? */
#define CHIP_HAS_MEM_STRIPE_CONFIG() 0
/** Are the TLB_PERF SPRs supported? */
#define CHIP_HAS_TLB_PERF() 0
/** Is the VDN_SNOOP_SHIM_CTL SPR supported? */
#define CHIP_HAS_VDN_SNOOP_SHIM_CTL() 0
/** Does the chip support rev1 DMA packets? */
#define CHIP_HAS_REV1_DMA_PACKETS() 0
/** Does the chip have an IPI shim? */
#define CHIP_HAS_IPI() 0
#endif /* !__OPEN_SOURCE__ */
#endif /* __ARCH_CHIP_H__ */
...@@ -61,6 +61,7 @@ typedef tilegx_bundle_bits tile_bundle_bits; ...@@ -61,6 +61,7 @@ typedef tilegx_bundle_bits tile_bundle_bits;
#define TILE_BUNDLE_ALIGNMENT_IN_BYTES TILEGX_BUNDLE_ALIGNMENT_IN_BYTES #define TILE_BUNDLE_ALIGNMENT_IN_BYTES TILEGX_BUNDLE_ALIGNMENT_IN_BYTES
#define TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES \ #define TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES \
TILEGX_LOG2_BUNDLE_ALIGNMENT_IN_BYTES TILEGX_LOG2_BUNDLE_ALIGNMENT_IN_BYTES
#define TILE_BPT_BUNDLE TILEGX_BPT_BUNDLE
/* 64-bit pattern for a { bpt ; nop } bundle. */ /* 64-bit pattern for a { bpt ; nop } bundle. */
#define TILEGX_BPT_BUNDLE 0x286a44ae51485000ULL #define TILEGX_BPT_BUNDLE 0x286a44ae51485000ULL
......
...@@ -71,6 +71,7 @@ typedef tilepro_bundle_bits tile_bundle_bits; ...@@ -71,6 +71,7 @@ typedef tilepro_bundle_bits tile_bundle_bits;
#define TILE_BUNDLE_ALIGNMENT_IN_BYTES TILEPRO_BUNDLE_ALIGNMENT_IN_BYTES #define TILE_BUNDLE_ALIGNMENT_IN_BYTES TILEPRO_BUNDLE_ALIGNMENT_IN_BYTES
#define TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES \ #define TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES \
TILEPRO_LOG2_BUNDLE_ALIGNMENT_IN_BYTES TILEPRO_LOG2_BUNDLE_ALIGNMENT_IN_BYTES
#define TILE_BPT_BUNDLE TILEPRO_BPT_BUNDLE
/* 64-bit pattern for a { bpt ; nop } bundle. */ /* 64-bit pattern for a { bpt ; nop } bundle. */
#define TILEPRO_BPT_BUNDLE 0x400b3cae70166000ULL #define TILEPRO_BPT_BUNDLE 0x400b3cae70166000ULL
......
...@@ -200,8 +200,6 @@ ...@@ -200,8 +200,6 @@
#define SPR_SIM_CONTROL 0x4e0c #define SPR_SIM_CONTROL 0x4e0c
#define SPR_SNCTL 0x0805 #define SPR_SNCTL 0x0805
#define SPR_SNCTL__FRZFABRIC_MASK 0x1 #define SPR_SNCTL__FRZFABRIC_MASK 0x1
#define SPR_SNCTL__FRZPROC_MASK 0x2
#define SPR_SNPC 0x080b
#define SPR_SNSTATIC 0x080c #define SPR_SNSTATIC 0x080c
#define SPR_SYSTEM_SAVE_0_0 0x4b00 #define SPR_SYSTEM_SAVE_0_0 0x4b00
#define SPR_SYSTEM_SAVE_0_1 0x4b01 #define SPR_SYSTEM_SAVE_0_1 0x4b01
......
...@@ -15,6 +15,7 @@ ...@@ -15,6 +15,7 @@
#ifndef _ASM_TILE_AUXVEC_H #ifndef _ASM_TILE_AUXVEC_H
#define _ASM_TILE_AUXVEC_H #define _ASM_TILE_AUXVEC_H
/* No extensions to auxvec */ /* The vDSO location. */
#define AT_SYSINFO_EHDR 33
#endif /* _ASM_TILE_AUXVEC_H */ #endif /* _ASM_TILE_AUXVEC_H */
...@@ -29,8 +29,8 @@ ...@@ -29,8 +29,8 @@
* to honor the arguments at some point.) * to honor the arguments at some point.)
* *
* Flush and invalidation of memory can normally be performed with the * Flush and invalidation of memory can normally be performed with the
* __insn_flush(), __insn_inv(), and __insn_finv() instructions from * __insn_flush() and __insn_finv() instructions from userspace.
* userspace. The DCACHE option to the system call allows userspace * The DCACHE option to the system call allows userspace
* to flush the entire L1+L2 data cache from the core. In this case, * to flush the entire L1+L2 data cache from the core. In this case,
* the address and length arguments are not used. The DCACHE flush is * the address and length arguments are not used. The DCACHE flush is
* restricted to the current core, not all cores in the address space. * restricted to the current core, not all cores in the address space.
......
...@@ -3,11 +3,17 @@ ...@@ -3,11 +3,17 @@
# #
extra-y := vmlinux.lds head_$(BITS).o extra-y := vmlinux.lds head_$(BITS).o
obj-y := backtrace.o entry.o irq.o messaging.o \ obj-y := backtrace.o entry.o hvglue.o irq.o messaging.o \
pci-dma.o proc.o process.o ptrace.o reboot.o \ pci-dma.o proc.o process.o ptrace.o reboot.o \
setup.o signal.o single_step.o stack.o sys.o sysfs.o time.o traps.o \ setup.o signal.o single_step.o stack.o sys.o \
sysfs.o time.o traps.o unaligned.o vdso.o \
intvec_$(BITS).o regs_$(BITS).o tile-desc_$(BITS).o intvec_$(BITS).o regs_$(BITS).o tile-desc_$(BITS).o
ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_ftrace.o = -pg
CFLAGS_REMOVE_early_printk.o = -pg
endif
obj-$(CONFIG_HARDWALL) += hardwall.o obj-$(CONFIG_HARDWALL) += hardwall.o
obj-$(CONFIG_COMPAT) += compat.o compat_signal.o obj-$(CONFIG_COMPAT) += compat.o compat_signal.o
obj-$(CONFIG_SMP) += smpboot.o smp.o tlb.o obj-$(CONFIG_SMP) += smpboot.o smp.o tlb.o
...@@ -20,3 +26,9 @@ else ...@@ -20,3 +26,9 @@ else
obj-$(CONFIG_PCI) += pci.o obj-$(CONFIG_PCI) += pci.o
endif endif
obj-$(CONFIG_TILE_USB) += usb.o obj-$(CONFIG_TILE_USB) += usb.o
obj-$(CONFIG_TILE_HVGLUE_TRACE) += hvglue_trace.o
obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o mcount_64.o
obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-y += vdso/
...@@ -14,13 +14,6 @@ ...@@ -14,13 +14,6 @@
* Generates definitions from c-type structures used by assembly sources. * Generates definitions from c-type structures used by assembly sources.
*/ */
#include <linux/kbuild.h>
#include <linux/thread_info.h>
#include <linux/sched.h>
#include <linux/hardirq.h>
#include <linux/ptrace.h>
#include <hv/hypervisor.h>
/* Check for compatible compiler early in the build. */ /* Check for compatible compiler early in the build. */
#ifdef CONFIG_TILEGX #ifdef CONFIG_TILEGX
# ifndef __tilegx__ # ifndef __tilegx__
...@@ -31,46 +24,61 @@ ...@@ -31,46 +24,61 @@
# endif # endif
#else #else
# ifdef __tilegx__ # ifdef __tilegx__
# error Can not build TILEPro/TILE64 configurations with tilegx compiler # error Can not build TILEPro configurations with tilegx compiler
# endif # endif
#endif #endif
#include <linux/kbuild.h>
#include <linux/thread_info.h>
#include <linux/sched.h>
#include <linux/hardirq.h>
#include <linux/ptrace.h>
#include <hv/hypervisor.h>
void foo(void) void foo(void)
{ {
DEFINE(SINGLESTEP_STATE_BUFFER_OFFSET, \ DEFINE(SINGLESTEP_STATE_BUFFER_OFFSET,
offsetof(struct single_step_state, buffer)); offsetof(struct single_step_state, buffer));
DEFINE(SINGLESTEP_STATE_FLAGS_OFFSET, \ DEFINE(SINGLESTEP_STATE_FLAGS_OFFSET,
offsetof(struct single_step_state, flags)); offsetof(struct single_step_state, flags));
DEFINE(SINGLESTEP_STATE_ORIG_PC_OFFSET, \ DEFINE(SINGLESTEP_STATE_ORIG_PC_OFFSET,
offsetof(struct single_step_state, orig_pc)); offsetof(struct single_step_state, orig_pc));
DEFINE(SINGLESTEP_STATE_NEXT_PC_OFFSET, \ DEFINE(SINGLESTEP_STATE_NEXT_PC_OFFSET,
offsetof(struct single_step_state, next_pc)); offsetof(struct single_step_state, next_pc));
DEFINE(SINGLESTEP_STATE_BRANCH_NEXT_PC_OFFSET, \ DEFINE(SINGLESTEP_STATE_BRANCH_NEXT_PC_OFFSET,
offsetof(struct single_step_state, branch_next_pc)); offsetof(struct single_step_state, branch_next_pc));
DEFINE(SINGLESTEP_STATE_UPDATE_VALUE_OFFSET, \ DEFINE(SINGLESTEP_STATE_UPDATE_VALUE_OFFSET,
offsetof(struct single_step_state, update_value)); offsetof(struct single_step_state, update_value));
DEFINE(THREAD_INFO_TASK_OFFSET, \ DEFINE(THREAD_INFO_TASK_OFFSET,
offsetof(struct thread_info, task)); offsetof(struct thread_info, task));
DEFINE(THREAD_INFO_FLAGS_OFFSET, \ DEFINE(THREAD_INFO_FLAGS_OFFSET,
offsetof(struct thread_info, flags)); offsetof(struct thread_info, flags));
DEFINE(THREAD_INFO_STATUS_OFFSET, \ DEFINE(THREAD_INFO_STATUS_OFFSET,
offsetof(struct thread_info, status)); offsetof(struct thread_info, status));
DEFINE(THREAD_INFO_HOMECACHE_CPU_OFFSET, \ DEFINE(THREAD_INFO_HOMECACHE_CPU_OFFSET,
offsetof(struct thread_info, homecache_cpu)); offsetof(struct thread_info, homecache_cpu));
DEFINE(THREAD_INFO_STEP_STATE_OFFSET, \ DEFINE(THREAD_INFO_PREEMPT_COUNT_OFFSET,
offsetof(struct thread_info, preempt_count));
DEFINE(THREAD_INFO_STEP_STATE_OFFSET,
offsetof(struct thread_info, step_state)); offsetof(struct thread_info, step_state));
#ifdef __tilegx__
DEFINE(THREAD_INFO_UNALIGN_JIT_BASE_OFFSET,
offsetof(struct thread_info, unalign_jit_base));
DEFINE(THREAD_INFO_UNALIGN_JIT_TMP_OFFSET,
offsetof(struct thread_info, unalign_jit_tmp));
#endif
DEFINE(TASK_STRUCT_THREAD_KSP_OFFSET, DEFINE(TASK_STRUCT_THREAD_KSP_OFFSET,
offsetof(struct task_struct, thread.ksp)); offsetof(struct task_struct, thread.ksp));
DEFINE(TASK_STRUCT_THREAD_PC_OFFSET, DEFINE(TASK_STRUCT_THREAD_PC_OFFSET,
offsetof(struct task_struct, thread.pc)); offsetof(struct task_struct, thread.pc));
DEFINE(HV_TOPOLOGY_WIDTH_OFFSET, \ DEFINE(HV_TOPOLOGY_WIDTH_OFFSET,
offsetof(HV_Topology, width)); offsetof(HV_Topology, width));
DEFINE(HV_TOPOLOGY_HEIGHT_OFFSET, \ DEFINE(HV_TOPOLOGY_HEIGHT_OFFSET,
offsetof(HV_Topology, height)); offsetof(HV_Topology, height));
DEFINE(IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET, \ DEFINE(IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET,
offsetof(irq_cpustat_t, irq_syscall_count)); offsetof(irq_cpustat_t, irq_syscall_count));
} }
...@@ -32,6 +32,7 @@ ...@@ -32,6 +32,7 @@
#include <asm/ucontext.h> #include <asm/ucontext.h>
#include <asm/sigframe.h> #include <asm/sigframe.h>
#include <asm/syscalls.h> #include <asm/syscalls.h>
#include <asm/vdso.h>
#include <arch/interrupts.h> #include <arch/interrupts.h>
struct compat_ucontext { struct compat_ucontext {
...@@ -227,7 +228,7 @@ int compat_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, ...@@ -227,7 +228,7 @@ int compat_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
if (err) if (err)
goto give_sigsegv; goto give_sigsegv;
restorer = VDSO_BASE; restorer = VDSO_SYM(&__vdso_rt_sigreturn);
if (ka->sa.sa_flags & SA_RESTORER) if (ka->sa.sa_flags & SA_RESTORER)
restorer = ptr_to_compat_reg(ka->sa.sa_restorer); restorer = ptr_to_compat_reg(ka->sa.sa_restorer);
......
...@@ -23,19 +23,24 @@ ...@@ -23,19 +23,24 @@
static void early_hv_write(struct console *con, const char *s, unsigned n) static void early_hv_write(struct console *con, const char *s, unsigned n)
{ {
hv_console_write((HV_VirtAddr) s, n); tile_console_write(s, n);
/*
* Convert NL to NLCR (close enough to CRNL) during early boot.
* We assume newlines are at the ends of strings, which turns out
* to be good enough for early boot console output.
*/
if (n && s[n-1] == '\n')
tile_console_write("\r", 1);
} }
static struct console early_hv_console = { static struct console early_hv_console = {
.name = "earlyhv", .name = "earlyhv",
.write = early_hv_write, .write = early_hv_write,
.flags = CON_PRINTBUFFER, .flags = CON_PRINTBUFFER | CON_BOOT,
.index = -1, .index = -1,
}; };
/* Direct interface for emergencies */
static int early_console_complete;
void early_panic(const char *fmt, ...) void early_panic(const char *fmt, ...)
{ {
va_list ap; va_list ap;
...@@ -43,51 +48,21 @@ void early_panic(const char *fmt, ...) ...@@ -43,51 +48,21 @@ void early_panic(const char *fmt, ...)
va_start(ap, fmt); va_start(ap, fmt);
early_printk("Kernel panic - not syncing: "); early_printk("Kernel panic - not syncing: ");
early_vprintk(fmt, ap); early_vprintk(fmt, ap);
early_console->write(early_console, "\n", 1); early_printk("\n");
va_end(ap); va_end(ap);
dump_stack(); dump_stack();
hv_halt(); hv_halt();
} }
static int __initdata keep_early;
static int __init setup_early_printk(char *str) static int __init setup_early_printk(char *str)
{ {
if (early_console) if (early_console)
return 1; return 1;
if (str != NULL && strncmp(str, "keep", 4) == 0)
keep_early = 1;
early_console = &early_hv_console; early_console = &early_hv_console;
register_console(early_console); register_console(early_console);
return 0; return 0;
} }
void __init disable_early_printk(void)
{
early_console_complete = 1;
if (!early_console)
return;
if (!keep_early) {
early_printk("disabling early console\n");
unregister_console(early_console);
early_console = NULL;
} else {
early_printk("keeping early console\n");
}
}
void warn_early_printk(void)
{
if (early_console_complete || early_console)
return;
early_printk("\
Machine shutting down before console output is fully initialized.\n\
You may wish to reboot and add the option 'earlyprintk' to your\n\
boot command line to see any diagnostic early console output.\n\
");
}
early_param("earlyprintk", setup_early_printk); early_param("earlyprintk", setup_early_printk);
...@@ -27,22 +27,6 @@ STD_ENTRY(current_text_addr) ...@@ -27,22 +27,6 @@ STD_ENTRY(current_text_addr)
{ move r0, lr; jrp lr } { move r0, lr; jrp lr }
STD_ENDPROC(current_text_addr) STD_ENDPROC(current_text_addr)
/*
* We don't run this function directly, but instead copy it to a page
* we map into every user process. See vdso_setup().
*
* Note that libc has a copy of this function that it uses to compare
* against the PC when a stack backtrace ends, so if this code is
* changed, the libc implementation(s) should also be updated.
*/
.pushsection .data
ENTRY(__rt_sigreturn)
moveli TREG_SYSCALL_NR_NAME,__NR_rt_sigreturn
swint1
ENDPROC(__rt_sigreturn)
ENTRY(__rt_sigreturn_end)
.popsection
STD_ENTRY(dump_stack) STD_ENTRY(dump_stack)
{ move r2, lr; lnk r1 } { move r2, lr; lnk r1 }
{ move r4, r52; addli r1, r1, dump_stack - . } { move r4, r52; addli r1, r1, dump_stack - . }
......
/*
* Copyright 2012 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*
* TILE-Gx specific ftrace support
*/
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/ftrace.h>
#include <asm/sections.h>
#include <arch/opcode.h>
#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Build the no-op bundle { fnop ; nop }: an fnop in the X0 slot paired
 * with a nop in the X1 slot.  Used to patch out a disabled call site.
 */
static inline tilegx_bundle_bits NOP(void)
{
	tilegx_bundle_bits slot_x0, slot_x1;

	/* X0 slot: fnop */
	slot_x0 = create_Opcode_X0(RRR_0_OPCODE_X0) |
		  create_RRROpcodeExtension_X0(UNARY_RRR_0_OPCODE_X0) |
		  create_UnaryOpcodeExtension_X0(FNOP_UNARY_OPCODE_X0);

	/* X1 slot: nop */
	slot_x1 = create_Opcode_X1(RRR_0_OPCODE_X1) |
		  create_RRROpcodeExtension_X1(UNARY_RRR_0_OPCODE_X1) |
		  create_UnaryOpcodeExtension_X1(NOP_UNARY_OPCODE_X1);

	return slot_x0 | slot_x1;
}
static int machine_stopped __read_mostly;
/*
 * Called by the ftrace core before a batch of call sites is patched.
 * NOTE(review): presumably runs under stop_machine(), so other CPUs are
 * quiesced — confirm against the ftrace core.  Setting machine_stopped
 * lets ftrace_modify_code() skip per-patch icache flushes; a single
 * global flush is done in ftrace_arch_code_modify_post_process().
 */
int ftrace_arch_code_modify_prepare(void)
{
machine_stopped = 1;
return 0;
}
/*
 * Called by the ftrace core after a batch of call sites has been patched.
 * Flush an L1I-cache-sized range once for the whole batch (rather than
 * per-patch), then clear the machine_stopped flag.
 */
int ftrace_arch_code_modify_post_process(void)
{
flush_icache_range(0, CHIP_L1I_CACHE_SIZE());
machine_stopped = 0;
return 0;
}
/*
 * Put { move r10, lr; jal ftrace_caller } in a single bundle; this lets
 * the dynamic tracer add only one cycle of overhead to every kernel
 * function when tracing is disabled.
 */
/*
 * Encode one tilegx bundle that branches from @pc to @addr.
 *
 * X1 slot: "jal addr" when @link is true, otherwise "j addr"; the branch
 * target is PC-relative, counted in bundles (byte offset shifted right by
 * the log2 bundle size).
 *
 * X0 slot: when @addr is the ftrace trampoline (FTRACE_ADDR), emit
 * "or r10, lr, zero" so the caller's return address is captured in r10 in
 * the same bundle as the jump; otherwise fill the slot with "fnop".
 *
 * @param pc   Address of the bundle being patched.
 * @param addr Branch target address.
 * @param link True to emit a jump-and-link, false for a plain jump.
 * @return The encoded 64-bit bundle.
 */
static unsigned long ftrace_gen_branch(unsigned long pc, unsigned long addr,
bool link)
{
tilegx_bundle_bits opcode_x0, opcode_x1;
/* Displacement in bundles, not bytes. */
long pcrel_by_instr = (addr - pc) >> TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES;
if (link) {
/* opcode: jal addr */
opcode_x1 =
create_Opcode_X1(JUMP_OPCODE_X1) |
create_JumpOpcodeExtension_X1(JAL_JUMP_OPCODE_X1) |
create_JumpOff_X1(pcrel_by_instr);
} else {
/* opcode: j addr */
opcode_x1 =
create_Opcode_X1(JUMP_OPCODE_X1) |
create_JumpOpcodeExtension_X1(J_JUMP_OPCODE_X1) |
create_JumpOff_X1(pcrel_by_instr);
}
if (addr == FTRACE_ADDR) {
/* opcode: or r10, lr, zero -- save lr into r10 for the trampoline */
opcode_x0 =
create_Dest_X0(10) |
create_SrcA_X0(TREG_LR) |
create_SrcB_X0(TREG_ZERO) |
create_RRROpcodeExtension_X0(OR_RRR_0_OPCODE_X0) |
create_Opcode_X0(RRR_0_OPCODE_X0);
} else {
/* opcode: fnop */
opcode_x0 =
create_UnaryOpcodeExtension_X0(FNOP_UNARY_OPCODE_X0) |
create_RRROpcodeExtension_X0(UNARY_RRR_0_OPCODE_X0) |
create_Opcode_X0(RRR_0_OPCODE_X0);
}
return opcode_x1 | opcode_x0;
}
/* The replacement bundle for a disabled call site: a plain nop. */
static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
{
	/* The nop is the same for every record. */
	return NOP();
}
static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
{
return ftrace_gen_branch(pc, addr, true);
}
static int ftrace_modify_code(unsigned long pc, unsigned long old,
unsigned long new)
{
unsigned long pc_wr;
/* Check if the address is in kernel text space and module space. */
if (!kernel_text_address(pc))
return -EINVAL;
/* Operate on writable kernel text mapping. */
pc_wr = pc - MEM_SV_START + PAGE_OFFSET;
if (probe_kernel_write((void *)pc_wr, &new, MCOUNT_INSN_SIZE))
return -EPERM;
smp_wmb();
if (!machine_stopped && num_online_cpus() > 1)
flush_icache_range(pc, pc + MCOUNT_INSN_SIZE);
return 0;
}
/*
 * Repoint the global ftrace_call site so it branches to @func.
 * Returns 0 on success or a negative errno from ftrace_modify_code().
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long site = (unsigned long)&ftrace_call;
	unsigned long old_insn, new_insn;

	/* Snapshot the bundle currently installed at the call site. */
	memcpy(&old_insn, &ftrace_call, MCOUNT_INSN_SIZE);
	new_insn = ftrace_call_replace(site, (unsigned long)func);

	return ftrace_modify_code(site, old_insn, new_insn);
}
/* Enable tracing at @rec: swap the site's nop for a call to @addr. */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned long expected = ftrace_nop_replace(rec);
	unsigned long call_insn = ftrace_call_replace(ip, addr);

	return ftrace_modify_code(ip, expected, call_insn);
}
/* Disable tracing at @rec: swap the site's call to @addr for a nop. */
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;

	return ftrace_modify_code(ip,
				  ftrace_call_replace(ip, addr),
				  ftrace_nop_replace(rec));
}
/*
 * Arch hook for dynamic-ftrace initialization; no tile-specific setup
 * is needed beyond zeroing the counter the core passes in via @data.
 */
int __init ftrace_dyn_arch_init(void *data)
{
	unsigned long *counter = data;

	*counter = 0;
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Hook for the function-graph tracer: replace the caller's saved return
 * address (*parent) with return_to_handler, pushing the real return
 * address onto the per-task return stack so it can be restored later.
 * Every rejection path below must undo *parent before returning.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long) &return_to_handler;
	struct ftrace_graph_ent trace;
	unsigned long old;
	int err;

	/* Graph tracing is paused for this task; leave the site alone. */
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/* Hijack the return address, remembering the original. */
	old = *parent;
	*parent = return_hooker;

	err = ftrace_push_return_trace(old, self_addr, &trace.depth,
				       frame_pointer);
	if (err == -EBUSY) {
		/* Return stack is full: restore the original return path. */
		*parent = old;
		return;
	}

	trace.func = self_addr;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		/* Entry rejected: pop the frame we pushed and restore. */
		current->curr_ret_stack--;
		*parent = old;
	}
}
#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_graph_call;
/*
 * Patch a single fixed call site: install a branch to @func when
 * @enable is true, or restore the nop when false.
 */
static int __ftrace_modify_caller(unsigned long *callsite,
				  void (*func) (void), bool enable)
{
	unsigned long pc = (unsigned long)callsite;
	unsigned long branch_insn = ftrace_gen_branch(pc, (unsigned long)func,
						      false);
	unsigned long nop_insn = NOP();

	if (enable)
		return ftrace_modify_code(pc, nop_insn, branch_insn);

	return ftrace_modify_code(pc, branch_insn, nop_insn);
}
/* Toggle the ftrace_graph_call site to enter/bypass the graph caller. */
static int ftrace_modify_graph_caller(bool enable)
{
	return __ftrace_modify_caller(&ftrace_graph_call,
				      ftrace_graph_caller, enable);
}
/* Called by the graph-tracer core to patch in the graph-caller branch. */
int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(true);
}

/* Called by the graph-tracer core to patch the branch back to a nop. */
int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
...@@ -272,9 +272,9 @@ static void hardwall_setup_func(void *info) ...@@ -272,9 +272,9 @@ static void hardwall_setup_func(void *info)
struct hardwall_info *r = info; struct hardwall_info *r = info;
struct hardwall_type *hwt = r->type; struct hardwall_type *hwt = r->type;
int cpu = smp_processor_id(); int cpu = smp_processor_id(); /* on_each_cpu disables preemption */
int x = cpu % smp_width; int x = cpu_x(cpu);
int y = cpu / smp_width; int y = cpu_y(cpu);
int bits = 0; int bits = 0;
if (x == r->ulhc_x) if (x == r->ulhc_x)
bits |= W_PROTECT; bits |= W_PROTECT;
...@@ -317,6 +317,7 @@ static void hardwall_protect_rectangle(struct hardwall_info *r) ...@@ -317,6 +317,7 @@ static void hardwall_protect_rectangle(struct hardwall_info *r)
on_each_cpu_mask(&rect_cpus, hardwall_setup_func, r, 1); on_each_cpu_mask(&rect_cpus, hardwall_setup_func, r, 1);
} }
/* Entered from INT_xDN_FIREWALL interrupt vector with irqs disabled. */
void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num) void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
{ {
struct hardwall_info *rect; struct hardwall_info *rect;
...@@ -325,7 +326,6 @@ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num) ...@@ -325,7 +326,6 @@ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
struct siginfo info; struct siginfo info;
int cpu = smp_processor_id(); int cpu = smp_processor_id();
int found_processes; int found_processes;
unsigned long flags;
struct pt_regs *old_regs = set_irq_regs(regs); struct pt_regs *old_regs = set_irq_regs(regs);
irq_enter(); irq_enter();
...@@ -346,7 +346,7 @@ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num) ...@@ -346,7 +346,7 @@ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
BUG_ON(hwt->disabled); BUG_ON(hwt->disabled);
/* This tile trapped a network access; find the rectangle. */ /* This tile trapped a network access; find the rectangle. */
spin_lock_irqsave(&hwt->lock, flags); spin_lock(&hwt->lock);
list_for_each_entry(rect, &hwt->list, list) { list_for_each_entry(rect, &hwt->list, list) {
if (cpumask_test_cpu(cpu, &rect->cpumask)) if (cpumask_test_cpu(cpu, &rect->cpumask))
break; break;
...@@ -401,7 +401,7 @@ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num) ...@@ -401,7 +401,7 @@ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
pr_notice("hardwall: no associated processes!\n"); pr_notice("hardwall: no associated processes!\n");
done: done:
spin_unlock_irqrestore(&hwt->lock, flags); spin_unlock(&hwt->lock);
/* /*
* We have to disable firewall interrupts now, or else when we * We have to disable firewall interrupts now, or else when we
...@@ -540,6 +540,14 @@ static struct hardwall_info *hardwall_create(struct hardwall_type *hwt, ...@@ -540,6 +540,14 @@ static struct hardwall_info *hardwall_create(struct hardwall_type *hwt,
} }
} }
/*
* Eliminate cpus that are not part of this Linux client.
* Note that this allows for configurations that we might not want to
* support, such as one client on every even cpu, another client on
* every odd cpu.
*/
cpumask_and(&info->cpumask, &info->cpumask, cpu_online_mask);
/* Confirm it doesn't overlap and add it to the list. */ /* Confirm it doesn't overlap and add it to the list. */
spin_lock_irqsave(&hwt->lock, flags); spin_lock_irqsave(&hwt->lock, flags);
list_for_each_entry(iter, &hwt->list, list) { list_for_each_entry(iter, &hwt->list, list) {
...@@ -612,7 +620,7 @@ static int hardwall_activate(struct hardwall_info *info) ...@@ -612,7 +620,7 @@ static int hardwall_activate(struct hardwall_info *info)
/* /*
* Deactivate a task's hardwall. Must hold lock for hardwall_type. * Deactivate a task's hardwall. Must hold lock for hardwall_type.
* This method may be called from free_task(), so we don't want to * This method may be called from exit_thread(), so we don't want to
* rely on too many fields of struct task_struct still being valid. * rely on too many fields of struct task_struct still being valid.
* We assume the cpus_allowed, pid, and comm fields are still valid. * We assume the cpus_allowed, pid, and comm fields are still valid.
*/ */
...@@ -653,7 +661,7 @@ static int hardwall_deactivate(struct hardwall_type *hwt, ...@@ -653,7 +661,7 @@ static int hardwall_deactivate(struct hardwall_type *hwt,
return -EINVAL; return -EINVAL;
printk(KERN_DEBUG "Pid %d (%s) deactivated for %s hardwall: cpu %d\n", printk(KERN_DEBUG "Pid %d (%s) deactivated for %s hardwall: cpu %d\n",
task->pid, task->comm, hwt->name, smp_processor_id()); task->pid, task->comm, hwt->name, raw_smp_processor_id());
return 0; return 0;
} }
...@@ -795,8 +803,8 @@ static void reset_xdn_network_state(struct hardwall_type *hwt) ...@@ -795,8 +803,8 @@ static void reset_xdn_network_state(struct hardwall_type *hwt)
/* Reset UDN coordinates to their standard value */ /* Reset UDN coordinates to their standard value */
{ {
unsigned int cpu = smp_processor_id(); unsigned int cpu = smp_processor_id();
unsigned int x = cpu % smp_width; unsigned int x = cpu_x(cpu);
unsigned int y = cpu / smp_width; unsigned int y = cpu_y(cpu);
__insn_mtspr(SPR_UDN_TILE_COORD, (x << 18) | (y << 7)); __insn_mtspr(SPR_UDN_TILE_COORD, (x << 18) | (y << 7));
} }
......
...@@ -39,12 +39,12 @@ ENTRY(_start) ...@@ -39,12 +39,12 @@ ENTRY(_start)
} }
{ {
moveli r0, _HV_VERSION_OLD_HV_INIT moveli r0, _HV_VERSION_OLD_HV_INIT
jal hv_init jal _hv_init
} }
/* Get a reasonable default ASID in r0 */ /* Get a reasonable default ASID in r0 */
{ {
move r0, zero move r0, zero
jal hv_inquire_asid jal _hv_inquire_asid
} }
/* Install the default page table */ /* Install the default page table */
{ {
...@@ -64,7 +64,7 @@ ENTRY(_start) ...@@ -64,7 +64,7 @@ ENTRY(_start)
auli r0, r0, ha16(swapper_pg_dir - PAGE_OFFSET) auli r0, r0, ha16(swapper_pg_dir - PAGE_OFFSET)
} }
{ {
inv r6 finv r6
move r1, zero /* high 32 bits of CPA is zero */ move r1, zero /* high 32 bits of CPA is zero */
} }
{ {
...@@ -73,12 +73,12 @@ ENTRY(_start) ...@@ -73,12 +73,12 @@ ENTRY(_start)
} }
{ {
auli lr, lr, ha16(1f) auli lr, lr, ha16(1f)
j hv_install_context j _hv_install_context
} }
1: 1:
/* Get our processor number and save it away in SAVE_K_0. */ /* Get our processor number and save it away in SAVE_K_0. */
jal hv_inquire_topology jal _hv_inquire_topology
mulll_uu r4, r1, r2 /* r1 == y, r2 == width */ mulll_uu r4, r1, r2 /* r1 == y, r2 == width */
add r4, r4, r0 /* r0 == x, so r4 == cpu == y*width + x */ add r4, r4, r0 /* r0 == x, so r4 == cpu == y*width + x */
...@@ -86,7 +86,7 @@ ENTRY(_start) ...@@ -86,7 +86,7 @@ ENTRY(_start)
/* /*
* Load up our per-cpu offset. When the first (master) tile * Load up our per-cpu offset. When the first (master) tile
* boots, this value is still zero, so we will load boot_pc * boots, this value is still zero, so we will load boot_pc
* with start_kernel, and boot_sp with init_stack + THREAD_SIZE. * with start_kernel, and boot_sp at the top of init_stack.
* The master tile initializes the per-cpu offset array, so that * The master tile initializes the per-cpu offset array, so that
* when subsequent (secondary) tiles boot, they will instead load * when subsequent (secondary) tiles boot, they will instead load
* from their per-cpu versions of boot_sp and boot_pc. * from their per-cpu versions of boot_sp and boot_pc.
...@@ -126,7 +126,6 @@ ENTRY(_start) ...@@ -126,7 +126,6 @@ ENTRY(_start)
lw sp, r1 lw sp, r1
or r4, sp, r4 or r4, sp, r4
mtspr SPR_SYSTEM_SAVE_K_0, r4 /* save ksp0 + cpu */ mtspr SPR_SYSTEM_SAVE_K_0, r4 /* save ksp0 + cpu */
addi sp, sp, -STACK_TOP_DELTA
{ {
move lr, zero /* stop backtraces in the called function */ move lr, zero /* stop backtraces in the called function */
jr r0 jr r0
...@@ -163,8 +162,8 @@ ENTRY(swapper_pg_dir) ...@@ -163,8 +162,8 @@ ENTRY(swapper_pg_dir)
.set addr, addr + PGDIR_SIZE .set addr, addr + PGDIR_SIZE
.endr .endr
/* The true text VAs are mapped as VA = PA + MEM_SV_INTRPT */ /* The true text VAs are mapped as VA = PA + MEM_SV_START */
PTE MEM_SV_INTRPT, 0, (1 << (HV_PTE_INDEX_READABLE - 32)) | \ PTE MEM_SV_START, 0, (1 << (HV_PTE_INDEX_READABLE - 32)) | \
(1 << (HV_PTE_INDEX_EXECUTABLE - 32)) (1 << (HV_PTE_INDEX_EXECUTABLE - 32))
.org swapper_pg_dir + PGDIR_SIZE .org swapper_pg_dir + PGDIR_SIZE
END(swapper_pg_dir) END(swapper_pg_dir)
......
...@@ -25,6 +25,15 @@ ...@@ -25,6 +25,15 @@
#include <arch/chip.h> #include <arch/chip.h>
#include <arch/spr_def.h> #include <arch/spr_def.h>
/* Extract two 32-bit bit values that were read into one register. */
#ifdef __BIG_ENDIAN__
#define GET_FIRST_INT(rd, rs) shrsi rd, rs, 32
#define GET_SECOND_INT(rd, rs) addxi rd, rs, 0
#else
#define GET_FIRST_INT(rd, rs) addxi rd, rs, 0
#define GET_SECOND_INT(rd, rs) shrsi rd, rs, 32
#endif
/* /*
* This module contains the entry code for kernel images. It performs the * This module contains the entry code for kernel images. It performs the
* minimal setup needed to call the generic C routines. * minimal setup needed to call the generic C routines.
...@@ -46,11 +55,11 @@ ENTRY(_start) ...@@ -46,11 +55,11 @@ ENTRY(_start)
movei r2, TILE_CHIP_REV movei r2, TILE_CHIP_REV
movei r3, KERNEL_PL movei r3, KERNEL_PL
} }
jal hv_init jal _hv_init
/* Get a reasonable default ASID in r0 */ /* Get a reasonable default ASID in r0 */
{ {
move r0, zero move r0, zero
jal hv_inquire_asid jal _hv_inquire_asid
} }
/* /*
...@@ -61,7 +70,7 @@ ENTRY(_start) ...@@ -61,7 +70,7 @@ ENTRY(_start)
* other CPUs should see a properly-constructed page table. * other CPUs should see a properly-constructed page table.
*/ */
{ {
v4int_l r2, zero, r0 /* ASID for hv_install_context */ GET_FIRST_INT(r2, r0) /* ASID for hv_install_context */
moveli r4, hw1_last(swapper_pgprot - PAGE_OFFSET) moveli r4, hw1_last(swapper_pgprot - PAGE_OFFSET)
} }
{ {
...@@ -77,7 +86,7 @@ ENTRY(_start) ...@@ -77,7 +86,7 @@ ENTRY(_start)
{ {
/* After initializing swapper_pgprot, HV_PTE_GLOBAL is set. */ /* After initializing swapper_pgprot, HV_PTE_GLOBAL is set. */
bfextu r7, r1, HV_PTE_INDEX_GLOBAL, HV_PTE_INDEX_GLOBAL bfextu r7, r1, HV_PTE_INDEX_GLOBAL, HV_PTE_INDEX_GLOBAL
inv r4 finv r4
} }
bnez r7, .Lno_write bnez r7, .Lno_write
{ {
...@@ -121,29 +130,24 @@ ENTRY(_start) ...@@ -121,29 +130,24 @@ ENTRY(_start)
} }
{ {
moveli r3, CTX_PAGE_FLAG moveli r3, CTX_PAGE_FLAG
j hv_install_context j _hv_install_context
} }
1: 1:
/* Install the interrupt base. */ /* Install the interrupt base. */
moveli r0, hw2_last(MEM_SV_START) moveli r0, hw2_last(intrpt_start)
shl16insli r0, r0, hw1(MEM_SV_START) shl16insli r0, r0, hw1(intrpt_start)
shl16insli r0, r0, hw0(MEM_SV_START) shl16insli r0, r0, hw0(intrpt_start)
mtspr SPR_INTERRUPT_VECTOR_BASE_K, r0 mtspr SPR_INTERRUPT_VECTOR_BASE_K, r0
/* /* Get our processor number and save it away in SAVE_K_0. */
* Get our processor number and save it away in SAVE_K_0. jal _hv_inquire_topology
* Extract stuff from the topology structure: r4 = y, r6 = x,
* r5 = width. FIXME: consider whether we want to just make these
* 64-bit values (and if so fix smp_topology write below, too).
*/
jal hv_inquire_topology
{ {
v4int_l r5, zero, r1 /* r5 = width */ GET_FIRST_INT(r5, r1) /* r5 = width */
shrui r4, r0, 32 /* r4 = y */ GET_SECOND_INT(r4, r0) /* r4 = y */
} }
{ {
v4int_l r6, zero, r0 /* r6 = x */ GET_FIRST_INT(r6, r0) /* r6 = x */
mul_lu_lu r4, r4, r5 mul_lu_lu r4, r4, r5
} }
{ {
...@@ -154,7 +158,7 @@ ENTRY(_start) ...@@ -154,7 +158,7 @@ ENTRY(_start)
/* /*
* Load up our per-cpu offset. When the first (master) tile * Load up our per-cpu offset. When the first (master) tile
* boots, this value is still zero, so we will load boot_pc * boots, this value is still zero, so we will load boot_pc
* with start_kernel, and boot_sp with init_stack + THREAD_SIZE. * with start_kernel, and boot_sp with at the top of init_stack.
* The master tile initializes the per-cpu offset array, so that * The master tile initializes the per-cpu offset array, so that
* when subsequent (secondary) tiles boot, they will instead load * when subsequent (secondary) tiles boot, they will instead load
* from their per-cpu versions of boot_sp and boot_pc. * from their per-cpu versions of boot_sp and boot_pc.
...@@ -198,9 +202,9 @@ ENTRY(_start) ...@@ -198,9 +202,9 @@ ENTRY(_start)
} }
ld r0, r0 ld r0, r0
ld sp, r1 ld sp, r1
or r4, sp, r4 shli r4, r4, CPU_SHIFT
bfins r4, sp, 0, CPU_SHIFT-1
mtspr SPR_SYSTEM_SAVE_K_0, r4 /* save ksp0 + cpu */ mtspr SPR_SYSTEM_SAVE_K_0, r4 /* save ksp0 + cpu */
addi sp, sp, -STACK_TOP_DELTA
{ {
move lr, zero /* stop backtraces in the called function */ move lr, zero /* stop backtraces in the called function */
jr r0 jr r0
......
/* Hypervisor call vector addresses; see <hv/hypervisor.h> */
/*
 * gensym: define one hypervisor-call stub symbol.
 *   \sym  - public API name
 *   \val  - fixed offset of the stub within the .hvglue section
 *   \size - bytes reserved for the stub
 * Always emits an internal "_\sym" alias; unless hvglue call tracing
 * is configured, the public name is bound directly to it as well.
 */
.macro gensym sym, val, size
.org \val
.global _\sym
.type _\sym,function
_\sym:
.size _\sym,\size
#ifndef CONFIG_TILE_HVGLUE_TRACE
.globl \sym
.set \sym,_\sym
#endif
.endm
/*
 * Fixed-offset table of hypervisor call stubs.  The offsets form the
 * hypervisor ABI (see <hv/hypervisor.h>) and must not be reordered or
 * moved; note the deliberate gap between hv_set_pte_super_shift
 * (0x720) and hv_console_set_ipi (0x7e0).
 */
.section .hvglue,"x",@nobits
.align 8
gensym hv_init, 0x20, 32
gensym hv_install_context, 0x40, 32
gensym hv_sysconf, 0x60, 32
gensym hv_get_rtc, 0x80, 32
gensym hv_set_rtc, 0xa0, 32
gensym hv_flush_asid, 0xc0, 32
gensym hv_flush_page, 0xe0, 32
gensym hv_flush_pages, 0x100, 32
gensym hv_restart, 0x120, 32
gensym hv_halt, 0x140, 32
gensym hv_power_off, 0x160, 32
gensym hv_inquire_physical, 0x180, 32
gensym hv_inquire_memory_controller, 0x1a0, 32
gensym hv_inquire_virtual, 0x1c0, 32
gensym hv_inquire_asid, 0x1e0, 32
gensym hv_nanosleep, 0x200, 32
gensym hv_console_read_if_ready, 0x220, 32
gensym hv_console_write, 0x240, 32
gensym hv_downcall_dispatch, 0x260, 32
gensym hv_inquire_topology, 0x280, 32
gensym hv_fs_findfile, 0x2a0, 32
gensym hv_fs_fstat, 0x2c0, 32
gensym hv_fs_pread, 0x2e0, 32
gensym hv_physaddr_read64, 0x300, 32
gensym hv_physaddr_write64, 0x320, 32
gensym hv_get_command_line, 0x340, 32
gensym hv_set_caching, 0x360, 32
gensym hv_bzero_page, 0x380, 32
gensym hv_register_message_state, 0x3a0, 32
gensym hv_send_message, 0x3c0, 32
gensym hv_receive_message, 0x3e0, 32
gensym hv_inquire_context, 0x400, 32
gensym hv_start_all_tiles, 0x420, 32
gensym hv_dev_open, 0x440, 32
gensym hv_dev_close, 0x460, 32
gensym hv_dev_pread, 0x480, 32
gensym hv_dev_pwrite, 0x4a0, 32
gensym hv_dev_poll, 0x4c0, 32
gensym hv_dev_poll_cancel, 0x4e0, 32
gensym hv_dev_preada, 0x500, 32
gensym hv_dev_pwritea, 0x520, 32
gensym hv_flush_remote, 0x540, 32
gensym hv_console_putc, 0x560, 32
gensym hv_inquire_tiles, 0x580, 32
gensym hv_confstr, 0x5a0, 32
gensym hv_reexec, 0x5c0, 32
gensym hv_set_command_line, 0x5e0, 32
gensym hv_clear_intr, 0x600, 32
gensym hv_enable_intr, 0x620, 32
gensym hv_disable_intr, 0x640, 32
gensym hv_raise_intr, 0x660, 32
gensym hv_trigger_ipi, 0x680, 32
gensym hv_store_mapping, 0x6a0, 32
gensym hv_inquire_realpa, 0x6c0, 32
gensym hv_flush_all, 0x6e0, 32
gensym hv_get_ipi_pte, 0x700, 32
gensym hv_set_pte_super_shift, 0x720, 32
gensym hv_console_set_ipi, 0x7e0, 32
gensym hv_glue_internals, 0x800, 30720
/* Hypervisor call vector addresses; see <hv/hypervisor.h> */
/*
 * Each symbol is TEXT_OFFSET + 0x10000 + the call's fixed ABI offset,
 * binding the hypervisor glue entry points at link time.  The offsets
 * are part of the hypervisor ABI and must not be changed.
 */
hv_init = TEXT_OFFSET + 0x10020;
hv_install_context = TEXT_OFFSET + 0x10040;
hv_sysconf = TEXT_OFFSET + 0x10060;
hv_get_rtc = TEXT_OFFSET + 0x10080;
hv_set_rtc = TEXT_OFFSET + 0x100a0;
hv_flush_asid = TEXT_OFFSET + 0x100c0;
hv_flush_page = TEXT_OFFSET + 0x100e0;
hv_flush_pages = TEXT_OFFSET + 0x10100;
hv_restart = TEXT_OFFSET + 0x10120;
hv_halt = TEXT_OFFSET + 0x10140;
hv_power_off = TEXT_OFFSET + 0x10160;
hv_inquire_physical = TEXT_OFFSET + 0x10180;
hv_inquire_memory_controller = TEXT_OFFSET + 0x101a0;
hv_inquire_virtual = TEXT_OFFSET + 0x101c0;
hv_inquire_asid = TEXT_OFFSET + 0x101e0;
hv_nanosleep = TEXT_OFFSET + 0x10200;
hv_console_read_if_ready = TEXT_OFFSET + 0x10220;
hv_console_write = TEXT_OFFSET + 0x10240;
hv_downcall_dispatch = TEXT_OFFSET + 0x10260;
hv_inquire_topology = TEXT_OFFSET + 0x10280;
hv_fs_findfile = TEXT_OFFSET + 0x102a0;
hv_fs_fstat = TEXT_OFFSET + 0x102c0;
hv_fs_pread = TEXT_OFFSET + 0x102e0;
hv_physaddr_read64 = TEXT_OFFSET + 0x10300;
hv_physaddr_write64 = TEXT_OFFSET + 0x10320;
hv_get_command_line = TEXT_OFFSET + 0x10340;
hv_set_caching = TEXT_OFFSET + 0x10360;
hv_bzero_page = TEXT_OFFSET + 0x10380;
hv_register_message_state = TEXT_OFFSET + 0x103a0;
hv_send_message = TEXT_OFFSET + 0x103c0;
hv_receive_message = TEXT_OFFSET + 0x103e0;
hv_inquire_context = TEXT_OFFSET + 0x10400;
hv_start_all_tiles = TEXT_OFFSET + 0x10420;
hv_dev_open = TEXT_OFFSET + 0x10440;
hv_dev_close = TEXT_OFFSET + 0x10460;
hv_dev_pread = TEXT_OFFSET + 0x10480;
hv_dev_pwrite = TEXT_OFFSET + 0x104a0;
hv_dev_poll = TEXT_OFFSET + 0x104c0;
hv_dev_poll_cancel = TEXT_OFFSET + 0x104e0;
hv_dev_preada = TEXT_OFFSET + 0x10500;
hv_dev_pwritea = TEXT_OFFSET + 0x10520;
hv_flush_remote = TEXT_OFFSET + 0x10540;
hv_console_putc = TEXT_OFFSET + 0x10560;
hv_inquire_tiles = TEXT_OFFSET + 0x10580;
hv_confstr = TEXT_OFFSET + 0x105a0;
hv_reexec = TEXT_OFFSET + 0x105c0;
hv_set_command_line = TEXT_OFFSET + 0x105e0;
hv_clear_intr = TEXT_OFFSET + 0x10600;
hv_enable_intr = TEXT_OFFSET + 0x10620;
hv_disable_intr = TEXT_OFFSET + 0x10640;
hv_raise_intr = TEXT_OFFSET + 0x10660;
hv_trigger_ipi = TEXT_OFFSET + 0x10680;
hv_store_mapping = TEXT_OFFSET + 0x106a0;
hv_inquire_realpa = TEXT_OFFSET + 0x106c0;
hv_flush_all = TEXT_OFFSET + 0x106e0;
hv_get_ipi_pte = TEXT_OFFSET + 0x10700;
hv_set_pte_super_shift = TEXT_OFFSET + 0x10720;
hv_glue_internals = TEXT_OFFSET + 0x10740;
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
...@@ -55,7 +55,8 @@ static DEFINE_PER_CPU(int, irq_depth); ...@@ -55,7 +55,8 @@ static DEFINE_PER_CPU(int, irq_depth);
/* State for allocating IRQs on Gx. */ /* State for allocating IRQs on Gx. */
#if CHIP_HAS_IPI() #if CHIP_HAS_IPI()
static unsigned long available_irqs = ~(1UL << IRQ_RESCHEDULE); static unsigned long available_irqs = ((1UL << NR_IRQS) - 1) &
(~(1UL << IRQ_RESCHEDULE));
static DEFINE_SPINLOCK(available_irqs_lock); static DEFINE_SPINLOCK(available_irqs_lock);
#endif #endif
...@@ -73,7 +74,8 @@ static DEFINE_SPINLOCK(available_irqs_lock); ...@@ -73,7 +74,8 @@ static DEFINE_SPINLOCK(available_irqs_lock);
/* /*
* The interrupt handling path, implemented in terms of HV interrupt * The interrupt handling path, implemented in terms of HV interrupt
* emulation on TILE64 and TILEPro, and IPI hardware on TILE-Gx. * emulation on TILEPro, and IPI hardware on TILE-Gx.
* Entered with interrupts disabled.
*/ */
void tile_dev_intr(struct pt_regs *regs, int intnum) void tile_dev_intr(struct pt_regs *regs, int intnum)
{ {
...@@ -233,7 +235,7 @@ void tile_irq_activate(unsigned int irq, int tile_irq_type) ...@@ -233,7 +235,7 @@ void tile_irq_activate(unsigned int irq, int tile_irq_type)
{ {
/* /*
* We use handle_level_irq() by default because the pending * We use handle_level_irq() by default because the pending
* interrupt vector (whether modeled by the HV on TILE64 and * interrupt vector (whether modeled by the HV on
* TILEPro or implemented in hardware on TILE-Gx) has * TILEPro or implemented in hardware on TILE-Gx) has
* level-style semantics for each bit. An interrupt fires * level-style semantics for each bit. An interrupt fires
* whenever a bit is high, not just at edges. * whenever a bit is high, not just at edges.
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
...@@ -20,7 +20,6 @@ ...@@ -20,7 +20,6 @@
#include <linux/capability.h> #include <linux/capability.h>
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/bootmem.h>
#include <linux/irq.h> #include <linux/irq.h>
#include <linux/io.h> #include <linux/io.h>
#include <linux/uaccess.h> #include <linux/uaccess.h>
...@@ -52,6 +51,8 @@ ...@@ -52,6 +51,8 @@
* *
*/ */
static int pci_probe = 1;
/* /*
* This flag tells if the platform is TILEmpower that needs * This flag tells if the platform is TILEmpower that needs
* special configuration for the PLX switch chip. * special configuration for the PLX switch chip.
...@@ -144,6 +145,11 @@ int __init tile_pci_init(void) ...@@ -144,6 +145,11 @@ int __init tile_pci_init(void)
{ {
int i; int i;
if (!pci_probe) {
pr_info("PCI: disabled by boot argument\n");
return 0;
}
pr_info("PCI: Searching for controllers...\n"); pr_info("PCI: Searching for controllers...\n");
/* Re-init number of PCIe controllers to support hot-plug feature. */ /* Re-init number of PCIe controllers to support hot-plug feature. */
...@@ -192,7 +198,6 @@ int __init tile_pci_init(void) ...@@ -192,7 +198,6 @@ int __init tile_pci_init(void)
controller->hv_cfg_fd[0] = hv_cfg_fd0; controller->hv_cfg_fd[0] = hv_cfg_fd0;
controller->hv_cfg_fd[1] = hv_cfg_fd1; controller->hv_cfg_fd[1] = hv_cfg_fd1;
controller->hv_mem_fd = hv_mem_fd; controller->hv_mem_fd = hv_mem_fd;
controller->first_busno = 0;
controller->last_busno = 0xff; controller->last_busno = 0xff;
controller->ops = &tile_cfg_ops; controller->ops = &tile_cfg_ops;
...@@ -283,7 +288,7 @@ int __init pcibios_init(void) ...@@ -283,7 +288,7 @@ int __init pcibios_init(void)
* known to require at least 20ms here, but we use a more * known to require at least 20ms here, but we use a more
* conservative value. * conservative value.
*/ */
mdelay(250); msleep(250);
/* Scan all of the recorded PCI controllers. */ /* Scan all of the recorded PCI controllers. */
for (i = 0; i < TILE_NUM_PCIE; i++) { for (i = 0; i < TILE_NUM_PCIE; i++) {
...@@ -304,18 +309,10 @@ int __init pcibios_init(void) ...@@ -304,18 +309,10 @@ int __init pcibios_init(void)
pr_info("PCI: initializing controller #%d\n", i); pr_info("PCI: initializing controller #%d\n", i);
/*
* This comes from the generic Linux PCI driver.
*
* It reads the PCI tree for this bus into the Linux
* data structures.
*
* This is inlined in linux/pci.h and calls into
* pci_scan_bus_parented() in probe.c.
*/
pci_add_resource(&resources, &ioport_resource); pci_add_resource(&resources, &ioport_resource);
pci_add_resource(&resources, &iomem_resource); pci_add_resource(&resources, &iomem_resource);
bus = pci_scan_root_bus(NULL, 0, controller->ops, controller, &resources); bus = pci_scan_root_bus(NULL, 0, controller->ops,
controller, &resources);
controller->root_bus = bus; controller->root_bus = bus;
controller->last_busno = bus->busn_res.end; controller->last_busno = bus->busn_res.end;
} }
...@@ -388,6 +385,16 @@ void pcibios_set_master(struct pci_dev *dev) ...@@ -388,6 +385,16 @@ void pcibios_set_master(struct pci_dev *dev)
/* No special bus mastering setup handling. */ /* No special bus mastering setup handling. */
} }
/* Process any "pci=" kernel boot arguments. */
char *__init pcibios_setup(char *str)
{
if (!strcmp(str, "off")) {
pci_probe = 0;
return NULL;
}
return str;
}
/* /*
* Enable memory and/or address decoding, as appropriate, for the * Enable memory and/or address decoding, as appropriate, for the
* device described by the 'dev' struct. * device described by the 'dev' struct.
......
This diff is collapsed.
...@@ -113,7 +113,6 @@ arch_initcall(proc_tile_init); ...@@ -113,7 +113,6 @@ arch_initcall(proc_tile_init);
* Support /proc/sys/tile directory * Support /proc/sys/tile directory
*/ */
#ifndef __tilegx__ /* FIXME: GX: no support for unaligned access yet */
static ctl_table unaligned_subtable[] = { static ctl_table unaligned_subtable[] = {
{ {
.procname = "enabled", .procname = "enabled",
...@@ -160,4 +159,3 @@ static int __init proc_sys_tile_init(void) ...@@ -160,4 +159,3 @@ static int __init proc_sys_tile_init(void)
} }
arch_initcall(proc_sys_tile_init); arch_initcall(proc_sys_tile_init);
#endif
This diff is collapsed.
This diff is collapsed.
...@@ -27,7 +27,6 @@ ...@@ -27,7 +27,6 @@
void machine_halt(void) void machine_halt(void)
{ {
warn_early_printk();
arch_local_irq_disable_all(); arch_local_irq_disable_all();
smp_send_stop(); smp_send_stop();
hv_halt(); hv_halt();
...@@ -35,7 +34,6 @@ void machine_halt(void) ...@@ -35,7 +34,6 @@ void machine_halt(void)
void machine_power_off(void) void machine_power_off(void)
{ {
warn_early_printk();
arch_local_irq_disable_all(); arch_local_irq_disable_all();
smp_send_stop(); smp_send_stop();
hv_power_off(); hv_power_off();
......
...@@ -20,7 +20,7 @@ ...@@ -20,7 +20,7 @@
#include <asm/switch_to.h> #include <asm/switch_to.h>
/* /*
* See <asm/system.h>; called with prev and next task_struct pointers. * See <asm/switch_to.h>; called with prev and next task_struct pointers.
* "prev" is returned in r0 for _switch_to and also for ret_from_fork. * "prev" is returned in r0 for _switch_to and also for ret_from_fork.
* *
* We want to save pc/sp in "prev", and get the new pc/sp from "next". * We want to save pc/sp in "prev", and get the new pc/sp from "next".
...@@ -39,7 +39,7 @@ ...@@ -39,7 +39,7 @@
*/ */
#if CALLEE_SAVED_REGS_COUNT != 24 #if CALLEE_SAVED_REGS_COUNT != 24
# error Mismatch between <asm/system.h> and kernel/entry.S # error Mismatch between <asm/switch_to.h> and kernel/entry.S
#endif #endif
#define FRAME_SIZE ((2 + CALLEE_SAVED_REGS_COUNT) * 4) #define FRAME_SIZE ((2 + CALLEE_SAVED_REGS_COUNT) * 4)
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment