Commit 95b8b595 authored by Linus Torvalds

Merge tag 'loongarch-6.1' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson

Pull LoongArch updates from Huacai Chen:

 - Use EXPLICIT_RELOCS (ABIv2.0)

 - Use generic BUG() handler

 - Refactor TLB/Cache operations

 - Add qspinlock support

 - Add perf events support

 - Add kexec/kdump support

 - Add BPF JIT support

 - Add ACPI-based laptop driver

 - Update the default config file

* tag 'loongarch-6.1' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson: (25 commits)
  LoongArch: Update Loongson-3 default config file
  LoongArch: Add ACPI-based generic laptop driver
  LoongArch: Add BPF JIT support
  LoongArch: Add some instruction opcodes and formats
  LoongArch: Move {signed,unsigned}_imm_check() to inst.h
  LoongArch: Add kdump support
  LoongArch: Add kexec support
  LoongArch: Use generic BUG() handler
  LoongArch: Add SysRq-x (TLB Dump) support
  LoongArch: Add perf events support
  LoongArch: Add qspinlock support
  LoongArch: Use TLB for ioremap()
  LoongArch: Support access filter to /dev/mem interface
  LoongArch: Refactor cache probe and flush methods
  LoongArch: mm: Refactor TLB exception handlers
  LoongArch: Support R_LARCH_GOT_PC_{LO12,HI20} in modules
  LoongArch: Support PC-relative relocations in modules
  LoongArch: Define ELF relocation types added in ABIv2.0
  LoongArch: Adjust symbol addressing for AS_HAS_EXPLICIT_RELOCS
  LoongArch: Add Kconfig option AS_HAS_EXPLICIT_RELOCS
  ...
parents 60ac35bf 2c8577f5
obj-y += kernel/
obj-y += mm/
obj-y += net/
obj-y += vdso/

# for cleaning
......
...@@ -50,6 +50,7 @@ config LOONGARCH
	select ARCH_USE_BUILTIN_BSWAP
	select ARCH_USE_CMPXCHG_LOCKREF
	select ARCH_USE_QUEUED_RWLOCKS
	select ARCH_USE_QUEUED_SPINLOCKS
	select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
	select ARCH_WANT_LD_ORPHAN_WARN
	select ARCH_WANTS_NO_INSTR
...@@ -61,6 +62,7 @@ config LOONGARCH
	select GENERIC_CPU_AUTOPROBE
	select GENERIC_ENTRY
	select GENERIC_GETTIMEOFDAY
	select GENERIC_IOREMAP if !ARCH_IOREMAP
	select GENERIC_IRQ_MULTI_HANDLER
	select GENERIC_IRQ_PROBE
	select GENERIC_IRQ_SHOW
...@@ -69,6 +71,7 @@ config LOONGARCH
	select GENERIC_LIB_CMPDI2
	select GENERIC_LIB_LSHRDI3
	select GENERIC_LIB_UCMPDI2
	select GENERIC_LIB_DEVMEM_IS_ALLOWED
	select GENERIC_PCI_IOMAP
	select GENERIC_SCHED_CLOCK
	select GENERIC_SMP_IDLE_THREAD
...@@ -83,6 +86,7 @@ config LOONGARCH
	select HAVE_CONTEXT_TRACKING_USER
	select HAVE_DEBUG_STACKOVERFLOW
	select HAVE_DMA_CONTIGUOUS
	select HAVE_EBPF_JIT
	select HAVE_EXIT_THREAD
	select HAVE_FAST_GUP
	select HAVE_GENERIC_VDSO
...@@ -93,6 +97,8 @@ config LOONGARCH
	select HAVE_NMI
	select HAVE_PCI
	select HAVE_PERF_EVENTS
	select HAVE_PERF_REGS
	select HAVE_PERF_USER_STACK_DUMP
	select HAVE_REGS_AND_STACK_ACCESS_API
	select HAVE_RSEQ
	select HAVE_SETUP_PER_CPU_AREA if NUMA
...@@ -136,6 +142,14 @@ config CPU_HAS_PREFETCH
	bool
	default y

config GENERIC_BUG
	def_bool y
	depends on BUG

config GENERIC_BUG_RELATIVE_POINTERS
	def_bool y
	depends on GENERIC_BUG

config GENERIC_CALIBRATE_DELAY
	def_bool y
...@@ -157,7 +171,7 @@ config STACKTRACE_SUPPORT
	bool
	default y

# MACH_LOONGSON32 and MACH_LOONGSON64 are deliberately carried over from the
# MIPS Loongson code, to preserve Loongson-specific code paths in drivers that
# are shared between architectures, and specifically expecting the symbols.
config MACH_LOONGSON32
...@@ -166,6 +180,9 @@ config MACH_LOONGSON32
config MACH_LOONGSON64
	def_bool 64BIT

config FIX_EARLYCON_MEM
	def_bool y

config PAGE_SIZE_4KB
	bool
...@@ -194,6 +211,9 @@ config SCHED_OMIT_FRAME_POINTER
	bool
	default y

config AS_HAS_EXPLICIT_RELOCS
	def_bool $(as-instr,x:pcalau12i \$t0$(comma)%pc_hi20(x))

menu "Kernel type and options"

source "kernel/Kconfig.hz"
...@@ -399,6 +419,46 @@ config ARCH_FORCE_MAX_ORDER
	  The page size is not necessarily 4KB. Keep this in mind
	  when choosing a value for this option.
config ARCH_IOREMAP
bool "Enable LoongArch DMW-based ioremap()"
help
We use generic TLB-based ioremap() by default since it has page
protection support. However, you can enable LoongArch DMW-based
ioremap() for better performance.
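For context, a minimal sketch of what the DMW-based variant boils down to: it mirrors the ioremap_prot() shown in the asm/io.h hunk further down, where CACHE_BASE and UNCACHE_BASE are the direct-mapped windows, so no page tables are touched at all.

/* Hedged sketch: DMW-based ioremap() is just an address computation. */
static inline void __iomem *dmw_ioremap_sketch(phys_addr_t offset, bool cached)
{
    return (void __iomem *)(unsigned long)
        ((cached ? CACHE_BASE : UNCACHE_BASE) + offset);
}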
config KEXEC
bool "Kexec system call"
select KEXEC_CORE
help
	  kexec is a system call that implements the ability to shut down your
current kernel, and to start another kernel. It is like a reboot
but it is independent of the system firmware. And like a reboot
you can start any kernel with it, not just Linux.
The name comes from the similarity to the exec system call.
config CRASH_DUMP
bool "Build kdump crash kernel"
help
	  Generate a crash dump after being started by kexec. This should
	  normally only be set in special crash dump kernels which are
loaded in the main kernel with kexec-tools into a specially
reserved region and then later executed after a crash by
kdump/kexec.
For more details see Documentation/admin-guide/kdump/kdump.rst
config PHYSICAL_START
hex "Physical address where the kernel is loaded"
default "0x90000000a0000000"
depends on CRASH_DUMP
help
This gives the XKPRANGE address where the kernel is loaded.
	  If you plan to use the kernel to capture a crash dump, change
	  this value to the start of the reserved region (the "X" value as
	  specified in the "crashkernel=YM@XM" command line boot parameter
	  passed to the panicked kernel).
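A worked example of the relationship above (an assumed configuration, not taken from the patch): booting the production kernel with "crashkernel=512M@2560M" reserves 512 MiB at X = 2560 MiB = 0xa0000000, so the capture kernel is built with PHYSICAL_START = 0x9000000000000000 (the XKPRANGE cached window base) + 0xa0000000 = 0x90000000a0000000, which is exactly the default shown above.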
config SECCOMP
	bool "Enable seccomp to safely compute untrusted bytecode"
	depends on PROC_FS
......
...@@ -43,15 +43,37 @@ endif
cflags-y += -G0 -pipe -msoft-float
LDFLAGS_vmlinux += -G0 -static -n -nostdlib
# When the assembler supports explicit relocation hints, we must use them.
# GCC may have -mexplicit-relocs off by default if it was built with an old
# assembler, so we force it via an option.
#
# When the assembler does not support explicit relocation hints, we can't use
# them. Disable the option if the compiler supports it.
#
# If you've seen an "unknown reloc hint" message while building the kernel and
# are now wondering why "-mexplicit-relocs" is not wrapped with cc-option: the
# combination of a "new" assembler and "old" compiler is not supported. Either
# upgrade the compiler or downgrade the assembler.
ifdef CONFIG_AS_HAS_EXPLICIT_RELOCS
cflags-y += -mexplicit-relocs
KBUILD_CFLAGS_KERNEL += -mdirect-extern-access
else
cflags-y += $(call cc-option,-mno-explicit-relocs)
KBUILD_AFLAGS_KERNEL += -Wa,-mla-global-with-pcrel
KBUILD_CFLAGS_KERNEL += -Wa,-mla-global-with-pcrel
KBUILD_AFLAGS_MODULE += -Wa,-mla-global-with-abs
KBUILD_CFLAGS_MODULE += -fplt -Wa,-mla-global-with-abs,-mla-local-with-abs
endif
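To illustrate the comment above, here is a hypothetical C global access and, as an assumption rather than something quoted from this patch, the kind of assembly each mode produces:

extern int some_global;  /* hypothetical symbol, for illustration only */

int read_some_global(void)
{
    /*
     * Without -mexplicit-relocs, GCC emits the assembler macro
     * "la.global" and the assembler picks the final sequence.
     * With -mexplicit-relocs, GCC is assumed to emit the hints
     * itself, e.g.:
     *   pcalau12i $t0, %got_pc_hi20(some_global)
     *   ld.d      $t0, $t0, %got_pc_lo12(some_global)
     * A "new" assembler understands these hints; an "old" one
     * reports "unknown reloc hint", as the comment warns.
     */
    return some_global;
}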
cflags-y += -ffreestanding
cflags-y += $(call cc-option, -mno-check-zero-division)
ifndef CONFIG_PHYSICAL_START
load-y = 0x9000000000200000
else
load-y = $(CONFIG_PHYSICAL_START)
endif
bootvars-y = VMLINUX_LOAD_ADDRESS=$(load-y)

drivers-$(CONFIG_PCI) += arch/loongarch/pci/
......
...@@ -4,6 +4,7 @@ CONFIG_POSIX_MQUEUE=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BPF_SYSCALL=y
CONFIG_BPF_JIT=y
CONFIG_PREEMPT=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
...@@ -45,6 +46,7 @@ CONFIG_SMP=y
CONFIG_HOTPLUG_CPU=y
CONFIG_NR_CPUS=64
CONFIG_NUMA=y
CONFIG_KEXEC=y
CONFIG_PAGE_SIZE_16KB=y
CONFIG_HZ_250=y
CONFIG_ACPI=y
...@@ -55,6 +57,7 @@ CONFIG_ACPI_DOCK=y
CONFIG_ACPI_IPMI=m
CONFIG_ACPI_PCI_SLOT=y
CONFIG_ACPI_HOTPLUG_MEMORY=y
CONFIG_EFI_ZBOOT=y
CONFIG_EFI_GENERIC_STUB_INITRD_CMDLINE_LOADER=y
CONFIG_EFI_CAPSULE_LOADER=m
CONFIG_EFI_TEST=m
...@@ -65,6 +68,8 @@ CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_BLK_DEV_THROTTLING=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_BSD_DISKLABEL=y
CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_IOSCHED_BFQ=y
CONFIG_BFQ_GROUP_IOSCHED=y
CONFIG_BINFMT_MISC=m
...@@ -82,8 +87,11 @@ CONFIG_ZSMALLOC=m
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_TLS=m
CONFIG_TLS_DEVICE=y
CONFIG_XFRM_USER=y
CONFIG_NET_KEY=y
CONFIG_XDP_SOCKETS=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
...@@ -95,6 +103,7 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
CONFIG_NET_IPIP=m
CONFIG_NET_IPGRE_DEMUX=m
CONFIG_IP_MROUTE=y
CONFIG_INET_ESP=m
CONFIG_INET_UDP_DIAG=y
...@@ -102,6 +111,7 @@ CONFIG_TCP_CONG_ADVANCED=y
CONFIG_TCP_CONG_BBR=m
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
CONFIG_INET6_ESP=m
CONFIG_IPV6_MROUTE=y
CONFIG_NETWORK_PHY_TIMESTAMPING=y
CONFIG_NETFILTER=y
...@@ -112,10 +122,11 @@ CONFIG_NF_LOG_NETDEV=m
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_NETBIOS_NS=m
CONFIG_NF_CONNTRACK_SNMP=m
CONFIG_NF_CONNTRACK_PPTP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_CT_NETLINK=m
CONFIG_NF_TABLES=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_CONNLIMIT=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
...@@ -200,7 +211,6 @@ CONFIG_NF_TABLES_IPV4=y
CONFIG_NFT_DUP_IPV4=m
CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_LOG_ARP=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
...@@ -254,10 +264,14 @@ CONFIG_BPFILTER=y
CONFIG_IP_SCTP=m
CONFIG_RDS=y
CONFIG_L2TP=m
CONFIG_L2TP_V3=y
CONFIG_L2TP_IP=m
CONFIG_L2TP_ETH=m
CONFIG_BRIDGE=m
CONFIG_VLAN_8021Q=m
CONFIG_VLAN_8021Q_GVRP=y
CONFIG_VLAN_8021Q_MVRP=y
CONFIG_LLC2=m
CONFIG_NET_SCHED=y
CONFIG_NET_SCH_HTB=m
CONFIG_NET_SCH_PRIO=m
...@@ -282,9 +296,33 @@ CONFIG_VSOCKETS=m
CONFIG_VIRTIO_VSOCKETS=m
CONFIG_NETLINK_DIAG=y
CONFIG_CGROUP_NET_PRIO=y
CONFIG_BPF_STREAM_PARSER=y
CONFIG_BT=m
CONFIG_BT_RFCOMM=m
CONFIG_BT_RFCOMM_TTY=y
CONFIG_BT_BNEP=m
CONFIG_BT_BNEP_MC_FILTER=y
CONFIG_BT_BNEP_PROTO_FILTER=y
CONFIG_BT_HIDP=m
CONFIG_BT_HS=y
CONFIG_BT_HCIBTUSB=m
# CONFIG_BT_HCIBTUSB_BCM is not set
CONFIG_BT_HCIBTUSB_AUTOSUSPEND=y
CONFIG_BT_HCIBTUSB_MTK=y
CONFIG_BT_HCIUART=m
CONFIG_BT_HCIUART_BCSP=y
CONFIG_BT_HCIUART_ATH3K=y
CONFIG_BT_HCIUART_INTEL=y
CONFIG_BT_HCIUART_AG6XX=y
CONFIG_BT_HCIBCM203X=m
CONFIG_BT_HCIBPA10X=m
CONFIG_BT_HCIBFUSB=m
CONFIG_BT_HCIDTL1=m
CONFIG_BT_HCIBT3C=m
CONFIG_BT_HCIBLUECARD=m
CONFIG_BT_HCIVHCI=m
CONFIG_BT_MRVL=m
CONFIG_BT_ATH3K=m
CONFIG_BT_VIRTIO=m
CONFIG_CFG80211=m
CONFIG_CFG80211_WEXT=y
CONFIG_MAC80211=m
...@@ -329,7 +367,6 @@ CONFIG_PARPORT_PC_FIFO=y
CONFIG_ZRAM=m
CONFIG_ZRAM_DEF_COMP_ZSTD=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_CRYPTOLOOP=y
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
...@@ -486,6 +523,7 @@ CONFIG_PPP_FILTER=y
CONFIG_PPP_MPPE=m
CONFIG_PPP_MULTILINK=y
CONFIG_PPPOE=m
CONFIG_PPTP=m
CONFIG_PPPOL2TP=m
CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m
...@@ -505,7 +543,6 @@ CONFIG_ATH9K_HTC=m
CONFIG_IWLWIFI=m
CONFIG_IWLDVM=m
CONFIG_IWLMVM=m
CONFIG_IWLWIFI_BCAST_FILTERING=y
CONFIG_HOSTAP=m
CONFIG_MT7601U=m
CONFIG_RT2X00=m
...@@ -521,6 +558,14 @@ CONFIG_RTL8821AE=m
CONFIG_RTL8192CU=m
# CONFIG_RTLWIFI_DEBUG is not set
CONFIG_RTL8XXXU=m
CONFIG_RTW88=m
CONFIG_RTW88_8822BE=m
CONFIG_RTW88_8822CE=m
CONFIG_RTW88_8723DE=m
CONFIG_RTW88_8821CE=m
CONFIG_RTW89=m
CONFIG_RTW89_8852AE=m
CONFIG_RTW89_8852CE=m
CONFIG_ZD1211RW=m
CONFIG_USB_NET_RNDIS_WLAN=m
CONFIG_INPUT_MOUSEDEV=y
...@@ -651,6 +696,11 @@ CONFIG_USB_SERIAL_FTDI_SIO=m
CONFIG_USB_SERIAL_PL2303=m
CONFIG_USB_SERIAL_OPTION=m
CONFIG_USB_GADGET=y
CONFIG_TYPEC=m
CONFIG_TYPEC_TCPM=m
CONFIG_TYPEC_TCPCI=m
CONFIG_TYPEC_UCSI=m
CONFIG_UCSI_ACPI=m
CONFIG_INFINIBAND=m
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_EFI=y
...@@ -688,7 +738,6 @@ CONFIG_COMEDI_NI_PCIDIO=m
CONFIG_COMEDI_NI_PCIMIO=m
CONFIG_STAGING=y
CONFIG_R8188EU=m
# CONFIG_88EU_AP_MODE is not set
CONFIG_PM_DEVFREQ=y
CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
CONFIG_DEVFREQ_GOV_PERFORMANCE=y
...@@ -772,14 +821,12 @@ CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
......
# SPDX-License-Identifier: GPL-2.0
generic-y += dma-contiguous.h
generic-y += export.h
generic-y += mcs_spinlock.h
generic-y += parport.h
generic-y += early_ioremap.h
generic-y += qrwlock.h
generic-y += qrwlock_types.h
generic-y += qspinlock.h
generic-y += spinlock.h
generic-y += spinlock_types.h
generic-y += rwsem.h
generic-y += segment.h
generic-y += user.h
......
...@@ -40,4 +40,9 @@ extern unsigned long fw_arg0, fw_arg1, fw_arg2;
extern struct loongson_board_info b_info;
extern struct loongson_system_configuration loongson_sysconf;

static inline bool io_master(int cpu)
{
	return test_bit(cpu, &loongson_sysconf.cores_io_master);
}

#endif /* _ASM_BOOTINFO_H */
...@@ -2,21 +2,59 @@
#ifndef __ASM_BUG_H
#define __ASM_BUG_H

#include <asm/break.h>
#include <linux/stringify.h>

#ifndef CONFIG_DEBUG_BUGVERBOSE
#define _BUGVERBOSE_LOCATION(file, line)
#else
#define __BUGVERBOSE_LOCATION(file, line) \
		.pushsection .rodata.str, "aMS", @progbits, 1; \
	10002:	.string file; \
		.popsection; \
\
		.long 10002b - .; \
		.short line;
#define _BUGVERBOSE_LOCATION(file, line) __BUGVERBOSE_LOCATION(file, line)
#endif

#ifndef CONFIG_GENERIC_BUG
#define __BUG_ENTRY(flags)
#else
#define __BUG_ENTRY(flags) \
		.pushsection __bug_table, "aw"; \
		.align 2; \
	10000:	.long 10001f - .; \
		_BUGVERBOSE_LOCATION(__FILE__, __LINE__) \
		.short flags; \
		.popsection; \
	10001:
#endif

#define ASM_BUG_FLAGS(flags) \
	__BUG_ENTRY(flags) \
	break		BRK_BUG

#define ASM_BUG()	ASM_BUG_FLAGS(0)

#define __BUG_FLAGS(flags) \
	asm_inline volatile (__stringify(ASM_BUG_FLAGS(flags)));

#define __WARN_FLAGS(flags) \
do { \
	instrumentation_begin(); \
	__BUG_FLAGS(BUGFLAG_WARNING|(flags)); \
	instrumentation_end(); \
} while (0)

#define BUG() \
do { \
	instrumentation_begin(); \
	__BUG_FLAGS(0); \
	unreachable(); \
} while (0)

#define HAVE_ARCH_BUG

#include <asm-generic/bug.h>
......
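A minimal usage sketch (hypothetical caller, not part of the patch): BUG() now expands to a break instruction plus a __bug_table entry, instead of the old out-of-line inline function, so the generic BUG handler can decode file/line from the table.

static void validate_leaf_sketch(int leaf)
{
    if (leaf >= CACHE_LEAVES_MAX)
        BUG();              /* emits "break BRK_BUG" + a __bug_table entry */

    WARN_ON(leaf < 0);      /* routed through __WARN_FLAGS(BUGFLAG_WARNING) */
}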
...@@ -6,10 +6,33 @@
#define _ASM_CACHEFLUSH_H

#include <linux/mm.h>
#include <asm/cpu-info.h>
#include <asm/cacheops.h>

static inline bool cache_present(struct cache_desc *cdesc)
{
	return cdesc->flags & CACHE_PRESENT;
}

static inline bool cache_private(struct cache_desc *cdesc)
{
	return cdesc->flags & CACHE_PRIVATE;
}

static inline bool cache_inclusive(struct cache_desc *cdesc)
{
	return cdesc->flags & CACHE_INCLUSIVE;
}

static inline unsigned int cpu_last_level_cache_line_size(void)
{
	int cache_present = boot_cpu_data.cache_leaves_present;

	return boot_cpu_data.cache_leaves[cache_present - 1].linesz;
}

asmlinkage void __flush_cache_all(void);
void local_flush_icache_range(unsigned long start, unsigned long end);

#define flush_icache_range local_flush_icache_range
#define flush_icache_user_range local_flush_icache_range
...@@ -35,44 +58,30 @@ extern void local_flush_icache_range(unsigned long start, unsigned long end);
	: \
	: "i" (op), "ZC" (*(unsigned char *)(addr)))

static inline void flush_cache_line(int leaf, unsigned long addr)
{
	switch (leaf) {
	case Cache_LEAF0:
		cache_op(Index_Writeback_Inv_LEAF0, addr);
		break;
	case Cache_LEAF1:
		cache_op(Index_Writeback_Inv_LEAF1, addr);
		break;
	case Cache_LEAF2:
		cache_op(Index_Writeback_Inv_LEAF2, addr);
		break;
	case Cache_LEAF3:
		cache_op(Index_Writeback_Inv_LEAF3, addr);
		break;
	case Cache_LEAF4:
		cache_op(Index_Writeback_Inv_LEAF4, addr);
		break;
	case Cache_LEAF5:
		cache_op(Index_Writeback_Inv_LEAF5, addr);
		break;
	default:
		break;
	}
}

#include <asm-generic/cacheflush.h>
......
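As a hedged sketch (the indexing scheme is an assumption, this is not the patch's own __flush_cache_all()), an index-type writeback-invalidate walks every line of a leaf; for index ops the address only supplies set/way selector bits, so any mapped base such as CACHE_BASE works:

static void flush_leaf_sketch(int leaf)
{
    struct cache_desc *cdesc = &boot_cpu_data.cache_leaves[leaf];
    unsigned long addr = CACHE_BASE;    /* any mapped base works for index ops */
    unsigned long end  = addr + cdesc->sets * cdesc->ways * cdesc->linesz;

    for (; addr < end; addr += cdesc->linesz)
        flush_cache_line(leaf, addr);
}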
...@@ -8,16 +8,18 @@
#define __ASM_CACHEOPS_H

/*
 * Most cache ops are split into a 3 bit field identifying the cache, and a 2
 * bit field identifying the cache operation.
 */
#define CacheOp_Cache			0x07
#define CacheOp_Op			0x18

#define Cache_LEAF0			0x00
#define Cache_LEAF1			0x01
#define Cache_LEAF2			0x02
#define Cache_LEAF3			0x03
#define Cache_LEAF4			0x04
#define Cache_LEAF5			0x05

#define Index_Invalidate		0x08
#define Index_Writeback_Inv		0x08
...@@ -25,13 +27,17 @@
#define Hit_Writeback_Inv		0x10
#define CacheOp_User_Defined		0x18

#define Index_Writeback_Inv_LEAF0	(Cache_LEAF0 | Index_Writeback_Inv)
#define Index_Writeback_Inv_LEAF1	(Cache_LEAF1 | Index_Writeback_Inv)
#define Index_Writeback_Inv_LEAF2	(Cache_LEAF2 | Index_Writeback_Inv)
#define Index_Writeback_Inv_LEAF3	(Cache_LEAF3 | Index_Writeback_Inv)
#define Index_Writeback_Inv_LEAF4	(Cache_LEAF4 | Index_Writeback_Inv)
#define Index_Writeback_Inv_LEAF5	(Cache_LEAF5 | Index_Writeback_Inv)
#define Hit_Writeback_Inv_LEAF0		(Cache_LEAF0 | Hit_Writeback_Inv)
#define Hit_Writeback_Inv_LEAF1		(Cache_LEAF1 | Hit_Writeback_Inv)
#define Hit_Writeback_Inv_LEAF2		(Cache_LEAF2 | Hit_Writeback_Inv)
#define Hit_Writeback_Inv_LEAF3		(Cache_LEAF3 | Hit_Writeback_Inv)
#define Hit_Writeback_Inv_LEAF4		(Cache_LEAF4 | Hit_Writeback_Inv)
#define Hit_Writeback_Inv_LEAF5		(Cache_LEAF5 | Hit_Writeback_Inv)

#endif /* __ASM_CACHEOPS_H */
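A worked example of the new encoding (leaf index in bits [2:0], op kind in bits [4:3]):

static unsigned int cacheop_fields_sketch(void)
{
    unsigned int op   = Index_Writeback_Inv_LEAF1;  /* 0x01 | 0x08 = 0x09 */
    unsigned int leaf = op & CacheOp_Cache;         /* 0x09 & 0x07 = 1    */
    unsigned int kind = op & CacheOp_Op;            /* 0x09 & 0x18 = 0x08 */

    return leaf | kind;    /* recombines to the original op value, 0x09 */
}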
...@@ -61,8 +61,8 @@ static inline unsigned int __xchg_small(volatile void *ptr, unsigned int val,
	return (old32 & mask) >> shift;
}

static __always_inline unsigned long
__xchg(volatile void *ptr, unsigned long x, int size)
{
	switch (size) {
	case 1:
...@@ -159,8 +159,8 @@ static inline unsigned int __cmpxchg_small(volatile void *ptr, unsigned int old,
	return (old32 & mask) >> shift;
}

static __always_inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, unsigned int size)
{
	switch (size) {
	case 1:
......
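The switch to __always_inline matters because the size argument must stay a compile-time constant for the switch to fold away; a hypothetical caller:

static int try_claim_sketch(unsigned int *slot)
{
    /*
     * sizeof(*slot) is constant, so once __cmpxchg() is really inlined
     * only the 4-byte case survives; with plain "inline" the compiler
     * may emit the function (and the whole switch) out of line.
     */
    return __cmpxchg(slot, 0, 1, sizeof(*slot)) == 0;
}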
...@@ -19,11 +19,6 @@
#define cpu_has_loongarch32 (cpu_data[0].isa_level & LOONGARCH_CPU_ISA_32BIT)
#define cpu_has_loongarch64 (cpu_data[0].isa_level & LOONGARCH_CPU_ISA_64BIT)

#define cpu_icache_line_size() cpu_data[0].icache.linesz
#define cpu_dcache_line_size() cpu_data[0].dcache.linesz
#define cpu_vcache_line_size() cpu_data[0].vcache.linesz
#define cpu_scache_line_size() cpu_data[0].scache.linesz

#ifdef CONFIG_32BIT
# define cpu_has_64bits (cpu_data[0].isa_level & LOONGARCH_CPU_ISA_64BIT)
# define cpu_vabits 31
......
...@@ -10,18 +10,28 @@
#include <asm/loongarch.h>

/* cache_desc->flags */
enum {
	CACHE_PRESENT	= (1 << 0),
	CACHE_PRIVATE	= (1 << 1),	/* core private cache */
	CACHE_INCLUSIVE	= (1 << 2),	/* include the inner level caches */
};

/*
 * Descriptor for a cache
 */
struct cache_desc {
	unsigned char type;
	unsigned char level;
	unsigned short sets;	/* Number of lines per set */
	unsigned char ways;	/* Number of ways */
	unsigned char linesz;	/* Size of line in bytes */
	unsigned char flags;	/* Flags describing cache properties */
};

#define CACHE_LEVEL_MAX		3
#define CACHE_LEAVES_MAX	6

struct cpuinfo_loongarch {
	u64 asid_cache;
	unsigned long asid_mask;
...@@ -40,11 +50,8 @@ struct cpuinfo_loongarch {
	int tlbsizemtlb;
	int tlbsizestlbsets;
	int tlbsizestlbways;
	int cache_leaves_present;	/* number of cache_leaves[] elements */
	struct cache_desc cache_leaves[CACHE_LEAVES_MAX];
	int core;   /* physical core number in package */
	int package;/* physical package number */
	int vabits; /* Virtual Address size in bits */
......
...@@ -74,6 +74,43 @@
#define R_LARCH_SUB64 56
#define R_LARCH_GNU_VTINHERIT 57
#define R_LARCH_GNU_VTENTRY 58
#define R_LARCH_B16 64
#define R_LARCH_B21 65
#define R_LARCH_B26 66
#define R_LARCH_ABS_HI20 67
#define R_LARCH_ABS_LO12 68
#define R_LARCH_ABS64_LO20 69
#define R_LARCH_ABS64_HI12 70
#define R_LARCH_PCALA_HI20 71
#define R_LARCH_PCALA_LO12 72
#define R_LARCH_PCALA64_LO20 73
#define R_LARCH_PCALA64_HI12 74
#define R_LARCH_GOT_PC_HI20 75
#define R_LARCH_GOT_PC_LO12 76
#define R_LARCH_GOT64_PC_LO20 77
#define R_LARCH_GOT64_PC_HI12 78
#define R_LARCH_GOT_HI20 79
#define R_LARCH_GOT_LO12 80
#define R_LARCH_GOT64_LO20 81
#define R_LARCH_GOT64_HI12 82
#define R_LARCH_TLS_LE_HI20 83
#define R_LARCH_TLS_LE_LO12 84
#define R_LARCH_TLS_LE64_LO20 85
#define R_LARCH_TLS_LE64_HI12 86
#define R_LARCH_TLS_IE_PC_HI20 87
#define R_LARCH_TLS_IE_PC_LO12 88
#define R_LARCH_TLS_IE64_PC_LO20 89
#define R_LARCH_TLS_IE64_PC_HI12 90
#define R_LARCH_TLS_IE_HI20 91
#define R_LARCH_TLS_IE_LO12 92
#define R_LARCH_TLS_IE64_LO20 93
#define R_LARCH_TLS_IE64_HI12 94
#define R_LARCH_TLS_LD_PC_HI20 95
#define R_LARCH_TLS_LD_HI20 96
#define R_LARCH_TLS_GD_PC_HI20 97
#define R_LARCH_TLS_GD_HI20 98
#define R_LARCH_32_PCREL 99
#define R_LARCH_RELAX 100
#ifndef ELF_ARCH
......
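As a hedged sketch of how a module loader consumes one of the simpler new types (the standard S + A - PC semantics are assumed for R_LARCH_32_PCREL, not quoted from this patch):

static void apply_r_larch_32_pcrel_sketch(u32 *location, unsigned long v)
{
    /* v is the relocated symbol value, addend already folded in */
    *location = (u32)(v - (unsigned long)location);
}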
...@@ -10,4 +10,19 @@
#define NR_FIX_BTMAPS 64
enum fixed_addresses {
FIX_HOLE,
FIX_EARLYCON_MEM_BASE,
__end_of_fixed_addresses
};
#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
#define FIXMAP_PAGE_IO PAGE_KERNEL_SUC
extern void __set_fixmap(enum fixed_addresses idx,
phys_addr_t phys, pgprot_t flags);
#include <asm-generic/fixmap.h>
#endif
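With the asm-generic fixmap layout pulled in above, a slot index turns into a fixed virtual address; a hedged sketch of how early console MMIO could be mapped through the new FIX_EARLYCON_MEM_BASE slot:

static unsigned long map_earlycon_sketch(phys_addr_t phys)
{
    __set_fixmap(FIX_EARLYCON_MEM_BASE, phys & PAGE_MASK, FIXMAP_PAGE_IO);

    /* fix_to_virt() comes from asm-generic/fixmap.h */
    return fix_to_virt(FIX_EARLYCON_MEM_BASE) + (phys & ~PAGE_MASK);
}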
......
...@@ -27,71 +27,38 @@ extern void __init early_iounmap(void __iomem *addr, unsigned long size);
#define early_memremap early_ioremap
#define early_memunmap early_iounmap

#ifdef CONFIG_ARCH_IOREMAP

static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
					 unsigned long prot_val)
{
	if (prot_val & _CACHE_CC)
		return (void __iomem *)(unsigned long)(CACHE_BASE + offset);
	else
		return (void __iomem *)(unsigned long)(UNCACHE_BASE + offset);
}

#define ioremap(offset, size) \
	ioremap_prot((offset), (size), pgprot_val(PAGE_KERNEL_SUC))

#define iounmap(addr) ((void)(addr))

#endif

/*
 * On LoongArch, ioremap() has two variants, ioremap_wc() and ioremap_cache().
 * They map bus memory into CPU space, the mapped memory is marked uncachable
 * (_CACHE_SUC), uncachable but accelerated by write-combine (_CACHE_WUC) and
 * cachable (_CACHE_CC) respectively for CPU access.
 *
 * @offset: bus address of the memory
 * @size:   size of the resource to map
 */
#define ioremap_wc(offset, size) \
	ioremap_prot((offset), (size), pgprot_val(PAGE_KERNEL_WUC))

#define ioremap_cache(offset, size) \
	ioremap_prot((offset), (size), pgprot_val(PAGE_KERNEL))

#define mmiowb() asm volatile ("dbar 0" ::: "memory")
...@@ -107,4 +74,8 @@ extern void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t
#include <asm-generic/io.h>

#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);

#endif /* _ASM_IO_H */
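Driver-side usage is unchanged by the DMW/TLB split; a minimal sketch with a hypothetical device base address:

static int mmio_probe_sketch(phys_addr_t dev_base)
{
    void __iomem *regs = ioremap(dev_base, 0x1000);

    if (!regs)
        return -ENOMEM;

    (void)readl(regs);  /* strongly-ordered uncached (SUC) access */
    iounmap(regs);      /* a no-op in the DMW (CONFIG_ARCH_IOREMAP) case */
    return 0;
}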
/* SPDX-License-Identifier: GPL-2.0 */
/*
* kexec.h for kexec
*
* Copyright (C) 2022 Loongson Technology Corporation Limited
*/
#ifndef _ASM_KEXEC_H
#define _ASM_KEXEC_H
#include <asm/stacktrace.h>
#include <asm/page.h>
/* Maximum physical address we can use pages from */
#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
/* Maximum address we can reach in physical address mode */
#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
/* Maximum address we can use for the control code buffer */
#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL)
/* Reserve a page for the control code buffer */
#define KEXEC_CONTROL_PAGE_SIZE PAGE_SIZE
/* The native architecture */
#define KEXEC_ARCH KEXEC_ARCH_LOONGARCH
static inline void crash_setup_regs(struct pt_regs *newregs,
struct pt_regs *oldregs)
{
if (oldregs)
memcpy(newregs, oldregs, sizeof(*newregs));
else
prepare_frametrace(newregs);
}
#define ARCH_HAS_KIMAGE_ARCH
struct kimage_arch {
unsigned long efi_boot;
unsigned long cmdline_ptr;
unsigned long systable_ptr;
};
typedef void (*do_kexec_t)(unsigned long efi_boot,
unsigned long cmdline_ptr,
unsigned long systable_ptr,
unsigned long start_addr,
unsigned long first_ind_entry);
struct kimage;
extern const unsigned char relocate_new_kernel[];
extern const size_t relocate_new_kernel_size;
extern void kexec_reboot(void);
#ifdef CONFIG_SMP
extern atomic_t kexec_ready_to_reboot;
extern const unsigned char kexec_smp_wait[];
#endif
#endif /* !_ASM_KEXEC_H */
...@@ -187,36 +187,15 @@ static inline u32 read_cpucfg(u32 reg)
#define  CPUCFG16_L3_DINCL		BIT(16)

#define LOONGARCH_CPUCFG17		0x11
#define LOONGARCH_CPUCFG18		0x12
#define LOONGARCH_CPUCFG19		0x13
#define LOONGARCH_CPUCFG20		0x14
#define  CPUCFG_CACHE_WAYS_M		GENMASK(15, 0)
#define  CPUCFG_CACHE_SETS_M		GENMASK(23, 16)
#define  CPUCFG_CACHE_LSIZE_M		GENMASK(30, 24)
#define  CPUCFG_CACHE_WAYS		0
#define  CPUCFG_CACHE_SETS		16
#define  CPUCFG_CACHE_LSIZE		24

#define LOONGARCH_CPUCFG48		0x30
#define  CPUCFG48_MCSR_LCK		BIT(0)
......
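A hedged sketch of how the unified CPUCFG_CACHE_* fields could be decoded during cache probing; the "+1" and power-of-two encodings are assumptions based on the CPUCFG documentation, not copied from this patch:

static void decode_cache_leaf_sketch(u32 cfg_reg, struct cache_desc *cdesc)
{
    u32 config = read_cpucfg(cfg_reg);  /* LOONGARCH_CPUCFG17..20 */

    cdesc->ways   = ((config & CPUCFG_CACHE_WAYS_M) >> CPUCFG_CACHE_WAYS) + 1;
    cdesc->sets   = 1 << ((config & CPUCFG_CACHE_SETS_M) >> CPUCFG_CACHE_SETS);
    cdesc->linesz = 1 << ((config & CPUCFG_CACHE_LSIZE_M) >> CPUCFG_CACHE_LSIZE);
}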
...@@ -17,10 +17,15 @@ struct mod_section {
};

struct mod_arch_specific {
	struct mod_section got;
	struct mod_section plt;
	struct mod_section plt_idx;
};

struct got_entry {
	Elf_Addr symbol_addr;
};

struct plt_entry {
	u32 inst_lu12iw;
	u32 inst_lu32id;
...@@ -29,10 +34,16 @@ struct plt_entry {
};

struct plt_idx_entry {
	Elf_Addr symbol_addr;
};

Elf_Addr module_emit_got_entry(struct module *mod, Elf_Addr val);
Elf_Addr module_emit_plt_entry(struct module *mod, Elf_Addr val);

static inline struct got_entry emit_got_entry(Elf_Addr val)
{
	return (struct got_entry) { val };
}

static inline struct plt_entry emit_plt_entry(unsigned long val)
{
...@@ -77,4 +88,16 @@ static inline struct plt_entry *get_plt_entry(unsigned long val,
	return plt + plt_idx;
}

static inline struct got_entry *get_got_entry(Elf_Addr val,
					      const struct mod_section *sec)
{
	struct got_entry *got = (struct got_entry *)sec->shdr->sh_addr;
	int i;

	for (i = 0; i < sec->num_entries; i++)
		if (got[i].symbol_addr == val)
			return &got[i];
	return NULL;
}

#endif /* _ASM_MODULE_H */
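A sketch of how these helpers fit together when the loader meets an R_LARCH_GOT_PC_{HI20,LO12} pair; the control flow is assumed, not quoted from the patch:

static Elf_Addr got_slot_sketch(struct module *mod, Elf_Addr sym)
{
    struct got_entry *g = get_got_entry(sym, &mod->arch.got);

    if (g)  /* reuse an existing slot for this symbol */
        return (Elf_Addr)g;

    /* otherwise append one; module_emit_got_entry() is assumed to store
       emit_got_entry(sym) and return the new slot's address */
    return module_emit_got_entry(mod, sym);
}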
...@@ -2,6 +2,7 @@
/* Copyright (C) 2020-2022 Loongson Technology Corporation Limited */
SECTIONS {
	. = ALIGN(4);
	.got : { BYTE(0) }
	.plt : { BYTE(0) }
	.plt.idx : { BYTE(0) }
}
...@@ -8,6 +8,15 @@
#include <asm/cmpxchg.h>
#include <asm/loongarch.h>
/*
* The "address" (in fact, offset from $r21) of a per-CPU variable is close to
* the loading address of main kernel image, but far from where the modules are
* loaded. Tell the compiler this fact when using explicit relocs.
*/
#if defined(MODULE) && defined(CONFIG_AS_HAS_EXPLICIT_RELOCS)
#define PER_CPU_ATTRIBUTES __attribute__((model("extreme")))
#endif
/* Use r21 for fast access */
register unsigned long __my_cpu_offset __asm__("$r21");
......
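For illustration, a hypothetical module-side per-CPU counter; with the "extreme" model the compiler can materialize the full 64-bit offset instead of assuming the variable is within reach of the kernel image:

static DEFINE_PER_CPU(unsigned long, sketch_hits);

static void count_hit_sketch(void)
{
    __this_cpu_inc(sketch_hits);  /* resolved relative to $r21 at runtime */
}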
...@@ -6,5 +6,7 @@
#ifndef __LOONGARCH_PERF_EVENT_H__
#define __LOONGARCH_PERF_EVENT_H__

/* Nothing to show here; the file is required by linux/perf_event.h. */
#define perf_arch_bpf_user_pt_regs(regs) (struct user_pt_regs *)regs

#endif /* __LOONGARCH_PERF_EVENT_H__ */
...@@ -83,8 +83,11 @@
				 _PAGE_GLOBAL | _PAGE_KERN | _CACHE_SUC)
#define PAGE_KERNEL_WUC __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _PAGE_KERN | _CACHE_WUC)

#ifndef __ASSEMBLY__

#define _PAGE_IOREMAP pgprot_val(PAGE_KERNEL_SUC)

#define pgprot_noncached pgprot_noncached

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
......
...@@ -13,7 +13,9 @@
extern unsigned long eentry;
extern unsigned long tlbrentry;
extern void tlb_init(int cpu);
extern void cpu_cache_init(void);
extern void cache_error_setup(void);
extern void per_cpu_trap_init(int cpu);
extern void set_handler(unsigned long offset, void *addr, unsigned long len);
extern void set_merr_handler(unsigned long offset, void *addr, unsigned long len);
......
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#ifndef _ASM_SPINLOCK_H
#define _ASM_SPINLOCK_H
#include <asm/processor.h>
#include <asm/qspinlock.h>
#include <asm/qrwlock.h>
#endif /* _ASM_SPINLOCK_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#ifndef _ASM_SPINLOCK_TYPES_H
#define _ASM_SPINLOCK_TYPES_H
#include <asm-generic/qspinlock_types.h>
#include <asm-generic/qrwlock_types.h>
#endif
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
#define _UAPI__ASM_BPF_PERF_EVENT_H__
#include <linux/ptrace.h>
typedef struct user_pt_regs bpf_user_pt_regs_t;
#endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _ASM_LOONGARCH_PERF_REGS_H
#define _ASM_LOONGARCH_PERF_REGS_H
enum perf_event_loongarch_regs {
PERF_REG_LOONGARCH_PC,
PERF_REG_LOONGARCH_R1,
PERF_REG_LOONGARCH_R2,
PERF_REG_LOONGARCH_R3,
PERF_REG_LOONGARCH_R4,
PERF_REG_LOONGARCH_R5,
PERF_REG_LOONGARCH_R6,
PERF_REG_LOONGARCH_R7,
PERF_REG_LOONGARCH_R8,
PERF_REG_LOONGARCH_R9,
PERF_REG_LOONGARCH_R10,
PERF_REG_LOONGARCH_R11,
PERF_REG_LOONGARCH_R12,
PERF_REG_LOONGARCH_R13,
PERF_REG_LOONGARCH_R14,
PERF_REG_LOONGARCH_R15,
PERF_REG_LOONGARCH_R16,
PERF_REG_LOONGARCH_R17,
PERF_REG_LOONGARCH_R18,
PERF_REG_LOONGARCH_R19,
PERF_REG_LOONGARCH_R20,
PERF_REG_LOONGARCH_R21,
PERF_REG_LOONGARCH_R22,
PERF_REG_LOONGARCH_R23,
PERF_REG_LOONGARCH_R24,
PERF_REG_LOONGARCH_R25,
PERF_REG_LOONGARCH_R26,
PERF_REG_LOONGARCH_R27,
PERF_REG_LOONGARCH_R28,
PERF_REG_LOONGARCH_R29,
PERF_REG_LOONGARCH_R30,
PERF_REG_LOONGARCH_R31,
PERF_REG_LOONGARCH_MAX,
};
#endif /* _ASM_LOONGARCH_PERF_REGS_H */
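A hedged sketch of the idx-to-register mapping this enum implies for perf_reg_value(); field names follow the LoongArch pt_regs layout, and the PC is assumed to come from CSR.ERA:

u64 perf_reg_value_sketch(struct pt_regs *regs, int idx)
{
    if (idx == PERF_REG_LOONGARCH_PC)
        return regs->csr_era;

    return regs->regs[idx];  /* PERF_REG_LOONGARCH_Rn maps 1:1 onto GPR n */
}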
...@@ -23,7 +23,14 @@ obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_NUMA) += numa.o

obj-$(CONFIG_MAGIC_SYSRQ) += sysrq.o

obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o

obj-$(CONFIG_UNWINDER_GUESS) += unwind_guess.o
obj-$(CONFIG_UNWINDER_PROLOGUE) += unwind_prologue.o

obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_regs.o

CPPFLAGS_vmlinux.lds := $(KBUILD_CFLAGS)
...@@ -5,73 +5,34 @@
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <linux/cacheinfo.h>
#include <linux/topology.h>
#include <asm/bootinfo.h>
#include <asm/cpu-info.h>

int init_cache_level(unsigned int cpu)
{
	int cache_present = current_cpu_data.cache_leaves_present;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);

	this_cpu_ci->num_levels =
		current_cpu_data.cache_leaves[cache_present - 1].level;
	this_cpu_ci->num_leaves = cache_present;

	return 0;
}

static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	return (!(*(unsigned char *)(this_leaf->priv) & CACHE_PRIVATE)
		&& !(*(unsigned char *)(sib_leaf->priv) & CACHE_PRIVATE));
}

static void cache_cpumap_setup(unsigned int cpu)
{
	unsigned int index;
	struct cacheinfo *this_leaf, *sib_leaf;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);

	for (index = 0; index < this_cpu_ci->num_leaves; index++) {
		unsigned int i;
...@@ -85,8 +46,10 @@ static void cache_cpumap_setup(unsigned int cpu)
		for_each_online_cpu(i) {
			struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

			if (i == cpu || !sib_cpu_ci->info_list ||
				(cpu_to_node(i) != cpu_to_node(cpu)))
				continue;

			sib_leaf = sib_cpu_ci->info_list + index;
			if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
				cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
...@@ -98,31 +61,24 @@ static void cache_cpumap_setup(unsigned int cpu)
int populate_cache_leaves(unsigned int cpu)
{
	int i, cache_present = current_cpu_data.cache_leaves_present;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf = this_cpu_ci->info_list;
	struct cache_desc *cd, *cdesc = current_cpu_data.cache_leaves;

	for (i = 0; i < cache_present; i++) {
		cd = cdesc + i;

		this_leaf->type = cd->type;
		this_leaf->level = cd->level;
		this_leaf->coherency_line_size = cd->linesz;
		this_leaf->number_of_sets = cd->sets;
		this_leaf->ways_of_associativity = cd->ways;
		this_leaf->size = cd->linesz * cd->sets * cd->ways;
		this_leaf->priv = &cd->flags;
		this_leaf++;
	}

	cache_cpumap_setup(cpu);
	this_cpu_ci->cpu_map_populated = true;
......
...@@ -187,7 +187,9 @@ static inline void cpu_probe_loongson(struct cpuinfo_loongarch *c, unsigned int
	uint64_t *vendor = (void *)(&cpu_full_name[VENDOR_OFFSET]);
	uint64_t *cpuname = (void *)(&cpu_full_name[CPUNAME_OFFSET]);

	if (!__cpu_full_name[cpu])
		__cpu_full_name[cpu] = cpu_full_name;

	*vendor = iocsr_read64(LOONGARCH_IOCSR_VENDOR);
	*cpuname = iocsr_read64(LOONGARCH_IOCSR_CPUNAME);
......
// SPDX-License-Identifier: GPL-2.0
#include <linux/crash_dump.h>
#include <linux/io.h>
#include <linux/uio.h>
ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
size_t csize, unsigned long offset)
{
void *vaddr;
if (!csize)
return 0;
vaddr = memremap(__pfn_to_phys(pfn), PAGE_SIZE, MEMREMAP_WB);
if (!vaddr)
return -ENOMEM;
csize = copy_to_iter(vaddr + offset, csize, iter);
memunmap(vaddr);
return csize;
}
...@@ -8,6 +8,7 @@
#include <asm/addrspace.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/bug.h>
#include <asm/regdef.h>
#include <asm/loongarch.h>
#include <asm/stackframe.h>
...@@ -20,7 +21,11 @@
_head:
	.word	MZ_MAGIC		/* "MZ", MS-DOS header */
	.org	0x8
	.dword	kernel_entry		/* Kernel entry point */
	.dword	_end - _text		/* Kernel image effective size */
	.quad	0			/* Kernel image load offset from start of RAM */
	.org	0x3c			/* 0x20 ~ 0x3b reserved */
	.long	pe_header - _head	/* Offset to the PE header */

pe_header:
...@@ -57,19 +62,19 @@ SYM_CODE_START(kernel_entry) # kernel entry point
	li.w	t0, 0x00		# FPE=0, SXE=0, ASXE=0, BTE=0
	csrwr	t0, LOONGARCH_CSR_EUEN

	la.pcrel t0, __bss_start	# clear .bss
	st.d	zero, t0, 0
	la.pcrel t1, __bss_stop - LONGSIZE
1:
	addi.d	t0, t0, LONGSIZE
	st.d	zero, t0, 0
	bne	t0, t1, 1b

	la.pcrel t0, fw_arg0
	st.d	a0, t0, 0		# firmware arguments
	la.pcrel t0, fw_arg1
	st.d	a1, t0, 0
	la.pcrel t0, fw_arg2
	st.d	a2, t0, 0

	/* KSave3 used for percpu base, initialized as 0 */
...@@ -77,7 +82,7 @@ SYM_CODE_START(kernel_entry) # kernel entry point
	/* GPR21 used for percpu base (runtime), initialized as 0 */
	move	u0, zero

	la.pcrel tp, init_thread_union
	/* Set the SP after an empty pt_regs. */
	PTR_LI	sp, (_THREAD_SIZE - 32 - PT_SIZE)
	PTR_ADD	sp, sp, tp
...@@ -85,6 +90,7 @@ SYM_CODE_START(kernel_entry) # kernel entry point
	PTR_ADDI sp, sp, -4 * SZREG	# init stack pointer
	bl	start_kernel
	ASM_BUG()

SYM_CODE_END(kernel_entry)
...@@ -116,6 +122,8 @@ SYM_CODE_START(smpboot_entry)
	ld.d	tp, t0, CPU_BOOT_TINFO

	bl	start_secondary
	ASM_BUG()

SYM_CODE_END(smpboot_entry)

#endif /* CONFIG_SMP */
......
// SPDX-License-Identifier: GPL-2.0-only
/*
* machine_kexec.c for kexec
*
* Copyright (C) 2022 Loongson Technology Corporation Limited
*/
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/libfdt.h>
#include <linux/mm.h>
#include <linux/of_fdt.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <asm/bootinfo.h>
#include <asm/cacheflush.h>
#include <asm/page.h>
/* 0x100000 ~ 0x200000 is safe */
#define KEXEC_CONTROL_CODE TO_CACHE(0x100000UL)
#define KEXEC_CMDLINE_ADDR TO_CACHE(0x108000UL)
static unsigned long reboot_code_buffer;
static cpumask_t cpus_in_crash = CPU_MASK_NONE;
#ifdef CONFIG_SMP
static void (*relocated_kexec_smp_wait)(void *);
atomic_t kexec_ready_to_reboot = ATOMIC_INIT(0);
#endif
static unsigned long efi_boot;
static unsigned long cmdline_ptr;
static unsigned long systable_ptr;
static unsigned long start_addr;
static unsigned long first_ind_entry;
static void kexec_image_info(const struct kimage *kimage)
{
unsigned long i;
pr_debug("kexec kimage info:\n");
pr_debug("\ttype: %d\n", kimage->type);
pr_debug("\tstart: %lx\n", kimage->start);
pr_debug("\thead: %lx\n", kimage->head);
pr_debug("\tnr_segments: %lu\n", kimage->nr_segments);
for (i = 0; i < kimage->nr_segments; i++) {
pr_debug("\t segment[%lu]: %016lx - %016lx", i,
kimage->segment[i].mem,
kimage->segment[i].mem + kimage->segment[i].memsz);
pr_debug("\t\t0x%lx bytes, %lu pages\n",
(unsigned long)kimage->segment[i].memsz,
(unsigned long)kimage->segment[i].memsz / PAGE_SIZE);
}
}
int machine_kexec_prepare(struct kimage *kimage)
{
int i;
char *bootloader = "kexec";
void *cmdline_ptr = (void *)KEXEC_CMDLINE_ADDR;
kexec_image_info(kimage);
kimage->arch.efi_boot = fw_arg0;
kimage->arch.systable_ptr = fw_arg2;
/* Find the command line */
for (i = 0; i < kimage->nr_segments; i++) {
if (!strncmp(bootloader, (char __user *)kimage->segment[i].buf, strlen(bootloader))) {
if (!copy_from_user(cmdline_ptr, kimage->segment[i].buf, COMMAND_LINE_SIZE))
kimage->arch.cmdline_ptr = (unsigned long)cmdline_ptr;
break;
}
}
if (!kimage->arch.cmdline_ptr) {
pr_err("Command line not included in the provided image\n");
return -EINVAL;
}
/* kexec/kdump need a safe page to save reboot_code_buffer */
kimage->control_code_page = virt_to_page((void *)KEXEC_CONTROL_CODE);
reboot_code_buffer = (unsigned long)page_address(kimage->control_code_page);
memcpy((void *)reboot_code_buffer, relocate_new_kernel, relocate_new_kernel_size);
#ifdef CONFIG_SMP
/* All secondary CPUs may now jump to the kexec_smp_wait loop */
relocated_kexec_smp_wait = reboot_code_buffer + (void *)(kexec_smp_wait - relocate_new_kernel);
#endif
return 0;
}
void machine_kexec_cleanup(struct kimage *kimage)
{
}
void kexec_reboot(void)
{
do_kexec_t do_kexec = NULL;
/*
* We know we were online, and there will be no incoming IPIs at
* this point. Mark online again before rebooting so that the crash
* analysis tool will see us correctly.
*/
set_cpu_online(smp_processor_id(), true);
/* Ensure remote CPUs observe that we're online before rebooting. */
smp_mb__after_atomic();
/*
* Make sure we get correct instructions written by the
* machine_kexec_prepare() CPU.
*/
__asm__ __volatile__ ("\tibar 0\n"::);
#ifdef CONFIG_SMP
/* All secondary cpus go to kexec_smp_wait */
if (smp_processor_id() > 0) {
relocated_kexec_smp_wait(NULL);
unreachable();
}
#endif
do_kexec = (void *)reboot_code_buffer;
do_kexec(efi_boot, cmdline_ptr, systable_ptr, start_addr, first_ind_entry);
unreachable();
}
#ifdef CONFIG_SMP
static void kexec_shutdown_secondary(void *regs)
{
int cpu = smp_processor_id();
if (!cpu_online(cpu))
return;
/* We won't be sent IPIs any more. */
set_cpu_online(cpu, false);
local_irq_disable();
while (!atomic_read(&kexec_ready_to_reboot))
cpu_relax();
kexec_reboot();
}
static void crash_shutdown_secondary(void *passed_regs)
{
int cpu = smp_processor_id();
struct pt_regs *regs = passed_regs;
/*
* If we are passed registers, use those. Otherwise get the
* regs from the last interrupt, which should be correct, as
* we are in an interrupt. But if the regs are not there,
* pull them from the top of the stack. They are probably
* wrong, but we need something to keep from crashing again.
*/
if (!regs)
regs = get_irq_regs();
if (!regs)
regs = task_pt_regs(current);
if (!cpu_online(cpu))
return;
/* We won't be sent IPIs any more. */
set_cpu_online(cpu, false);
local_irq_disable();
if (!cpumask_test_cpu(cpu, &cpus_in_crash))
crash_save_cpu(regs, cpu);
cpumask_set_cpu(cpu, &cpus_in_crash);
while (!atomic_read(&kexec_ready_to_reboot))
cpu_relax();
kexec_reboot();
}
void crash_smp_send_stop(void)
{
unsigned int ncpus;
unsigned long timeout;
static int cpus_stopped;
/*
* This function can be called twice in panic path, but obviously
* we should execute this only once.
*/
if (cpus_stopped)
return;
cpus_stopped = 1;
/* Excluding the panic cpu */
ncpus = num_online_cpus() - 1;
smp_call_function(crash_shutdown_secondary, NULL, 0);
smp_wmb();
/*
* The crash CPU sends an IPI and waits for the other CPUs to
* respond, allowing them at most 10 seconds to check in.
*/
timeout = MSEC_PER_SEC * 10;
pr_emerg("Sending IPI to other cpus...\n");
while ((cpumask_weight(&cpus_in_crash) < ncpus) && timeout--) {
mdelay(1);
cpu_relax();
}
}
#endif /* defined(CONFIG_SMP) */
void machine_shutdown(void)
{
int cpu;
/* All CPUs go to reboot_code_buffer */
for_each_possible_cpu(cpu)
if (!cpu_online(cpu))
cpu_device_up(get_cpu_device(cpu));
#ifdef CONFIG_SMP
smp_call_function(kexec_shutdown_secondary, NULL, 0);
#endif
}
void machine_crash_shutdown(struct pt_regs *regs)
{
int crashing_cpu;
local_irq_disable();
crashing_cpu = smp_processor_id();
crash_save_cpu(regs, crashing_cpu);
#ifdef CONFIG_SMP
crash_smp_send_stop();
#endif
cpumask_set_cpu(crashing_cpu, &cpus_in_crash);
pr_info("Starting crashdump kernel...\n");
}
void machine_kexec(struct kimage *image)
{
unsigned long entry, *ptr;
struct kimage_arch *internal = &image->arch;
efi_boot = internal->efi_boot;
cmdline_ptr = internal->cmdline_ptr;
systable_ptr = internal->systable_ptr;
start_addr = (unsigned long)phys_to_virt(image->start);
first_ind_entry = (image->type == KEXEC_TYPE_DEFAULT) ?
(unsigned long)phys_to_virt(image->head & PAGE_MASK) : 0;
/*
* The generic kexec code builds a page list with physical
* addresses. They are directly accessible through XKPRANGE,
* hence the phys_to_virt() call.
*/
for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
ptr = (entry & IND_INDIRECTION) ?
phys_to_virt(entry & PAGE_MASK) : ptr + 1) {
if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION ||
*ptr & IND_DESTINATION)
*ptr = (unsigned long) phys_to_virt(*ptr);
}
/* Mark offline before disabling local irq. */
set_cpu_online(smp_processor_id(), false);
/* We do not want to be bothered. */
local_irq_disable();
pr_notice("EFI boot flag 0x%lx\n", efi_boot);
pr_notice("Command line at 0x%lx\n", cmdline_ptr);
pr_notice("System table at 0x%lx\n", systable_ptr);
pr_notice("We will call new kernel at 0x%lx\n", start_addr);
pr_notice("Bye ...\n");
/* Make reboot code buffer available to the boot CPU. */
flush_cache_all();
#ifdef CONFIG_SMP
atomic_set(&kexec_ready_to_reboot, 1);
#endif
kexec_reboot();
}
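For reference, the IND_* flags consumed by the loop above are the standard kexec page-list tags from linux/kexec.h, carried in the low bits of each page-aligned entry. A minimal userspace sketch of that encoding (the 4 KiB page size and the sample addresses are assumptions for illustration):

#include <stdio.h>
#include <stdint.h>

#define EX_PAGE_MASK	(~0xfffUL)	/* assumes 4 KiB pages */
#define IND_DESTINATION	0x1UL
#define IND_INDIRECTION	0x2UL
#define IND_DONE	0x4UL
#define IND_SOURCE	0x8UL

static const char *ind_name(unsigned long entry)
{
	if (entry & IND_DESTINATION)
		return "destination";
	if (entry & IND_INDIRECTION)
		return "indirection";
	if (entry & IND_DONE)
		return "done";
	if (entry & IND_SOURCE)
		return "source";
	return "unknown";
}

int main(void)
{
	/* Sample physical addresses, tagged the way kimage->head is. */
	unsigned long entries[] = {
		0x90001000UL | IND_DESTINATION,
		0x90002000UL | IND_SOURCE,
		IND_DONE,
	};

	for (unsigned int i = 0; i < sizeof(entries) / sizeof(entries[0]); i++)
		printf("%#010lx -> %-11s page %#lx\n", entries[i],
		       ind_name(entries[i]), entries[i] & EX_PAGE_MASK);
	return 0;
}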
...@@ -58,7 +58,4 @@ void __init memblock_init(void) ...@@ -58,7 +58,4 @@ void __init memblock_init(void)
/* Reserve the kernel text/data/bss */ /* Reserve the kernel text/data/bss */
memblock_reserve(__pa_symbol(&_text), memblock_reserve(__pa_symbol(&_text),
__pa_symbol(&_end) - __pa_symbol(&_text)); __pa_symbol(&_end) - __pa_symbol(&_text));
/* Reserve the initrd */
reserve_initrd_mem();
} }
...@@ -7,7 +7,33 @@ ...@@ -7,7 +7,33 @@
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/module.h> #include <linux/module.h>
Elf_Addr module_emit_plt_entry(struct module *mod, unsigned long val) Elf_Addr module_emit_got_entry(struct module *mod, Elf_Addr val)
{
struct mod_section *got_sec = &mod->arch.got;
int i = got_sec->num_entries;
struct got_entry *got = get_got_entry(val, got_sec);
if (got)
return (Elf_Addr)got;
/* There is no GOT entry for val yet, create a new one. */
got = (struct got_entry *)got_sec->shdr->sh_addr;
got[i] = emit_got_entry(val);
got_sec->num_entries++;
if (got_sec->num_entries > got_sec->max_entries) {
/*
* This may happen when the module contains a GOT_HI20 without
* a paired GOT_LO12. Such a module is broken; reject it.
*/
pr_err("%s: module contains bad GOT relocation\n", mod->name);
return 0;
}
return (Elf_Addr)&got[i];
}
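module_emit_got_entry() leans on get_got_entry()/emit_got_entry() helpers from asm/module.h that this diff does not show. A self-contained sketch of the same bookkeeping, with a hypothetical fixed-size table standing in for the .got section:

#include <stdint.h>
#include <stdio.h>

#define EX_MAX_GOT 8			/* stand-in for got_sec->max_entries */

struct got_entry { uint64_t symbol_addr; };

static struct got_entry got[EX_MAX_GOT];
static int num_entries;

/* Reuse an existing slot for val if there is one, else append. */
static struct got_entry *emit_got(uint64_t val)
{
	for (int i = 0; i < num_entries; i++)
		if (got[i].symbol_addr == val)
			return &got[i];		/* what get_got_entry() finds */
	if (num_entries >= EX_MAX_GOT)
		return NULL;			/* unpaired GOT_HI20: reject  */
	got[num_entries].symbol_addr = val;
	return &got[num_entries++];
}

int main(void)
{
	struct got_entry *a = emit_got(0x120003000ULL);
	struct got_entry *b = emit_got(0x120003000ULL);

	printf("deduplicated: %d, entries: %d\n", a == b, num_entries);
	return 0;
}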
Elf_Addr module_emit_plt_entry(struct module *mod, Elf_Addr val)
{ {
int nr; int nr;
struct mod_section *plt_sec = &mod->arch.plt; struct mod_section *plt_sec = &mod->arch.plt;
...@@ -50,15 +76,25 @@ static bool duplicate_rela(const Elf_Rela *rela, int idx) ...@@ -50,15 +76,25 @@ static bool duplicate_rela(const Elf_Rela *rela, int idx)
return false; return false;
} }
static void count_max_entries(Elf_Rela *relas, int num, unsigned int *plts) static void count_max_entries(Elf_Rela *relas, int num,
unsigned int *plts, unsigned int *gots)
{ {
unsigned int i, type; unsigned int i, type;
for (i = 0; i < num; i++) { for (i = 0; i < num; i++) {
type = ELF_R_TYPE(relas[i].r_info); type = ELF_R_TYPE(relas[i].r_info);
if (type == R_LARCH_SOP_PUSH_PLT_PCREL) { switch (type) {
case R_LARCH_SOP_PUSH_PLT_PCREL:
case R_LARCH_B26:
if (!duplicate_rela(relas, i)) if (!duplicate_rela(relas, i))
(*plts)++; (*plts)++;
break;
case R_LARCH_GOT_PC_HI20:
if (!duplicate_rela(relas, i))
(*gots)++;
break;
default:
break; /* Do nothing. */
} }
} }
} }
...@@ -66,18 +102,24 @@ static void count_max_entries(Elf_Rela *relas, int num, unsigned int *plts) ...@@ -66,18 +102,24 @@ static void count_max_entries(Elf_Rela *relas, int num, unsigned int *plts)
int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
char *secstrings, struct module *mod) char *secstrings, struct module *mod)
{ {
unsigned int i, num_plts = 0; unsigned int i, num_plts = 0, num_gots = 0;
/* /*
* Find the empty .plt sections. * Find the empty .plt sections.
*/ */
for (i = 0; i < ehdr->e_shnum; i++) { for (i = 0; i < ehdr->e_shnum; i++) {
if (!strcmp(secstrings + sechdrs[i].sh_name, ".plt")) if (!strcmp(secstrings + sechdrs[i].sh_name, ".got"))
mod->arch.got.shdr = sechdrs + i;
else if (!strcmp(secstrings + sechdrs[i].sh_name, ".plt"))
mod->arch.plt.shdr = sechdrs + i; mod->arch.plt.shdr = sechdrs + i;
else if (!strcmp(secstrings + sechdrs[i].sh_name, ".plt.idx")) else if (!strcmp(secstrings + sechdrs[i].sh_name, ".plt.idx"))
mod->arch.plt_idx.shdr = sechdrs + i; mod->arch.plt_idx.shdr = sechdrs + i;
} }
if (!mod->arch.got.shdr) {
pr_err("%s: module GOT section(s) missing\n", mod->name);
return -ENOEXEC;
}
if (!mod->arch.plt.shdr) { if (!mod->arch.plt.shdr) {
pr_err("%s: module PLT section(s) missing\n", mod->name); pr_err("%s: module PLT section(s) missing\n", mod->name);
return -ENOEXEC; return -ENOEXEC;
...@@ -100,9 +142,16 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, ...@@ -100,9 +142,16 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
if (!(dst_sec->sh_flags & SHF_EXECINSTR)) if (!(dst_sec->sh_flags & SHF_EXECINSTR))
continue; continue;
count_max_entries(relas, num_rela, &num_plts); count_max_entries(relas, num_rela, &num_plts, &num_gots);
} }
mod->arch.got.shdr->sh_type = SHT_NOBITS;
mod->arch.got.shdr->sh_flags = SHF_ALLOC;
mod->arch.got.shdr->sh_addralign = L1_CACHE_BYTES;
mod->arch.got.shdr->sh_size = (num_gots + 1) * sizeof(struct got_entry);
mod->arch.got.num_entries = 0;
mod->arch.got.max_entries = num_gots;
mod->arch.plt.shdr->sh_type = SHT_NOBITS; mod->arch.plt.shdr->sh_type = SHT_NOBITS;
mod->arch.plt.shdr->sh_flags = SHF_EXECINSTR | SHF_ALLOC; mod->arch.plt.shdr->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
mod->arch.plt.shdr->sh_addralign = L1_CACHE_BYTES; mod->arch.plt.shdr->sh_addralign = L1_CACHE_BYTES;
......
...@@ -18,16 +18,6 @@ ...@@ -18,16 +18,6 @@
#include <linux/string.h> #include <linux/string.h>
#include <linux/kernel.h> #include <linux/kernel.h>
static inline bool signed_imm_check(long val, unsigned int bit)
{
return -(1L << (bit - 1)) <= val && val < (1L << (bit - 1));
}
static inline bool unsigned_imm_check(unsigned long val, unsigned int bit)
{
return val < (1UL << bit);
}
static int rela_stack_push(s64 stack_value, s64 *rela_stack, size_t *rela_stack_top) static int rela_stack_push(s64 stack_value, s64 *rela_stack, size_t *rela_stack_top)
{ {
if (*rela_stack_top >= RELA_STACK_DEPTH) if (*rela_stack_top >= RELA_STACK_DEPTH)
...@@ -281,6 +271,96 @@ static int apply_r_larch_add_sub(struct module *mod, u32 *location, Elf_Addr v, ...@@ -281,6 +271,96 @@ static int apply_r_larch_add_sub(struct module *mod, u32 *location, Elf_Addr v,
} }
} }
static int apply_r_larch_b26(struct module *mod, u32 *location, Elf_Addr v,
s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
{
ptrdiff_t offset = (void *)v - (void *)location;
union loongarch_instruction *insn = (union loongarch_instruction *)location;
if (offset >= SZ_128M)
v = module_emit_plt_entry(mod, v);
if (offset < -SZ_128M)
v = module_emit_plt_entry(mod, v);
offset = (void *)v - (void *)location;
if (offset & 3) {
pr_err("module %s: jump offset = 0x%llx unaligned! dangerous R_LARCH_B26 (%u) relocation\n",
mod->name, (long long)offset, type);
return -ENOEXEC;
}
if (!signed_imm_check(offset, 28)) {
pr_err("module %s: jump offset = 0x%llx overflow! dangerous R_LARCH_B26 (%u) relocation\n",
mod->name, (long long)offset, type);
return -ENOEXEC;
}
offset >>= 2;
insn->reg0i26_format.immediate_l = offset & 0xffff;
insn->reg0i26_format.immediate_h = (offset >> 16) & 0x3ff;
return 0;
}
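The field split above stores a 26-bit word offset as a 16-bit low half plus a 10-bit high half. A standalone worked example of the encode/decode arithmetic (the sample offset is chosen arbitrarily):

#include <stdio.h>
#include <stdint.h>

static int signed_imm_check(long val, unsigned int bit)
{
	return -(1L << (bit - 1)) <= val && val < (1L << (bit - 1));
}

int main(void)
{
	long offset = -0x123458;		/* byte offset to the target */
	uint32_t imm_l, imm_h;
	long words, decoded;

	if ((offset & 3) || !signed_imm_check(offset, 28))
		return 1;			/* unaligned or out of range */

	words = offset >> 2;			/* instructions are 4 bytes */
	imm_l = words & 0xffff;			/* immediate_l: low 16 bits */
	imm_h = (words >> 16) & 0x3ff;		/* immediate_h: high 10 bits */

	/* Decode: reassemble 26 bits, sign-extend, scale back to bytes. */
	decoded = (((long)imm_h << 16 | imm_l) << 38 >> 38) << 2;
	printf("imm_h=%#x imm_l=%#x round-trip ok: %d\n",
	       imm_h, imm_l, decoded == offset);
	return 0;
}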
static int apply_r_larch_pcala(struct module *mod, u32 *location, Elf_Addr v,
s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
{
union loongarch_instruction *insn = (union loongarch_instruction *)location;
/* Use s32 so the high-part offset is deliberately sign-extended. */
s32 offset_hi20 = (void *)((v + 0x800) & ~0xfff) -
(void *)((Elf_Addr)location & ~0xfff);
Elf_Addr anchor = (((Elf_Addr)location) & ~0xfff) + offset_hi20;
ptrdiff_t offset_rem = (void *)v - (void *)anchor;
switch (type) {
case R_LARCH_PCALA_LO12:
insn->reg2i12_format.immediate = v & 0xfff;
break;
case R_LARCH_PCALA_HI20:
v = offset_hi20 >> 12;
insn->reg1i20_format.immediate = v & 0xfffff;
break;
case R_LARCH_PCALA64_LO20:
v = offset_rem >> 32;
insn->reg1i20_format.immediate = v & 0xfffff;
break;
case R_LARCH_PCALA64_HI12:
v = offset_rem >> 52;
insn->reg2i12_format.immediate = v & 0xfff;
break;
default:
pr_err("%s: Unsupport relocation type %u\n", mod->name, type);
return -EINVAL;
}
return 0;
}
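The `+ 0x800` rounding above is what lets a `pcalau12i`/`addi.d` pair reconstruct the target exactly even when the low 12 bits sign-extend to a negative value. A quick standalone check of that arithmetic (the sample pc and v values are made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t pc = 0x9000000002f0a42cULL;	/* example instruction address */
	uint64_t v  = 0x9000000002f0b801ULL;	/* example relocation target   */

	/* HI20: distance between the 4 KiB-aligned anchors, rounded. */
	int64_t hi20 = (int64_t)(((v + 0x800) & ~0xfffULL) - (pc & ~0xfffULL)) >> 12;
	/* LO12: low 12 bits of v, sign-extended as addi.d sees them. */
	int64_t lo12 = (int64_t)(v << 52) >> 52;

	uint64_t rebuilt = (pc & ~0xfffULL) + ((uint64_t)hi20 << 12) + (uint64_t)lo12;
	printf("hi20=%lld lo12=%lld round-trip ok: %d\n",
	       (long long)hi20, (long long)lo12, rebuilt == v);
	return 0;
}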
static int apply_r_larch_got_pc(struct module *mod, u32 *location, Elf_Addr v,
s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
{
Elf_Addr got = module_emit_got_entry(mod, v);
if (!got)
return -EINVAL;
switch (type) {
case R_LARCH_GOT_PC_LO12:
type = R_LARCH_PCALA_LO12;
break;
case R_LARCH_GOT_PC_HI20:
type = R_LARCH_PCALA_HI20;
break;
default:
pr_err("%s: Unsupport relocation type %u\n", mod->name, type);
return -EINVAL;
}
return apply_r_larch_pcala(mod, location, got, rela_stack, rela_stack_top, type);
}
/* /*
* reloc_handlers_rela() - Apply a particular relocation to a module * reloc_handlers_rela() - Apply a particular relocation to a module
* @mod: the module to apply the reloc to * @mod: the module to apply the reloc to
...@@ -296,7 +376,7 @@ typedef int (*reloc_rela_handler)(struct module *mod, u32 *location, Elf_Addr v, ...@@ -296,7 +376,7 @@ typedef int (*reloc_rela_handler)(struct module *mod, u32 *location, Elf_Addr v,
/* The handlers for known reloc types */ /* The handlers for known reloc types */
static reloc_rela_handler reloc_rela_handlers[] = { static reloc_rela_handler reloc_rela_handlers[] = {
[R_LARCH_NONE ... R_LARCH_SUB64] = apply_r_larch_error, [R_LARCH_NONE ... R_LARCH_RELAX] = apply_r_larch_error,
[R_LARCH_NONE] = apply_r_larch_none, [R_LARCH_NONE] = apply_r_larch_none,
[R_LARCH_32] = apply_r_larch_32, [R_LARCH_32] = apply_r_larch_32,
...@@ -310,6 +390,9 @@ static reloc_rela_handler reloc_rela_handlers[] = { ...@@ -310,6 +390,9 @@ static reloc_rela_handler reloc_rela_handlers[] = {
[R_LARCH_SOP_SUB ... R_LARCH_SOP_IF_ELSE] = apply_r_larch_sop, [R_LARCH_SOP_SUB ... R_LARCH_SOP_IF_ELSE] = apply_r_larch_sop,
[R_LARCH_SOP_POP_32_S_10_5 ... R_LARCH_SOP_POP_32_U] = apply_r_larch_sop_imm_field, [R_LARCH_SOP_POP_32_S_10_5 ... R_LARCH_SOP_POP_32_U] = apply_r_larch_sop_imm_field,
[R_LARCH_ADD32 ... R_LARCH_SUB64] = apply_r_larch_add_sub, [R_LARCH_ADD32 ... R_LARCH_SUB64] = apply_r_larch_add_sub,
[R_LARCH_B26] = apply_r_larch_b26,
[R_LARCH_PCALA_HI20...R_LARCH_PCALA64_HI12] = apply_r_larch_pcala,
[R_LARCH_GOT_PC_HI20...R_LARCH_GOT_PC_LO12] = apply_r_larch_got_pc,
}; };
int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
......
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2022 Loongson Technology Corporation Limited
*
* Derived from MIPS:
* Copyright (C) 2013 Cavium, Inc.
*/
#include <linux/perf_event.h>
#include <asm/ptrace.h>
#ifdef CONFIG_32BIT
u64 perf_reg_abi(struct task_struct *tsk)
{
return PERF_SAMPLE_REGS_ABI_32;
}
#else /* Must be CONFIG_64BIT */
u64 perf_reg_abi(struct task_struct *tsk)
{
if (test_tsk_thread_flag(tsk, TIF_32BIT_REGS))
return PERF_SAMPLE_REGS_ABI_32;
else
return PERF_SAMPLE_REGS_ABI_64;
}
#endif /* CONFIG_32BIT */
int perf_reg_validate(u64 mask)
{
if (!mask)
return -EINVAL;
if (mask & ~((1ull << PERF_REG_LOONGARCH_MAX) - 1))
return -EINVAL;
return 0;
}
u64 perf_reg_value(struct pt_regs *regs, int idx)
{
if (WARN_ON_ONCE((u32)idx >= PERF_REG_LOONGARCH_MAX))
return 0;
if ((u32)idx == PERF_REG_LOONGARCH_PC)
return regs->csr_era;
return regs->regs[idx];
}
void perf_get_regs_user(struct perf_regs *regs_user,
struct pt_regs *regs)
{
regs_user->regs = task_pt_regs(current);
regs_user->abi = perf_reg_abi(current);
}
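perf_reg_validate() is plain mask arithmetic: each bit selects one sampled register, and any bit at or above PERF_REG_LOONGARCH_MAX is rejected. A tiny standalone sketch of the same check (the register count of 32 is an assumed example value, not taken from the uapi header):

#include <stdio.h>

#define EX_PERF_REG_MAX 32	/* assumed: one slot per sampled register */

static int validate(unsigned long long mask)
{
	if (!mask)
		return -1;	/* empty selection */
	if (mask & ~((1ULL << EX_PERF_REG_MAX) - 1))
		return -1;	/* selects a register that does not exist */
	return 0;
}

int main(void)
{
	printf("%d\n", validate(1ULL << 0));	/* one valid register: 0 */
	printf("%d\n", validate(1ULL << 40));	/* out of range: -1 */
	printf("%d\n", validate(0));		/* empty: -1 */
	return 0;
}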
/* SPDX-License-Identifier: GPL-2.0 */
/*
* relocate_kernel.S for kexec
*
* Copyright (C) 2022 Loongson Technology Corporation Limited
*/
#include <linux/kexec.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/regdef.h>
#include <asm/loongarch.h>
#include <asm/stackframe.h>
#include <asm/addrspace.h>
SYM_CODE_START(relocate_new_kernel)
/*
* a0: EFI boot flag for the new kernel
* a1: Command line pointer for the new kernel
* a2: System table pointer for the new kernel
* a3: Start address to jump to after relocation
* a4: Pointer to the current indirection page entry
*/
move s0, a4
/*
* In case of a kdump/crash kernel, the indirection page is not
* populated as the kernel is directly copied to a reserved location
*/
beqz s0, done
process_entry:
PTR_L s1, s0, 0
PTR_ADDI s0, s0, SZREG
/* destination page */
andi s2, s1, IND_DESTINATION
beqz s2, 1f
li.w t0, ~0x1
and s3, s1, t0 /* store destination addr in s3 */
b process_entry
1:
/* indirection page, update s0 */
andi s2, s1, IND_INDIRECTION
beqz s2, 1f
li.w t0, ~0x2
and s0, s1, t0
b process_entry
1:
/* done page */
andi s2, s1, IND_DONE
beqz s2, 1f
b done
1:
/* source page */
andi s2, s1, IND_SOURCE
beqz s2, process_entry
li.w t0, ~0x8
and s1, s1, t0
li.w s5, (1 << _PAGE_SHIFT) / SZREG
copy_word:
/* copy page word by word */
REG_L s4, s1, 0
REG_S s4, s3, 0
PTR_ADDI s3, s3, SZREG
PTR_ADDI s1, s1, SZREG
LONG_ADDI s5, s5, -1
beqz s5, process_entry
b copy_word
b process_entry
done:
ibar 0
dbar 0
/*
* Jump to the new kernel,
* make sure the values of a0, a1, a2 and a3 are not changed.
*/
jr a3
SYM_CODE_END(relocate_new_kernel)
#ifdef CONFIG_SMP
/*
* Other CPUs should wait until code is relocated and
* then start at the entry point from LOONGARCH_IOCSR_MBUF0.
*/
SYM_CODE_START(kexec_smp_wait)
1: li.w t0, 0x100 /* wait for init loop */
2: addi.w t0, t0, -1 /* limit mailbox access */
bnez t0, 2b
li.w t1, LOONGARCH_IOCSR_MBUF0
iocsrrd.w s0, t1 /* check PC as an indicator */
beqz s0, 1b
iocsrrd.d s0, t1 /* get PC via mailbox */
li.d t0, CACHE_BASE
or s0, s0, t0 /* s0 = TO_CACHE(s0) */
jr s0 /* jump to initial PC */
SYM_CODE_END(kexec_smp_wait)
#endif
relocate_new_kernel_end:
SYM_DATA_START(relocate_new_kernel_size)
PTR relocate_new_kernel_end - relocate_new_kernel
SYM_DATA_END(relocate_new_kernel_size)
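For readability, here is a rough C rendering of the indirection-list walk that relocate_new_kernel implements in assembly above; the flag masks mirror the li.w constants, while the 4 KiB page size and the aligned demo buffers are assumptions of the sketch:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define EX_PAGE_SIZE	4096UL	/* assumes 4 KiB pages */
#define IND_DESTINATION	0x1UL
#define IND_INDIRECTION	0x2UL
#define IND_DONE	0x4UL
#define IND_SOURCE	0x8UL

static void process_entries(unsigned long *entry)
{
	unsigned char *dest = NULL;

	while (entry) {
		unsigned long e = *entry++;		/* PTR_L + PTR_ADDI */

		if (e & IND_DESTINATION) {		/* set copy target */
			dest = (unsigned char *)(e & ~IND_DESTINATION);
		} else if (e & IND_INDIRECTION) {	/* follow next table */
			entry = (unsigned long *)(e & ~IND_INDIRECTION);
		} else if (e & IND_DONE) {
			break;
		} else if (e & IND_SOURCE) {		/* copy one page */
			memcpy(dest, (void *)(e & ~IND_SOURCE), EX_PAGE_SIZE);
			dest += EX_PAGE_SIZE;
		}
	}
}

int main(void)
{
	static _Alignas(16) unsigned char src[EX_PAGE_SIZE] = "payload";
	static _Alignas(16) unsigned char dst[EX_PAGE_SIZE];
	unsigned long list[] = {
		(unsigned long)dst | IND_DESTINATION,
		(unsigned long)src | IND_SOURCE,
		IND_DONE,
	};

	process_entries(list);
	printf("%s\n", dst);	/* prints "payload" */
	return 0;
}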
...@@ -19,6 +19,8 @@ ...@@ -19,6 +19,8 @@
#include <linux/memblock.h> #include <linux/memblock.h>
#include <linux/initrd.h> #include <linux/initrd.h>
#include <linux/ioport.h> #include <linux/ioport.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h> #include <linux/root_dev.h>
#include <linux/console.h> #include <linux/console.h>
#include <linux/pfn.h> #include <linux/pfn.h>
...@@ -185,8 +187,70 @@ static int __init early_parse_mem(char *p) ...@@ -185,8 +187,70 @@ static int __init early_parse_mem(char *p)
} }
early_param("mem", early_parse_mem); early_param("mem", early_parse_mem);
static void __init arch_reserve_vmcore(void)
{
#ifdef CONFIG_PROC_VMCORE
u64 i;
phys_addr_t start, end;
if (!is_kdump_kernel())
return;
if (!elfcorehdr_size) {
for_each_mem_range(i, &start, &end) {
if (elfcorehdr_addr >= start && elfcorehdr_addr < end) {
/*
* Reserve from the elf core header to the end of
* the memory segment, that should all be kdump
* reserved memory.
*/
elfcorehdr_size = end - elfcorehdr_addr;
break;
}
}
}
if (memblock_is_region_reserved(elfcorehdr_addr, elfcorehdr_size)) {
pr_warn("elfcorehdr is overlapped\n");
return;
}
memblock_reserve(elfcorehdr_addr, elfcorehdr_size);
pr_info("Reserving %llu KiB of memory at 0x%llx for elfcorehdr\n",
elfcorehdr_size >> 10, elfcorehdr_addr);
#endif
}
static void __init arch_parse_crashkernel(void)
{
#ifdef CONFIG_KEXEC
int ret;
unsigned long long start;
unsigned long long total_mem;
unsigned long long crash_base, crash_size;
total_mem = memblock_phys_mem_size();
ret = parse_crashkernel(boot_command_line, total_mem, &crash_size, &crash_base);
if (ret < 0 || crash_size <= 0)
return;
start = memblock_phys_alloc_range(crash_size, 1, crash_base, crash_base + crash_size);
if (start != crash_base) {
pr_warn("Invalid memory region reserved for crash kernel\n");
return;
}
crashk_res.start = crash_base;
crashk_res.end = crash_base + crash_size - 1;
#endif
}
void __init platform_init(void) void __init platform_init(void)
{ {
arch_reserve_vmcore();
arch_parse_crashkernel();
#ifdef CONFIG_ACPI_TABLE_UPGRADE #ifdef CONFIG_ACPI_TABLE_UPGRADE
acpi_table_upgrade(); acpi_table_upgrade();
#endif #endif
...@@ -289,6 +353,15 @@ static void __init resource_init(void) ...@@ -289,6 +353,15 @@ static void __init resource_init(void)
request_resource(res, &data_resource); request_resource(res, &data_resource);
request_resource(res, &bss_resource); request_resource(res, &bss_resource);
} }
#ifdef CONFIG_KEXEC
if (crashk_res.start < crashk_res.end) {
insert_resource(&iomem_resource, &crashk_res);
pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n",
(unsigned long)((crashk_res.end - crashk_res.start + 1) >> 20),
(unsigned long)(crashk_res.start >> 20));
}
#endif
} }
static int __init reserve_memblock_reserved_regions(void) static int __init reserve_memblock_reserved_regions(void)
...@@ -348,10 +421,11 @@ void __init setup_arch(char **cmdline_p) ...@@ -348,10 +421,11 @@ void __init setup_arch(char **cmdline_p)
init_environ(); init_environ();
efi_init(); efi_init();
memblock_init(); memblock_init();
pagetable_init();
parse_early_param(); parse_early_param();
reserve_initrd_mem();
platform_init(); platform_init();
pagetable_init();
arch_mem_init(cmdline_p); arch_mem_init(cmdline_p);
resource_init(); resource_init();
......
...@@ -240,11 +240,6 @@ void loongson3_smp_finish(void) ...@@ -240,11 +240,6 @@ void loongson3_smp_finish(void)
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
static bool io_master(int cpu)
{
return test_bit(cpu, &loongson_sysconf.cores_io_master);
}
int loongson3_cpu_disable(void) int loongson3_cpu_disable(void)
{ {
unsigned long flags; unsigned long flags;
......
// SPDX-License-Identifier: GPL-2.0
/*
* LoongArch specific sysrq operations.
*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/sysrq.h>
#include <linux/workqueue.h>
#include <asm/cpu-features.h>
#include <asm/tlb.h>
/*
* Dump TLB entries on all CPUs.
*/
static DEFINE_SPINLOCK(show_lock);
static void sysrq_tlbdump_single(void *dummy)
{
unsigned long flags;
spin_lock_irqsave(&show_lock, flags);
pr_info("CPU%d:\n", smp_processor_id());
dump_tlb_regs();
pr_info("\n");
dump_tlb_all();
pr_info("\n");
spin_unlock_irqrestore(&show_lock, flags);
}
#ifdef CONFIG_SMP
static void sysrq_tlbdump_othercpus(struct work_struct *dummy)
{
smp_call_function(sysrq_tlbdump_single, NULL, 0);
}
static DECLARE_WORK(sysrq_tlbdump, sysrq_tlbdump_othercpus);
#endif
static void sysrq_handle_tlbdump(int key)
{
sysrq_tlbdump_single(NULL);
#ifdef CONFIG_SMP
schedule_work(&sysrq_tlbdump);
#endif
}
static struct sysrq_key_op sysrq_tlbdump_op = {
.handler = sysrq_handle_tlbdump,
.help_msg = "show-tlbs(x)",
.action_msg = "Show TLB entries",
.enable_mask = SYSRQ_ENABLE_DUMP,
};
static int __init loongarch_sysrq_init(void)
{
return register_sysrq_key('x', &sysrq_tlbdump_op);
}
arch_initcall(loongarch_sysrq_init);
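Once registered, the dump can be triggered with `echo x > /proc/sysrq-trigger`, or via the SysRq key chord when dump operations are enabled in the sysrq mask: the handler prints the local CPU's TLB directly, then schedules a work item that IPIs the remaining CPUs, with each dump serialized by show_lock.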
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
#include <linux/node.h> #include <linux/node.h>
#include <linux/nodemask.h> #include <linux/nodemask.h>
#include <linux/percpu.h> #include <linux/percpu.h>
#include <asm/bootinfo.h>
static DEFINE_PER_CPU(struct cpu, cpu_devices); static DEFINE_PER_CPU(struct cpu, cpu_devices);
...@@ -40,7 +41,7 @@ static int __init topology_init(void) ...@@ -40,7 +41,7 @@ static int __init topology_init(void)
for_each_present_cpu(i) { for_each_present_cpu(i) {
struct cpu *c = &per_cpu(cpu_devices, i); struct cpu *c = &per_cpu(cpu_devices, i);
c->hotpluggable = !!i; c->hotpluggable = !io_master(i);
ret = register_cpu(c, i); ret = register_cpu(c, i);
if (ret < 0) if (ret < 0)
pr_warn("topology_init: register_cpu %d failed (%d)\n", i, ret); pr_warn("topology_init: register_cpu %d failed (%d)\n", i, ret);
......
...@@ -10,6 +10,7 @@ ...@@ -10,6 +10,7 @@
#include <linux/entry-common.h> #include <linux/entry-common.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/extable.h> #include <linux/extable.h>
#include <linux/mm.h> #include <linux/mm.h>
...@@ -246,6 +247,9 @@ void __noreturn die(const char *str, struct pt_regs *regs) ...@@ -246,6 +247,9 @@ void __noreturn die(const char *str, struct pt_regs *regs)
oops_exit(); oops_exit();
if (regs && kexec_should_crash(current))
crash_kexec(regs);
if (in_interrupt()) if (in_interrupt())
panic("Fatal exception in interrupt"); panic("Fatal exception in interrupt");
...@@ -374,6 +378,29 @@ asmlinkage void noinstr do_ale(struct pt_regs *regs) ...@@ -374,6 +378,29 @@ asmlinkage void noinstr do_ale(struct pt_regs *regs)
irqentry_exit(regs, state); irqentry_exit(regs, state);
} }
#ifdef CONFIG_GENERIC_BUG
int is_valid_bugaddr(unsigned long addr)
{
return 1;
}
#endif /* CONFIG_GENERIC_BUG */
static void bug_handler(struct pt_regs *regs)
{
switch (report_bug(regs->csr_era, regs)) {
case BUG_TRAP_TYPE_BUG:
case BUG_TRAP_TYPE_NONE:
die_if_kernel("Oops - BUG", regs);
force_sig(SIGTRAP);
break;
case BUG_TRAP_TYPE_WARN:
/* Skip the BUG instruction and continue */
regs->csr_era += LOONGARCH_INSN_SIZE;
break;
}
}
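The net effect is that a kernel WARN() now resumes at the instruction after the 4-byte break (csr_era is advanced by LOONGARCH_INSN_SIZE), while BUG() and unrecognized break codes still die; do_bp() below routes BRK_BUG into this handler instead of dying unconditionally.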
asmlinkage void noinstr do_bp(struct pt_regs *regs) asmlinkage void noinstr do_bp(struct pt_regs *regs)
{ {
bool user = user_mode(regs); bool user = user_mode(regs);
...@@ -427,8 +454,7 @@ asmlinkage void noinstr do_bp(struct pt_regs *regs) ...@@ -427,8 +454,7 @@ asmlinkage void noinstr do_bp(struct pt_regs *regs)
switch (bcode) { switch (bcode) {
case BRK_BUG: case BRK_BUG:
die_if_kernel("Kernel bug detected", regs); bug_handler(regs);
force_sig(SIGTRAP);
break; break;
case BRK_DIVZERO: case BRK_DIVZERO:
die_if_kernel("Break instruction in kernel code", regs); die_if_kernel("Break instruction in kernel code", regs);
...@@ -620,9 +646,6 @@ asmlinkage void noinstr do_vint(struct pt_regs *regs, unsigned long sp) ...@@ -620,9 +646,6 @@ asmlinkage void noinstr do_vint(struct pt_regs *regs, unsigned long sp)
irqentry_exit(regs, state); irqentry_exit(regs, state);
} }
extern void tlb_init(int cpu);
extern void cache_error_setup(void);
unsigned long eentry; unsigned long eentry;
unsigned long tlbrentry; unsigned long tlbrentry;
......
...@@ -55,6 +55,10 @@ SECTIONS ...@@ -55,6 +55,10 @@ SECTIONS
EXCEPTION_TABLE(16) EXCEPTION_TABLE(16)
.got : ALIGN(16) { *(.got) }
.plt : ALIGN(16) { *(.plt) }
.got.plt : ALIGN(16) { *(.got.plt) }
. = ALIGN(PECOFF_SEGMENT_ALIGN); . = ALIGN(PECOFF_SEGMENT_ALIGN);
__init_begin = .; __init_begin = .;
__inittext_begin = .; __inittext_begin = .;
......
...@@ -6,8 +6,8 @@ ...@@ -6,8 +6,8 @@
* Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org) * Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org)
* Copyright (C) 2007 MIPS Technologies, Inc. * Copyright (C) 2007 MIPS Technologies, Inc.
*/ */
#include <linux/cacheinfo.h>
#include <linux/export.h> #include <linux/export.h>
#include <linux/fcntl.h>
#include <linux/fs.h> #include <linux/fs.h>
#include <linux/highmem.h> #include <linux/highmem.h>
#include <linux/kernel.h> #include <linux/kernel.h>
...@@ -16,14 +16,21 @@ ...@@ -16,14 +16,21 @@
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/syscalls.h> #include <linux/syscalls.h>
#include <asm/bootinfo.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include <asm/cpu.h> #include <asm/cpu.h>
#include <asm/cpu-features.h> #include <asm/cpu-features.h>
#include <asm/dma.h>
#include <asm/loongarch.h> #include <asm/loongarch.h>
#include <asm/numa.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/setup.h> #include <asm/setup.h>
void cache_error_setup(void)
{
extern char __weak except_vec_cex;
set_merr_handler(0x0, &except_vec_cex, 0x80);
}
/* /*
* LoongArch maintains ICache/DCache coherency by hardware, * LoongArch maintains ICache/DCache coherency by hardware,
* we just need "ibar" to avoid instruction hazard here. * we just need "ibar" to avoid instruction hazard here.
...@@ -34,109 +41,121 @@ void local_flush_icache_range(unsigned long start, unsigned long end) ...@@ -34,109 +41,121 @@ void local_flush_icache_range(unsigned long start, unsigned long end)
} }
EXPORT_SYMBOL(local_flush_icache_range); EXPORT_SYMBOL(local_flush_icache_range);
void cache_error_setup(void) static void flush_cache_leaf(unsigned int leaf)
{
extern char __weak except_vec_cex;
set_merr_handler(0x0, &except_vec_cex, 0x80);
}
static unsigned long icache_size __read_mostly;
static unsigned long dcache_size __read_mostly;
static unsigned long vcache_size __read_mostly;
static unsigned long scache_size __read_mostly;
static char *way_string[] = { NULL, "direct mapped", "2-way",
"3-way", "4-way", "5-way", "6-way", "7-way", "8-way",
"9-way", "10-way", "11-way", "12-way",
"13-way", "14-way", "15-way", "16-way",
};
static void probe_pcache(void)
{ {
struct cpuinfo_loongarch *c = &current_cpu_data; int i, j, nr_nodes;
unsigned int lsize, sets, ways; uint64_t addr = CSR_DMW0_BASE;
unsigned int config; struct cache_desc *cdesc = current_cpu_data.cache_leaves + leaf;
config = read_cpucfg(LOONGARCH_CPUCFG17); nr_nodes = cache_private(cdesc) ? 1 : loongson_sysconf.nr_nodes;
lsize = 1 << ((config & CPUCFG17_L1I_SIZE_M) >> CPUCFG17_L1I_SIZE);
sets = 1 << ((config & CPUCFG17_L1I_SETS_M) >> CPUCFG17_L1I_SETS); do {
ways = ((config & CPUCFG17_L1I_WAYS_M) >> CPUCFG17_L1I_WAYS) + 1; for (i = 0; i < cdesc->sets; i++) {
for (j = 0; j < cdesc->ways; j++) {
c->icache.linesz = lsize; flush_cache_line(leaf, addr);
c->icache.sets = sets; addr++;
c->icache.ways = ways; }
icache_size = sets * ways * lsize;
c->icache.waysize = icache_size / c->icache.ways; addr -= cdesc->ways;
addr += cdesc->linesz;
config = read_cpucfg(LOONGARCH_CPUCFG18); }
lsize = 1 << ((config & CPUCFG18_L1D_SIZE_M) >> CPUCFG18_L1D_SIZE); addr += (1ULL << NODE_ADDRSPACE_SHIFT);
sets = 1 << ((config & CPUCFG18_L1D_SETS_M) >> CPUCFG18_L1D_SETS); } while (--nr_nodes > 0);
ways = ((config & CPUCFG18_L1D_WAYS_M) >> CPUCFG18_L1D_WAYS) + 1;
c->dcache.linesz = lsize;
c->dcache.sets = sets;
c->dcache.ways = ways;
dcache_size = sets * ways * lsize;
c->dcache.waysize = dcache_size / c->dcache.ways;
c->options |= LOONGARCH_CPU_PREFETCH;
pr_info("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
icache_size >> 10, way_string[c->icache.ways], "VIPT", c->icache.linesz);
pr_info("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n",
dcache_size >> 10, way_string[c->dcache.ways], "VIPT", "no aliases", c->dcache.linesz);
} }
static void probe_vcache(void) asmlinkage __visible void __flush_cache_all(void)
{ {
struct cpuinfo_loongarch *c = &current_cpu_data; int leaf;
unsigned int lsize, sets, ways; struct cache_desc *cdesc = current_cpu_data.cache_leaves;
unsigned int config; unsigned int cache_present = current_cpu_data.cache_leaves_present;
config = read_cpucfg(LOONGARCH_CPUCFG19); leaf = cache_present - 1;
lsize = 1 << ((config & CPUCFG19_L2_SIZE_M) >> CPUCFG19_L2_SIZE); if (cache_inclusive(cdesc + leaf)) {
sets = 1 << ((config & CPUCFG19_L2_SETS_M) >> CPUCFG19_L2_SETS); flush_cache_leaf(leaf);
ways = ((config & CPUCFG19_L2_WAYS_M) >> CPUCFG19_L2_WAYS) + 1; return;
}
c->vcache.linesz = lsize;
c->vcache.sets = sets; for (leaf = 0; leaf < cache_present; leaf++)
c->vcache.ways = ways; flush_cache_leaf(leaf);
vcache_size = lsize * sets * ways;
c->vcache.waysize = vcache_size / c->vcache.ways;
pr_info("Unified victim cache %ldkB %s, linesize %d bytes.\n",
vcache_size >> 10, way_string[c->vcache.ways], c->vcache.linesz);
} }
static void probe_scache(void) #define L1IUPRE (1 << 0)
{ #define L1IUUNIFY (1 << 1)
struct cpuinfo_loongarch *c = &current_cpu_data; #define L1DPRE (1 << 2)
unsigned int lsize, sets, ways;
unsigned int config; #define LXIUPRE (1 << 0)
#define LXIUUNIFY (1 << 1)
config = read_cpucfg(LOONGARCH_CPUCFG20); #define LXIUPRIV (1 << 2)
lsize = 1 << ((config & CPUCFG20_L3_SIZE_M) >> CPUCFG20_L3_SIZE); #define LXIUINCL (1 << 3)
sets = 1 << ((config & CPUCFG20_L3_SETS_M) >> CPUCFG20_L3_SETS); #define LXDPRE (1 << 4)
ways = ((config & CPUCFG20_L3_WAYS_M) >> CPUCFG20_L3_WAYS) + 1; #define LXDPRIV (1 << 5)
#define LXDINCL (1 << 6)
c->scache.linesz = lsize;
c->scache.sets = sets; #define populate_cache_properties(cfg0, cdesc, level, leaf) \
c->scache.ways = ways; do { \
/* 4 cores. scaches are shared */ unsigned int cfg1; \
scache_size = lsize * sets * ways; \
c->scache.waysize = scache_size / c->scache.ways; cfg1 = read_cpucfg(LOONGARCH_CPUCFG17 + leaf); \
if (level == 1) { \
pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n", cdesc->flags |= CACHE_PRIVATE; \
scache_size >> 10, way_string[c->scache.ways], c->scache.linesz); } else { \
} if (cfg0 & LXIUPRIV) \
cdesc->flags |= CACHE_PRIVATE; \
if (cfg0 & LXIUINCL) \
cdesc->flags |= CACHE_INCLUSIVE; \
} \
cdesc->level = level; \
cdesc->flags |= CACHE_PRESENT; \
cdesc->ways = ((cfg1 & CPUCFG_CACHE_WAYS_M) >> CPUCFG_CACHE_WAYS) + 1; \
cdesc->sets = 1 << ((cfg1 & CPUCFG_CACHE_SETS_M) >> CPUCFG_CACHE_SETS); \
cdesc->linesz = 1 << ((cfg1 & CPUCFG_CACHE_LSIZE_M) >> CPUCFG_CACHE_LSIZE); \
cdesc++; leaf++; \
} while (0)
void cpu_cache_init(void) void cpu_cache_init(void)
{ {
probe_pcache(); unsigned int leaf = 0, level = 1;
probe_vcache(); unsigned int config = read_cpucfg(LOONGARCH_CPUCFG16);
probe_scache(); struct cache_desc *cdesc = current_cpu_data.cache_leaves;
if (config & L1IUPRE) {
if (config & L1IUUNIFY)
cdesc->type = CACHE_TYPE_UNIFIED;
else
cdesc->type = CACHE_TYPE_INST;
populate_cache_properties(config, cdesc, level, leaf);
}
if (config & L1DPRE) {
cdesc->type = CACHE_TYPE_DATA;
populate_cache_properties(config, cdesc, level, leaf);
}
config = config >> 3;
for (level = 2; level <= CACHE_LEVEL_MAX; level++) {
if (!config)
break;
if (config & LXIUPRE) {
if (config & LXIUUNIFY)
cdesc->type = CACHE_TYPE_UNIFIED;
else
cdesc->type = CACHE_TYPE_INST;
populate_cache_properties(config, cdesc, level, leaf);
}
if (config & LXDPRE) {
cdesc->type = CACHE_TYPE_DATA;
populate_cache_properties(config, cdesc, level, leaf);
}
config = config >> 7;
}
BUG_ON(leaf > CACHE_LEAVES_MAX);
current_cpu_data.cache_leaves_present = leaf;
current_cpu_data.options |= LOONGARCH_CPU_PREFETCH;
shm_align_mask = PAGE_SIZE - 1; shm_align_mask = PAGE_SIZE - 1;
} }
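The flush loop in flush_cache_leaf() above bumps the address by 1 per way and by linesz per set, which suggests the indexed cache operation encodes the way number in the lowest address bits. A standalone model of the address sequence it generates, with made-up leaf geometry:

#include <stdio.h>

int main(void)
{
	unsigned long addr = 0x9000000000000000UL;	/* DMW-style cached window base */
	int sets = 4, ways = 2, linesz = 64;		/* made-up geometry */

	for (int i = 0; i < sets; i++) {
		for (int j = 0; j < ways; j++)
			printf("cacop %#lx (set %d, way %d)\n", addr++, i, j);
		addr -= ways;		/* rewind the way index...      */
		addr += linesz;		/* ...and step to the next set  */
	}
	return 0;
}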
......
...@@ -152,6 +152,70 @@ EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid); ...@@ -152,6 +152,70 @@ EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif #endif
#endif #endif
static pte_t *fixmap_pte(unsigned long addr)
{
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pgd = pgd_offset_k(addr);
p4d = p4d_offset(pgd, addr);
if (pgd_none(*pgd)) {
pud_t *new __maybe_unused;
new = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
pgd_populate(&init_mm, pgd, new);
#ifndef __PAGETABLE_PUD_FOLDED
pud_init((unsigned long)new, (unsigned long)invalid_pmd_table);
#endif
}
pud = pud_offset(p4d, addr);
if (pud_none(*pud)) {
pmd_t *new __maybe_unused;
new = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
pud_populate(&init_mm, pud, new);
#ifndef __PAGETABLE_PMD_FOLDED
pmd_init((unsigned long)new, (unsigned long)invalid_pte_table);
#endif
}
pmd = pmd_offset(pud, addr);
if (pmd_none(*pmd)) {
pte_t *new __maybe_unused;
new = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
pmd_populate_kernel(&init_mm, pmd, new);
}
return pte_offset_kernel(pmd, addr);
}
void __init __set_fixmap(enum fixed_addresses idx,
phys_addr_t phys, pgprot_t flags)
{
unsigned long addr = __fix_to_virt(idx);
pte_t *ptep;
BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);
ptep = fixmap_pte(addr);
if (!pte_none(*ptep)) {
pte_ERROR(*ptep);
return;
}
if (pgprot_val(flags))
set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
else {
pte_clear(&init_mm, addr, ptep);
flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
}
}
/* /*
* Align swapper_pg_dir in to 64K, allows its address to be loaded * Align swapper_pg_dir in to 64K, allows its address to be loaded
* with a single LUI instruction in the TLB handlers. If we used * with a single LUI instruction in the TLB handlers. If we used
......
...@@ -3,6 +3,8 @@ ...@@ -3,6 +3,8 @@
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/ */
#include <linux/export.h> #include <linux/export.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/mman.h> #include <linux/mman.h>
...@@ -116,3 +118,30 @@ int __virt_addr_valid(volatile void *kaddr) ...@@ -116,3 +118,30 @@ int __virt_addr_valid(volatile void *kaddr)
return pfn_valid(PFN_DOWN(PHYSADDR(kaddr))); return pfn_valid(PFN_DOWN(PHYSADDR(kaddr)));
} }
EXPORT_SYMBOL_GPL(__virt_addr_valid); EXPORT_SYMBOL_GPL(__virt_addr_valid);
/*
* You really shouldn't be using read() or write() on /dev/mem. This might go
* away in the future.
*/
int valid_phys_addr_range(phys_addr_t addr, size_t size)
{
/*
* Check whether addr is covered by a memory region without the
* MEMBLOCK_NOMAP attribute, and whether that region covers the
* entire range. In theory, this could lead to false negatives
* if the range is covered by distinct but adjacent memory regions
* that only differ in other attributes. However, few such
* attributes have been defined, and it is debatable whether it
* follows that /dev/mem read() calls should be able to traverse
* such boundaries.
*/
return memblock_is_region_memory(addr, size) && memblock_is_map_memory(addr);
}
/*
* Do not allow /dev/mem mappings beyond the supported physical range.
*/
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
return !(((pfn << PAGE_SHIFT) + size) & ~(GENMASK_ULL(cpu_pabits, 0)));
}
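valid_mmap_phys_addr_range() rejects any mapping whose last byte lies above the CPU's physical address space. A standalone sketch of the check (cpu_pabits = 47 is an assumed example value; GENMASK_ULL is re-derived locally):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define EX_PAGE_SHIFT	12
#define GENMASK_ULL(h, l) (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

static int valid_mmap(unsigned long pfn, size_t size, unsigned int pabits)
{
	/* Nonzero bits above pabits mean the range overflows the PA space. */
	return !((((uint64_t)pfn << EX_PAGE_SHIFT) + size) & ~GENMASK_ULL(pabits, 0));
}

int main(void)
{
	printf("%d\n", valid_mmap(0x1000, 4096, 47));		/* well inside: 1   */
	printf("%d\n", valid_mmap(1UL << 35, 4096, 47));	/* top half, ok: 1  */
	printf("%d\n", valid_mmap(1UL << 36, 4096, 47));	/* past 2^48: 0     */
	return 0;
}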
...@@ -258,7 +258,7 @@ extern long exception_handlers[VECSIZE * 128 / sizeof(long)]; ...@@ -258,7 +258,7 @@ extern long exception_handlers[VECSIZE * 128 / sizeof(long)];
void setup_tlb_handler(int cpu) void setup_tlb_handler(int cpu)
{ {
setup_ptwalker(); setup_ptwalker();
output_pgtable_bits_defines(); local_flush_tlb_all();
/* The tlb handlers are generated only once */ /* The tlb handlers are generated only once */
if (cpu == 0) { if (cpu == 0) {
...@@ -301,6 +301,7 @@ void tlb_init(int cpu) ...@@ -301,6 +301,7 @@ void tlb_init(int cpu)
write_csr_pagesize(PS_DEFAULT_SIZE); write_csr_pagesize(PS_DEFAULT_SIZE);
write_csr_stlbpgsize(PS_DEFAULT_SIZE); write_csr_stlbpgsize(PS_DEFAULT_SIZE);
write_csr_tlbrefill_pagesize(PS_DEFAULT_SIZE); write_csr_tlbrefill_pagesize(PS_DEFAULT_SIZE);
setup_tlb_handler(cpu); setup_tlb_handler(cpu);
local_flush_tlb_all(); output_pgtable_bits_defines();
} }
# SPDX-License-Identifier: GPL-2.0-only
#
# Makefile for arch/loongarch/net
#
# Copyright (C) 2022 Loongson Technology Corporation Limited
#
obj-$(CONFIG_BPF_JIT) += bpf_jit.o
...@@ -82,6 +82,69 @@ static int acpi_prepare_root_resources(struct acpi_pci_root_info *ci) ...@@ -82,6 +82,69 @@ static int acpi_prepare_root_resources(struct acpi_pci_root_info *ci)
return 0; return 0;
} }
/*
* Create a PCI config space window
* - reserve mem region
* - alloc struct pci_config_window with space for all mappings
* - ioremap the config space
*/
static struct pci_config_window *arch_pci_ecam_create(struct device *dev,
struct resource *cfgres, struct resource *busr, const struct pci_ecam_ops *ops)
{
int bsz, bus_range, err;
struct resource *conflict;
struct pci_config_window *cfg;
if (busr->start > busr->end)
return ERR_PTR(-EINVAL);
cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
if (!cfg)
return ERR_PTR(-ENOMEM);
cfg->parent = dev;
cfg->ops = ops;
cfg->busr.start = busr->start;
cfg->busr.end = busr->end;
cfg->busr.flags = IORESOURCE_BUS;
bus_range = resource_size(cfgres) >> ops->bus_shift;
bsz = 1 << ops->bus_shift;
cfg->res.start = cfgres->start;
cfg->res.end = cfgres->end;
cfg->res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
cfg->res.name = "PCI ECAM";
conflict = request_resource_conflict(&iomem_resource, &cfg->res);
if (conflict) {
err = -EBUSY;
dev_err(dev, "can't claim ECAM area %pR: address conflict with %s %pR\n",
&cfg->res, conflict->name, conflict);
goto err_exit;
}
cfg->win = pci_remap_cfgspace(cfgres->start, bus_range * bsz);
if (!cfg->win)
goto err_exit_iomap;
if (ops->init) {
err = ops->init(cfg);
if (err)
goto err_exit;
}
dev_info(dev, "ECAM at %pR for %pR\n", &cfg->res, &cfg->busr);
return cfg;
err_exit_iomap:
err = -ENOMEM;
dev_err(dev, "ECAM ioremap failed\n");
err_exit:
pci_ecam_free(cfg);
return ERR_PTR(err);
}
/* /*
* Lookup the bus range for the domain in MCFG, and set up config space * Lookup the bus range for the domain in MCFG, and set up config space
* mapping. * mapping.
...@@ -106,11 +169,16 @@ pci_acpi_setup_ecam_mapping(struct acpi_pci_root *root) ...@@ -106,11 +169,16 @@ pci_acpi_setup_ecam_mapping(struct acpi_pci_root *root)
bus_shift = ecam_ops->bus_shift ? : 20; bus_shift = ecam_ops->bus_shift ? : 20;
cfgres.start = root->mcfg_addr + (bus_res->start << bus_shift); if (bus_shift == 20)
cfgres.end = cfgres.start + (resource_size(bus_res) << bus_shift) - 1; cfg = pci_ecam_create(dev, &cfgres, bus_res, ecam_ops);
cfgres.flags = IORESOURCE_MEM; else {
cfgres.start = root->mcfg_addr + (bus_res->start << bus_shift);
cfgres.end = cfgres.start + (resource_size(bus_res) << bus_shift) - 1;
cfgres.end |= BIT(28) + (((PCI_CFG_SPACE_EXP_SIZE - 1) & 0xf00) << 16);
cfgres.flags = IORESOURCE_MEM;
cfg = arch_pci_ecam_create(dev, &cfgres, bus_res, ecam_ops);
}
cfg = pci_ecam_create(dev, &cfgres, bus_res, ecam_ops);
if (IS_ERR(cfg)) { if (IS_ERR(cfg)) {
dev_err(dev, "%04x:%pR error %ld mapping ECAM\n", seg, bus_res, PTR_ERR(cfg)); dev_err(dev, "%04x:%pR error %ld mapping ECAM\n", seg, bus_res, PTR_ERR(cfg));
return NULL; return NULL;
......
...@@ -9,6 +9,7 @@ ...@@ -9,6 +9,7 @@
#include <linux/types.h> #include <linux/types.h>
#include <linux/pci.h> #include <linux/pci.h>
#include <linux/vgaarb.h> #include <linux/vgaarb.h>
#include <asm/cacheflush.h>
#include <asm/loongson.h> #include <asm/loongson.h>
#define PCI_DEVICE_ID_LOONGSON_HOST 0x7a00 #define PCI_DEVICE_ID_LOONGSON_HOST 0x7a00
...@@ -45,12 +46,10 @@ static int __init pcibios_init(void) ...@@ -45,12 +46,10 @@ static int __init pcibios_init(void)
unsigned int lsize; unsigned int lsize;
/* /*
* Set PCI cacheline size to that of the highest level in the * Set PCI cacheline size to that of the last level in the
* cache hierarchy. * cache hierarchy.
*/ */
lsize = cpu_dcache_line_size(); lsize = cpu_last_level_cache_line_size();
lsize = cpu_vcache_line_size() ? : lsize;
lsize = cpu_scache_line_size() ? : lsize;
BUG_ON(!lsize); BUG_ON(!lsize);
......
...@@ -3,6 +3,8 @@ if MIPS ...@@ -3,6 +3,8 @@ if MIPS
source "drivers/platform/mips/Kconfig" source "drivers/platform/mips/Kconfig"
endif endif
source "drivers/platform/loongarch/Kconfig"
source "drivers/platform/goldfish/Kconfig" source "drivers/platform/goldfish/Kconfig"
source "drivers/platform/chrome/Kconfig" source "drivers/platform/chrome/Kconfig"
......
...@@ -4,6 +4,7 @@ ...@@ -4,6 +4,7 @@
# #
obj-$(CONFIG_X86) += x86/ obj-$(CONFIG_X86) += x86/
obj-$(CONFIG_LOONGARCH) += loongarch/
obj-$(CONFIG_MELLANOX_PLATFORM) += mellanox/ obj-$(CONFIG_MELLANOX_PLATFORM) += mellanox/
obj-$(CONFIG_MIPS) += mips/ obj-$(CONFIG_MIPS) += mips/
obj-$(CONFIG_OLPC_EC) += olpc/ obj-$(CONFIG_OLPC_EC) += olpc/
......
#
# LoongArch Platform Specific Drivers
#
menuconfig LOONGARCH_PLATFORM_DEVICES
bool "LoongArch Platform Specific Device Drivers"
default y
depends on LOONGARCH
help
Say Y here to get to see options for device drivers of various
LoongArch platforms, including vendor-specific laptop/desktop
extensions and hardware monitor drivers. This option itself does
not add any kernel code.
If you say N, all options in this submenu will be skipped and disabled.
if LOONGARCH_PLATFORM_DEVICES
config LOONGSON_LAPTOP
tristate "Generic Loongson-3 Laptop Driver"
depends on ACPI
depends on BACKLIGHT_CLASS_DEVICE
depends on INPUT
depends on MACH_LOONGSON64
select ACPI_VIDEO
select INPUT_SPARSEKMAP
default y
help
Generic driver for ACPI-based Loongson-3 family laptops.
endif # LOONGARCH_PLATFORM_DEVICES
obj-$(CONFIG_LOONGSON_LAPTOP) += loongson-laptop.o