Commit 6f73b362 authored by Linus Torvalds

Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc

Pull powerpc updates from Benjamin Herrenschmidt:
 "Here are the powerpc goodies for 3.5.  Main highlights are:

   - Support for the NX crypto engine in Power7+
   - A bunch of Anton goodness, including some micro optimization of our
     syscall entry on Power7
   - I converted a pile of our thermal control drivers to the new i2c
     APIs (essentially turning the old therm_pm72 into a proper set of
     windfarm drivers).  That's one more step toward removing the
     deprecated i2c APIs, there's still a few drivers to fix, but we are
     getting close
   - kexec/kdump support for 47x embedded cores

  The big thing missing here is updates from Freescale.  Not sure
  what's up there, but with Kumar not working for them anymore, things
  are a bit in a state of flux in that area."

* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (71 commits)
  powerpc: Fix irq distribution
  Revert "powerpc/hw-breakpoint: Use generic hw-breakpoint interfaces for new PPC ptrace flags"
  powerpc: Fixing a cputhread code documentation
  powerpc/crypto: Enable the PFO-based encryption device
  powerpc/crypto: Build files for the nx device driver
  powerpc/crypto: debugfs routines and docs for the nx device driver
  powerpc/crypto: SHA512 hash routines for nx encryption
  powerpc/crypto: SHA256 hash routines for nx encryption
  powerpc/crypto: AES-XCBC mode routines for nx encryption
  powerpc/crypto: AES-GCM mode routines for nx encryption
  powerpc/crypto: AES-ECB mode routines for nx encryption
  powerpc/crypto: AES-CTR mode routines for nx encryption
  powerpc/crypto: AES-CCM mode routines for nx encryption
  powerpc/crypto: AES-CBC mode routines for nx encryption
  powerpc/crypto: nx driver code supporting nx encryption
  powerpc/pseries: Enable the PFO-based RNG accelerator
  powerpc/pseries/hwrng: PFO-based hwrng driver
  powerpc/pseries: Add PFO support to the VIO bus
  powerpc/pseries: Add pseries update notifier for OFDT prop changes
  powerpc/pseries: Add new hvcall constants to support PFO
  ...
parents 3a8580f8 2074b1d9
What: /sys/kernel/debug/nx-crypto/*
Date: March 2012
KernelVersion: 3.4
Contact: Kent Yoder <key@linux.vnet.ibm.com>
Description:
These debugfs interfaces are provided by the nx-crypto driver, built in
drivers/crypto/nx.
Error Detection
===============
errors:
- A u32 providing a total count of errors since the driver was loaded. The
only errors counted here are those returned from the hcall, H_COP_OP.
last_error:
- The most recent non-zero return code from the H_COP_OP hcall. -EBUSY is not
recorded here (the hcall will retry until -EBUSY goes away).
last_error_pid:
- The process ID of the process that received the most recent error from the
hcall.
Device Use
==========
aes_bytes:
- The total number of bytes encrypted using AES in any of the driver's
supported modes.
aes_ops:
- The total number of AES operations submitted to the hardware.
sha256_bytes:
- The total number of bytes hashed by the hardware using SHA-256.
sha256_ops:
- The total number of SHA-256 operations submitted to the hardware.
sha512_bytes:
- The total number of bytes hashed by the hardware using SHA-512.
sha512_ops:
- The total number of SHA-512 operations submitted to the hardware.
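For illustration, a minimal user-space sketch (not part of this commit) that
reads one of the counters above; the exact file path and the plain-decimal
output format are assumptions based on the description:

#include <stdio.h>

int main(void)
{
	unsigned long long aes_bytes;
	FILE *f = fopen("/sys/kernel/debug/nx-crypto/aes_bytes", "r");

	if (!f)
		return 1;
	/* each debugfs file is assumed to hold one decimal counter */
	if (fscanf(f, "%llu", &aes_bytes) == 1)
		printf("AES bytes processed: %llu\n", aes_bytes);
	fclose(f);
	return 0;
}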
......@@ -350,7 +350,7 @@ config ARCH_ENABLE_MEMORY_HOTREMOVE
config KEXEC
bool "kexec system call (EXPERIMENTAL)"
depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP && !PPC_47x)) && EXPERIMENTAL
depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP)) && EXPERIMENTAL
help
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot
......@@ -367,7 +367,7 @@ config KEXEC
config CRASH_DUMP
bool "Build a kdump crash kernel"
depends on PPC64 || 6xx || FSL_BOOKE || (44x && !SMP && !PPC_47x)
depends on PPC64 || 6xx || FSL_BOOKE || (44x && !SMP)
select RELOCATABLE if PPC64 || 44x
select DYNAMIC_MEMSTART if FSL_BOOKE
help
......
......@@ -69,6 +69,16 @@ LDFLAGS_vmlinux := $(LDFLAGS_vmlinux-y)
CFLAGS-$(CONFIG_PPC64) := -mminimal-toc -mtraceback=no -mcall-aixdesc
CFLAGS-$(CONFIG_PPC32) := -ffixed-r2 -mmultiple
CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=power7,-mtune=power4)
CFLAGS-$(CONFIG_CELL_CPU) += $(call cc-option,-mcpu=cell)
CFLAGS-$(CONFIG_POWER4_CPU) += $(call cc-option,-mcpu=power4)
CFLAGS-$(CONFIG_POWER5_CPU) += $(call cc-option,-mcpu=power5)
CFLAGS-$(CONFIG_POWER6_CPU) += $(call cc-option,-mcpu=power6)
CFLAGS-$(CONFIG_POWER7_CPU) += $(call cc-option,-mcpu=power7)
CFLAGS-$(CONFIG_TUNE_CELL) += $(call cc-option,-mtune=cell)
KBUILD_CPPFLAGS += -Iarch/$(ARCH)
KBUILD_AFLAGS += -Iarch/$(ARCH)
KBUILD_CFLAGS += -msoft-float -pipe -Iarch/$(ARCH) $(CFLAGS-y)
......@@ -76,32 +86,11 @@ CPP = $(CC) -E $(KBUILD_CFLAGS)
CHECKFLAGS += -m$(CONFIG_WORD_SIZE) -D__powerpc__ -D__powerpc$(CONFIG_WORD_SIZE)__
ifeq ($(CONFIG_PPC64),y)
GCC_BROKEN_VEC := $(call cc-ifversion, -lt, 0400, y)
ifeq ($(CONFIG_POWER4_ONLY),y)
ifeq ($(CONFIG_ALTIVEC),y)
ifeq ($(GCC_BROKEN_VEC),y)
KBUILD_CFLAGS += $(call cc-option,-mcpu=970)
else
KBUILD_CFLAGS += $(call cc-option,-mcpu=power4)
endif
else
KBUILD_CFLAGS += $(call cc-option,-mcpu=power4)
endif
else
KBUILD_CFLAGS += $(call cc-option,-mtune=power4)
endif
endif
KBUILD_LDFLAGS_MODULE += arch/powerpc/lib/crtsavres.o
ifeq ($(CONFIG_TUNE_CELL),y)
KBUILD_CFLAGS += $(call cc-option,-mtune=cell)
endif
# No AltiVec instruction when building kernel
# No AltiVec or VSX instructions when building kernel
KBUILD_CFLAGS += $(call cc-option,-mno-altivec)
KBUILD_CFLAGS += $(call cc-option,-mno-vsx)
# No SPE instruction when building kernel
# (We use all available options to help semi-broken compilers)
......@@ -160,6 +149,7 @@ core-$(CONFIG_KVM) += arch/powerpc/kvm/
core-$(CONFIG_PERF_EVENTS) += arch/powerpc/perf/
drivers-$(CONFIG_OPROFILE) += arch/powerpc/oprofile/
drivers-$(CONFIG_CRYPTO_DEV_NX) += drivers/crypto/nx/
# Default to zImage, override when needed
all: zImage
......@@ -234,10 +224,11 @@ archprepare: checkbin
# Use the file '.tmp_gas_check' for binutils tests, as gas won't output
# to stdout and these checks are run even on install targets.
TOUT := .tmp_gas_check
# Ensure this is binutils 2.12.1 (or 2.12.90.0.7) or later for altivec
# instructions.
# gcc-3.4 and binutils-2.14 are a fatal combination.
# Check gcc and binutils versions:
# - gcc-3.4 and binutils-2.14 are a fatal combination
# - Require gcc 4.0 or above on 64-bit
# - gcc-4.2.0 has issues compiling modules on 64-bit
checkbin:
@if test "$(call cc-version)" = "0304" ; then \
if ! /bin/echo mftb 5 | $(AS) -v -mppc -many -o $(TOUT) >/dev/null 2>&1 ; then \
......@@ -247,6 +238,12 @@ checkbin:
false; \
fi ; \
fi
@if test "$(call cc-version)" -lt "0400" \
&& test "x${CONFIG_PPC64}" = "xy" ; then \
echo -n "Sorry, GCC v4.0 or above is required to build " ; \
echo "the 64-bit powerpc kernel." ; \
false ; \
fi
@if test "$(call cc-fullversion)" = "040200" \
&& test "x${CONFIG_MODULES}${CONFIG_PPC64}" = "xyy" ; then \
echo -n '*** GCC-4.2.0 cannot compile the 64-bit powerpc ' ; \
......
......@@ -373,5 +373,30 @@ PCIE0: pciex@d00000000 {
0x0 0x0 0x0 0x3 &UIC3 0xe 0x4 /* swizzled int C */
0x0 0x0 0x0 0x4 &UIC3 0xf 0x4 /* swizzled int D */>;
};
MSI: ppc4xx-msi@C10000000 {
compatible = "amcc,ppc4xx-msi", "ppc4xx-msi";
reg = < 0xC 0x10000000 0x100
0xC 0x10000000 0x100>;
sdr-base = <0x36C>;
msi-data = <0x00004440>;
msi-mask = <0x0000ffe0>;
interrupts =<0 1 2 3 4 5 6 7>;
interrupt-parent = <&MSI>;
#interrupt-cells = <1>;
#address-cells = <0>;
#size-cells = <0>;
msi-available-ranges = <0x0 0x100>;
interrupt-map = <
0 &UIC3 0x18 1
1 &UIC3 0x19 1
2 &UIC3 0x1A 1
3 &UIC3 0x1B 1
4 &UIC3 0x1C 1
5 &UIC3 0x1D 1
6 &UIC3 0x1E 1
7 &UIC3 0x1F 1
>;
};
};
};
CONFIG_PPC64=y
CONFIG_POWER4_ONLY=y
CONFIG_ALTIVEC=y
CONFIG_SMP=y
CONFIG_NR_CPUS=4
......
CONFIG_PPC64=y
CONFIG_POWER4_ONLY=y
CONFIG_SMP=y
CONFIG_NR_CPUS=4
CONFIG_EXPERIMENTAL=y
......
CONFIG_PPC64=y
CONFIG_POWER4_ONLY=y
CONFIG_ALTIVEC=y
# CONFIG_VIRT_CPU_ACCOUNTING is not set
CONFIG_SMP=y
......
......@@ -6,7 +6,6 @@ CONFIG_NR_CPUS=2
CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_SPARSE_IRQ=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_EMBEDDED=y
......@@ -25,7 +24,6 @@ CONFIG_PS3_DISK=y
CONFIG_PS3_ROM=y
CONFIG_PS3_FLASH=y
CONFIG_PS3_VRAM=m
CONFIG_PS3_LPM=m
# CONFIG_PPC_OF_BOOT_TRAMPOLINE is not set
CONFIG_HIGH_RES_TIMERS=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
......@@ -53,8 +51,6 @@ CONFIG_IP_PNP_DHCP=y
# CONFIG_INET_DIAG is not set
CONFIG_IPV6=y
CONFIG_BT=m
CONFIG_BT_L2CAP=y
CONFIG_BT_SCO=y
CONFIG_BT_RFCOMM=m
CONFIG_BT_RFCOMM_TTY=y
CONFIG_BT_BNEP=m
......@@ -63,7 +59,6 @@ CONFIG_BT_BNEP_PROTO_FILTER=y
CONFIG_BT_HIDP=m
CONFIG_BT_HCIBTUSB=m
CONFIG_CFG80211=m
# CONFIG_WIRELESS_EXT_SYSFS is not set
CONFIG_MAC80211=m
CONFIG_MAC80211_RC_PID=y
# CONFIG_MAC80211_RC_MINSTREL is not set
......@@ -181,7 +176,6 @@ CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_WRITECOUNT=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_DEBUG_LIST=y
CONFIG_SYSCTL_SYSCALL_CHECK=y
# CONFIG_FTRACE is not set
CONFIG_DEBUG_STACKOVERFLOW=y
CONFIG_CRYPTO_CCM=m
......
......@@ -29,18 +29,9 @@
#define PPC_LLARX(t, a, b, eh) PPC_LDARX(t, a, b, eh)
#define PPC_STLCX stringify_in_c(stdcx.)
#define PPC_CNTLZL stringify_in_c(cntlzd)
#define PPC_MTOCRF(FXM, RS) MTOCRF((FXM), (RS))
#define PPC_LR_STKOFF 16
#define PPC_MIN_STKFRM 112
/* Move to CR, single-entry optimized version. Only available
* on POWER4 and later.
*/
#ifdef CONFIG_POWER4_ONLY
#define PPC_MTOCRF stringify_in_c(mtocrf)
#else
#define PPC_MTOCRF stringify_in_c(mtcrf)
#endif
#else /* 32-bit */
/* operations for longs and pointers */
......
......@@ -9,7 +9,7 @@
* Note: This implementation is limited to a power of 2 number of
* threads per core and the same number for each core in the system
* (though it would work if some processors had less threads as long
* as the CPU numbers are still allocated, just not brought offline).
* as the CPU numbers are still allocated, just not brought online).
*
* However, the API allows for a different implementation in the future
* if needed, as long as you only use the functions and not the variables
......
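To illustrate the advice in the comment above (use the functions, not the
variables), a hedged sketch; it assumes the cpu_thread_in_core() helper
declared in this header, and the function name below is illustrative:

static void report_primary_threads(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		if (cpu_thread_in_core(cpu) == 0)
			pr_info("cpu %d is the primary thread of its core\n",
				cpu);
}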
......@@ -77,8 +77,27 @@
#define H_MR_CONDITION -43
#define H_NOT_ENOUGH_RESOURCES -44
#define H_R_STATE -45
#define H_RESCINDEND -46
#define H_RESCINDED -46
#define H_P2 -55
#define H_P3 -56
#define H_P4 -57
#define H_P5 -58
#define H_P6 -59
#define H_P7 -60
#define H_P8 -61
#define H_P9 -62
#define H_TOO_BIG -64
#define H_OVERLAP -68
#define H_INTERRUPT -69
#define H_BAD_DATA -70
#define H_NOT_ACTIVE -71
#define H_SG_LIST -72
#define H_OP_MODE -73
#define H_COP_HW -74
#define H_UNSUPPORTED_FLAG_START -256
#define H_UNSUPPORTED_FLAG_END -511
#define H_MULTI_THREADS_ACTIVE -9005
#define H_OUTSTANDING_COP_OPS -9006
/* Long Busy is a condition that can be returned by the firmware
......@@ -240,6 +259,8 @@
#define H_GET_MPP 0x2D4
#define H_HOME_NODE_ASSOCIATIVITY 0x2EC
#define H_BEST_ENERGY 0x2F4
#define H_RANDOM 0x300
#define H_COP 0x304
#define H_GET_MPP_X 0x314
#define MAX_HCALL_OPCODE H_GET_MPP_X
......
......@@ -20,18 +20,16 @@
#define _ASM_POWERPC_LPPACA_H
#ifdef __KERNEL__
/* These definitions relate to hypervisors that only exist when using
/*
* These definitions relate to hypervisors that only exist when using
* a server type processor
*/
#ifdef CONFIG_PPC_BOOK3S
//=============================================================================
//
// This control block contains the data that is shared between the
// hypervisor (PLIC) and the OS.
//
//
//----------------------------------------------------------------------------
/*
* This control block contains the data that is shared between the
* hypervisor and the OS.
*/
#include <linux/cache.h>
#include <linux/threads.h>
#include <asm/types.h>
......@@ -43,123 +41,65 @@
*/
#define NR_LPPACAS 1
/* The Hypervisor barfs if the lppaca crosses a page boundary. A 1k
* alignment is sufficient to prevent this */
/*
* The Hypervisor barfs if the lppaca crosses a page boundary. A 1k
* alignment is sufficient to prevent this
*/
struct lppaca {
//=============================================================================
// CACHE_LINE_1 0x0000 - 0x007F Contains read-only data
// NOTE: The xDynXyz fields are fields that will be dynamically changed by
// PLIC when preparing to bring a processor online or when dispatching a
// virtual processor!
//=============================================================================
u32 desc; // Eye catcher 0xD397D781 x00-x03
u16 size; // Size of this struct x04-x05
u16 reserved1; // Reserved x06-x07
u16 reserved2:14; // Reserved x08-x09
u8 shared_proc:1; // Shared processor indicator ...
u8 secondary_thread:1; // Secondary thread indicator ...
volatile u8 dyn_proc_status:8; // Dynamic Status of this proc x0A-x0A
u8 secondary_thread_count; // Secondary thread count x0B-x0B
volatile u16 dyn_hv_phys_proc_index;// Dynamic HV Physical Proc Index0C-x0D
volatile u16 dyn_hv_log_proc_index;// Dynamic HV Logical Proc Indexx0E-x0F
u32 decr_val; // Value for Decr programming x10-x13
u32 pmc_val; // Value for PMC regs x14-x17
volatile u32 dyn_hw_node_id; // Dynamic Hardware Node id x18-x1B
volatile u32 dyn_hw_proc_id; // Dynamic Hardware Proc Id x1C-x1F
volatile u32 dyn_pir; // Dynamic ProcIdReg value x20-x23
u32 dsei_data; // DSEI data x24-x27
u64 sprg3; // SPRG3 value x28-x2F
u8 reserved3[40]; // Reserved x30-x57
volatile u8 vphn_assoc_counts[8]; // Virtual processor home node
// associativity change counters x58-x5F
u8 reserved4[32]; // Reserved x60-x7F
//=============================================================================
// CACHE_LINE_2 0x0080 - 0x00FF Contains local read-write data
//=============================================================================
// This Dword contains a byte for each type of interrupt that can occur.
// The IPI is a count while the others are just a binary 1 or 0.
union {
u64 any_int;
struct {
u16 reserved; // Reserved - cleared by #mpasmbl
u8 xirr_int; // Indicates xXirrValue is valid or Immed IO
u8 ipi_cnt; // IPI Count
u8 decr_int; // DECR interrupt occurred
u8 pdc_int; // PDC interrupt occurred
u8 quantum_int; // Interrupt quantum reached
u8 old_plic_deferred_ext_int; // Old PLIC has a deferred XIRR pending
} fields;
} int_dword;
// Whenever any fields in this Dword are set then PLIC will defer the
// processing of external interrupts. Note that PLIC will store the
// XIRR directly into the xXirrValue field so that another XIRR will
// not be presented until this one clears. The layout of the low
// 4-bytes of this Dword is up to SLIC - PLIC just checks whether the
// entire Dword is zero or not. A non-zero value in the low order
// 2-bytes will result in SLIC being granted the highest thread
// priority upon return. A 0 will return to SLIC as medium priority.
u64 plic_defer_ints_area; // Entire Dword
// Used to pass the real SRR0/1 from PLIC to SLIC as well as to
// pass the target SRR0/1 from SLIC to PLIC on a SetAsrAndRfid.
u64 saved_srr0; // Saved SRR0 x10-x17
u64 saved_srr1; // Saved SRR1 x18-x1F
// Used to pass parms from the OS to PLIC for SetAsrAndRfid
u64 saved_gpr3; // Saved GPR3 x20-x27
u64 saved_gpr4; // Saved GPR4 x28-x2F
union {
u64 saved_gpr5; /* Saved GPR5 x30-x37 */
struct {
u8 cede_latency_hint; /* x30 */
u8 reserved[7]; /* x31-x36 */
} fields;
} gpr5_dword;
u8 dtl_enable_mask; // Dispatch Trace Log mask x38-x38
u8 donate_dedicated_cpu; // Donate dedicated CPU cycles x39-x39
u8 fpregs_in_use; // FP regs in use x3A-x3A
u8 pmcregs_in_use; // PMC regs in use x3B-x3B
volatile u32 saved_decr; // Saved Decr Value x3C-x3F
volatile u64 emulated_time_base;// Emulated TB for this thread x40-x47
volatile u64 cur_plic_latency; // Unaccounted PLIC latency x48-x4F
u64 tot_plic_latency; // Accumulated PLIC latency x50-x57
u64 wait_state_cycles; // Wait cycles for this proc x58-x5F
u64 end_of_quantum; // TB at end of quantum x60-x67
u64 pdc_saved_sprg1; // Saved SPRG1 for PMC int x68-x6F
u64 pdc_saved_srr0; // Saved SRR0 for PMC int x70-x77
volatile u32 virtual_decr; // Virtual DECR for shared procsx78-x7B
u16 slb_count; // # of SLBs to maintain x7C-x7D
u8 idle; // Indicate OS is idle x7E
u8 vmxregs_in_use; // VMX registers in use x7F
//=============================================================================
// CACHE_LINE_3 0x0100 - 0x017F: This line is shared with other processors
//=============================================================================
// This is the yield_count. An "odd" value (low bit on) means that
// the processor is yielded (either because of an OS yield or a PLIC
// preempt). An even value implies that the processor is currently
// executing.
// NOTE: This value will ALWAYS be zero for dedicated processors and
// will NEVER be zero for shared processors (ie, initialized to a 1).
volatile u32 yield_count; // PLIC increments each dispatchx00-x03
volatile u32 dispersion_count; // dispatch changed phys cpu x04-x07
volatile u64 cmo_faults; // CMO page fault count x08-x0F
volatile u64 cmo_fault_time; // CMO page fault time x10-x17
u8 reserved7[104]; // Reserved x18-x7F
//=============================================================================
// CACHE_LINE_4-5 0x0180 - 0x027F Contains PMC interrupt data
//=============================================================================
u32 page_ins; // CMO Hint - # page ins by OS x00-x03
u8 reserved8[148]; // Reserved x04-x97
volatile u64 dtl_idx; // Dispatch Trace Log head idx x98-x9F
u8 reserved9[96]; // Reserved xA0-xFF
/* cacheline 1 contains read-only data */
u32 desc; /* Eye catcher 0xD397D781 */
u16 size; /* Size of this struct */
u16 reserved1;
u16 reserved2:14;
u8 shared_proc:1; /* Shared processor indicator */
u8 secondary_thread:1; /* Secondary thread indicator */
u8 reserved3[14];
volatile u32 dyn_hw_node_id; /* Dynamic hardware node id */
volatile u32 dyn_hw_proc_id; /* Dynamic hardware proc id */
u8 reserved4[56];
volatile u8 vphn_assoc_counts[8]; /* Virtual processor home node */
/* associativity change counters */
u8 reserved5[32];
/* cacheline 2 contains local read-write data */
u8 reserved6[48];
u8 cede_latency_hint;
u8 reserved7[7];
u8 dtl_enable_mask; /* Dispatch Trace Log mask */
u8 donate_dedicated_cpu; /* Donate dedicated CPU cycles */
u8 fpregs_in_use;
u8 pmcregs_in_use;
u8 reserved8[28];
u64 wait_state_cycles; /* Wait cycles for this proc */
u8 reserved9[28];
u16 slb_count; /* # of SLBs to maintain */
u8 idle; /* Indicate OS is idle */
u8 vmxregs_in_use;
/* cacheline 3 is shared with other processors */
/*
* This is the yield_count. An "odd" value (low bit on) means that
* the processor is yielded (either because of an OS yield or a
* hypervisor preempt). An even value implies that the processor is
* currently executing.
* NOTE: This value will ALWAYS be zero for dedicated processors and
* will NEVER be zero for shared processors (ie, initialized to a 1).
*/
volatile u32 yield_count;
volatile u32 dispersion_count; /* dispatch changed physical cpu */
volatile u64 cmo_faults; /* CMO page fault count */
volatile u64 cmo_fault_time; /* CMO page fault time */
u8 reserved10[104];
/* cacheline 4-5 */
u32 page_ins; /* CMO Hint - # page ins by OS */
u8 reserved11[148];
volatile u64 dtl_idx; /* Dispatch Trace Log head index */
u8 reserved12[96];
} __attribute__((__aligned__(0x400)));
extern struct lppaca lppaca[];
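A minimal sketch of the yield_count parity test described in the comment
above (illustrative only, not part of this diff):

/* Odd yield_count: the virtual processor is currently yielded or
 * preempted; even: it is executing.  Always zero on dedicated procs. */
static inline bool lppaca_vcpu_is_yielded(const struct lppaca *lp)
{
	return lp->yield_count & 1;
}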
......@@ -172,13 +112,13 @@ extern struct lppaca lppaca[];
* ESID is stored in the lower 64bits, then the VSID.
*/
struct slb_shadow {
u32 persistent; // Number of persistent SLBs x00-x03
u32 buffer_length; // Total shadow buffer length x04-x07
u64 reserved; // Alignment x08-x0f
u32 persistent; /* Number of persistent SLBs */
u32 buffer_length; /* Total shadow buffer length */
u64 reserved;
struct {
u64 esid;
u64 vsid;
} save_area[SLB_NUM_BOLTED]; // x10-x40
} save_area[SLB_NUM_BOLTED];
} ____cacheline_aligned;
extern struct slb_shadow slb_shadow[];
......
......@@ -265,8 +265,8 @@ LV1_CALL(get_spe_irq_outlet, 2, 1, 78 )
LV1_CALL(set_spe_privilege_state_area_1_register, 3, 0, 79 )
LV1_CALL(create_repository_node, 6, 0, 90 )
LV1_CALL(read_repository_node, 5, 2, 91 )
LV1_CALL(modify_repository_node_value, 6, 0, 92 )
LV1_CALL(remove_repository_node, 4, 0, 93 )
LV1_CALL(write_repository_node, 6, 0, 92 )
LV1_CALL(delete_repository_node, 4, 0, 93 )
LV1_CALL(read_htab_entries, 2, 5, 95 )
LV1_CALL(set_dabr, 2, 0, 96 )
LV1_CALL(get_total_execution_time, 2, 1, 103 )
......
......@@ -13,6 +13,18 @@
#define PSERIES_RECONFIG_REMOVE 0x0002
#define PSERIES_DRCONF_MEM_ADD 0x0003
#define PSERIES_DRCONF_MEM_REMOVE 0x0004
#define PSERIES_UPDATE_PROPERTY 0x0005
/**
* struct pSeries_reconfig_prop_update - Notifier data for OFDT property updates
*
* @node: Device tree node which owns the property being updated
* @property: Updated property
*/
struct pSeries_reconfig_prop_update {
struct device_node *node;
struct property *property;
};
#ifdef CONFIG_PPC_PSERIES
extern int pSeries_reconfig_notifier_register(struct notifier_block *);
......
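A hypothetical consumer of the new PSERIES_UPDATE_PROPERTY notification
(a sketch; the names below are illustrative, not from this commit):

static int demo_prop_notify(struct notifier_block *nb,
			    unsigned long action, void *data)
{
	if (action == PSERIES_UPDATE_PROPERTY) {
		struct pSeries_reconfig_prop_update *update = data;

		pr_info("OFDT property %s updated on node %s\n",
			update->property->name, update->node->full_name);
	}
	return NOTIFY_OK;
}

static struct notifier_block demo_prop_nb = {
	.notifier_call = demo_prop_notify,
};

/* registered from driver init with:
 *	pSeries_reconfig_notifier_register(&demo_prop_nb);
 */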
......@@ -369,6 +369,14 @@ BEGIN_FTR_SECTION \
END_FTR_SECTION_IFCLR(CPU_FTR_601)
#endif
#ifdef CONFIG_PPC64
#define MTOCRF(FXM, RS) \
BEGIN_FTR_SECTION_NESTED(848); \
mtcrf (FXM), (RS); \
FTR_SECTION_ELSE_NESTED(848); \
mtocrf (FXM), (RS); \
ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_NOEXECUTE, 848)
#endif
/*
* This instruction is not implemented on the PPC 603 or 601; however, on
......
......@@ -354,12 +354,6 @@ static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
#define PTRACE_GETREGS64 22
#define PTRACE_SETREGS64 23
/* (old) PTRACE requests with inverted arguments */
#define PPC_PTRACE_GETREGS 0x99 /* Get GPRs 0 - 31 */
#define PPC_PTRACE_SETREGS 0x98 /* Set GPRs 0 - 31 */
#define PPC_PTRACE_GETFPREGS 0x97 /* Get FPRs 0 - 31 */
#define PPC_PTRACE_SETFPREGS 0x96 /* Set FPRs 0 - 31 */
/* Calls to trace a 64bit program from a 32bit program */
#define PPC_PTRACE_PEEKTEXT_3264 0x95
#define PPC_PTRACE_PEEKDATA_3264 0x94
......
......@@ -21,7 +21,6 @@ extern void disable_kernel_fp(void);
extern void enable_kernel_fp(void);
extern void flush_fp_to_thread(struct task_struct *);
extern void enable_kernel_altivec(void);
extern void giveup_altivec(struct task_struct *);
extern void load_up_altivec(struct task_struct *);
extern int emulate_altivec(struct pt_regs *);
extern void __giveup_vsx(struct task_struct *);
......@@ -40,10 +39,15 @@ static inline void discard_lazy_cpu_state(void)
#ifdef CONFIG_ALTIVEC
extern void flush_altivec_to_thread(struct task_struct *);
extern void giveup_altivec(struct task_struct *);
extern void giveup_altivec_notask(void);
#else
static inline void flush_altivec_to_thread(struct task_struct *t)
{
}
static inline void giveup_altivec(struct task_struct *t)
{
}
#endif
#ifdef CONFIG_VSX
......
......@@ -113,7 +113,6 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_NOERROR (1<<TIF_NOERROR)
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
#define _TIF_RUNLATCH (1<<TIF_RUNLATCH)
#define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
_TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
......
......@@ -46,6 +46,48 @@
struct iommu_table;
/*
* Platform Facilities Option (PFO)-specific data
*/
/* Starting unit address for PFO devices on the VIO BUS */
#define VIO_BASE_PFO_UA 0x50000000
/**
* vio_pfo_op - PFO operation parameters
*
* @flags: h_call subfunctions and modifiers
* @in: Input data block logical real address
* @inlen: If non-negative, the length of the input data block. If negative,
* the length of the input data descriptor list in bytes.
* @out: Output data block logical real address
* @outlen: If non-negative, the length of the output data block. If negative,
* the length of the output data descriptor list in bytes.
* @csbcpb: Logical real address of the 4k naturally-aligned storage block
* containing the CSB & optional FC field specific CPB
* @timeout: # of milliseconds to retry h_call, 0 for no timeout.
* @hcall_err: set to the return value of the last h_call made
*/
struct vio_pfo_op {
u64 flags;
s64 in;
s64 inlen;
s64 out;
s64 outlen;
u64 csbcpb;
void *done;
unsigned long handle;
unsigned int timeout;
long hcall_err;
};
/* End PFO specific data */
enum vio_dev_family {
VDEVICE, /* The OF node is a child of /vdevice */
PFO, /* The OF node is a child of /ibm,platform-facilities */
};
/**
* vio_dev - This structure is used to describe virtual I/O devices.
*
......@@ -58,6 +100,7 @@ struct vio_dev {
const char *name;
const char *type;
uint32_t unit_address;
uint32_t resource_id;
unsigned int irq;
struct {
size_t desired;
......@@ -65,6 +108,7 @@ struct vio_dev {
size_t allocated;
atomic_t allocs_failed;
} cmo;
enum vio_dev_family family;
struct device dev;
};
......@@ -95,6 +139,8 @@ extern void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired);
extern void __devinit vio_unregister_device(struct vio_dev *dev);
extern int vio_h_cop_sync(struct vio_dev *vdev, struct vio_pfo_op *op);
struct device_node;
extern struct vio_dev *vio_register_device_node(
......
......@@ -188,10 +188,6 @@ int main(void)
DEFINE(SLBSHADOW_STACKESID,
offsetof(struct slb_shadow, save_area[SLB_NUM_BOLTED - 1].esid));
DEFINE(SLBSHADOW_SAVEAREA, offsetof(struct slb_shadow, save_area));
DEFINE(LPPACASRR0, offsetof(struct lppaca, saved_srr0));
DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1));
DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int));
DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int));
DEFINE(LPPACA_PMCINUSE, offsetof(struct lppaca, pmcregs_in_use));
DEFINE(LPPACA_DTLIDX, offsetof(struct lppaca, dtl_idx));
DEFINE(LPPACA_YIELDCOUNT, offsetof(struct lppaca, yield_count));
......
......@@ -63,15 +63,9 @@ system_call_common:
std r0,GPR0(r1)
std r10,GPR1(r1)
ACCOUNT_CPU_USER_ENTRY(r10, r11)
/*
* This "crclr so" clears CR0.SO, which is the error indication on
* return from this system call. There must be no cmp instruction
* between it and the "mfcr r9" below, otherwise if XER.SO is set,
* CR0.SO will get set, causing all system calls to appear to fail.
*/
crclr so
std r2,GPR2(r1)
std r3,GPR3(r1)
mfcr r2
std r4,GPR4(r1)
std r5,GPR5(r1)
std r6,GPR6(r1)
......@@ -82,18 +76,20 @@ system_call_common:
std r11,GPR10(r1)
std r11,GPR11(r1)
std r11,GPR12(r1)
std r11,_XER(r1)
std r11,_CTR(r1)
std r9,GPR13(r1)
mfcr r9
mflr r10
/*
* This clears CR0.SO (bit 28), which is the error indication on
* return from this system call.
*/
rldimi r2,r11,28,(63-28)
li r11,0xc01
std r9,_CCR(r1)
std r10,_LINK(r1)
std r11,_TRAP(r1)
mfxer r9
mfctr r10
std r9,_XER(r1)
std r10,_CTR(r1)
std r3,ORIG_GPR3(r1)
std r2,_CCR(r1)
ld r2,PACATOC(r13)
addi r9,r1,STACK_FRAME_OVERHEAD
ld r11,exception_marker@toc(r2)
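For context, CR0.SO is also what user space tests after "sc" to detect a
failed system call; a minimal, hypothetical user-space sketch (not from
this commit, clobber list simplified):

static long ppc64_syscall1(long nr, long arg0)
{
	register long r0 asm("r0") = nr;
	register long r3 asm("r3") = arg0;
	long cr;

	asm volatile("sc\n\t"
		     "mfcr %0"
		     : "=&r" (cr), "+r" (r3)
		     : "r" (r0)
		     : "r4", "r5", "r6", "r7", "r8", "r9", "r10",
		       "r11", "r12", "cr0", "ctr", "xer", "memory");

	/* 0x10000000 is CR0.SO; on error r3 holds the positive errno */
	return (cr & 0x10000000) ? -r3 : r3;
}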
......@@ -154,7 +150,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
ld r10,TI_FLAGS(r11)
andi. r11,r10,_TIF_SYSCALL_T_OR_A
bne- syscall_dotrace
syscall_dotrace_cont:
.Lsyscall_dotrace_cont:
cmpldi 0,r0,NR_syscalls
bge- syscall_enosys
......@@ -211,7 +207,7 @@ syscall_exit:
cmpld r3,r11
ld r5,_CCR(r1)
bge- syscall_error
syscall_error_cont:
.Lsyscall_error_cont:
ld r7,_NIP(r1)
BEGIN_FTR_SECTION
stdcx. r0,0,r1 /* to clear the reservation */
......@@ -246,7 +242,7 @@ syscall_error:
oris r5,r5,0x1000 /* Set SO bit in CR */
neg r3,r3
std r5,_CCR(r1)
b syscall_error_cont
b .Lsyscall_error_cont
/* Traced system call support */
syscall_dotrace:
......@@ -268,7 +264,7 @@ syscall_dotrace:
addi r9,r1,STACK_FRAME_OVERHEAD
clrrdi r10,r1,THREAD_SHIFT
ld r10,TI_FLAGS(r10)
b syscall_dotrace_cont
b .Lsyscall_dotrace_cont
syscall_enosys:
li r3,-ENOSYS
......
......@@ -94,12 +94,10 @@ machine_check_pSeries_1:
data_access_pSeries:
HMT_MEDIUM
SET_SCRATCH0(r13)
#ifndef CONFIG_POWER4_ONLY
BEGIN_FTR_SECTION
b data_access_check_stab
data_access_not_stab:
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
#endif
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD,
KVMTEST, 0x300)
......@@ -301,7 +299,6 @@ machine_check_fwnmi:
EXC_STD, KVMTEST, 0x200)
KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200)
#ifndef CONFIG_POWER4_ONLY
/* moved from 0x300 */
data_access_check_stab:
GET_PACA(r13)
......@@ -328,7 +325,6 @@ do_stab_bolted_pSeries:
GET_SCRATCH0(r10)
std r10,PACA_EXSLB+EX_R13(r13)
EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD)
#endif /* CONFIG_POWER4_ONLY */
KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300)
KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380)
......
......@@ -777,14 +777,6 @@ _GLOBAL(__fixup_440A_mcheck)
sync
blr
/*
* extern void giveup_altivec(struct task_struct *prev)
*
* The 44x core does not have an AltiVec unit.
*/
_GLOBAL(giveup_altivec)
blr
/*
* extern void giveup_fpu(struct task_struct *prev)
*
......
......@@ -874,14 +874,6 @@ _GLOBAL(__setup_e500mc_ivors)
sync
blr
/*
* extern void giveup_altivec(struct task_struct *prev)
*
* The e500 core does not have an AltiVec unit.
*/
_GLOBAL(giveup_altivec)
blr
#ifdef CONFIG_SPE
/*
* extern void giveup_spe(struct task_struct *prev)
......
......@@ -587,7 +587,7 @@ int irq_choose_cpu(const struct cpumask *mask)
{
int cpuid;
if (cpumask_equal(mask, cpu_all_mask)) {
if (cpumask_equal(mask, cpu_online_mask)) {
static int irq_rover;
static DEFINE_RAW_SPINLOCK(irq_rover_lock);
unsigned long flags;
......
......@@ -738,7 +738,22 @@ relocate_new_kernel:
mr r5, r31
li r0, 0
#elif defined(CONFIG_44x) && !defined(CONFIG_PPC_47x)
#elif defined(CONFIG_44x)
/* Save our parameters */
mr r29, r3
mr r30, r4
mr r31, r5
#ifdef CONFIG_PPC_47x
/* Check for 47x cores */
mfspr r3,SPRN_PVR
srwi r3,r3,16
cmplwi cr0,r3,PVR_476@h
beq setup_map_47x
cmplwi cr0,r3,PVR_476_ISS@h
beq setup_map_47x
#endif /* CONFIG_PPC_47x */
/*
* Code for setting up 1:1 mapping for PPC440x for KEXEC
......@@ -753,16 +768,15 @@ relocate_new_kernel:
* 5) Invalidate the tmp mapping.
*
* - Based on the kexec support code for FSL BookE
* - Doesn't support 47x yet.
*
*/
/* Save our parameters */
mr r29, r3
mr r30, r4
mr r31, r5
/* Load our MSR_IS and TID to MMUCR for TLB search */
mfspr r3,SPRN_PID
/*
* Load the PID with kernel PID (0).
* Also load our MSR_IS and TID to MMUCR for TLB search.
*/
li r3, 0
mtspr SPRN_PID, r3
mfmsr r4
andi. r4,r4,MSR_IS@l
beq wmmucr
......@@ -900,6 +914,179 @@ next_tlb:
li r3, 0
tlbwe r3, r24, PPC44x_TLB_PAGEID
sync
b ppc44x_map_done
#ifdef CONFIG_PPC_47x
/* 1:1 mapping for 47x */
setup_map_47x:
/*
* Load the kernel pid (0) to PID and also to MMUCR[TID].
* Also set the MSR IS->MMUCR STS
*/
li r3, 0
mtspr SPRN_PID, r3 /* Set PID */
mfmsr r4 /* Get MSR */
andi. r4, r4, MSR_IS@l /* TS=1? */
beq 1f /* If not, leave STS=0 */
oris r3, r3, PPC47x_MMUCR_STS@h /* Set STS=1 */
1: mtspr SPRN_MMUCR, r3 /* Put MMUCR */
sync
/* Find the entry we are running from */
bl 2f
2: mflr r23
tlbsx r23, 0, r23
tlbre r24, r23, 0 /* TLB Word 0 */
tlbre r25, r23, 1 /* TLB Word 1 */
tlbre r26, r23, 2 /* TLB Word 2 */
/*
* Invalidates all the tlb entries by writing to 256 RPNs(r4)
* of 4k page size in all 4 ways (0-3 in r3).
* This would invalidate the entire UTLB including the one we are
* running from. However the shadow TLB entries would help us
* to continue the execution, until we flush them (rfi/isync).
*/
addis r3, 0, 0x8000 /* specify the way */
addi r4, 0, 0 /* TLB Word0 = (EPN=0, VALID = 0) */
addi r5, 0, 0
b clear_utlb_entry
/* Align the loop to speed things up. from head_44x.S */
.align 6
clear_utlb_entry:
tlbwe r4, r3, 0
tlbwe r5, r3, 1
tlbwe r5, r3, 2
addis r3, r3, 0x2000 /* Increment the way */
cmpwi r3, 0
bne clear_utlb_entry
addis r3, 0, 0x8000
addis r4, r4, 0x100 /* Increment the EPN */
cmpwi r4, 0
bne clear_utlb_entry
/* Create the entries in the other address space */
mfmsr r5
rlwinm r7, r5, 27, 31, 31 /* Get the TS (Bit 26) from MSR */
xori r7, r7, 1 /* r7 = !TS */
insrwi r24, r7, 1, 21 /* Change the TS in the saved TLB word 0 */
/*
* write out the TLB entries for the tmp mapping
* Use way '0' so that we could easily invalidate it later.
*/
lis r3, 0x8000 /* Way '0' */
tlbwe r24, r3, 0
tlbwe r25, r3, 1
tlbwe r26, r3, 2
/* Update the msr to the new TS */
insrwi r5, r7, 1, 26
bl 1f
1: mflr r6
addi r6, r6, (2f-1b)
mtspr SPRN_SRR0, r6
mtspr SPRN_SRR1, r5
rfi
/*
* Now we are in the tmp address space.
* Create a 1:1 mapping for 0-2GiB in the original TS.
*/
2:
li r3, 0
li r4, 0 /* TLB Word 0 */
li r5, 0 /* TLB Word 1 */
li r6, 0
ori r6, r6, PPC47x_TLB2_S_RWX /* TLB word 2 */
li r8, 0 /* PageIndex */
xori r7, r7, 1 /* revert back to original TS */
write_utlb:
rotlwi r5, r8, 28 /* RPN = PageIndex * 256M */
/* ERPN = 0 as we don't use memory above 2G */
mr r4, r5 /* EPN = RPN */
ori r4, r4, (PPC47x_TLB0_VALID | PPC47x_TLB0_256M)
insrwi r4, r7, 1, 21 /* Insert the TS to Word 0 */
tlbwe r4, r3, 0 /* Write out the entries */
tlbwe r5, r3, 1
tlbwe r6, r3, 2
addi r8, r8, 1
cmpwi r8, 8 /* Have we completed ? */
bne write_utlb
/* make sure we complete the TLB write up */
isync
/*
* Prepare to jump to the 1:1 mapping.
* 1) Extract page size of the tmp mapping
* DSIZ = TLB_Word0[22:27]
* 2) Calculate the physical address of the address
* to jump to.
*/
rlwinm r10, r24, 0, 22, 27
cmpwi r10, PPC47x_TLB0_4K
bne 0f
li r10, 0x1000 /* r10 = 4k */
bl 1f
0:
/* Defaults to 256M */
lis r10, 0x1000
bl 1f
1: mflr r4
addi r4, r4, (2f-1b) /* virtual address of 2f */
subi r11, r10, 1 /* offsetmask = Pagesize - 1 */
not r10, r11 /* Pagemask = ~(offsetmask) */
and r5, r25, r10 /* Physical page */
and r6, r4, r11 /* offset within the current page */
or r5, r5, r6 /* Physical address for 2f */
/* Switch the TS in MSR to the original one */
mfmsr r8
insrwi r8, r7, 1, 26
mtspr SPRN_SRR1, r8
mtspr SPRN_SRR0, r5
rfi
2:
/* Invalidate the tmp mapping */
lis r3, 0x8000 /* Way '0' */
clrrwi r24, r24, 12 /* Clear the valid bit */
tlbwe r24, r3, 0
tlbwe r25, r3, 1
tlbwe r26, r3, 2
/* Make sure we complete the TLB write and flush the shadow TLB */
isync
#endif
ppc44x_map_done:
/* Restore the parameters */
mr r3, r29
......
......@@ -36,10 +36,7 @@ struct lppaca lppaca[] = {
[0 ... (NR_LPPACAS-1)] = {
.desc = 0xd397d781, /* "LpPa" */
.size = sizeof(struct lppaca),
.dyn_proc_status = 2,
.decr_val = 0x00ff0000,
.fpregs_in_use = 1,
.end_of_quantum = 0xfffffffffffffffful,
.slb_count = 64,
.vmxregs_in_use = 0,
.page_ins = 0,
......
......@@ -124,7 +124,7 @@ void enable_kernel_altivec(void)
if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
giveup_altivec(current);
else
giveup_altivec(NULL); /* just enable AltiVec for kernel - force */
giveup_altivec_notask();
#else
giveup_altivec(last_task_used_altivec);
#endif /* CONFIG_SMP */
......
......@@ -680,6 +680,9 @@ static void __init early_cmdline_parse(void)
#define OV3_VMX 0x40 /* VMX/Altivec */
#define OV3_DFP 0x20 /* decimal FP */
/* Option vector 4: IBM PAPR implementation */
#define OV4_MIN_ENT_CAP 0x01 /* minimum VP entitled capacity */
/* Option vector 5: PAPR/OF options supported */
#define OV5_LPAR 0x80 /* logical partitioning supported */
#define OV5_SPLPAR 0x40 /* shared-processor LPAR supported */
......@@ -701,6 +704,8 @@ static void __init early_cmdline_parse(void)
#define OV5_XCMO 0x00
#endif
#define OV5_TYPE1_AFFINITY 0x80 /* Type 1 NUMA affinity */
#define OV5_PFO_HW_RNG 0x80 /* PFO Random Number Generator */
#define OV5_PFO_HW_ENCR 0x20 /* PFO Encryption Accelerator */
/* Option Vector 6: IBM PAPR hints */
#define OV6_LINUX 0x02 /* Linux is our OS */
......@@ -744,11 +749,12 @@ static unsigned char ibm_architecture_vec[] = {
OV3_FP | OV3_VMX | OV3_DFP,
/* option vector 4: IBM PAPR implementation */
2 - 2, /* length */
3 - 2, /* length */
0, /* don't halt */
OV4_MIN_ENT_CAP, /* minimum VP entitled capacity */
/* option vector 5: PAPR/OF options */
13 - 2, /* length */
18 - 2, /* length */
0, /* don't ignore, don't halt */
OV5_LPAR | OV5_SPLPAR | OV5_LARGE_PAGES | OV5_DRCONF_MEMORY |
OV5_DONATE_DEDICATE_CPU | OV5_MSI,
......@@ -762,8 +768,13 @@ static unsigned char ibm_architecture_vec[] = {
* must match by the macro below. Update the definition if
* the structure layout changes.
*/
#define IBM_ARCH_VEC_NRCORES_OFFSET 100
#define IBM_ARCH_VEC_NRCORES_OFFSET 101
W(NR_CPUS), /* number of cores supported */
0,
0,
0,
0,
OV5_PFO_HW_RNG | OV5_PFO_HW_ENCR,
/* option vector 6: IBM PAPR hints */
4 - 2, /* length */
......
......@@ -1432,40 +1432,6 @@ static long ppc_del_hwdebug(struct task_struct *child, long addr, long data)
#endif
}
/*
* Here are the old "legacy" powerpc specific getregs/setregs ptrace calls,
* we mark them as obsolete now, they will be removed in a future version
*/
static long arch_ptrace_old(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
void __user *datavp = (void __user *) data;
switch (request) {
case PPC_PTRACE_GETREGS: /* Get GPRs 0 - 31. */
return copy_regset_to_user(child, &user_ppc_native_view,
REGSET_GPR, 0, 32 * sizeof(long),
datavp);
case PPC_PTRACE_SETREGS: /* Set GPRs 0 - 31. */
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_GPR, 0, 32 * sizeof(long),
datavp);
case PPC_PTRACE_GETFPREGS: /* Get FPRs 0 - 31. */
return copy_regset_to_user(child, &user_ppc_native_view,
REGSET_FPR, 0, 32 * sizeof(double),
datavp);
case PPC_PTRACE_SETFPREGS: /* Set FPRs 0 - 31. */
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_FPR, 0, 32 * sizeof(double),
datavp);
}
return -EPERM;
}
long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
......@@ -1687,14 +1653,6 @@ long arch_ptrace(struct task_struct *child, long request,
datavp);
#endif
/* Old reverse args ptrace callss */
case PPC_PTRACE_GETREGS: /* Get GPRs 0 - 31. */
case PPC_PTRACE_SETREGS: /* Set GPRs 0 - 31. */
case PPC_PTRACE_GETFPREGS: /* Get FPRs 0 - 31. */
case PPC_PTRACE_SETFPREGS: /* Get FPRs 0 - 31. */
ret = arch_ptrace_old(child, request, addr, data);
break;
default:
ret = ptrace_request(child, request, addr, data);
break;
......
......@@ -39,30 +39,6 @@
* in exit.c or in signal.c.
*/
/*
* Here are the old "legacy" powerpc specific getregs/setregs ptrace calls,
* we mark them as obsolete now, they will be removed in a future version
*/
static long compat_ptrace_old(struct task_struct *child, long request,
long addr, long data)
{
switch (request) {
case PPC_PTRACE_GETREGS: /* Get GPRs 0 - 31. */
return copy_regset_to_user(child,
task_user_regset_view(current), 0,
0, 32 * sizeof(compat_long_t),
compat_ptr(data));
case PPC_PTRACE_SETREGS: /* Set GPRs 0 - 31. */
return copy_regset_from_user(child,
task_user_regset_view(current), 0,
0, 32 * sizeof(compat_long_t),
compat_ptr(data));
}
return -EPERM;
}
/* Macros to workout the correct index for the FPR in the thread struct */
#define FPRNUMBER(i) (((i) - PT_FPR0) >> 1)
#define FPRHALF(i) (((i) - PT_FPR0) & 1)
......@@ -308,8 +284,6 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
case PTRACE_SETVSRREGS:
case PTRACE_GETREGS64:
case PTRACE_SETREGS64:
case PPC_PTRACE_GETFPREGS:
case PPC_PTRACE_SETFPREGS:
case PTRACE_KILL:
case PTRACE_SINGLESTEP:
case PTRACE_DETACH:
......@@ -322,12 +296,6 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
ret = arch_ptrace(child, request, addr, data);
break;
/* Old reverse args ptrace callss */
case PPC_PTRACE_GETREGS: /* Get GPRs 0 - 31. */
case PPC_PTRACE_SETREGS: /* Set GPRs 0 - 31. */
ret = compat_ptrace_old(child, request, addr, data);
break;
default:
ret = compat_ptrace_request(child, request, addr, data);
break;
......
......@@ -89,6 +89,16 @@ _GLOBAL(load_up_altivec)
/* restore registers and return */
blr
_GLOBAL(giveup_altivec_notask)
mfmsr r3
andis. r4,r3,MSR_VEC@h
bnelr /* Already enabled? */
oris r3,r3,MSR_VEC@h
SYNC
MTMSRD(r3) /* enable use of VMX now */
isync
blr
/*
* giveup_altivec(tsk)
* Disable VMX for the task given as the argument,
......
......@@ -14,7 +14,9 @@
* 2 of the License, or (at your option) any later version.
*/
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/device.h>
#include <linux/init.h>
......@@ -709,13 +711,26 @@ static int vio_cmo_bus_probe(struct vio_dev *viodev)
struct vio_driver *viodrv = to_vio_driver(dev->driver);
unsigned long flags;
size_t size;
bool dma_capable = false;
/*
* Check to see that device has a DMA window and configure
* entitlement for the device.
*/
/* A device requires entitlement if it has a DMA window property */
switch (viodev->family) {
case VDEVICE:
if (of_get_property(viodev->dev.of_node,
"ibm,my-dma-window", NULL)) {
"ibm,my-dma-window", NULL))
dma_capable = true;
break;
case PFO:
dma_capable = false;
break;
default:
dev_warn(dev, "unknown device family: %d\n", viodev->family);
BUG();
break;
}
/* Configure entitlement for the device. */
if (dma_capable) {
/* Check that the driver is CMO enabled and get desired DMA */
if (!viodrv->get_desired_dma) {
dev_err(dev, "%s: device driver does not support CMO\n",
......@@ -1050,6 +1065,94 @@ static void vio_cmo_sysfs_init(void) { }
EXPORT_SYMBOL(vio_cmo_entitlement_update);
EXPORT_SYMBOL(vio_cmo_set_dev_desired);
/*
* Platform Facilities Option (PFO) support
*/
/**
* vio_h_cop_sync - Perform a synchronous PFO co-processor operation
*
* @vdev - Pointer to a struct vio_dev for device
* @op - Pointer to a struct vio_pfo_op for the operation parameters
*
* Calls the hypervisor to synchronously perform the PFO operation
* described in @op. In the case of a busy response from the hypervisor,
* the operation will be re-submitted indefinitely unless a non-zero timeout
* is specified or an error occurs. The timeout places a limit on when to
* stop re-submitting a operation, the total time can be exceeded if an
* operation is in progress.
*
* op->hcall_err will be set to the return value of the last h_cop_op call,
* or to 0 if an error not involving the h_call was encountered.
*
* Returns:
* 0 on success,
* -EINVAL if the h_call fails due to an invalid parameter,
* -E2BIG if the h_call can not be performed synchronously,
* -EBUSY if a timeout is specified and has elapsed,
* -EACCES if the memory area for data/status has been rescinded, or
* -EPERM if a hardware fault has been indicated
*/
int vio_h_cop_sync(struct vio_dev *vdev, struct vio_pfo_op *op)
{
struct device *dev = &vdev->dev;
unsigned long deadline = 0;
long hret = 0;
int ret = 0;
if (op->timeout)
deadline = jiffies + msecs_to_jiffies(op->timeout);
while (true) {
hret = plpar_hcall_norets(H_COP, op->flags,
vdev->resource_id,
op->in, op->inlen, op->out,
op->outlen, op->csbcpb);
if (hret == H_SUCCESS ||
(hret != H_NOT_ENOUGH_RESOURCES &&
hret != H_BUSY && hret != H_RESOURCE) ||
(op->timeout && time_after(deadline, jiffies)))
break;
dev_dbg(dev, "%s: hcall ret(%ld), retrying.\n", __func__, hret);
}
switch (hret) {
case H_SUCCESS:
ret = 0;
break;
case H_OP_MODE:
case H_TOO_BIG:
ret = -E2BIG;
break;
case H_RESCINDED:
ret = -EACCES;
break;
case H_HARDWARE:
ret = -EPERM;
break;
case H_NOT_ENOUGH_RESOURCES:
case H_RESOURCE:
case H_BUSY:
ret = -EBUSY;
break;
default:
ret = -EINVAL;
break;
}
if (ret)
dev_dbg(dev, "%s: Sync h_cop_op failure (ret:%d) (hret:%ld)\n",
__func__, ret, hret);
op->hcall_err = hret;
return ret;
}
EXPORT_SYMBOL(vio_h_cop_sync);
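/*
 * Hypothetical caller sketch (not from this commit): submit one
 * synchronous PFO operation.  The flags and buffer addresses are
 * device-specific assumptions; csbcpb must be the logical real address
 * of a 4k naturally-aligned block, per the struct documentation.
 */
static int demo_pfo_request(struct vio_dev *vdev, u64 flags,
			    s64 in, s64 inlen, s64 out, s64 outlen,
			    u64 csbcpb)
{
	struct vio_pfo_op op = {
		.flags	 = flags,
		.in	 = in,
		.inlen	 = inlen,
		.out	 = out,
		.outlen	 = outlen,
		.csbcpb	 = csbcpb,
		.timeout = 1000,	/* stop retrying H_BUSY after ~1s */
	};
	int rc = vio_h_cop_sync(vdev, &op);

	if (rc)
		pr_err("PFO op failed: rc %d (last hcall ret %ld)\n",
		       rc, op.hcall_err);
	return rc;
}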
static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
{
const unsigned char *dma_window;
......@@ -1211,35 +1314,87 @@ static void __devinit vio_dev_release(struct device *dev)
struct vio_dev *vio_register_device_node(struct device_node *of_node)
{
struct vio_dev *viodev;
struct device_node *parent_node;
const unsigned int *unit_address;
const unsigned int *pfo_resid = NULL;
enum vio_dev_family family;
const char *of_node_name = of_node->name ? of_node->name : "<unknown>";
/* we need the 'device_type' property, in order to match with drivers */
if (of_node->type == NULL) {
printk(KERN_WARNING "%s: node %s missing 'device_type'\n",
/*
* Determine if this node is under the /vdevice node or under the
* /ibm,platform-facilities node. This decides the device's family.
*/
parent_node = of_get_parent(of_node);
if (parent_node) {
if (!strcmp(parent_node->full_name, "/ibm,platform-facilities"))
family = PFO;
else if (!strcmp(parent_node->full_name, "/vdevice"))
family = VDEVICE;
else {
pr_warn("%s: parent(%s) of %s not recognized.\n",
__func__,
of_node->name ? of_node->name : "<unknown>");
parent_node->full_name,
of_node_name);
of_node_put(parent_node);
return NULL;
}
of_node_put(parent_node);
} else {
pr_warn("%s: could not determine the parent of node %s.\n",
__func__, of_node_name);
return NULL;
}
unit_address = of_get_property(of_node, "reg", NULL);
if (unit_address == NULL) {
printk(KERN_WARNING "%s: node %s missing 'reg'\n",
__func__,
of_node->name ? of_node->name : "<unknown>");
if (family == PFO) {
if (of_get_property(of_node, "interrupt-controller", NULL)) {
pr_debug("%s: Skipping the interrupt controller %s.\n",
__func__, of_node_name);
return NULL;
}
}
/* allocate a vio_dev for this node */
viodev = kzalloc(sizeof(struct vio_dev), GFP_KERNEL);
if (viodev == NULL)
if (viodev == NULL) {
pr_warn("%s: allocation failure for VIO device.\n", __func__);
return NULL;
}
viodev->irq = irq_of_parse_and_map(of_node, 0);
/* we need the 'device_type' property, in order to match with drivers */
viodev->family = family;
if (viodev->family == VDEVICE) {
if (of_node->type != NULL)
viodev->type = of_node->type;
else {
pr_warn("%s: node %s is missing the 'device_type' "
"property.\n", __func__, of_node_name);
goto out;
}
unit_address = of_get_property(of_node, "reg", NULL);
if (unit_address == NULL) {
pr_warn("%s: node %s missing 'reg'\n",
__func__, of_node_name);
goto out;
}
dev_set_name(&viodev->dev, "%x", *unit_address);
viodev->name = of_node->name;
viodev->type = of_node->type;
viodev->irq = irq_of_parse_and_map(of_node, 0);
viodev->unit_address = *unit_address;
} else {
/* PFO devices need their resource_id for submitting COP_OPs
* This is an optional field for devices, but is required when
* performing synchronous ops */
pfo_resid = of_get_property(of_node, "ibm,resource-id", NULL);
if (pfo_resid != NULL)
viodev->resource_id = *pfo_resid;
unit_address = NULL;
dev_set_name(&viodev->dev, "%s", of_node_name);
viodev->type = of_node_name;
viodev->irq = 0;
}
viodev->name = of_node->name;
viodev->dev.of_node = of_node_get(of_node);
if (firmware_has_feature(FW_FEATURE_CMO))
......@@ -1267,16 +1422,51 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
}
return viodev;
out: /* Use this exit point for any return prior to device_register */
kfree(viodev);
return NULL;
}
EXPORT_SYMBOL(vio_register_device_node);
/*
* vio_bus_scan_register_devices - Scan OF and register each child device
* @root_name - OF node name for the root of the subtree to search.
* This must be non-NULL
*
* Starting from the root node provided, register the device node for
* each child beneath the root.
*/
static void vio_bus_scan_register_devices(char *root_name)
{
struct device_node *node_root, *node_child;
if (!root_name)
return;
node_root = of_find_node_by_name(NULL, root_name);
if (node_root) {
/*
* Create struct vio_devices for each virtual device in
* the device tree. Drivers will associate with them later.
*/
node_child = of_get_next_child(node_root, NULL);
while (node_child) {
vio_register_device_node(node_child);
node_child = of_get_next_child(node_root, node_child);
}
of_node_put(node_root);
}
}
/**
* vio_bus_init: - Initialize the virtual IO bus
*/
static int __init vio_bus_init(void)
{
int err;
struct device_node *node_vroot;
if (firmware_has_feature(FW_FEATURE_CMO))
vio_cmo_sysfs_init();
......@@ -1301,19 +1491,8 @@ static int __init vio_bus_init(void)
if (firmware_has_feature(FW_FEATURE_CMO))
vio_cmo_bus_init();
node_vroot = of_find_node_by_name(NULL, "vdevice");
if (node_vroot) {
struct device_node *of_node;
/*
* Create struct vio_devices for each virtual device in
* the device tree. Drivers will associate with them later.
*/
for (of_node = node_vroot->child; of_node != NULL;
of_node = of_node->sibling)
vio_register_device_node(of_node);
of_node_put(node_vroot);
}
vio_bus_scan_register_devices("vdevice");
vio_bus_scan_register_devices("ibm,platform-facilities");
return 0;
}
......@@ -1436,12 +1615,28 @@ struct vio_dev *vio_find_node(struct device_node *vnode)
{
const uint32_t *unit_address;
char kobj_name[20];
struct device_node *vnode_parent;
const char *dev_type;
vnode_parent = of_get_parent(vnode);
if (!vnode_parent)
return NULL;
dev_type = of_get_property(vnode_parent, "device_type", NULL);
of_node_put(vnode_parent);
if (!dev_type)
return NULL;
/* construct the kobject name from the device node */
if (!strcmp(dev_type, "vdevice")) {
unit_address = of_get_property(vnode, "reg", NULL);
if (!unit_address)
return NULL;
snprintf(kobj_name, sizeof(kobj_name), "%x", *unit_address);
} else if (!strcmp(dev_type, "ibm,platform-facilities"))
snprintf(kobj_name, sizeof(kobj_name), "%s", vnode->name);
else
return NULL;
return vio_find_name(kobj_name);
}
......
......@@ -30,7 +30,7 @@ _GLOBAL(__copy_tofrom_user_base)
dcbt 0,r4
beq .Lcopy_page_4K
andi. r6,r6,7
PPC_MTOCRF 0x01,r5
PPC_MTOCRF(0x01,r5)
blt cr1,.Lshort_copy
/* Below we want to nop out the bne if we're on a CPU that has the
* CPU_FTR_UNALIGNED_LD_STD bit set and the CPU_FTR_CP_USE_DCBTZ bit
......@@ -186,7 +186,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
blr
.Ldst_unaligned:
PPC_MTOCRF 0x01,r6 /* put #bytes to 8B bdry into cr7 */
PPC_MTOCRF(0x01,r6) /* put #bytes to 8B bdry into cr7 */
subf r5,r6,r5
li r7,0
cmpldi cr1,r5,16
......@@ -201,7 +201,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
2: bf cr7*4+1,3f
37: lwzx r0,r7,r4
83: stwx r0,r7,r3
3: PPC_MTOCRF 0x01,r5
3: PPC_MTOCRF(0x01,r5)
add r4,r6,r4
add r3,r6,r3
b .Ldst_aligned
......
......@@ -19,7 +19,7 @@ _GLOBAL(memset)
rlwimi r4,r4,16,0,15
cmplw cr1,r5,r0 /* do we get that far? */
rldimi r4,r4,32,0
PPC_MTOCRF 1,r0
PPC_MTOCRF(1,r0)
mr r6,r3
blt cr1,8f
beq+ 3f /* if already 8-byte aligned */
......@@ -49,7 +49,7 @@ _GLOBAL(memset)
bdnz 4b
5: srwi. r0,r5,3
clrlwi r5,r5,29
PPC_MTOCRF 1,r0
PPC_MTOCRF(1,r0)
beq 8f
bf 29,6f
std r4,0(r6)
......@@ -65,7 +65,7 @@ _GLOBAL(memset)
std r4,0(r6)
addi r6,r6,8
8: cmpwi r5,0
PPC_MTOCRF 1,r5
PPC_MTOCRF(1,r5)
beqlr+
bf 29,9f
stw r4,0(r6)
......
......@@ -12,7 +12,7 @@
.align 7
_GLOBAL(memcpy)
std r3,48(r1) /* save destination pointer for return value */
PPC_MTOCRF 0x01,r5
PPC_MTOCRF(0x01,r5)
cmpldi cr1,r5,16
neg r6,r3 # LS 3 bits = # bytes to 8-byte dest bdry
andi. r6,r6,7
......@@ -154,7 +154,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
blr
.Ldst_unaligned:
PPC_MTOCRF 0x01,r6 # put #bytes to 8B bdry into cr7
PPC_MTOCRF(0x01,r6) # put #bytes to 8B bdry into cr7
subf r5,r6,r5
li r7,0
cmpldi cr1,r5,16
......@@ -169,7 +169,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
2: bf cr7*4+1,3f
lwzx r0,r7,r4
stwx r0,r7,r3
3: PPC_MTOCRF 0x01,r5
3: PPC_MTOCRF(0x01,r5)
add r4,r6,r4
add r3,r6,r3
b .Ldst_aligned
......
......@@ -23,6 +23,8 @@ config BLUESTONE
default n
select PPC44x_SIMPLE
select APM821xx
select PCI_MSI
select PPC4xx_MSI
select PPC4xx_PCI_EXPRESS
select IBM_EMAC_RGMII
help
......
......@@ -78,6 +78,36 @@ config PPC_BOOK3E_64
endchoice
choice
prompt "CPU selection"
depends on PPC64
default GENERIC_CPU
help
This will create a kernel which is optimised for a particular CPU.
The resulting kernel may not run on other CPUs, so use this with care.
If unsure, select Generic.
config GENERIC_CPU
bool "Generic"
config CELL_CPU
bool "Cell Broadband Engine"
config POWER4_CPU
bool "POWER4"
config POWER5_CPU
bool "POWER5"
config POWER6_CPU
bool "POWER6"
config POWER7_CPU
bool "POWER7"
endchoice
config PPC_BOOK3S
def_bool y
depends on PPC_BOOK3S_32 || PPC_BOOK3S_64
......@@ -86,15 +116,6 @@ config PPC_BOOK3E
def_bool y
depends on PPC_BOOK3E_64
config POWER4_ONLY
bool "Optimize for POWER4"
depends on PPC64 && PPC_BOOK3S
default n
---help---
Cause the compiler to optimize for POWER4/POWER5/PPC970 processors.
The resulting binary will not work on POWER3 or RS64 processors
when compiled with binutils 2.15 or later.
config 6xx
def_bool y
depends on PPC32 && PPC_BOOK3S
......
......@@ -1503,6 +1503,7 @@ static int __init pmac_i2c_create_platform_devices(void)
if (bus->platform_dev == NULL)
return -ENOMEM;
bus->platform_dev->dev.platform_data = bus;
bus->platform_dev->dev.of_node = bus->busnode;
platform_device_add(bus->platform_dev);
}
......
......@@ -7,7 +7,6 @@ config PPC_PS3
select USB_OHCI_BIG_ENDIAN_MMIO
select USB_ARCH_HAS_EHCI
select USB_EHCI_BIG_ENDIAN_MMIO
select MEMORY_HOTPLUG
select PPC_PCI_CHOICE
help
This option enables support for the Sony PS3 game console
......@@ -74,7 +73,7 @@ config PS3_PS3AV
help
Include support for the PS3 AV Settings driver.
This support is required for graphics and sound. In
This support is required for PS3 graphics and sound. In
general, all users will say Y or M.
config PS3_SYS_MANAGER
......@@ -85,9 +84,22 @@ config PS3_SYS_MANAGER
help
Include support for the PS3 System Manager.
This support is required for system control. In
This support is required for PS3 system control. In
general, all users will say Y or M.
config PS3_REPOSITORY_WRITE
bool "PS3 Repository write support" if PS3_ADVANCED
depends on PPC_PS3
default n
help
Enables support for writing to the PS3 System Repository.
This support is intended for bootloaders that need to store data
in the repository for later boot stages.
If in doubt, say N here and reduce the size of the kernel by a
small amount.
config PS3_STORAGE
depends on PPC_PS3
tristate
......@@ -122,7 +134,7 @@ config PS3_FLASH
This support is required to access the PS3 FLASH ROM, which
contains the boot loader and some boot options.
In general, all users will say Y or M.
In general, PS3 OtherOS users will say Y or M.
As this driver needs a fixed buffer of 256 KiB of memory, it can
be disabled on the kernel command line using "ps3flash=off", to
......@@ -156,7 +168,7 @@ config PS3GELIC_UDBG
via the Ethernet port (UDP port number 18194).
This driver uses a trivial implementation and is independent
from the main network driver.
from the main PS3 gelic network driver.
If in doubt, say N here.
......
......@@ -20,7 +20,6 @@
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/memory_hotplug.h>
#include <linux/memblock.h>
#include <linux/slab.h>
......@@ -79,12 +78,14 @@ enum {
* @base: base address
* @size: size in bytes
* @offset: difference between base and rm.size
* @destroy: flag if region should be destroyed upon shutdown
*/
struct mem_region {
u64 base;
u64 size;
unsigned long offset;
int destroy;
};
/**
......@@ -96,7 +97,7 @@ struct mem_region {
* The HV virtual address space (vas) allows for hotplug memory regions.
* Memory regions can be created and destroyed in the vas at runtime.
* @rm: real mode (bootmem) region
* @r1: hotplug memory region(s)
* @r1: highmem region(s)
*
* ps3 addresses
* virt_addr: a cpu 'translated' effective address
......@@ -222,10 +223,6 @@ void ps3_mm_vas_destroy(void)
}
}
/*============================================================================*/
/* memory hotplug routines */
/*============================================================================*/
/**
* ps3_mm_region_create - create a memory region in the vas
* @r: pointer to a struct mem_region to accept initialized values
......@@ -262,6 +259,7 @@ static int ps3_mm_region_create(struct mem_region *r, unsigned long size)
goto zero_region;
}
r->destroy = 1;
r->offset = r->base - map.rm.size;
return result;
......@@ -279,7 +277,14 @@ static void ps3_mm_region_destroy(struct mem_region *r)
{
int result;
if (!r->destroy) {
pr_info("%s:%d: Not destroying high region: %llxh %llxh\n",
__func__, __LINE__, r->base, r->size);
return;
}
DBG("%s:%d: r->base = %llxh\n", __func__, __LINE__, r->base);
if (r->base) {
result = lv1_release_memory(r->base);
BUG_ON(result);
......@@ -288,50 +293,36 @@ static void ps3_mm_region_destroy(struct mem_region *r)
}
}
/**
* ps3_mm_add_memory - hot add memory
*/
static int __init ps3_mm_add_memory(void)
static int ps3_mm_get_repository_highmem(struct mem_region *r)
{
int result;
unsigned long start_addr;
unsigned long start_pfn;
unsigned long nr_pages;
if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
return -ENODEV;
BUG_ON(!mem_init_done);
start_addr = map.rm.size;
start_pfn = start_addr >> PAGE_SHIFT;
nr_pages = (map.r1.size + PAGE_SIZE - 1) >> PAGE_SHIFT;
/* Assume a single highmem region. */
DBG("%s:%d: start_addr %lxh, start_pfn %lxh, nr_pages %lxh\n",
__func__, __LINE__, start_addr, start_pfn, nr_pages);
result = ps3_repository_read_highmem_info(0, &r->base, &r->size);
result = add_memory(0, start_addr, map.r1.size);
if (result)
goto zero_region;
if (result) {
pr_err("%s:%d: add_memory failed: (%d)\n",
__func__, __LINE__, result);
return result;
if (!r->base || !r->size) {
result = -1;
goto zero_region;
}
memblock_add(start_addr, map.r1.size);
r->offset = r->base - map.rm.size;
result = online_pages(start_pfn, nr_pages);
DBG("%s:%d: Found high region in repository: %llxh %llxh\n",
__func__, __LINE__, r->base, r->size);
if (result)
pr_err("%s:%d: online_pages failed: (%d)\n",
__func__, __LINE__, result);
return 0;
zero_region:
DBG("%s:%d: No high region in repository.\n", __func__, __LINE__);
r->size = r->base = r->offset = 0;
return result;
}
device_initcall(ps3_mm_add_memory);
/*============================================================================*/
/* dma routines */
/*============================================================================*/
......@@ -1217,13 +1208,23 @@ void __init ps3_mm_init(void)
BUG_ON(map.rm.base);
BUG_ON(!map.rm.size);
/* Check if we got the highmem region from an earlier boot step */
/* arrange to do this in ps3_mm_add_memory */
if (ps3_mm_get_repository_highmem(&map.r1))
ps3_mm_region_create(&map.r1, map.total - map.rm.size);
/* correct map.total for the real total amount of memory we use */
map.total = map.rm.size + map.r1.size;
if (!map.r1.size) {
DBG("%s:%d: No highmem region found\n", __func__, __LINE__);
} else {
DBG("%s:%d: Adding highmem region: %llxh %llxh\n",
__func__, __LINE__, map.rm.size,
map.total - map.rm.size);
memblock_add(map.rm.size, map.total - map.rm.size);
}
DBG(" <- %s:%d\n", __func__, __LINE__);
}
......
......@@ -188,6 +188,22 @@ int ps3_repository_read_rm_size(unsigned int ppe_id, u64 *rm_size);
int ps3_repository_read_region_total(u64 *region_total);
int ps3_repository_read_mm_info(u64 *rm_base, u64 *rm_size,
u64 *region_total);
int ps3_repository_read_highmem_region_count(unsigned int *region_count);
int ps3_repository_read_highmem_base(unsigned int region_index,
u64 *highmem_base);
int ps3_repository_read_highmem_size(unsigned int region_index,
u64 *highmem_size);
int ps3_repository_read_highmem_info(unsigned int region_index,
u64 *highmem_base, u64 *highmem_size);
int ps3_repository_write_highmem_region_count(unsigned int region_count);
int ps3_repository_write_highmem_base(unsigned int region_index,
u64 highmem_base);
int ps3_repository_write_highmem_size(unsigned int region_index,
u64 highmem_size);
int ps3_repository_write_highmem_info(unsigned int region_index,
u64 highmem_base, u64 highmem_size);
int ps3_repository_delete_highmem_info(unsigned int region_index);
/* repository pme info */
......
......@@ -778,6 +778,72 @@ int ps3_repository_read_mm_info(u64 *rm_base, u64 *rm_size, u64 *region_total)
: ps3_repository_read_region_total(region_total);
}
/**
* ps3_repository_read_highmem_region_count - Read the number of highmem regions
*
* Bootloaders must arrange the repository nodes such that regions are indexed
* with a region_index from 0 to region_count-1.
*/
int ps3_repository_read_highmem_region_count(unsigned int *region_count)
{
int result;
u64 v1 = 0;
result = read_node(PS3_LPAR_ID_CURRENT,
make_first_field("highmem", 0),
make_field("region", 0),
make_field("count", 0),
0,
&v1, NULL);
*region_count = v1;
return result;
}
int ps3_repository_read_highmem_base(unsigned int region_index,
u64 *highmem_base)
{
return read_node(PS3_LPAR_ID_CURRENT,
make_first_field("highmem", 0),
make_field("region", region_index),
make_field("base", 0),
0,
highmem_base, NULL);
}
int ps3_repository_read_highmem_size(unsigned int region_index,
u64 *highmem_size)
{
return read_node(PS3_LPAR_ID_CURRENT,
make_first_field("highmem", 0),
make_field("region", region_index),
make_field("size", 0),
0,
highmem_size, NULL);
}
/**
* ps3_repository_read_highmem_info - Read high memory region info
* @region_index: Region index, {0,..,region_count-1}.
* @highmem_base: High memory base address.
* @highmem_size: High memory size.
*
* Bootloaders that preallocate highmem regions must place the
* region info into the repository at these well-known nodes.
*/
int ps3_repository_read_highmem_info(unsigned int region_index,
u64 *highmem_base, u64 *highmem_size)
{
int result;
*highmem_base = 0;
result = ps3_repository_read_highmem_base(region_index, highmem_base);
return result ? result
: ps3_repository_read_highmem_size(region_index, highmem_size);
}
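Taken together, the count and per-region readers above support straightforward enumeration. A minimal sketch of a caller, assuming nothing beyond the accessors added in this patch (ps3_dump_highmem_regions is an invented name, for illustration only):
/* Illustrative only: walk each highmem region a bootloader registered. */
static void __init ps3_dump_highmem_regions(void)
{
	unsigned int count, i;
	u64 base, size;
	if (ps3_repository_read_highmem_region_count(&count))
		return;
	for (i = 0; i < count; i++) {
		if (ps3_repository_read_highmem_info(i, &base, &size))
			continue;
		pr_info("highmem region %u: %llxh %llxh\n", i, base, size);
	}
}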
/**
* ps3_repository_read_num_spu_reserved - Number of physical spus reserved.
* @num_spu: Number of physical spus.
......@@ -1002,6 +1068,138 @@ int ps3_repository_read_lpm_privileges(unsigned int be_index, u64 *lpar,
lpar, rights);
}
#if defined(CONFIG_PS3_REPOSITORY_WRITE)
static int create_node(u64 n1, u64 n2, u64 n3, u64 n4, u64 v1, u64 v2)
{
int result;
dump_node(0, n1, n2, n3, n4, v1, v2);
result = lv1_create_repository_node(n1, n2, n3, n4, v1, v2);
if (result) {
pr_devel("%s:%d: lv1_create_repository_node failed: %s\n",
__func__, __LINE__, ps3_result(result));
return -ENOENT;
}
return 0;
}
static int delete_node(u64 n1, u64 n2, u64 n3, u64 n4)
{
int result;
dump_node(0, n1, n2, n3, n4, 0, 0);
result = lv1_delete_repository_node(n1, n2, n3, n4);
if (result) {
pr_devel("%s:%d: lv1_delete_repository_node failed: %s\n",
__func__, __LINE__, ps3_result(result));
return -ENOENT;
}
return 0;
}
static int write_node(u64 n1, u64 n2, u64 n3, u64 n4, u64 v1, u64 v2)
{
int result;
result = create_node(n1, n2, n3, n4, v1, v2);
if (!result)
return 0;
result = lv1_write_repository_node(n1, n2, n3, n4, v1, v2);
if (result) {
pr_devel("%s:%d: lv1_write_repository_node failed: %s\n",
__func__, __LINE__, ps3_result(result));
return -ENOENT;
}
return 0;
}
int ps3_repository_write_highmem_region_count(unsigned int region_count)
{
int result;
u64 v1 = (u64)region_count;
result = write_node(
make_first_field("highmem", 0),
make_field("region", 0),
make_field("count", 0),
0,
v1, 0);
return result;
}
int ps3_repository_write_highmem_base(unsigned int region_index,
u64 highmem_base)
{
return write_node(
make_first_field("highmem", 0),
make_field("region", region_index),
make_field("base", 0),
0,
highmem_base, 0);
}
int ps3_repository_write_highmem_size(unsigned int region_index,
u64 highmem_size)
{
return write_node(
make_first_field("highmem", 0),
make_field("region", region_index),
make_field("size", 0),
0,
highmem_size, 0);
}
int ps3_repository_write_highmem_info(unsigned int region_index,
u64 highmem_base, u64 highmem_size)
{
int result;
result = ps3_repository_write_highmem_base(region_index, highmem_base);
return result ? result
: ps3_repository_write_highmem_size(region_index, highmem_size);
}
static int ps3_repository_delete_highmem_base(unsigned int region_index)
{
return delete_node(
make_first_field("highmem", 0),
make_field("region", region_index),
make_field("base", 0),
0);
}
static int ps3_repository_delete_highmem_size(unsigned int region_index)
{
return delete_node(
make_first_field("highmem", 0),
make_field("region", region_index),
make_field("size", 0),
0);
}
int ps3_repository_delete_highmem_info(unsigned int region_index)
{
int result;
result = ps3_repository_delete_highmem_base(region_index);
result += ps3_repository_delete_highmem_size(region_index);
return result ? -1 : 0;
}
#endif /* defined(CONFIG_PS3_REPOSITORY_WRITE) */
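The write/delete helpers above are only built when CONFIG_PS3_REPOSITORY_WRITE is enabled. A hedged sketch of how a later boot stage might persist a single region for the next kernel to find via ps3_repository_read_highmem_info() (ps3_save_highmem_for_next_boot is a hypothetical name, not part of this patch):
/* Hypothetical caller: record one region for the next boot stage. */
static int ps3_save_highmem_for_next_boot(const struct mem_region *r)
{
	int result;
	result = ps3_repository_write_highmem_region_count(1);
	if (!result)
		result = ps3_repository_write_highmem_info(0, r->base,
							   r->size);
	return result;
}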
#if defined(DEBUG)
int ps3_repository_dump_resource_info(const struct ps3_repository_device *repo)
......
......@@ -489,7 +489,7 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
* a stack trace will help the device-driver authors figure
* out what happened. So print that out.
*/
dump_stack();
WARN(1, "EEH: failure detected\n");
return 1;
dn_unlock:
......
......@@ -22,12 +22,12 @@ static inline long poll_pending(void)
static inline u8 get_cede_latency_hint(void)
{
return get_lppaca()->gpr5_dword.fields.cede_latency_hint;
return get_lppaca()->cede_latency_hint;
}
static inline void set_cede_latency_hint(u8 latency_hint)
{
get_lppaca()->gpr5_dword.fields.cede_latency_hint = latency_hint;
get_lppaca()->cede_latency_hint = latency_hint;
}
static inline long cede_processor(void)
......
......@@ -103,11 +103,13 @@ int pSeries_reconfig_notifier_register(struct notifier_block *nb)
{
return blocking_notifier_chain_register(&pSeries_reconfig_chain, nb);
}
EXPORT_SYMBOL_GPL(pSeries_reconfig_notifier_register);
void pSeries_reconfig_notifier_unregister(struct notifier_block *nb)
{
blocking_notifier_chain_unregister(&pSeries_reconfig_chain, nb);
}
EXPORT_SYMBOL_GPL(pSeries_reconfig_notifier_unregister);
int pSeries_reconfig_notify(unsigned long action, void *p)
{
......@@ -426,6 +428,7 @@ static int do_remove_property(char *buf, size_t bufsize)
static int do_update_property(char *buf, size_t bufsize)
{
struct device_node *np;
struct pSeries_reconfig_prop_update upd_value;
unsigned char *value;
char *name, *end, *next_prop;
int rc, length;
......@@ -454,6 +457,10 @@ static int do_update_property(char *buf, size_t bufsize)
return -ENODEV;
}
upd_value.node = np;
upd_value.property = newprop;
pSeries_reconfig_notify(PSERIES_UPDATE_PROPERTY, &upd_value);
rc = prom_update_property(np, newprop, oldprop);
if (rc)
return rc;
......
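For context on the newly exported notifier hooks, a consumer of the PSERIES_UPDATE_PROPERTY event might look like the sketch below. The handler and notifier-block names are invented; the pSeries_reconfig_prop_update fields (node, property) match the usage in do_update_property() above:
static int demo_prop_update(struct notifier_block *nb,
			    unsigned long action, void *data)
{
	struct pSeries_reconfig_prop_update *upd = data;
	if (action == PSERIES_UPDATE_PROPERTY)
		pr_info("OFDT property %s updated on node %s\n",
			upd->property->name, upd->node->full_name);
	return NOTIFY_OK;
}
static struct notifier_block demo_nb = {
	.notifier_call = demo_prop_update,
};
/* registered once at init time:
 * pSeries_reconfig_notifier_register(&demo_nb); */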
......@@ -28,10 +28,11 @@
#include <linux/of_platform.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <asm/prom.h>
#include <asm/hw_irq.h>
#include <asm/ppc-pci.h>
#include <boot/dcr.h>
#include <asm/dcr.h>
#include <asm/dcr-regs.h>
#include <asm/msi_bitmap.h>
......@@ -43,13 +44,14 @@
#define PEIH_FLUSH0 0x30
#define PEIH_FLUSH1 0x38
#define PEIH_CNTRST 0x48
#define NR_MSI_IRQS 4
static int msi_irqs;
struct ppc4xx_msi {
u32 msi_addr_lo;
u32 msi_addr_hi;
void __iomem *msi_regs;
int msi_virqs[NR_MSI_IRQS];
int *msi_virqs;
struct msi_bitmap bitmap;
struct device_node *msi_dev;
};
......@@ -61,7 +63,7 @@ static int ppc4xx_msi_init_allocator(struct platform_device *dev,
{
int err;
err = msi_bitmap_alloc(&msi_data->bitmap, NR_MSI_IRQS,
err = msi_bitmap_alloc(&msi_data->bitmap, msi_irqs,
dev->dev.of_node);
if (err)
return err;
......@@ -83,6 +85,11 @@ static int ppc4xx_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
struct msi_desc *entry;
struct ppc4xx_msi *msi_data = &ppc4xx_msi;
msi_data->msi_virqs = kmalloc((msi_irqs) * sizeof(int),
GFP_KERNEL);
if (!msi_data->msi_virqs)
return -ENOMEM;
list_for_each_entry(entry, &dev->msi_list, list) {
int_no = msi_bitmap_alloc_hwirqs(&msi_data->bitmap, 1);
if (int_no >= 0)
......@@ -150,12 +157,11 @@ static int ppc4xx_setup_pcieh_hw(struct platform_device *dev,
if (!sdr_addr)
return -1;
SDR0_WRITE(sdr_addr, (u64)res.start >> 32); /*HIGH addr */
SDR0_WRITE(sdr_addr + 1, res.start & 0xFFFFFFFF); /* Low addr */
mtdcri(SDR0, *sdr_addr, upper_32_bits(res.start)); /*HIGH addr */
mtdcri(SDR0, *sdr_addr + 1, lower_32_bits(res.start)); /* Low addr */
msi->msi_dev = of_find_node_by_name(NULL, "ppc4xx-msi");
if (msi->msi_dev)
if (!msi->msi_dev)
return -ENODEV;
msi->msi_regs = of_iomap(msi->msi_dev, 0);
......@@ -167,9 +173,12 @@ static int ppc4xx_setup_pcieh_hw(struct platform_device *dev,
(u32) (msi->msi_regs + PEIH_TERMADH), (u32) (msi->msi_regs));
msi_virt = dma_alloc_coherent(&dev->dev, 64, &msi_phys, GFP_KERNEL);
msi->msi_addr_hi = 0x0;
msi->msi_addr_lo = (u32) msi_phys;
dev_dbg(&dev->dev, "PCIE-MSI: msi address 0x%x\n", msi->msi_addr_lo);
if (!msi_virt)
return -ENOMEM;
msi->msi_addr_hi = upper_32_bits(msi_phys);
msi->msi_addr_lo = lower_32_bits(msi_phys);
dev_dbg(&dev->dev, "PCIE-MSI: msi address high 0x%x, low 0x%x\n",
msi->msi_addr_hi, msi->msi_addr_lo);
/* Program the interrupt handler termination address registers */
out_be32(msi->msi_regs + PEIH_TERMADH, msi->msi_addr_hi);
......@@ -185,6 +194,8 @@ static int ppc4xx_setup_pcieh_hw(struct platform_device *dev,
out_be32(msi->msi_regs + PEIH_MSIED, *msi_data);
out_be32(msi->msi_regs + PEIH_MSIMK, *msi_mask);
dma_free_coherent(&dev->dev, 64, msi_virt, msi_phys);
return 0;
}
......@@ -194,7 +205,7 @@ static int ppc4xx_of_msi_remove(struct platform_device *dev)
int i;
int virq;
for (i = 0; i < NR_MSI_IRQS; i++) {
for (i = 0; i < msi_irqs; i++) {
virq = msi->msi_virqs[i];
if (virq != NO_IRQ)
irq_dispose_mapping(virq);
......@@ -215,8 +226,6 @@ static int __devinit ppc4xx_msi_probe(struct platform_device *dev)
struct resource res;
int err = 0;
msi = &ppc4xx_msi;/*keep the msi data for further use*/
dev_dbg(&dev->dev, "PCIE-MSI: Setting up MSI support...\n");
msi = kzalloc(sizeof(struct ppc4xx_msi), GFP_KERNEL);
......@@ -234,6 +243,10 @@ static int __devinit ppc4xx_msi_probe(struct platform_device *dev)
goto error_out;
}
msi_irqs = of_irq_count(dev->dev.of_node);
if (!msi_irqs)
return -ENODEV;
if (ppc4xx_setup_pcieh_hw(dev, res, msi))
goto error_out;
......@@ -242,6 +255,7 @@ static int __devinit ppc4xx_msi_probe(struct platform_device *dev)
dev_err(&dev->dev, "Error allocating MSI bitmap\n");
goto error_out;
}
ppc4xx_msi = *msi;
ppc_md.setup_msi_irqs = ppc4xx_setup_msi_irqs;
ppc_md.teardown_msi_irqs = ppc4xx_teardown_msi_irqs;
......
......@@ -250,3 +250,16 @@ config UML_RANDOM
(check your distro, or download from
http://sourceforge.net/projects/gkernel/). rngd periodically reads
/dev/hwrng and injects the entropy into /dev/random.
config HW_RANDOM_PSERIES
tristate "pSeries HW Random Number Generator support"
depends on HW_RANDOM && PPC64 && IBMVIO
default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
Generator hardware found on POWER7+ machines and above.
To compile this driver as a module, choose M here: the
module will be called pseries-rng.
If unsure, say Y.
......@@ -22,3 +22,4 @@ obj-$(CONFIG_HW_RANDOM_OCTEON) += octeon-rng.o
obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o
obj-$(CONFIG_HW_RANDOM_PICOXCELL) += picoxcell-rng.o
obj-$(CONFIG_HW_RANDOM_PPC4XX) += ppc4xx-rng.o
obj-$(CONFIG_HW_RANDOM_PSERIES) += pseries-rng.o
/*
* Copyright (C) 2010 Michael Neuling IBM Corporation
*
* Driver for the pseries hardware RNG for POWER7+ and above
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/module.h>
#include <linux/hw_random.h>
#include <asm/vio.h>
#define MODULE_NAME "pseries-rng"
static int pseries_rng_data_read(struct hwrng *rng, u32 *data)
{
if (plpar_hcall(H_RANDOM, (unsigned long *)data) != H_SUCCESS) {
printk(KERN_ERR "pseries rng hcall error\n");
return 0;
}
return 8;
}
/**
* pseries_rng_get_desired_dma - Return desired DMA allocation for CMO operations
*
* This callback is required for a driver to operate in a CMO environment,
* but since this device makes no DMA allocations, it simply returns 0.
*
* Return value:
* Number of bytes of IO data the driver will need to perform well: 0
*/
static unsigned long pseries_rng_get_desired_dma(struct vio_dev *vdev)
{
	return 0;
}
static struct hwrng pseries_rng = {
.name = MODULE_NAME,
.data_read = pseries_rng_data_read,
};
static int __init pseries_rng_probe(struct vio_dev *dev,
const struct vio_device_id *id)
{
return hwrng_register(&pseries_rng);
}
static int __exit pseries_rng_remove(struct vio_dev *dev)
{
hwrng_unregister(&pseries_rng);
return 0;
}
static struct vio_device_id pseries_rng_driver_ids[] = {
{ "ibm,random-v1", "ibm,random"},
{ "", "" }
};
MODULE_DEVICE_TABLE(vio, pseries_rng_driver_ids);
static struct vio_driver pseries_rng_driver = {
.name = MODULE_NAME,
.probe = pseries_rng_probe,
.remove = pseries_rng_remove,
.get_desired_dma = pseries_rng_get_desired_dma,
.id_table = pseries_rng_driver_ids
};
static int __init rng_init(void)
{
printk(KERN_INFO "Registering IBM pSeries RNG driver\n");
return vio_register_driver(&pseries_rng_driver);
}
module_init(rng_init);
static void __exit rng_exit(void)
{
vio_unregister_driver(&pseries_rng_driver);
}
module_exit(rng_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michael Neuling <mikey@neuling.org>");
MODULE_DESCRIPTION("H/W RNG driver for IBM pSeries processors");
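Once the driver binds and hwrng_register() succeeds, entropy is consumed through the generic hwrng core. A hedged userspace sketch; /dev/hwrng is the core's standard character device, not something this driver creates itself:
/* Read and print 16 bytes of hardware entropy. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
int main(void)
{
	unsigned char buf[16];
	ssize_t n;
	int i, fd;
	fd = open("/dev/hwrng", O_RDONLY);
	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf));
	close(fd);
	if (n != (ssize_t)sizeof(buf))
		return 1;
	for (i = 0; i < (int)n; i++)
		printf("%02x", buf[i]);
	printf("\n");
	return 0;
}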
......@@ -297,4 +297,21 @@ config CRYPTO_DEV_TEGRA_AES
To compile this driver as a module, choose M here: the module
will be called tegra-aes.
config CRYPTO_DEV_NX
tristate "Support for Power7+ in-Nest cryptographic acceleration"
depends on PPC64 && IBMVIO
select CRYPTO_AES
select CRYPTO_CBC
select CRYPTO_ECB
select CRYPTO_CCM
select CRYPTO_GCM
select CRYPTO_AUTHENC
select CRYPTO_XCBC
select CRYPTO_SHA256
select CRYPTO_SHA512
help
Support for Power7+ in-Nest cryptographic acceleration. This
module supports acceleration for AES and SHA2 algorithms. If you
choose 'M' here, the module will be called nx-crypto.
endif # CRYPTO_HW
obj-$(CONFIG_CRYPTO_DEV_NX) += nx-crypto.o
nx-crypto-objs := nx.o \
nx_debugfs.o \
nx-aes-cbc.o \
nx-aes-ecb.o \
nx-aes-gcm.o \
nx-aes-ccm.o \
nx-aes-ctr.o \
nx-aes-xcbc.o \
nx-sha256.o \
nx-sha512.o
/**
* AES CBC routines supporting the Power 7+ Nest Accelerators driver
*
* Copyright (C) 2011-2012 International Business Machines Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 only.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Author: Kent Yoder <yoder1@us.ibm.com>
*/
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <asm/vio.h>
#include "nx_csbcpb.h"
#include "nx.h"
static int cbc_aes_nx_set_key(struct crypto_tfm *tfm,
const u8 *in_key,
unsigned int key_len)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
nx_ctx_init(nx_ctx, HCOP_FC_AES);
switch (key_len) {
case AES_KEYSIZE_128:
NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
break;
case AES_KEYSIZE_192:
NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192);
nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192];
break;
case AES_KEYSIZE_256:
NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256);
nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256];
break;
default:
return -EINVAL;
}
csbcpb->cpb.hdr.mode = NX_MODE_AES_CBC;
memcpy(csbcpb->cpb.aes_cbc.key, in_key, key_len);
return 0;
}
static int cbc_aes_nx_crypt(struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src,
unsigned int nbytes,
int enc)
{
struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
int rc;
if (nbytes > nx_ctx->ap->databytelen)
return -EINVAL;
if (enc)
NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
else
NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
rc = nx_build_sg_lists(nx_ctx, desc, dst, src, nbytes,
csbcpb->cpb.aes_cbc.iv);
if (rc)
goto out;
if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
rc = -EINVAL;
goto out;
}
rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
if (rc)
goto out;
atomic_inc(&(nx_ctx->stats->aes_ops));
atomic64_add(csbcpb->csb.processed_byte_count,
&(nx_ctx->stats->aes_bytes));
out:
return rc;
}
static int cbc_aes_nx_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src,
unsigned int nbytes)
{
return cbc_aes_nx_crypt(desc, dst, src, nbytes, 1);
}
static int cbc_aes_nx_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src,
unsigned int nbytes)
{
return cbc_aes_nx_crypt(desc, dst, src, nbytes, 0);
}
struct crypto_alg nx_cbc_aes_alg = {
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-nx",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(nx_cbc_aes_alg.cra_list),
.cra_init = nx_crypto_ctx_aes_cbc_init,
.cra_exit = nx_crypto_ctx_exit,
.cra_blkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = cbc_aes_nx_set_key,
.encrypt = cbc_aes_nx_encrypt,
.decrypt = cbc_aes_nx_decrypt,
}
};
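With nx_cbc_aes_alg registered at priority 300, callers reach the accelerator through the generic crypto API by name. A minimal in-kernel sketch using the blkcipher interface of this era (nx_cbc_demo and its parameters are illustrative, not part of the driver):
static int nx_cbc_demo(const u8 *key, unsigned int keylen,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes, u8 *iv)
{
	struct crypto_blkcipher *tfm;
	struct blkcipher_desc desc;
	int rc;
	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	rc = crypto_blkcipher_setkey(tfm, key, keylen);
	if (!rc) {
		desc.tfm = tfm;
		desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
		desc.info = iv;
		rc = crypto_blkcipher_encrypt_iv(&desc, dst, src, nbytes);
	}
	crypto_free_blkcipher(tfm);
	return rc;
}
If the NX implementation wins the priority selection, the hcall path above services the request; otherwise the software cbc(aes) fallback does.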
/**
* AES CTR routines supporting the Power 7+ Nest Accelerators driver
*
* Copyright (C) 2011-2012 International Business Machines Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 only.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Author: Kent Yoder <yoder1@us.ibm.com>
*/
#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/algapi.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <asm/vio.h>
#include "nx_csbcpb.h"
#include "nx.h"
static int ctr_aes_nx_set_key(struct crypto_tfm *tfm,
const u8 *in_key,
unsigned int key_len)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
nx_ctx_init(nx_ctx, HCOP_FC_AES);
switch (key_len) {
case AES_KEYSIZE_128:
NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
break;
case AES_KEYSIZE_192:
NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192);
nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192];
break;
case AES_KEYSIZE_256:
NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256);
nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256];
break;
default:
return -EINVAL;
}
csbcpb->cpb.hdr.mode = NX_MODE_AES_CTR;
memcpy(csbcpb->cpb.aes_ctr.key, in_key, key_len);
return 0;
}
static int ctr3686_aes_nx_set_key(struct crypto_tfm *tfm,
const u8 *in_key,
unsigned int key_len)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
if (key_len < CTR_RFC3686_NONCE_SIZE)
return -EINVAL;
memcpy(nx_ctx->priv.ctr.iv,
in_key + key_len - CTR_RFC3686_NONCE_SIZE,
CTR_RFC3686_NONCE_SIZE);
key_len -= CTR_RFC3686_NONCE_SIZE;
return ctr_aes_nx_set_key(tfm, in_key, key_len);
}
static int ctr_aes_nx_crypt(struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src,
unsigned int nbytes)
{
struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
int rc;
if (nbytes > nx_ctx->ap->databytelen)
return -EINVAL;
rc = nx_build_sg_lists(nx_ctx, desc, dst, src, nbytes,
csbcpb->cpb.aes_ctr.iv);
if (rc)
goto out;
if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
rc = -EINVAL;
goto out;
}
rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
if (rc)
goto out;
atomic_inc(&(nx_ctx->stats->aes_ops));
atomic64_add(csbcpb->csb.processed_byte_count,
&(nx_ctx->stats->aes_bytes));
out:
return rc;
}
static int ctr3686_aes_nx_crypt(struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src,
unsigned int nbytes)
{
struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
u8 *iv = nx_ctx->priv.ctr.iv;
memcpy(iv + CTR_RFC3686_NONCE_SIZE,
desc->info, CTR_RFC3686_IV_SIZE);
iv[15] = 1;
desc->info = nx_ctx->priv.ctr.iv;
return ctr_aes_nx_crypt(desc, dst, src, nbytes);
}
struct crypto_alg nx_ctr_aes_alg = {
.cra_name = "ctr(aes)",
.cra_driver_name = "ctr-aes-nx",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(nx_ctr_aes_alg.cra_list),
.cra_init = nx_crypto_ctx_aes_ctr_init,
.cra_exit = nx_crypto_ctx_exit,
.cra_blkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = ctr_aes_nx_set_key,
.encrypt = ctr_aes_nx_crypt,
.decrypt = ctr_aes_nx_crypt,
}
};
struct crypto_alg nx_ctr3686_aes_alg = {
.cra_name = "rfc3686(ctr(aes))",
.cra_driver_name = "rfc3686-ctr-aes-nx",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(nx_ctr3686_aes_alg.cra_list),
.cra_init = nx_crypto_ctx_aes_ctr_init,
.cra_exit = nx_crypto_ctx_exit,
.cra_blkcipher = {
.min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
.max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
.ivsize = CTR_RFC3686_IV_SIZE,
.geniv = "seqiv",
.setkey = ctr3686_aes_nx_set_key,
.encrypt = ctr3686_aes_nx_crypt,
.decrypt = ctr3686_aes_nx_crypt,
}
};
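ctr3686_aes_nx_crypt() assembles the full 16-byte counter block from the 4-byte nonce stripped off the key by ctr3686_aes_nx_set_key(), the 8-byte per-request IV, and an initial block counter of 1 (the code sets only iv[15], relying on the zeroed context buffer for bytes 12-14). A hedged standalone illustration of that layout (rfc3686_build_ctrblk is invented):
/* nonce (4B) | per-request IV (8B) | 32-bit big-endian counter = 1 */
static void rfc3686_build_ctrblk(u8 ctrblk[16], const u8 nonce[4],
				 const u8 iv[8])
{
	memcpy(ctrblk, nonce, 4);
	memcpy(ctrblk + 4, iv, 8);
	ctrblk[12] = ctrblk[13] = ctrblk[14] = 0;
	ctrblk[15] = 1;
}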
/**
* AES ECB routines supporting the Power 7+ Nest Accelerators driver
*
* Copyright (C) 2011-2012 International Business Machines Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 only.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Author: Kent Yoder <yoder1@us.ibm.com>
*/
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <asm/vio.h>
#include "nx_csbcpb.h"
#include "nx.h"
static int ecb_aes_nx_set_key(struct crypto_tfm *tfm,
const u8 *in_key,
unsigned int key_len)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
nx_ctx_init(nx_ctx, HCOP_FC_AES);
switch (key_len) {
case AES_KEYSIZE_128:
NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
break;
case AES_KEYSIZE_192:
NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192);
nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192];
break;
case AES_KEYSIZE_256:
NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256);
nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256];
break;
default:
return -EINVAL;
}
csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
memcpy(csbcpb->cpb.aes_ecb.key, in_key, key_len);
return 0;
}
static int ecb_aes_nx_crypt(struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src,
unsigned int nbytes,
int enc)
{
struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
int rc;
if (nbytes > nx_ctx->ap->databytelen)
return -EINVAL;
if (enc)
NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
else
NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
rc = nx_build_sg_lists(nx_ctx, desc, dst, src, nbytes, NULL);
if (rc)
goto out;
if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
rc = -EINVAL;
goto out;
}
rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
if (rc)
goto out;
atomic_inc(&(nx_ctx->stats->aes_ops));
atomic64_add(csbcpb->csb.processed_byte_count,
&(nx_ctx->stats->aes_bytes));
out:
return rc;
}
static int ecb_aes_nx_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src,
unsigned int nbytes)
{
return ecb_aes_nx_crypt(desc, dst, src, nbytes, 1);
}
static int ecb_aes_nx_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src,
unsigned int nbytes)
{
return ecb_aes_nx_crypt(desc, dst, src, nbytes, 0);
}
struct crypto_alg nx_ecb_aes_alg = {
.cra_name = "ecb(aes)",
.cra_driver_name = "ecb-aes-nx",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(nx_ecb_aes_alg.cra_list),
.cra_init = nx_crypto_ctx_aes_ecb_init,
.cra_exit = nx_crypto_ctx_exit,
.cra_blkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = ecb_aes_nx_set_key,
.encrypt = ecb_aes_nx_encrypt,
.decrypt = ecb_aes_nx_decrypt,
}
};
/**
* AES GCM routines supporting the Power 7+ Nest Accelerators driver
*
* Copyright (C) 2012 International Business Machines Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 only.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Author: Kent Yoder <yoder1@us.ibm.com>
*/
#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <asm/vio.h>
#include "nx_csbcpb.h"
#include "nx.h"
static int gcm_aes_nx_set_key(struct crypto_aead *tfm,
const u8 *in_key,
unsigned int key_len)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
nx_ctx_init(nx_ctx, HCOP_FC_AES);
switch (key_len) {
case AES_KEYSIZE_128:
NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128);
nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
break;
case AES_KEYSIZE_192:
NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192);
NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_192);
nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192];
break;
case AES_KEYSIZE_256:
NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256);
NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_256);
nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256];
break;
default:
return -EINVAL;
}
csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
memcpy(csbcpb->cpb.aes_gcm.key, in_key, key_len);
csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_GCA;
memcpy(csbcpb_aead->cpb.aes_gca.key, in_key, key_len);
return 0;
}
static int gcm4106_aes_nx_set_key(struct crypto_aead *tfm,
const u8 *in_key,
unsigned int key_len)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
char *nonce = nx_ctx->priv.gcm.nonce;
int rc;
if (key_len < 4)
return -EINVAL;
key_len -= 4;
rc = gcm_aes_nx_set_key(tfm, in_key, key_len);
if (rc)
goto out;
memcpy(nonce, in_key + key_len, 4);
out:
return rc;
}
static int gcm_aes_nx_setauthsize(struct crypto_aead *tfm,
unsigned int authsize)
{
if (authsize > crypto_aead_alg(tfm)->maxauthsize)
return -EINVAL;
crypto_aead_crt(tfm)->authsize = authsize;
return 0;
}
static int gcm4106_aes_nx_setauthsize(struct crypto_aead *tfm,
unsigned int authsize)
{
switch (authsize) {
case 8:
case 12:
case 16:
break;
default:
return -EINVAL;
}
crypto_aead_crt(tfm)->authsize = authsize;
return 0;
}
static int nx_gca(struct nx_crypto_ctx *nx_ctx,
struct aead_request *req,
u8 *out)
{
struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
int rc = -EINVAL;
struct scatter_walk walk;
struct nx_sg *nx_sg = nx_ctx->in_sg;
if (req->assoclen > nx_ctx->ap->databytelen)
goto out;
if (req->assoclen <= AES_BLOCK_SIZE) {
scatterwalk_start(&walk, req->assoc);
scatterwalk_copychunks(out, &walk, req->assoclen,
SCATTERWALK_FROM_SG);
scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0);
rc = 0;
goto out;
}
nx_sg = nx_walk_and_build(nx_sg, nx_ctx->ap->sglen, req->assoc, 0,
req->assoclen);
nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg) * sizeof(struct nx_sg);
rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
if (rc)
goto out;
atomic_inc(&(nx_ctx->stats->aes_ops));
atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));
memcpy(out, csbcpb_aead->cpb.aes_gca.out_pat, AES_BLOCK_SIZE);
out:
return rc;
}
static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
struct blkcipher_desc desc;
unsigned int nbytes = req->cryptlen;
int rc = -EINVAL;
if (nbytes > nx_ctx->ap->databytelen)
goto out;
desc.info = nx_ctx->priv.gcm.iv;
/* initialize the counter */
*(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;
/* For scenarios where the input message is zero length, AES CTR mode
* may be used. Set the source data to be a single block (16B) of all
* zeros, and set the input IV value to be the same as the GMAC IV
* value. - nx_wb 4.8.1.3 */
if (nbytes == 0) {
char src[AES_BLOCK_SIZE] = {};
struct scatterlist sg;
desc.tfm = crypto_alloc_blkcipher("ctr(aes)", 0, 0);
if (IS_ERR(desc.tfm)) {
rc = -ENOMEM;
goto out;
}
crypto_blkcipher_setkey(desc.tfm, csbcpb->cpb.aes_gcm.key,
NX_CPB_KEY_SIZE(csbcpb) == NX_KS_AES_128 ? 16 :
NX_CPB_KEY_SIZE(csbcpb) == NX_KS_AES_192 ? 24 : 32);
sg_init_one(&sg, src, AES_BLOCK_SIZE);
if (enc)
crypto_blkcipher_encrypt_iv(&desc, req->dst, &sg,
AES_BLOCK_SIZE);
else
crypto_blkcipher_decrypt_iv(&desc, req->dst, &sg,
AES_BLOCK_SIZE);
crypto_free_blkcipher(desc.tfm);
rc = 0;
goto out;
}
desc.tfm = (struct crypto_blkcipher *)req->base.tfm;
csbcpb->cpb.aes_gcm.bit_length_aad = req->assoclen * 8;
if (req->assoclen) {
rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad);
if (rc)
goto out;
}
if (enc)
NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
else
nbytes -= AES_BLOCK_SIZE;
csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
rc = nx_build_sg_lists(nx_ctx, &desc, req->dst, req->src, nbytes,
csbcpb->cpb.aes_gcm.iv_or_cnt);
if (rc)
goto out;
rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
if (rc)
goto out;
atomic_inc(&(nx_ctx->stats->aes_ops));
atomic64_add(csbcpb->csb.processed_byte_count,
&(nx_ctx->stats->aes_bytes));
if (enc) {
/* copy out the auth tag */
scatterwalk_map_and_copy(csbcpb->cpb.aes_gcm.out_pat_or_mac,
req->dst, nbytes,
crypto_aead_authsize(crypto_aead_reqtfm(req)),
SCATTERWALK_TO_SG);
} else if (req->assoclen) {
u8 *itag = nx_ctx->priv.gcm.iauth_tag;
u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac;
scatterwalk_map_and_copy(itag, req->dst, nbytes,
crypto_aead_authsize(crypto_aead_reqtfm(req)),
SCATTERWALK_FROM_SG);
rc = memcmp(itag, otag,
crypto_aead_authsize(crypto_aead_reqtfm(req))) ?
-EBADMSG : 0;
}
out:
return rc;
}
static int gcm_aes_nx_encrypt(struct aead_request *req)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
char *iv = nx_ctx->priv.gcm.iv;
memcpy(iv, req->iv, 12);
return gcm_aes_nx_crypt(req, 1);
}
static int gcm_aes_nx_decrypt(struct aead_request *req)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
char *iv = nx_ctx->priv.gcm.iv;
memcpy(iv, req->iv, 12);
return gcm_aes_nx_crypt(req, 0);
}
static int gcm4106_aes_nx_encrypt(struct aead_request *req)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
char *iv = nx_ctx->priv.gcm.iv;
char *nonce = nx_ctx->priv.gcm.nonce;
memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);
return gcm_aes_nx_crypt(req, 1);
}
static int gcm4106_aes_nx_decrypt(struct aead_request *req)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
char *iv = nx_ctx->priv.gcm.iv;
char *nonce = nx_ctx->priv.gcm.nonce;
memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);
return gcm_aes_nx_crypt(req, 0);
}
/* tell the block cipher walk routines that this is a stream cipher by
* setting cra_blocksize to 1. Even using blkcipher_walk_virt_block
* during encrypt/decrypt doesn't solve this problem, because it calls
* blkcipher_walk_done under the covers, which doesn't use walk->blocksize,
* but instead uses this tfm->blocksize. */
struct crypto_alg nx_gcm_aes_alg = {
.cra_name = "gcm(aes)",
.cra_driver_name = "gcm-aes-nx",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_AEAD,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
.cra_type = &crypto_aead_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(nx_gcm_aes_alg.cra_list),
.cra_init = nx_crypto_ctx_aes_gcm_init,
.cra_exit = nx_crypto_ctx_exit,
.cra_aead = {
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
.setkey = gcm_aes_nx_set_key,
.setauthsize = gcm_aes_nx_setauthsize,
.encrypt = gcm_aes_nx_encrypt,
.decrypt = gcm_aes_nx_decrypt,
}
};
struct crypto_alg nx_gcm4106_aes_alg = {
.cra_name = "rfc4106(gcm(aes))",
.cra_driver_name = "rfc4106-gcm-aes-nx",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_AEAD,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
.cra_type = &crypto_nivaead_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(nx_gcm4106_aes_alg.cra_list),
.cra_init = nx_crypto_ctx_aes_gcm_init,
.cra_exit = nx_crypto_ctx_exit,
.cra_aead = {
.ivsize = 8,
.maxauthsize = AES_BLOCK_SIZE,
.geniv = "seqiv",
.setkey = gcm4106_aes_nx_set_key,
.setauthsize = gcm4106_aes_nx_setauthsize,
.encrypt = gcm4106_aes_nx_encrypt,
.decrypt = gcm4106_aes_nx_decrypt,
}
};
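As with the block ciphers, the GCM algs are reached by name through the AEAD API. A hedged in-kernel sketch using the AEAD interface of this era (nx_gcm_demo and its parameters are illustrative; aead_request_set_assoc() is the pre-4.x way to attach associated data):
static int nx_gcm_demo(const u8 *key, unsigned int keylen,
		       struct scatterlist *assoc, unsigned int assoclen,
		       struct scatterlist *sg, unsigned int cryptlen, u8 *iv)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	int rc;
	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	rc = crypto_aead_setkey(tfm, key, keylen);
	if (!rc)
		rc = crypto_aead_setauthsize(tfm, 16);
	if (rc)
		goto free_tfm;
	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		rc = -ENOMEM;
		goto free_tfm;
	}
	aead_request_set_crypt(req, sg, sg, cryptlen, iv);
	aead_request_set_assoc(req, assoc, assoclen);
	rc = crypto_aead_encrypt(req);
	aead_request_free(req);
free_tfm:
	crypto_free_aead(tfm);
	return rc;
}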
/**
* AES XCBC routines supporting the Power 7+ Nest Accelerators driver
*
* Copyright (C) 2011-2012 International Business Machines Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 only.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Author: Kent Yoder <yoder1@us.ibm.com>
*/
#include <crypto/internal/hash.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <asm/vio.h>
#include "nx_csbcpb.h"
#include "nx.h"
struct xcbc_state {
u8 state[AES_BLOCK_SIZE];
unsigned int count;
u8 buffer[AES_BLOCK_SIZE];
};
static int nx_xcbc_set_key(struct crypto_shash *desc,
const u8 *in_key,
unsigned int key_len)
{
struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc);
switch (key_len) {
case AES_KEYSIZE_128:
nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
break;
default:
return -EINVAL;
}
memcpy(nx_ctx->priv.xcbc.key, in_key, key_len);
return 0;
}
static int nx_xcbc_init(struct shash_desc *desc)
{
struct xcbc_state *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
struct nx_sg *out_sg;
nx_ctx_init(nx_ctx, HCOP_FC_AES);
memset(sctx, 0, sizeof *sctx);
NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC;
memcpy(csbcpb->cpb.aes_xcbc.key, nx_ctx->priv.xcbc.key, AES_BLOCK_SIZE);
memset(nx_ctx->priv.xcbc.key, 0, sizeof(nx_ctx->priv.xcbc.key));
out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
AES_BLOCK_SIZE, nx_ctx->ap->sglen);
nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
return 0;
}
static int nx_xcbc_update(struct shash_desc *desc,
const u8 *data,
unsigned int len)
{
struct xcbc_state *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
struct nx_sg *in_sg;
u32 to_process, leftover;
int rc = 0;
if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
/* we've hit the nx chip previously and we're updating again,
* so copy over the partial digest */
memcpy(csbcpb->cpb.aes_xcbc.cv,
csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
}
/* 2 cases for total data len:
* 1: <= AES_BLOCK_SIZE: copy into state, return 0
* 2: > AES_BLOCK_SIZE: process X blocks, copy in leftover
*/
if (len + sctx->count <= AES_BLOCK_SIZE) {
memcpy(sctx->buffer + sctx->count, data, len);
sctx->count += len;
goto out;
}
/* to_process: the AES_BLOCK_SIZE data chunk to process in this
* update */
to_process = (sctx->count + len) & ~(AES_BLOCK_SIZE - 1);
leftover = (sctx->count + len) & (AES_BLOCK_SIZE - 1);
/* the hardware will not accept a 0 byte operation for this algorithm
* and the operation MUST be finalized to be correct. So if we happen
* to get an update that falls on a block sized boundary, we must
* save off the last block to finalize with later. */
if (!leftover) {
to_process -= AES_BLOCK_SIZE;
leftover = AES_BLOCK_SIZE;
}
if (sctx->count) {
in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buffer,
sctx->count, nx_ctx->ap->sglen);
in_sg = nx_build_sg_list(in_sg, (u8 *)data,
to_process - sctx->count,
nx_ctx->ap->sglen);
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
sizeof(struct nx_sg);
} else {
in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)data, to_process,
nx_ctx->ap->sglen);
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
sizeof(struct nx_sg);
}
NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
rc = -EINVAL;
goto out;
}
rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
if (rc)
goto out;
atomic_inc(&(nx_ctx->stats->aes_ops));
/* copy the leftover back into the state struct */
memcpy(sctx->buffer, data + len - leftover, leftover);
sctx->count = leftover;
/* everything after the first update is continuation */
NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
out:
return rc;
}
static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
{
struct xcbc_state *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
struct nx_sg *in_sg, *out_sg;
int rc = 0;
if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
/* we've hit the nx chip previously, now we're finalizing,
* so copy over the partial digest */
memcpy(csbcpb->cpb.aes_xcbc.cv,
csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
} else if (sctx->count == 0) {
/* we've never seen an update, so this is a 0 byte op. The
* hardware cannot handle a 0 byte op, so just copy out the
* known 0 byte result. This is cheaper than allocating a
* software context to do a 0 byte op */
u8 data[] = { 0x75, 0xf0, 0x25, 0x1d, 0x52, 0x8a, 0xc0, 0x1c,
0x45, 0x73, 0xdf, 0xd5, 0x84, 0xd7, 0x9f, 0x29 };
memcpy(out, data, sizeof(data));
goto out;
}
/* final is represented by continuing the operation and indicating that
* this is not an intermediate operation */
NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer,
sctx->count, nx_ctx->ap->sglen);
out_sg = nx_build_sg_list(nx_ctx->out_sg, out, AES_BLOCK_SIZE,
nx_ctx->ap->sglen);
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
if (!nx_ctx->op.outlen) {
rc = -EINVAL;
goto out;
}
rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
if (rc)
goto out;
atomic_inc(&(nx_ctx->stats->aes_ops));
memcpy(out, csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
out:
return rc;
}
struct shash_alg nx_shash_aes_xcbc_alg = {
.digestsize = AES_BLOCK_SIZE,
.init = nx_xcbc_init,
.update = nx_xcbc_update,
.final = nx_xcbc_final,
.setkey = nx_xcbc_set_key,
.descsize = sizeof(struct xcbc_state),
.statesize = sizeof(struct xcbc_state),
.base = {
.cra_name = "xcbc(aes)",
.cra_driver_name = "xcbc-aes-nx",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
.cra_init = nx_crypto_ctx_aes_xcbc_init,
.cra_exit = nx_crypto_ctx_exit,
}
};
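The XCBC MAC is likewise consumed through the synchronous hash API. A hedged sketch following the shash descriptor-allocation idiom of this era (nx_xcbc_demo is an invented name):
static int nx_xcbc_demo(const u8 *key, const u8 *data, unsigned int len,
			u8 *mac)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int rc;
	tfm = crypto_alloc_shash("xcbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	rc = crypto_shash_setkey(tfm, key, AES_KEYSIZE_128);
	if (rc)
		goto free_tfm;
	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm),
		       GFP_KERNEL);
	if (!desc) {
		rc = -ENOMEM;
		goto free_tfm;
	}
	desc->tfm = tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	rc = crypto_shash_digest(desc, data, len, mac);
	kfree(desc);
free_tfm:
	crypto_free_shash(tfm);
	return rc;
}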
/**
* SHA-256 routines supporting the Power 7+ Nest Accelerators driver
*
* Copyright (C) 2011-2012 International Business Machines Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 only.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Author: Kent Yoder <yoder1@us.ibm.com>
*/
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <linux/module.h>
#include <asm/vio.h>
#include "nx_csbcpb.h"
#include "nx.h"
static int nx_sha256_init(struct shash_desc *desc)
{
struct sha256_state *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_sg *out_sg;
nx_ctx_init(nx_ctx, HCOP_FC_SHA);
memset(sctx, 0, sizeof *sctx);
nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA256];
NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA256);
out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
SHA256_DIGEST_SIZE, nx_ctx->ap->sglen);
nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
return 0;
}
static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
struct sha256_state *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
struct nx_sg *in_sg;
u64 to_process, leftover;
int rc = 0;
if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
/* we've hit the nx chip previously and we're updating again,
* so copy over the partial digest */
memcpy(csbcpb->cpb.sha256.input_partial_digest,
csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
}
/* 2 cases for total data len:
* 1: <= SHA256_BLOCK_SIZE: copy into state, return 0
* 2: > SHA256_BLOCK_SIZE: process X blocks, copy in leftover
*/
if (len + sctx->count <= SHA256_BLOCK_SIZE) {
memcpy(sctx->buf + sctx->count, data, len);
sctx->count += len;
goto out;
}
/* to_process: the SHA256_BLOCK_SIZE data chunk to process in this
* update */
to_process = (sctx->count + len) & ~(SHA256_BLOCK_SIZE - 1);
leftover = (sctx->count + len) & (SHA256_BLOCK_SIZE - 1);
if (sctx->count) {
in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buf,
sctx->count, nx_ctx->ap->sglen);
in_sg = nx_build_sg_list(in_sg, (u8 *)data,
to_process - sctx->count,
nx_ctx->ap->sglen);
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
sizeof(struct nx_sg);
} else {
in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)data,
to_process, nx_ctx->ap->sglen);
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
sizeof(struct nx_sg);
}
NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
rc = -EINVAL;
goto out;
}
rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
if (rc)
goto out;
atomic_inc(&(nx_ctx->stats->sha256_ops));
/* copy the leftover back into the state struct */
memcpy(sctx->buf, data + len - leftover, leftover);
sctx->count = leftover;
csbcpb->cpb.sha256.message_bit_length += (u64)
(csbcpb->cpb.sha256.spbc * 8);
/* everything after the first update is continuation */
NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
out:
return rc;
}
static int nx_sha256_final(struct shash_desc *desc, u8 *out)
{
struct sha256_state *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
struct nx_sg *in_sg, *out_sg;
int rc;
if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
/* we've hit the nx chip previously, now we're finalizing,
* so copy over the partial digest */
memcpy(csbcpb->cpb.sha256.input_partial_digest,
csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
}
/* final is represented by continuing the operation and indicating that
* this is not an intermediate operation */
NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
csbcpb->cpb.sha256.message_bit_length += (u64)(sctx->count * 8);
in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buf,
sctx->count, nx_ctx->ap->sglen);
out_sg = nx_build_sg_list(nx_ctx->out_sg, out, SHA256_DIGEST_SIZE,
nx_ctx->ap->sglen);
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
if (!nx_ctx->op.outlen) {
rc = -EINVAL;
goto out;
}
rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
if (rc)
goto out;
atomic_inc(&(nx_ctx->stats->sha256_ops));
atomic64_add(csbcpb->cpb.sha256.message_bit_length,
&(nx_ctx->stats->sha256_bytes));
memcpy(out, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
out:
return rc;
}
static int nx_sha256_export(struct shash_desc *desc, void *out)
{
struct sha256_state *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
struct sha256_state *octx = out;
octx->count = sctx->count +
(csbcpb->cpb.sha256.message_bit_length / 8);
memcpy(octx->buf, sctx->buf, sizeof(octx->buf));
/* if no data has been processed yet, we need to export SHA256's
* initial data, in case this context gets imported into a software
* context */
if (csbcpb->cpb.sha256.message_bit_length)
memcpy(octx->state, csbcpb->cpb.sha256.message_digest,
SHA256_DIGEST_SIZE);
else {
octx->state[0] = SHA256_H0;
octx->state[1] = SHA256_H1;
octx->state[2] = SHA256_H2;
octx->state[3] = SHA256_H3;
octx->state[4] = SHA256_H4;
octx->state[5] = SHA256_H5;
octx->state[6] = SHA256_H6;
octx->state[7] = SHA256_H7;
}
return 0;
}
static int nx_sha256_import(struct shash_desc *desc, const void *in)
{
struct sha256_state *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
const struct sha256_state *ictx = in;
memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
sctx->count = ictx->count & 0x3f;
csbcpb->cpb.sha256.message_bit_length = (ictx->count & ~0x3f) * 8;
if (csbcpb->cpb.sha256.message_bit_length) {
memcpy(csbcpb->cpb.sha256.message_digest, ictx->state,
SHA256_DIGEST_SIZE);
NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
}
return 0;
}
struct shash_alg nx_shash_sha256_alg = {
.digestsize = SHA256_DIGEST_SIZE,
.init = nx_sha256_init,
.update = nx_sha256_update,
.final = nx_sha256_final,
.export = nx_sha256_export,
.import = nx_sha256_import,
.descsize = sizeof(struct sha256_state),
.statesize = sizeof(struct sha256_state),
.base = {
.cra_name = "sha256",
.cra_driver_name = "sha256-nx",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
.cra_init = nx_crypto_ctx_sha_init,
.cra_exit = nx_crypto_ctx_exit,
}
};
/**
* SHA-512 routines supporting the Power 7+ Nest Accelerators driver
*
* Copyright (C) 2011-2012 International Business Machines Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 only.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Author: Kent Yoder <yoder1@us.ibm.com>
*/
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <linux/module.h>
#include <asm/vio.h>
#include "nx_csbcpb.h"
#include "nx.h"
static int nx_sha512_init(struct shash_desc *desc)
{
struct sha512_state *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_sg *out_sg;
nx_ctx_init(nx_ctx, HCOP_FC_SHA);
memset(sctx, 0, sizeof *sctx);
nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA512];
NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA512);
out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
SHA512_DIGEST_SIZE, nx_ctx->ap->sglen);
nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
return 0;
}
static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
struct sha512_state *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
struct nx_sg *in_sg;
u64 to_process, leftover, spbc_bits;
int rc = 0;
if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
/* we've hit the nx chip previously and we're updating again,
* so copy over the partial digest */
memcpy(csbcpb->cpb.sha512.input_partial_digest,
csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
}
/* 2 cases for total data len:
* 1: <= SHA512_BLOCK_SIZE: copy into state, return 0
* 2: > SHA512_BLOCK_SIZE: process X blocks, copy in leftover
*/
if ((u64)len + sctx->count[0] <= SHA512_BLOCK_SIZE) {
memcpy(sctx->buf + sctx->count[0], data, len);
sctx->count[0] += len;
goto out;
}
/* to_process: the SHA512_BLOCK_SIZE data chunk to process in this
* update */
to_process = (sctx->count[0] + len) & ~(SHA512_BLOCK_SIZE - 1);
leftover = (sctx->count[0] + len) & (SHA512_BLOCK_SIZE - 1);
if (sctx->count[0]) {
in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buf,
sctx->count[0], nx_ctx->ap->sglen);
in_sg = nx_build_sg_list(in_sg, (u8 *)data,
to_process - sctx->count[0],
nx_ctx->ap->sglen);
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
sizeof(struct nx_sg);
} else {
in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)data,
to_process, nx_ctx->ap->sglen);
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
sizeof(struct nx_sg);
}
NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
rc = -EINVAL;
goto out;
}
rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
if (rc)
goto out;
atomic_inc(&(nx_ctx->stats->sha512_ops));
/* copy the leftover back into the state struct */
memcpy(sctx->buf, data + len - leftover, leftover);
sctx->count[0] = leftover;
spbc_bits = csbcpb->cpb.sha512.spbc * 8;
csbcpb->cpb.sha512.message_bit_length_lo += spbc_bits;
if (csbcpb->cpb.sha512.message_bit_length_lo < spbc_bits)
csbcpb->cpb.sha512.message_bit_length_hi++;
/* everything after the first update is continuation */
NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
out:
return rc;
}
static int nx_sha512_final(struct shash_desc *desc, u8 *out)
{
struct sha512_state *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
struct nx_sg *in_sg, *out_sg;
u64 count0;
int rc;
if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
/* we've hit the nx chip previously, now we're finalizing,
* so copy over the partial digest */
memcpy(csbcpb->cpb.sha512.input_partial_digest,
csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
}
/* final is represented by continuing the operation and indicating that
* this is not an intermediate operation */
NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
count0 = sctx->count[0] * 8;
csbcpb->cpb.sha512.message_bit_length_lo += count0;
if (csbcpb->cpb.sha512.message_bit_length_lo < count0)
csbcpb->cpb.sha512.message_bit_length_hi++;
in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buf, sctx->count[0],
nx_ctx->ap->sglen);
out_sg = nx_build_sg_list(nx_ctx->out_sg, out, SHA512_DIGEST_SIZE,
nx_ctx->ap->sglen);
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
if (!nx_ctx->op.outlen) {
rc = -EINVAL;
goto out;
}
rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
if (rc)
goto out;
atomic_inc(&(nx_ctx->stats->sha512_ops));
atomic64_add(csbcpb->cpb.sha512.message_bit_length_lo,
&(nx_ctx->stats->sha512_bytes));
memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
out:
return rc;
}
static int nx_sha512_export(struct shash_desc *desc, void *out)
{
struct sha512_state *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
struct sha512_state *octx = out;
/* move message_bit_length (128 bits) into count and convert its value
* to bytes */
octx->count[0] = csbcpb->cpb.sha512.message_bit_length_lo >> 3 |
((csbcpb->cpb.sha512.message_bit_length_hi & 7) << 61);
octx->count[1] = csbcpb->cpb.sha512.message_bit_length_hi >> 3;
octx->count[0] += sctx->count[0];
if (octx->count[0] < sctx->count[0])
octx->count[1]++;
memcpy(octx->buf, sctx->buf, sizeof(octx->buf));
/* if the hardware has processed anything, export its partial digest;
 * otherwise export SHA-512's initial constants so this context can be
 * imported into a software implementation */
if (csbcpb->cpb.sha512.message_bit_length_hi ||
csbcpb->cpb.sha512.message_bit_length_lo) {
memcpy(octx->state, csbcpb->cpb.sha512.message_digest,
SHA512_DIGEST_SIZE);
} else {
octx->state[0] = SHA512_H0;
octx->state[1] = SHA512_H1;
octx->state[2] = SHA512_H2;
octx->state[3] = SHA512_H3;
octx->state[4] = SHA512_H4;
octx->state[5] = SHA512_H5;
octx->state[6] = SHA512_H6;
octx->state[7] = SHA512_H7;
}
return 0;
}
static int nx_sha512_import(struct shash_desc *desc, const void *in)
{
struct sha512_state *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
const struct sha512_state *ictx = in;
memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
/* keep the sub-block remainder buffered; whole SHA512_BLOCK_SIZE (128
 * byte) blocks are accounted to the hardware's 128-bit bit counter */
sctx->count[0] = ictx->count[0] & (SHA512_BLOCK_SIZE - 1);
csbcpb->cpb.sha512.message_bit_length_lo =
(ictx->count[0] & ~(u64)(SHA512_BLOCK_SIZE - 1)) << 3;
csbcpb->cpb.sha512.message_bit_length_hi = ictx->count[1] << 3 |
ictx->count[0] >> 61;
if (csbcpb->cpb.sha512.message_bit_length_hi ||
csbcpb->cpb.sha512.message_bit_length_lo) {
memcpy(csbcpb->cpb.sha512.message_digest, ictx->state,
SHA512_DIGEST_SIZE);
NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
}
return 0;
}
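nx_sha512_export()/nx_sha512_import() above translate between the generic struct sha512_state byte count and the CPB's 128-bit bit counter. A standalone sketch of the import-side split, with hypothetical names and assuming the 128-byte SHA-512 block size:

#include <stdint.h>

#define BLOCK 128	/* SHA512_BLOCK_SIZE */

/* split a 128-bit byte count (count1:count0) the way import does: the
 * sub-block remainder stays buffered, whole blocks become a bit count */
static void import_split(uint64_t count0, uint64_t count1,
			 uint64_t *buffered, uint64_t *bits_lo,
			 uint64_t *bits_hi)
{
	*buffered = count0 & (BLOCK - 1);
	*bits_lo  = (count0 & ~(uint64_t)(BLOCK - 1)) << 3; /* bytes -> bits */
	*bits_hi  = (count1 << 3) | (count0 >> 61);	/* top 3 bits carry up */
}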
struct shash_alg nx_shash_sha512_alg = {
.digestsize = SHA512_DIGEST_SIZE,
.init = nx_sha512_init,
.update = nx_sha512_update,
.final = nx_sha512_final,
.export = nx_sha512_export,
.import = nx_sha512_import,
.descsize = sizeof(struct sha512_state),
.statesize = sizeof(struct sha512_state),
.base = {
.cra_name = "sha512",
.cra_driver_name = "sha512-nx",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
.cra_init = nx_crypto_ctx_sha_init,
.cra_exit = nx_crypto_ctx_exit,
}
};
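For context, a shash_alg like the one above is exposed to the crypto API with crypto_register_shash(); in this patch the actual registration happens in the driver's probe path rather than here, so the snippet below is a sketch of the mechanism, not the driver's code:

#include <crypto/internal/hash.h>

static int __init example_register(void)
{
	/* makes "sha512-nx" selectable as an implementation of "sha512" */
	return crypto_register_shash(&nx_shash_sha512_alg);
}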
#ifndef __NX_H__
#define __NX_H__
#define NX_NAME "nx-crypto"
#define NX_STRING "IBM Power7+ Nest Accelerator Crypto Driver"
#define NX_VERSION "1.0"
static const char nx_driver_string[] = NX_STRING;
static const char nx_driver_version[] = NX_VERSION;
/* a scatterlist in the format PHYP is expecting */
struct nx_sg {
u64 addr;
u32 rsvd;
u32 len;
} __packed;
#define NX_PAGE_SIZE (4096)
#define NX_MAX_SG_ENTRIES (NX_PAGE_SIZE/(sizeof(struct nx_sg)))
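nx_build_sg_list() is only prototyped later in this header; as a rough sketch, filling one PHYP-format entry from a linear buffer looks like the hypothetical helper below (the real function must also split runs that cross page boundaries, cap the list at the sgmax limit, and use physical addresses):

static struct nx_sg *sg_fill_one(struct nx_sg *sg, u8 *buf, u32 len)
{
	sg->addr = (u64)buf;	/* the real driver would store a physical address */
	sg->rsvd = 0;
	sg->len  = len;
	return sg + 1;		/* next free entry, as nx_build_sg_list returns */
}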
enum nx_status {
NX_DISABLED,
NX_WAITING,
NX_OKAY
};
/* msc_triplet and max_sync_cop are used only to assist in parsing the
 * Open Firmware property */
struct msc_triplet {
u32 keybitlen;
u32 databytelen;
u32 sglen;
} __packed;
struct max_sync_cop {
u32 fc;
u32 mode;
u32 triplets;
struct msc_triplet trip[0];
} __packed;
struct alg_props {
u32 databytelen;
u32 sglen;
};
#define NX_OF_FLAG_MAXSGLEN_SET (1)
#define NX_OF_FLAG_STATUS_SET (2)
#define NX_OF_FLAG_MAXSYNCCOP_SET (4)
#define NX_OF_FLAG_MASK_READY (NX_OF_FLAG_MAXSGLEN_SET | \
NX_OF_FLAG_STATUS_SET | \
NX_OF_FLAG_MAXSYNCCOP_SET)
struct nx_of {
u32 flags;
u32 max_sg_len;
enum nx_status status;
struct alg_props ap[NX_MAX_FC][NX_MAX_MODE][3];
};
struct nx_stats {
atomic_t aes_ops;
atomic64_t aes_bytes;
atomic_t sha256_ops;
atomic64_t sha256_bytes;
atomic_t sha512_ops;
atomic64_t sha512_bytes;
atomic_t sync_ops;
atomic_t errors;
atomic_t last_error;
atomic_t last_error_pid;
};
struct nx_debugfs {
struct dentry *dfs_root;
struct dentry *dfs_aes_ops, *dfs_aes_bytes;
struct dentry *dfs_sha256_ops, *dfs_sha256_bytes;
struct dentry *dfs_sha512_ops, *dfs_sha512_bytes;
struct dentry *dfs_errors, *dfs_last_error, *dfs_last_error_pid;
};
struct nx_crypto_driver {
struct nx_stats stats;
struct nx_of of;
struct vio_dev *viodev;
struct vio_driver viodriver;
struct nx_debugfs dfs;
};
#define NX_GCM4106_NONCE_LEN (4)
#define NX_GCM_CTR_OFFSET (12)
struct nx_gcm_priv {
u8 iv[16];
u8 iauth_tag[16];
u8 nonce[NX_GCM4106_NONCE_LEN];
};
#define NX_CCM_AES_KEY_LEN (16)
#define NX_CCM4309_AES_KEY_LEN (19)
#define NX_CCM4309_NONCE_LEN (3)
struct nx_ccm_priv {
u8 iv[16];
u8 b0[16];
u8 iauth_tag[16];
u8 oauth_tag[16];
u8 nonce[NX_CCM4309_NONCE_LEN];
};
struct nx_xcbc_priv {
u8 key[16];
};
struct nx_ctr_priv {
u8 iv[16];
};
struct nx_crypto_ctx {
void *kmem; /* unaligned, kmalloc'd buffer */
size_t kmem_len; /* length of kmem */
struct nx_csbcpb *csbcpb; /* aligned page given to phyp @ hcall time */
struct vio_pfo_op op; /* operation struct with hcall parameters */
struct nx_csbcpb *csbcpb_aead; /* secondary csbcpb used by AEAD algs */
struct vio_pfo_op op_aead; /* operation struct for csbcpb_aead */
struct nx_sg *in_sg; /* aligned pointer into kmem to an sg list */
struct nx_sg *out_sg; /* aligned pointer into kmem to an sg list */
struct alg_props *ap; /* pointer into props based on our key size */
struct alg_props props[3]; /* Open Firmware properties for requests */
struct nx_stats *stats; /* pointer into an nx_crypto_driver for stats
reporting */
union {
struct nx_gcm_priv gcm;
struct nx_ccm_priv ccm;
struct nx_xcbc_priv xcbc;
struct nx_ctr_priv ctr;
} priv;
};
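The field comments above imply that csbcpb, in_sg and out_sg are page-aligned pointers carved out of the single unaligned kmem allocation. A hedged sketch of that carving, assuming a hypothetical three-page layout and that kmem_len covers it:

static void example_carve(struct nx_crypto_ctx *ctx)
{
	/* round the kmalloc'd buffer up to the next NX_PAGE_SIZE boundary */
	u8 *p = (u8 *)(((u64)ctx->kmem + NX_PAGE_SIZE - 1) &
		       ~(u64)(NX_PAGE_SIZE - 1));

	ctx->csbcpb = (struct nx_csbcpb *)p;	/* page handed to PHYP */
	ctx->in_sg  = (struct nx_sg *)(p + NX_PAGE_SIZE);
	ctx->out_sg = (struct nx_sg *)(p + 2 * NX_PAGE_SIZE);
}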
/* prototypes */
int nx_crypto_ctx_aes_ccm_init(struct crypto_tfm *tfm);
int nx_crypto_ctx_aes_gcm_init(struct crypto_tfm *tfm);
int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm);
int nx_crypto_ctx_aes_ctr_init(struct crypto_tfm *tfm);
int nx_crypto_ctx_aes_cbc_init(struct crypto_tfm *tfm);
int nx_crypto_ctx_aes_ecb_init(struct crypto_tfm *tfm);
int nx_crypto_ctx_sha_init(struct crypto_tfm *tfm);
void nx_crypto_ctx_exit(struct crypto_tfm *tfm);
void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function);
int nx_hcall_sync(struct nx_crypto_ctx *ctx, struct vio_pfo_op *op,
u32 may_sleep);
struct nx_sg *nx_build_sg_list(struct nx_sg *, u8 *, unsigned int, u32);
int nx_build_sg_lists(struct nx_crypto_ctx *, struct blkcipher_desc *,
struct scatterlist *, struct scatterlist *, unsigned int,
u8 *);
struct nx_sg *nx_walk_and_build(struct nx_sg *, unsigned int,
struct scatterlist *, unsigned int,
unsigned int);
#ifdef CONFIG_DEBUG_FS
#define NX_DEBUGFS_INIT(drv) nx_debugfs_init(drv)
#define NX_DEBUGFS_FINI(drv) nx_debugfs_fini(drv)
int nx_debugfs_init(struct nx_crypto_driver *);
void nx_debugfs_fini(struct nx_crypto_driver *);
#else
#define NX_DEBUGFS_INIT(drv) (0)
#define NX_DEBUGFS_FINI(drv) (0)
#endif
#define NX_PAGE_NUM(x) ((u64)(x) & 0xfffffffffffff000ULL)
extern struct crypto_alg nx_cbc_aes_alg;
extern struct crypto_alg nx_ecb_aes_alg;
extern struct crypto_alg nx_gcm_aes_alg;
extern struct crypto_alg nx_gcm4106_aes_alg;
extern struct crypto_alg nx_ctr_aes_alg;
extern struct crypto_alg nx_ctr3686_aes_alg;
extern struct crypto_alg nx_ccm_aes_alg;
extern struct crypto_alg nx_ccm4309_aes_alg;
extern struct shash_alg nx_shash_aes_xcbc_alg;
extern struct shash_alg nx_shash_sha512_alg;
extern struct shash_alg nx_shash_sha256_alg;
extern struct nx_crypto_driver nx_driver;
#define SCATTERWALK_TO_SG 1
#define SCATTERWALK_FROM_SG 0
#endif
#ifndef __NX_CSBCPB_H__
#define __NX_CSBCPB_H__
struct cop_symcpb_aes_ecb {
u8 key[32];
u8 __rsvd[80];
} __packed;
struct cop_symcpb_aes_cbc {
u8 iv[16];
u8 key[32];
u8 cv[16];
u32 spbc;
u8 __rsvd[44];
} __packed;
struct cop_symcpb_aes_gca {
u8 in_pat[16];
u8 key[32];
u8 out_pat[16];
u32 spbc;
u8 __rsvd[44];
} __packed;
struct cop_symcpb_aes_gcm {
u8 in_pat_or_aad[16];
u8 iv_or_cnt[16];
u64 bit_length_aad;
u64 bit_length_data;
u8 in_s0[16];
u8 key[32];
u8 __rsvd1[16];
u8 out_pat_or_mac[16];
u8 out_s0[16];
u8 out_cnt[16];
u32 spbc;
u8 __rsvd2[12];
} __packed;
struct cop_symcpb_aes_ctr {
u8 iv[16];
u8 key[32];
u8 cv[16];
u32 spbc;
u8 __rsvd2[44];
} __packed;
struct cop_symcpb_aes_cca {
u8 b0[16];
u8 b1[16];
u8 key[16];
u8 out_pat_or_b0[16];
u32 spbc;
u8 __rsvd[44];
} __packed;
struct cop_symcpb_aes_ccm {
u8 in_pat_or_b0[16];
u8 iv_or_ctr[16];
u8 in_s0[16];
u8 key[16];
u8 __rsvd1[48];
u8 out_pat_or_mac[16];
u8 out_s0[16];
u8 out_ctr[16];
u32 spbc;
u8 __rsvd2[12];
} __packed;
struct cop_symcpb_aes_xcbc {
u8 cv[16];
u8 key[16];
u8 __rsvd1[16];
u8 out_cv_mac[16];
u32 spbc;
u8 __rsvd2[44];
} __packed;
struct cop_symcpb_sha256 {
u64 message_bit_length;
u64 __rsvd1;
u8 input_partial_digest[32];
u8 message_digest[32];
u32 spbc;
u8 __rsvd2[44];
} __packed;
struct cop_symcpb_sha512 {
u64 message_bit_length_hi;
u64 message_bit_length_lo;
u8 input_partial_digest[64];
u8 __rsvd1[32];
u8 message_digest[64];
u32 spbc;
u8 __rsvd2[76];
} __packed;
#define NX_FDM_INTERMEDIATE 0x01
#define NX_FDM_CONTINUATION 0x02
#define NX_FDM_ENDE_ENCRYPT 0x80
#define NX_CPB_FDM(c) ((c)->cpb.hdr.fdm)
#define NX_CPB_KS_DS(c) ((c)->cpb.hdr.ks_ds)
#define NX_CPB_KEY_SIZE(c) (NX_CPB_KS_DS(c) >> 4)
#define NX_CPB_SET_KEY_SIZE(c, x) NX_CPB_KS_DS(c) |= ((x) << 4)
#define NX_CPB_SET_DIGEST_SIZE(c, x) NX_CPB_KS_DS(c) |= (x)
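A quick usage sketch of the ks_ds helpers: the key size lives in the high nibble and the digest size in the low nibble, so for AES-256 (NX_KS_AES_256 is defined further down in this header):

static inline void example_set_aes256(struct nx_csbcpb *c)
{
	NX_CPB_SET_KEY_SIZE(c, NX_KS_AES_256);	/* hdr.ks_ds |= 3 << 4 */
	/* NX_CPB_KEY_SIZE(c) now reads back 3, i.e. NX_KS_AES_256 */
}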
struct cop_symcpb_header {
u8 mode;
u8 fdm;
u8 ks_ds;
u8 pad_byte;
u8 __rsvd[12];
} __packed;
struct cop_parameter_block {
struct cop_symcpb_header hdr;
union {
struct cop_symcpb_aes_ecb aes_ecb;
struct cop_symcpb_aes_cbc aes_cbc;
struct cop_symcpb_aes_gca aes_gca;
struct cop_symcpb_aes_gcm aes_gcm;
struct cop_symcpb_aes_cca aes_cca;
struct cop_symcpb_aes_ccm aes_ccm;
struct cop_symcpb_aes_ctr aes_ctr;
struct cop_symcpb_aes_xcbc aes_xcbc;
struct cop_symcpb_sha256 sha256;
struct cop_symcpb_sha512 sha512;
};
} __packed;
#define NX_CSB_VALID_BIT 0x80
/* co-processor status block */
struct cop_status_block {
u8 valid;
u8 crb_seq_number;
u8 completion_code;
u8 completion_extension;
u32 processed_byte_count;
u64 address;
} __packed;
/* Nest accelerator workbook section 4.4 */
struct nx_csbcpb {
unsigned char __rsvd[112];
struct cop_status_block csb;
struct cop_parameter_block cpb;
} __packed;
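One plausible way a caller consumes the status block after an H_COP_OP completes; a sketch only, since the real completion handling lives in nx.c and treating a zero completion_code as success is an assumption here:

static int example_csb_check(struct nx_csbcpb *csbcpb)
{
	if (!(csbcpb->csb.valid & NX_CSB_VALID_BIT))
		return -EAGAIN;		/* hardware has not posted status yet */

	return csbcpb->csb.completion_code ? -EIO : 0;
}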
/* nx_csbcpb related definitions */
#define NX_MODE_AES_ECB 0
#define NX_MODE_AES_CBC 1
#define NX_MODE_AES_GMAC 2
#define NX_MODE_AES_GCA 3
#define NX_MODE_AES_GCM 4
#define NX_MODE_AES_CCA 5
#define NX_MODE_AES_CCM 6
#define NX_MODE_AES_CTR 7
#define NX_MODE_AES_XCBC_MAC 20
#define NX_MODE_SHA 0
#define NX_MODE_SHA_HMAC 1
#define NX_MODE_AES_CBC_HMAC_ETA 8
#define NX_MODE_AES_CBC_HMAC_ATE 9
#define NX_MODE_AES_CBC_HMAC_EAA 10
#define NX_MODE_AES_CTR_HMAC_ETA 12
#define NX_MODE_AES_CTR_HMAC_ATE 13
#define NX_MODE_AES_CTR_HMAC_EAA 14
#define NX_FDM_CI_FULL 0
#define NX_FDM_CI_FIRST 1
#define NX_FDM_CI_LAST 2
#define NX_FDM_CI_MIDDLE 3
#define NX_FDM_PR_NONE 0
#define NX_FDM_PR_PAD 1
#define NX_KS_AES_128 1
#define NX_KS_AES_192 2
#define NX_KS_AES_256 3
#define NX_DS_SHA256 2
#define NX_DS_SHA512 3
#define NX_FC_AES 0
#define NX_FC_SHA 2
#define NX_FC_AES_HMAC 6
#define NX_MAX_FC (NX_FC_AES_HMAC + 1)
#define NX_MAX_MODE (NX_MODE_AES_XCBC_MAC + 1)
#define HCOP_FC_AES NX_FC_AES
#define HCOP_FC_SHA NX_FC_SHA
#define HCOP_FC_AES_HMAC NX_FC_AES_HMAC
/* indices into the array of algorithm properties */
#define NX_PROPS_AES_128 0
#define NX_PROPS_AES_192 1
#define NX_PROPS_AES_256 2
#define NX_PROPS_SHA256 1
#define NX_PROPS_SHA512 2
#endif
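Tying the indices together: the per-algorithm limits parsed from the device tree land in nx_of.ap[fc][mode][props] (see struct nx_of in nx.h), so a SHA-512 lookup would go roughly like this hypothetical helper:

static struct alg_props *sha512_props(struct nx_crypto_driver *drv)
{
	return &drv->of.ap[NX_FC_SHA][NX_MODE_SHA][NX_PROPS_SHA512];
}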
/**
* debugfs routines supporting the Power7+ Nest Accelerators driver
*
* Copyright (C) 2011-2012 International Business Machines Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 only.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Author: Kent Yoder <yoder1@us.ibm.com>
*/
#include <linux/device.h>
#include <linux/kobject.h>
#include <linux/string.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/crypto.h>
#include <crypto/hash.h>
#include <asm/vio.h>
#include "nx_csbcpb.h"
#include "nx.h"
#ifdef CONFIG_DEBUG_FS
/*
* debugfs
*
* For documentation on these attributes, please see:
*
* Documentation/ABI/testing/debugfs-pfo-nx-crypto
*/
int nx_debugfs_init(struct nx_crypto_driver *drv)
{
struct nx_debugfs *dfs = &drv->dfs;
dfs->dfs_root = debugfs_create_dir(NX_NAME, NULL);
dfs->dfs_aes_ops =
debugfs_create_u32("aes_ops",
S_IRUSR | S_IRGRP | S_IROTH,
dfs->dfs_root, (u32 *)&drv->stats.aes_ops);
dfs->dfs_sha256_ops =
debugfs_create_u32("sha256_ops",
S_IRUSR | S_IRGRP | S_IROTH,
dfs->dfs_root,
(u32 *)&drv->stats.sha256_ops);
dfs->dfs_sha512_ops =
debugfs_create_u32("sha512_ops",
S_IRUSR | S_IRGRP | S_IROTH,
dfs->dfs_root,
(u32 *)&drv->stats.sha512_ops);
dfs->dfs_aes_bytes =
debugfs_create_u64("aes_bytes",
S_IRUSR | S_IRGRP | S_IROTH,
dfs->dfs_root,
(u64 *)&drv->stats.aes_bytes);
dfs->dfs_sha256_bytes =
debugfs_create_u64("sha256_bytes",
S_IRUSR | S_IRGRP | S_IROTH,
dfs->dfs_root,
(u64 *)&drv->stats.sha256_bytes);
dfs->dfs_sha512_bytes =
debugfs_create_u64("sha512_bytes",
S_IRUSR | S_IRGRP | S_IROTH,
dfs->dfs_root,
(u64 *)&drv->stats.sha512_bytes);
dfs->dfs_errors =
debugfs_create_u32("errors",
S_IRUSR | S_IRGRP | S_IROTH,
dfs->dfs_root, (u32 *)&drv->stats.errors);
dfs->dfs_last_error =
debugfs_create_u32("last_error",
S_IRUSR | S_IRGRP | S_IROTH,
dfs->dfs_root,
(u32 *)&drv->stats.last_error);
dfs->dfs_last_error_pid =
debugfs_create_u32("last_error_pid",
S_IRUSR | S_IRGRP | S_IROTH,
dfs->dfs_root,
(u32 *)&drv->stats.last_error_pid);
return 0;
}
void
nx_debugfs_fini(struct nx_crypto_driver *drv)
{
debugfs_remove_recursive(drv->dfs.dfs_root);
}
#endif
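From userspace these counters read back as plain integers once debugfs is mounted; a small sketch, assuming the conventional /sys/kernel/debug mount point:

#include <stdio.h>

int main(void)
{
	unsigned int ops = 0;
	FILE *f = fopen("/sys/kernel/debug/nx-crypto/sha512_ops", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%u", &ops) == 1)
		printf("sha512_ops: %u\n", ops);
	fclose(f);
	return 0;
}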
@@ -29,6 +29,20 @@ obj-$(CONFIG_THERM_PM72) += therm_pm72.o
obj-$(CONFIG_THERM_WINDTUNNEL) += therm_windtunnel.o
obj-$(CONFIG_THERM_ADT746X) += therm_adt746x.o
obj-$(CONFIG_WINDFARM) += windfarm_core.o
obj-$(CONFIG_WINDFARM_PM72) += windfarm_fcu_controls.o \
windfarm_ad7417_sensor.o \
windfarm_lm75_sensor.o \
windfarm_max6690_sensor.o \
windfarm_pid.o \
windfarm_cpufreq_clamp.o \
windfarm_pm72.o
obj-$(CONFIG_WINDFARM_RM31) += windfarm_fcu_controls.o \
windfarm_ad7417_sensor.o \
windfarm_lm75_sensor.o \
windfarm_lm87_sensor.o \
windfarm_pid.o \
windfarm_cpufreq_clamp.o \
windfarm_rm31.o
obj-$(CONFIG_WINDFARM_PM81) += windfarm_smu_controls.o \
windfarm_smu_sensors.o \
windfarm_lm75_sensor.o windfarm_pid.o \
@@ -65,7 +65,7 @@ static int ams_i2c_probe(struct i2c_client *client,
static int ams_i2c_remove(struct i2c_client *client);
static const struct i2c_device_id ams_id[] = {
{ "ams", 0 },
{ "MAC,accelerometer_1", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, ams_id);