Commit 3f5595e3 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull more s390 updates from Martin Schwidefsky:
 "Next to the usual bug fixes (including the TASK_SIZE fix), there is
  one larger crypto item. It allows the use of protected keys with the
  in-kernel crypto API.

  The protected key support has two parts, the pkey user space API to
  convert key formats and the paes crypto module that uses a protected
  key instead of a standard AES key"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
  s390: TASK_SIZE for kernel threads
  s390/crypt: Add protected key AES module
  s390/dasd: fix spelling mistake: "supportet" -> "supported"
  s390/pkey: Introduce pkey kernel module
  s390/zcrypt: export additional symbols
  s390/zcrypt: Rework CONFIG_ZCRYPT Kconfig text.
  s390/zcrypt: Cleanup leftover module code.
  s390/nmi: purge tlbs after control register validation
  s390/nmi: fix order of register validation
  s390/crypto: Add PCKMO inline function
  s390/zcrypt: Enable request count reset for cards and queues.
  s390/mm: use _SEGMENT_ENTRY_EMPTY in the code
  s390/chsc: Add exception handler for CHSC instruction
  s390: opt into HAVE_COPY_THREAD_TLS
  s390: restore address space when returning to user space
  s390: rename CIF_ASCE to CIF_ASCE_PRIMARY
parents 12dfdfed fb94a687
@@ -134,6 +134,7 @@ config S390
 	select HAVE_EBPF_JIT if PACK_STACK && HAVE_MARCH_Z196_FEATURES
 	select HAVE_CMPXCHG_DOUBLE
 	select HAVE_CMPXCHG_LOCAL
+	select HAVE_COPY_THREAD_TLS
 	select HAVE_DEBUG_KMEMLEAK
 	select HAVE_DMA_API_DEBUG
 	select HAVE_DMA_CONTIGUOUS
......
@@ -678,6 +678,7 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 CONFIG_ZCRYPT=m
+CONFIG_PKEY=m
 CONFIG_CRYPTO_SHA1_S390=m
 CONFIG_CRYPTO_SHA256_S390=m
 CONFIG_CRYPTO_SHA512_S390=m
......
@@ -628,6 +628,7 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 CONFIG_ZCRYPT=m
+CONFIG_PKEY=m
 CONFIG_CRYPTO_SHA1_S390=m
 CONFIG_CRYPTO_SHA256_S390=m
 CONFIG_CRYPTO_SHA512_S390=m
......
@@ -6,7 +6,7 @@ obj-$(CONFIG_CRYPTO_SHA1_S390) += sha1_s390.o sha_common.o
 obj-$(CONFIG_CRYPTO_SHA256_S390) += sha256_s390.o sha_common.o
 obj-$(CONFIG_CRYPTO_SHA512_S390) += sha512_s390.o sha_common.o
 obj-$(CONFIG_CRYPTO_DES_S390) += des_s390.o
-obj-$(CONFIG_CRYPTO_AES_S390) += aes_s390.o
+obj-$(CONFIG_CRYPTO_AES_S390) += aes_s390.o paes_s390.o
 obj-$(CONFIG_S390_PRNG) += prng.o
 obj-$(CONFIG_CRYPTO_GHASH_S390) += ghash_s390.o
 obj-$(CONFIG_CRYPTO_CRC32_S390) += crc32-vx_s390.o
......
This diff is collapsed.
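The collapsed diff above is presumably the new paes module source (added to the crypto Makefile just before). Purely as an illustration of the intended use, and not taken from this page: the module is expected to register "paes" cipher modes with the kernel crypto API that accept a secure key blob instead of a clear AES key. The algorithm name "cbc(paes)", the 64-byte setkey length, and the assumption of a synchronous implementation below are all assumptions; the helper name is hypothetical.

#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Hypothetical sketch: encrypt a buffer in place with a secure key blob via "cbc(paes)". */
static int demo_paes_encrypt(const u8 *seckey, u8 *buf, unsigned int len, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	int rc;

	tfm = crypto_alloc_skcipher("cbc(paes)", 0, 0);	/* name is an assumption */
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = crypto_skcipher_setkey(tfm, seckey, 64);	/* secure key blob, not a clear key */
	if (rc)
		goto out;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		rc = -ENOMEM;
		goto out;
	}
	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, 0, NULL, NULL);	/* assumes a synchronous cipher */
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);
	rc = crypto_skcipher_encrypt(req);
	skcipher_request_free(req);
out:
	crypto_free_skcipher(tfm);
	return rc;
}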
@@ -229,6 +229,7 @@ CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_ZCRYPT=m
+CONFIG_PKEY=m
 CONFIG_CRYPTO_SHA1_S390=m
 CONFIG_CRYPTO_SHA256_S390=m
 CONFIG_CRYPTO_SHA512_S390=m
......
@@ -28,8 +28,9 @@
 #define CPACF_PPNO		0xb93c		/* MSA5 */
 
 /*
- * Decryption modifier bit
+ * En/decryption modifier bits
  */
+#define CPACF_ENCRYPT		0x00
 #define CPACF_DECRYPT		0x80
 
 /*
@@ -42,8 +43,13 @@
 #define CPACF_KM_AES_128	0x12
 #define CPACF_KM_AES_192	0x13
 #define CPACF_KM_AES_256	0x14
+#define CPACF_KM_PAES_128	0x1a
+#define CPACF_KM_PAES_192	0x1b
+#define CPACF_KM_PAES_256	0x1c
 #define CPACF_KM_XTS_128	0x32
 #define CPACF_KM_XTS_256	0x34
+#define CPACF_KM_PXTS_128	0x3a
+#define CPACF_KM_PXTS_256	0x3c
 
 /*
  * Function codes for the KMC (CIPHER MESSAGE WITH CHAINING)
@@ -56,6 +62,9 @@
 #define CPACF_KMC_AES_128	0x12
 #define CPACF_KMC_AES_192	0x13
 #define CPACF_KMC_AES_256	0x14
+#define CPACF_KMC_PAES_128	0x1a
+#define CPACF_KMC_PAES_192	0x1b
+#define CPACF_KMC_PAES_256	0x1c
 #define CPACF_KMC_PRNG		0x43
 
 /*
@@ -69,6 +78,9 @@
 #define CPACF_KMCTR_AES_128	0x12
 #define CPACF_KMCTR_AES_192	0x13
 #define CPACF_KMCTR_AES_256	0x14
+#define CPACF_KMCTR_PAES_128	0x1a
+#define CPACF_KMCTR_PAES_192	0x1b
+#define CPACF_KMCTR_PAES_256	0x1c
 
 /*
  * Function codes for the KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST)
@@ -98,6 +110,18 @@
 #define CPACF_KMAC_TDEA_128	0x02
 #define CPACF_KMAC_TDEA_192	0x03
 
+/*
+ * Function codes for the PCKMO (PERFORM CRYPTOGRAPHIC KEY MANAGEMENT)
+ * instruction
+ */
+#define CPACF_PCKMO_QUERY		0x00
+#define CPACF_PCKMO_ENC_DES_KEY		0x01
+#define CPACF_PCKMO_ENC_TDES_128_KEY	0x02
+#define CPACF_PCKMO_ENC_TDES_192_KEY	0x03
+#define CPACF_PCKMO_ENC_AES_128_KEY	0x12
+#define CPACF_PCKMO_ENC_AES_192_KEY	0x13
+#define CPACF_PCKMO_ENC_AES_256_KEY	0x14
+
 /*
  * Function codes for the PPNO (PERFORM PSEUDORANDOM NUMBER OPERATION)
  * instruction
@@ -397,4 +421,24 @@ static inline void cpacf_pcc(unsigned long func, void *param)
 		: "cc", "memory");
 }
 
+/**
+ * cpacf_pckmo() - executes the PCKMO (PERFORM CRYPTOGRAPHIC KEY
+ *		   MANAGEMENT) instruction
+ * @func: the function code passed to PCKMO; see CPACF_PCKMO_xxx defines
+ * @param: address of parameter block; see POP for details on each func
+ *
+ * Returns 0.
+ */
+static inline void cpacf_pckmo(long func, void *param)
+{
+	register unsigned long r0 asm("0") = (unsigned long) func;
+	register unsigned long r1 asm("1") = (unsigned long) param;
+
+	asm volatile(
+		"	.insn	rre,%[opc] << 16,0,0\n" /* PCKMO opcode */
+		:
+		: [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_PCKMO)
+		: "cc", "memory");
+}
+
 #endif	/* _ASM_S390_CPACF_H */
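For orientation, a minimal sketch (not part of this merge) of how a caller might use the new cpacf_pckmo() wrapper to wrap a clear AES key into a protected key. The parameter-block layout assumed here (clear key followed by the wrapping-key verification pattern) and its sizes come from the Principles of Operation, not from this diff, and the helper name is hypothetical.

#include <asm/cpacf.h>
#include <linux/string.h>
#include <linux/types.h>

/* Hypothetical: wrap a 32-byte clear AES key into a 64-byte protected key token. */
static void demo_clrkey_to_protkey(const u8 clrkey[32], u8 protkey[64])
{
	struct {
		u8 key[32];	/* in: clear key, out: key wrapped under the firmware key */
		u8 wkvp[32];	/* out: wrapping-key verification pattern (assumed size) */
	} param;

	memcpy(param.key, clrkey, sizeof(param.key));
	cpacf_pckmo(CPACF_PCKMO_ENC_AES_256_KEY, &param);
	/* The protected key token is the wrapped key plus the verification pattern. */
	memcpy(protkey, &param, sizeof(param));
}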
@@ -63,7 +63,7 @@ static inline void set_user_asce(struct mm_struct *mm)
 	S390_lowcore.user_asce = mm->context.asce;
 	if (current->thread.mm_segment.ar4)
 		__ctl_load(S390_lowcore.user_asce, 7, 7);
-	set_cpu_flag(CIF_ASCE);
+	set_cpu_flag(CIF_ASCE_PRIMARY);
 }
 
 static inline void clear_user_asce(void)
@@ -81,7 +81,7 @@ static inline void load_kernel_asce(void)
 	__ctl_store(asce, 1, 1);
 	if (asce != S390_lowcore.kernel_asce)
 		__ctl_load(S390_lowcore.kernel_asce, 1, 1);
-	set_cpu_flag(CIF_ASCE);
+	set_cpu_flag(CIF_ASCE_PRIMARY);
 }
 
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
......
@@ -640,12 +640,12 @@ static inline int pud_bad(pud_t pud)
 
 static inline int pmd_present(pmd_t pmd)
 {
-	return pmd_val(pmd) != _SEGMENT_ENTRY_INVALID;
+	return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY;
 }
 
 static inline int pmd_none(pmd_t pmd)
 {
-	return pmd_val(pmd) == _SEGMENT_ENTRY_INVALID;
+	return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY;
 }
 
 static inline unsigned long pmd_pfn(pmd_t pmd)
@@ -803,7 +803,7 @@ static inline void pud_clear(pud_t *pud)
 
 static inline void pmd_clear(pmd_t *pmdp)
 {
-	pmd_val(*pmdp) = _SEGMENT_ENTRY_INVALID;
+	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
 }
 
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
@@ -1357,7 +1357,7 @@ static inline pmd_t pmd_mkhuge(pmd_t pmd)
 static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
 					    unsigned long addr, pmd_t *pmdp)
 {
-	return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_INVALID));
+	return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
 }
 
 #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
@@ -1367,10 +1367,10 @@ static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
 {
 	if (full) {
 		pmd_t pmd = *pmdp;
-		*pmdp = __pmd(_SEGMENT_ENTRY_INVALID);
+		*pmdp = __pmd(_SEGMENT_ENTRY_EMPTY);
 		return pmd;
 	}
-	return pmdp_xchg_lazy(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_INVALID));
+	return pmdp_xchg_lazy(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
 }
 
 #define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
@@ -1384,7 +1384,7 @@ static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
 static inline void pmdp_invalidate(struct vm_area_struct *vma,
 				   unsigned long addr, pmd_t *pmdp)
 {
-	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_INVALID));
+	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
 }
 
 #define __HAVE_ARCH_PMDP_SET_WRPROTECT
......
/*
* Kernelspace interface to the pkey device driver
*
* Copyright IBM Corp. 2016
*
* Author: Harald Freudenberger <freude@de.ibm.com>
*
*/
#ifndef _KAPI_PKEY_H
#define _KAPI_PKEY_H
#include <linux/ioctl.h>
#include <linux/types.h>
#include <uapi/asm/pkey.h>
/*
* Generate (AES) random secure key.
* @param cardnr may be -1 (use default card)
* @param domain may be -1 (use default domain)
* @param keytype one of the PKEY_KEYTYPE values
* @param seckey pointer to buffer receiving the secure key
* @return 0 on success, negative errno value on failure
*/
int pkey_genseckey(__u16 cardnr, __u16 domain,
__u32 keytype, struct pkey_seckey *seckey);
/*
* Generate (AES) secure key with given key value.
* @param cardnr may be -1 (use default card)
* @param domain may be -1 (use default domain)
* @param keytype one of the PKEY_KEYTYPE values
* @param clrkey pointer to buffer with clear key data
* @param seckey pointer to buffer receiving the secure key
* @return 0 on success, negative errno value on failure
*/
int pkey_clr2seckey(__u16 cardnr, __u16 domain, __u32 keytype,
const struct pkey_clrkey *clrkey,
struct pkey_seckey *seckey);
/*
 * Derive (AES) protected key from the (AES) secure key blob.
* @param cardnr may be -1 (use default card)
* @param domain may be -1 (use default domain)
* @param seckey pointer to buffer with the input secure key
* @param protkey pointer to buffer receiving the protected key and
* additional info (type, length)
* @return 0 on success, negative errno value on failure
*/
int pkey_sec2protkey(__u16 cardnr, __u16 domain,
const struct pkey_seckey *seckey,
struct pkey_protkey *protkey);
/*
* Derive (AES) protected key from a given clear key value.
* @param keytype one of the PKEY_KEYTYPE values
* @param clrkey pointer to buffer with clear key data
* @param protkey pointer to buffer receiving the protected key and
* additional info (type, length)
* @return 0 on success, negative errno value on failure
*/
int pkey_clr2protkey(__u32 keytype,
const struct pkey_clrkey *clrkey,
struct pkey_protkey *protkey);
/*
* Search for a matching crypto card based on the Master Key
* Verification Pattern provided inside a secure key.
* @param seckey pointer to buffer with the input secure key
* @param cardnr pointer to cardnr, receives the card number on success
* @param domain pointer to domain, receives the domain number on success
* @param verify if set, always verify by fetching verification pattern
* from card
* @return 0 on success, negative errno value on failure. If no card could be
* found, -ENODEV is returned.
*/
int pkey_findcard(const struct pkey_seckey *seckey,
__u16 *cardnr, __u16 *domain, int verify);
/*
* Find card and transform secure key to protected key.
* @param seckey pointer to buffer with the input secure key
* @param protkey pointer to buffer receiving the protected key and
* additional info (type, length)
* @return 0 on success, negative errno value on failure
*/
int pkey_skey2pkey(const struct pkey_seckey *seckey,
struct pkey_protkey *protkey);
#endif /* _KAPI_PKEY_H */
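As an illustration only (not part of this merge): the helpers declared above could be chained by another kernel module roughly as follows. The function pkey_demo() is hypothetical; the pkey_* calls and structs are the ones from this header.

#include <asm/pkey.h>

static int pkey_demo(void)
{
	struct pkey_seckey seckey;
	struct pkey_protkey protkey;
	int rc;

	/* Let the default card/domain generate a random AES-256 secure key. */
	rc = pkey_genseckey(-1, -1, PKEY_KEYTYPE_AES_256, &seckey);
	if (rc)
		return rc;

	/* Find a matching card and derive the CPU-usable protected key. */
	rc = pkey_skey2pkey(&seckey, &protkey);
	if (rc)
		return rc;

	/* protkey.type, protkey.len and protkey.protkey now describe the protected key. */
	return 0;
}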
@@ -14,14 +14,16 @@
 #include <linux/const.h>
 
 #define CIF_MCCK_PENDING	0	/* machine check handling is pending */
-#define CIF_ASCE		1	/* user asce needs fixup / uaccess */
-#define CIF_NOHZ_DELAY		2	/* delay HZ disable for a tick */
-#define CIF_FPU			3	/* restore FPU registers */
-#define CIF_IGNORE_IRQ		4	/* ignore interrupt (for udelay) */
-#define CIF_ENABLED_WAIT	5	/* in enabled wait state */
+#define CIF_ASCE_PRIMARY	1	/* primary asce needs fixup / uaccess */
+#define CIF_ASCE_SECONDARY	2	/* secondary asce needs fixup / uaccess */
+#define CIF_NOHZ_DELAY		3	/* delay HZ disable for a tick */
+#define CIF_FPU			4	/* restore FPU registers */
+#define CIF_IGNORE_IRQ		5	/* ignore interrupt (for udelay) */
+#define CIF_ENABLED_WAIT	6	/* in enabled wait state */
 
 #define _CIF_MCCK_PENDING	_BITUL(CIF_MCCK_PENDING)
-#define _CIF_ASCE		_BITUL(CIF_ASCE)
+#define _CIF_ASCE_PRIMARY	_BITUL(CIF_ASCE_PRIMARY)
+#define _CIF_ASCE_SECONDARY	_BITUL(CIF_ASCE_SECONDARY)
 #define _CIF_NOHZ_DELAY		_BITUL(CIF_NOHZ_DELAY)
 #define _CIF_FPU		_BITUL(CIF_FPU)
 #define _CIF_IGNORE_IRQ		_BITUL(CIF_IGNORE_IRQ)
@@ -89,7 +91,8 @@ extern void execve_tail(void);
  * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit.
  */
 
-#define TASK_SIZE_OF(tsk)	((tsk)->mm->context.asce_limit)
+#define TASK_SIZE_OF(tsk)	((tsk)->mm ? \
+				 (tsk)->mm->context.asce_limit : TASK_MAX_SIZE)
 #define TASK_UNMAPPED_BASE	(test_thread_flag(TIF_31BIT) ? \
 					(1UL << 30) : (1UL << 41))
 #define TASK_SIZE		TASK_SIZE_OF(current)
@@ -200,10 +203,12 @@ struct stack_frame {
 struct task_struct;
 struct mm_struct;
 struct seq_file;
+struct pt_regs;
 
 typedef int (*dump_trace_func_t)(void *data, unsigned long address, int reliable);
 void dump_trace(dump_trace_func_t func, void *data,
 		struct task_struct *task, unsigned long sp);
+void show_registers(struct pt_regs *regs);
 
 void show_cacheinfo(struct seq_file *m);
......
@@ -14,6 +14,7 @@
  */
 #include <linux/sched.h>
 #include <linux/errno.h>
+#include <asm/processor.h>
 #include <asm/ctl_reg.h>
 
 #define VERIFY_READ	0
@@ -36,18 +37,20 @@
 #define get_ds()	(KERNEL_DS)
 #define get_fs()	(current->thread.mm_segment)
 
-#define set_fs(x) \
-do { \
-	unsigned long __pto; \
-	current->thread.mm_segment = (x); \
-	__pto = current->thread.mm_segment.ar4 ? \
-		S390_lowcore.user_asce : S390_lowcore.kernel_asce; \
-	__ctl_load(__pto, 7, 7); \
-} while (0)
-
 #define segment_eq(a,b) ((a).ar4 == (b).ar4)
 
+static inline void set_fs(mm_segment_t fs)
+{
+	current->thread.mm_segment = fs;
+	if (segment_eq(fs, KERNEL_DS)) {
+		set_cpu_flag(CIF_ASCE_SECONDARY);
+		__ctl_load(S390_lowcore.kernel_asce, 7, 7);
+	} else {
+		clear_cpu_flag(CIF_ASCE_SECONDARY);
+		__ctl_load(S390_lowcore.user_asce, 7, 7);
+	}
+}
+
 static inline int __range_ok(unsigned long addr, unsigned long size)
 {
 	return 1;
......
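The rework above turns set_fs() into an inline function that records KERNEL_DS mode in the new CIF_ASCE_SECONDARY flag, so the exit paths in entry.S (further down in this merge) can detect a set_fs() that was never undone and repair it via set_fs_fixup(). A minimal sketch of the balanced pattern a caller is expected to follow (illustration only, not taken from this diff):

	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);	/* sets CIF_ASCE_SECONDARY and loads the kernel asce into %cr7 */
	/* ... call an interface that takes __user pointers with a kernel buffer ... */
	set_fs(old_fs);		/* restores the previous mode (here USER_DS) and clears the flag */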
@@ -24,6 +24,7 @@ header-y += mman.h
 header-y += monwriter.h
 header-y += msgbuf.h
 header-y += param.h
+header-y += pkey.h
 header-y += poll.h
 header-y += posix_types.h
 header-y += ptrace.h
......
/*
* Userspace interface to the pkey device driver
*
* Copyright IBM Corp. 2017
*
* Author: Harald Freudenberger <freude@de.ibm.com>
*
*/
#ifndef _UAPI_PKEY_H
#define _UAPI_PKEY_H
#include <linux/ioctl.h>
#include <linux/types.h>
/*
* Ioctl calls supported by the pkey device driver
*/
#define PKEY_IOCTL_MAGIC 'p'
#define SECKEYBLOBSIZE 64 /* secure key blob size is always 64 bytes */
#define MAXPROTKEYSIZE 64 /* a protected key blob may be up to 64 bytes */
#define MAXCLRKEYSIZE 32 /* a clear key value may be up to 32 bytes */
/* defines for the type field within the pkey_protkey struct */
#define PKEY_KEYTYPE_AES_128 1
#define PKEY_KEYTYPE_AES_192 2
#define PKEY_KEYTYPE_AES_256 3
/* Struct to hold a secure key blob */
struct pkey_seckey {
__u8 seckey[SECKEYBLOBSIZE]; /* the secure key blob */
};
/* Struct to hold protected key and length info */
struct pkey_protkey {
__u32 type; /* key type, one of the PKEY_KEYTYPE values */
__u32 len; /* bytes actually stored in protkey[] */
__u8 protkey[MAXPROTKEYSIZE]; /* the protected key blob */
};
/* Struct to hold a clear key value */
struct pkey_clrkey {
__u8 clrkey[MAXCLRKEYSIZE]; /* 16, 24, or 32 byte clear key value */
};
/*
* Generate secure key
*/
struct pkey_genseck {
__u16 cardnr; /* in: card to use or FFFF for any */
__u16 domain; /* in: domain or FFFF for any */
__u32 keytype; /* in: key type to generate */
struct pkey_seckey seckey; /* out: the secure key blob */
};
#define PKEY_GENSECK _IOWR(PKEY_IOCTL_MAGIC, 0x01, struct pkey_genseck)
/*
* Construct secure key from clear key value
*/
struct pkey_clr2seck {
__u16 cardnr; /* in: card to use or FFFF for any */
__u16 domain; /* in: domain or FFFF for any */
__u32 keytype; /* in: key type to generate */
struct pkey_clrkey clrkey; /* in: the clear key value */
struct pkey_seckey seckey; /* out: the secure key blob */
};
#define PKEY_CLR2SECK _IOWR(PKEY_IOCTL_MAGIC, 0x02, struct pkey_clr2seck)
/*
* Fabricate protected key from a secure key
*/
struct pkey_sec2protk {
__u16 cardnr; /* in: card to use or FFFF for any */
__u16 domain; /* in: domain or FFFF for any */
struct pkey_seckey seckey; /* in: the secure key blob */
struct pkey_protkey protkey; /* out: the protected key */
};
#define PKEY_SEC2PROTK _IOWR(PKEY_IOCTL_MAGIC, 0x03, struct pkey_sec2protk)
/*
 * Fabricate protected key from a clear key value
*/
struct pkey_clr2protk {
__u32 keytype; /* in: key type to generate */
struct pkey_clrkey clrkey; /* in: the clear key value */
struct pkey_protkey protkey; /* out: the protected key */
};
#define PKEY_CLR2PROTK _IOWR(PKEY_IOCTL_MAGIC, 0x04, struct pkey_clr2protk)
/*
* Search for matching crypto card based on the Master Key
* Verification Pattern provided inside a secure key.
*/
struct pkey_findcard {
struct pkey_seckey seckey; /* in: the secure key blob */
__u16 cardnr; /* out: card number */
__u16 domain; /* out: domain number */
};
#define PKEY_FINDCARD _IOWR(PKEY_IOCTL_MAGIC, 0x05, struct pkey_findcard)
/*
* Combined together: findcard + sec2prot
*/
struct pkey_skey2pkey {
struct pkey_seckey seckey; /* in: the secure key blob */
struct pkey_protkey protkey; /* out: the protected key */
};
#define PKEY_SKEY2PKEY _IOWR(PKEY_IOCTL_MAGIC, 0x06, struct pkey_skey2pkey)
#endif /* _UAPI_PKEY_H */
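For illustration only (not part of this merge): a user-space program could drive the ioctls defined above roughly as follows. This assumes the header is installed as <asm/pkey.h> and that the pkey driver exposes a /dev/pkey device node; both are assumptions, as is the program itself.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/pkey.h>

int main(void)
{
	struct pkey_genseck genseck;
	struct pkey_sec2protk sec2protk;
	int fd = open("/dev/pkey", O_RDWR);	/* device node name is an assumption */

	if (fd < 0) {
		perror("open /dev/pkey");
		return 1;
	}

	/* Ask any card/domain (0xffff) for a random AES-256 secure key. */
	memset(&genseck, 0, sizeof(genseck));
	genseck.cardnr = 0xffff;
	genseck.domain = 0xffff;
	genseck.keytype = PKEY_KEYTYPE_AES_256;
	if (ioctl(fd, PKEY_GENSECK, &genseck)) {
		perror("PKEY_GENSECK");
		return 1;
	}

	/* Turn the secure key into a protected key usable by CPACF. */
	memset(&sec2protk, 0, sizeof(sec2protk));
	sec2protk.cardnr = 0xffff;
	sec2protk.domain = 0xffff;
	sec2protk.seckey = genseck.seckey;
	if (ioctl(fd, PKEY_SEC2PROTK, &sec2protk)) {
		perror("PKEY_SEC2PROTK");
		return 1;
	}

	printf("protected key type %u, %u bytes\n",
	       sec2protk.protkey.type, sec2protk.protkey.len);
	close(fd);
	return 0;
}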
@@ -50,7 +50,8 @@ _TIF_WORK	= (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
		   _TIF_UPROBE)
 _TIF_TRACE	= (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
		   _TIF_SYSCALL_TRACEPOINT)
-_CIF_WORK	= (_CIF_MCCK_PENDING | _CIF_ASCE | _CIF_FPU)
+_CIF_WORK	= (_CIF_MCCK_PENDING | _CIF_ASCE_PRIMARY | \
+		   _CIF_ASCE_SECONDARY | _CIF_FPU)
 _PIF_WORK	= (_PIF_PER_TRAP)
 
 #define BASED(name) name-cleanup_critical(%r13)
@@ -339,8 +340,8 @@ ENTRY(system_call)
	jo	.Lsysc_notify_resume
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lsysc_vxrs
-	TSTMSK	__LC_CPU_FLAGS,_CIF_ASCE
-	jo	.Lsysc_uaccess
+	TSTMSK	__LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY)
+	jnz	.Lsysc_asce
	j	.Lsysc_return		# beware of critical section cleanup
 
 #
@@ -358,12 +359,15 @@ ENTRY(system_call)
	jg	s390_handle_mcck	# TIF bit will be cleared by handler
 
 #
-# _CIF_ASCE is set, load user space asce
+# _CIF_ASCE_PRIMARY and/or CIF_ASCE_SECONDARY set, load user space asce
 #
-.Lsysc_uaccess:
-	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE
+.Lsysc_asce:
+	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE_PRIMARY
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
-	j	.Lsysc_return
+	TSTMSK	__LC_CPU_FLAGS,_CIF_ASCE_SECONDARY
+	jz	.Lsysc_return
+	larl	%r14,.Lsysc_return
+	jg	set_fs_fixup
 
 #
 # CIF_FPU is set, restore floating-point controls and floating-point registers.
@@ -661,8 +665,8 @@ ENTRY(io_int_handler)
	jo	.Lio_notify_resume
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lio_vxrs
-	TSTMSK	__LC_CPU_FLAGS,_CIF_ASCE
-	jo	.Lio_uaccess
+	TSTMSK	__LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY)
+	jnz	.Lio_asce
	j	.Lio_return		# beware of critical section cleanup
 
 #
@@ -675,12 +679,15 @@ ENTRY(io_int_handler)
	j	.Lio_return
 
 #
-# _CIF_ASCE is set, load user space asce
+# _CIF_ASCE_PRIMARY and/or CIF_ASCE_SECONDARY set, load user space asce
 #
-.Lio_uaccess:
-	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE
+.Lio_asce:
+	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE_PRIMARY
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
-	j	.Lio_return
+	TSTMSK	__LC_CPU_FLAGS,_CIF_ASCE_SECONDARY
+	jz	.Lio_return
+	larl	%r14,.Lio_return
+	jg	set_fs_fixup
 
 #
 # CIF_FPU is set, restore floating-point controls and floating-point registers.
......
@@ -80,5 +80,6 @@ long sys_s390_pci_mmio_read(unsigned long, void __user *, size_t);
 DECLARE_PER_CPU(u64, mt_cycles[8]);
 
 void verify_facilities(void);
+void set_fs_fixup(void);
 
 #endif /* _ENTRY_H */
@@ -116,6 +116,19 @@ static int notrace s390_validate_registers(union mci mci, int umode)
			s390_handle_damage();
		kill_task = 1;
	}
+	/* Validate control registers */
+	if (!mci.cr) {
+		/*
+		 * Control registers have unknown contents.
+		 * Can't recover and therefore stopping machine.
+		 */
+		s390_handle_damage();
+	} else {
+		asm volatile(
+			"	lctlg	0,15,0(%0)\n"
+			"	ptlb\n"
+			: : "a" (&S390_lowcore.cregs_save_area) : "memory");
+	}
	if (!mci.fp) {
		/*
		 * Floating point registers can't be restored. If the
@@ -208,18 +221,6 @@ static int notrace s390_validate_registers(union mci mci, int umode)
		 */
		kill_task = 1;
	}
-	/* Validate control registers */
-	if (!mci.cr) {
-		/*
-		 * Control registers have unknown contents.
-		 * Can't recover and therefore stopping machine.
-		 */
-		s390_handle_damage();
-	} else {
-		asm volatile(
-			"	lctlg	0,15,0(%0)"
-			: : "a" (&S390_lowcore.cregs_save_area) : "memory");
-	}
	/*
	 * We don't even try to validate the TOD register, since we simply
	 * can't write something sensible into that register.
......
@@ -100,8 +100,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 	return 0;
 }
 
-int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
-		unsigned long arg, struct task_struct *p)
+int copy_thread_tls(unsigned long clone_flags, unsigned long new_stackp,
+		    unsigned long arg, struct task_struct *p, unsigned long tls)
 {
 	struct fake_frame
 	{
@@ -156,7 +156,6 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
 
 	/* Set a new TLS ?  */
 	if (clone_flags & CLONE_SETTLS) {
-		unsigned long tls = frame->childregs.gprs[6];
 		if (is_compat_task()) {
 			p->thread.acrs[0] = (unsigned int)tls;
 		} else {
@@ -234,3 +233,16 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
 	ret = PAGE_ALIGN(mm->brk + brk_rnd());
 	return (ret > mm->brk) ? ret : mm->brk;
 }
+
+void set_fs_fixup(void)
+{
+	struct pt_regs *regs = current_pt_regs();
+	static bool warned;
+
+	set_fs(USER_DS);
+	if (warned)
+		return;
+	WARN(1, "Unbalanced set_fs - int code: 0x%x\n", regs->int_code);
+	show_registers(regs);
+	warned = true;
+}
@@ -359,8 +359,8 @@ static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
 	spin_lock(&gmap->guest_table_lock);
 	entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
 	if (entry) {
-		flush = (*entry != _SEGMENT_ENTRY_INVALID);
-		*entry = _SEGMENT_ENTRY_INVALID;
+		flush = (*entry != _SEGMENT_ENTRY_EMPTY);
+		*entry = _SEGMENT_ENTRY_EMPTY;
 	}
 	spin_unlock(&gmap->guest_table_lock);
 	return flush;
@@ -589,7 +589,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
 		return rc;
 	ptl = pmd_lock(mm, pmd);
 	spin_lock(&gmap->guest_table_lock);
-	if (*table == _SEGMENT_ENTRY_INVALID) {
+	if (*table == _SEGMENT_ENTRY_EMPTY) {
 		rc = radix_tree_insert(&gmap->host_to_guest,
 				       vmaddr >> PMD_SHIFT, table);
 		if (!rc)
......
@@ -62,7 +62,7 @@ static inline unsigned long __pte_to_rste(pte_t pte)
 		rste |= move_set_bit(pte_val(pte), _PAGE_NOEXEC,
 				     _SEGMENT_ENTRY_NOEXEC);
 	} else
-		rste = _SEGMENT_ENTRY_INVALID;
+		rste = _SEGMENT_ENTRY_EMPTY;
 	return rste;
 }
......
@@ -62,19 +62,32 @@ config CRYPTO_DEV_GEODE
 	  will be called geode-aes.
 
 config ZCRYPT
-	tristate "Support for PCI-attached cryptographic adapters"
+	tristate "Support for s390 cryptographic adapters"
 	depends on S390
 	select HW_RANDOM
 	help
-	  Select this option if you want to use a PCI-attached cryptographic
-	  adapter like:
-	  + PCI Cryptographic Accelerator (PCICA)
-	  + PCI Cryptographic Coprocessor (PCICC)
+	  Select this option if you want to enable support for
+	  s390 cryptographic adapters like:
 	  + PCI-X Cryptographic Coprocessor (PCIXCC)
-	  + Crypto Express2 Coprocessor (CEX2C)
-	  + Crypto Express2 Accelerator (CEX2A)
-	  + Crypto Express3 Coprocessor (CEX3C)
-	  + Crypto Express3 Accelerator (CEX3A)
+	  + Crypto Express 2,3,4 or 5 Coprocessor (CEXxC)
+	  + Crypto Express 2,3,4 or 5 Accelerator (CEXxA)
+	  + Crypto Express 4 or 5 EP11 Coprocessor (CEXxP)
+
+config PKEY
+	tristate "Kernel API for protected key handling"
+	depends on S390
+	depends on ZCRYPT
+	help
+	  With this option enabled the pkey kernel module provides an API
+	  for creation and handling of protected keys. Other parts of the
+	  kernel or userspace applications may use these functions.
+
+	  Select this option if you want to enable the kernel and userspace
+	  API for protected key handling.
+
+	  Please note that creation of protected keys from secure keys
+	  requires at least one CEX card in coprocessor mode to be
+	  available at runtime.
 
 config CRYPTO_SHA1_S390
 	tristate "SHA1 digest algorithm"
@@ -124,6 +137,7 @@ config CRYPTO_AES_S390
 	depends on S390
 	select CRYPTO_ALGAPI
 	select CRYPTO_BLKCIPHER
+	select PKEY
 	help
 	  This is the s390 hardware accelerated implementation of the
 	  AES cipher algorithms (FIPS-197).
......
@@ -4864,7 +4864,7 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
 		break;
 	case 3:	/* tsa_intrg */
 		len += sprintf(page + len, PRINTK_HEADER
-			      " tsb->tsa.intrg.: not supportet yet\n");
+			      " tsb->tsa.intrg.: not supported yet\n");
 		break;
 	}
......
@@ -165,13 +165,15 @@ int tpi(struct tpi_info *addr)
 int chsc(void *chsc_area)
 {
 	typedef struct { char _[4096]; } addr_type;
-	int cc;
+	int cc = -EIO;
 
 	asm volatile(
 		"	.insn	rre,0xb25f0000,%2,0\n"
-		"	ipm	%0\n"
+		"0:	ipm	%0\n"
 		"	srl	%0,28\n"
-		: "=d" (cc), "=m" (*(addr_type *) chsc_area)
+		"1:\n"
+		EX_TABLE(0b, 1b)
+		: "+d" (cc), "=m" (*(addr_type *) chsc_area)
 		: "d" (chsc_area), "m" (*(addr_type *) chsc_area)
 		: "cc");
 	trace_s390_cio_chsc(chsc_area, cc);
......
@@ -10,3 +10,7 @@ zcrypt-objs += zcrypt_msgtype6.o zcrypt_msgtype50.o
 obj-$(CONFIG_ZCRYPT) += zcrypt.o
 
 # adapter drivers depend on ap.o and zcrypt.o
 obj-$(CONFIG_ZCRYPT) += zcrypt_pcixcc.o zcrypt_cex2a.o zcrypt_cex4.o
+
+# pkey kernel module
+pkey-objs := pkey_api.o
+obj-$(CONFIG_PKEY) += pkey.o
@@ -1107,16 +1107,6 @@ static void ap_config_timeout(unsigned long ptr)
 	queue_work(system_long_wq, &ap_scan_work);
 }
 
-static void ap_reset_domain(void)
-{
-	int i;
-
-	if (ap_domain_index == -1 || !ap_test_config_domain(ap_domain_index))
-		return;
-	for (i = 0; i < AP_DEVICES; i++)
-		ap_rapq(AP_MKQID(i, ap_domain_index));
-}
-
 static void ap_reset_all(void)
 {
 	int i, j;
......
@@ -58,9 +58,9 @@ static ssize_t ap_functions_show(struct device *dev,
 
 static DEVICE_ATTR(ap_functions, 0444, ap_functions_show, NULL);
 
-static ssize_t ap_request_count_show(struct device *dev,
-				     struct device_attribute *attr,
-				     char *buf)
+static ssize_t ap_req_count_show(struct device *dev,
+				 struct device_attribute *attr,
+				 char *buf)
 {
 	struct ap_card *ac = to_ap_card(dev);
 	unsigned int req_cnt;
@@ -72,7 +72,23 @@ static ssize_t ap_request_count_show(struct device *dev,
 	return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt);
 }
 
-static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL);
+static ssize_t ap_req_count_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t count)
+{
+	struct ap_card *ac = to_ap_card(dev);
+	struct ap_queue *aq;
+
+	spin_lock_bh(&ap_list_lock);
+	for_each_ap_queue(aq, ac)
+		aq->total_request_count = 0;
+	spin_unlock_bh(&ap_list_lock);
+	atomic_set(&ac->total_request_count, 0);
+
+	return count;
+}
+
+static DEVICE_ATTR(request_count, 0644, ap_req_count_show, ap_req_count_store);
 
 static ssize_t ap_requestq_count_show(struct device *dev,
 				      struct device_attribute *attr, char *buf)
......
@@ -459,9 +459,9 @@ EXPORT_SYMBOL(ap_queue_resume);
 /*
  * AP queue related attributes.
  */
-static ssize_t ap_request_count_show(struct device *dev,
-				     struct device_attribute *attr,
-				     char *buf)
+static ssize_t ap_req_count_show(struct device *dev,
+				 struct device_attribute *attr,
+				 char *buf)
 {
 	struct ap_queue *aq = to_ap_queue(dev);
 	unsigned int req_cnt;
@@ -472,7 +472,20 @@ static ssize_t ap_request_count_show(struct device *dev,
 	return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt);
 }
 
-static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL);
+static ssize_t ap_req_count_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t count)
+{
+	struct ap_queue *aq = to_ap_queue(dev);
+
+	spin_lock_bh(&aq->lock);
+	aq->total_request_count = 0;
+	spin_unlock_bh(&aq->lock);
+
+	return count;
+}
+
+static DEVICE_ATTR(request_count, 0644, ap_req_count_show, ap_req_count_store);
 
 static ssize_t ap_requestq_count_show(struct device *dev,
 				      struct device_attribute *attr, char *buf)
......
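With the request_count attributes now writable (mode 0644), the counters of a card or queue can be reset from user space by writing to the attribute; the store handlers above ignore the written value and simply zero the counters. Illustration only; the sysfs path below is an assumption about where the AP bus exposes its card devices and must be adjusted to the card id on a given system.

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/bus/ap/devices/card01/request_count", "w");

	if (!f) {
		perror("open request_count");
		return 1;
	}
	fputs("0\n", f);	/* any write resets the request counters */
	fclose(f);
	return 0;
}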
This diff is collapsed.
@@ -374,7 +374,7 @@ static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
 	return rc;
 }
 
-static long zcrypt_send_cprb(struct ica_xcRB *xcRB)
+long zcrypt_send_cprb(struct ica_xcRB *xcRB)
 {
 	struct zcrypt_card *zc, *pref_zc;
 	struct zcrypt_queue *zq, *pref_zq;
@@ -444,6 +444,7 @@ static long zcrypt_send_cprb(struct ica_xcRB *xcRB)
 			       AP_QID_CARD(qid), AP_QID_QUEUE(qid));
 	return rc;
 }
+EXPORT_SYMBOL(zcrypt_send_cprb);
 
 static bool is_desired_ep11_card(unsigned int dev_id,
 				 unsigned short target_num,
@@ -619,7 +620,7 @@ static long zcrypt_rng(char *buffer)
 	return rc;
 }
 
-static void zcrypt_device_status_mask(struct zcrypt_device_matrix *matrix)
+void zcrypt_device_status_mask(struct zcrypt_device_matrix *matrix)
 {
 	struct zcrypt_card *zc;
 	struct zcrypt_queue *zq;
......
@@ -190,5 +190,7 @@ void zcrypt_msgtype_unregister(struct zcrypt_ops *);
 struct zcrypt_ops *zcrypt_msgtype(unsigned char *, int);
 int zcrypt_api_init(void);
 void zcrypt_api_exit(void);
+long zcrypt_send_cprb(struct ica_xcRB *xcRB);
+void zcrypt_device_status_mask(struct zcrypt_device_matrix *devstatus);
 
 #endif /* _ZCRYPT_API_H_ */