Commit 57ca04ab authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull more s390 updates from Martin Schwidefsky:
 "Over 95% of the changes in this pull request are related to the zcrypt
  driver. There are five improvements for zcrypt: the ID for the CEX6
  cards is added, workload balancing and multi-domain support are
  introduced, the debug logs are overhauled and a set of tracepoints is
  added.

  Then there are several patches in regard to inline assemblies. One
  compile fix and several missing memory clobbers. As far as we can tell
  the omitted memory clobbers have not caused any breakage.

  A small change to the PCI arch code, the machine can tell us how big
  the function measurement blocks are. The PCI function measurement will
  be disabled for a device if the queried length is larger than the
  allocated size for these blocks.

  And two more patches to correct five printk messages.

  That is it for s390 in regard to the 4.10 merge window. Happy holidays"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (23 commits)
  s390/pci: query fmb length
  s390/zcrypt: add missing memory clobber to ap_qci inline assembly
  s390/extmem: add missing memory clobber to dcss_set_subcodes
  s390/nmi: fix inline assembly constraints
  s390/lib: add missing memory barriers to string inline assemblies
  s390/cpumf: fix qsi inline assembly
  s390/setup: reword printk messages
  s390/dasd: fix typos in DASD error messages
  s390: fix compile error with memmove_early() inline assembly
  s390/zcrypt: tracepoint definitions for zcrypt device driver.
  s390/zcrypt: Rework debug feature invocations.
  s390/zcrypt: Improved invalid domain response handling.
  s390/zcrypt: Fix ap_max_domain_id for older machine types
  s390/zcrypt: Correct function bits for CEX2x and CEX3x cards.
  s390/zcrypt: Fixed attrition of AP adapters and domains
  s390/zcrypt: Introduce new zcrypt device status API
  s390/zcrypt: add multi domain support
  s390/zcrypt: Introduce workload balancing
  s390/zcrypt: get rid of ap_poll_requests
  s390/zcrypt: header for the AP inline assmblies
  ...
parents 73e2e0c9 0b7589ec
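Editor's note: as a quick illustration of the memory-clobber theme mentioned in the pull message (a generic sketch, not code from this series), an inline assembly that reads or writes memory the compiler cannot see from the operand list needs a "memory" clobber, otherwise GCC may cache or reorder memory accesses around it:

/* Illustrative sketch only, not taken from these patches: the store through
 * the address register is invisible to the compiler, so "memory" must be
 * listed as a clobber to keep stale values from being held in registers. */
static inline void store_zero(unsigned long *p)
{
	asm volatile(
		"	xgr	0,0\n"		/* clear general register 0 */
		"	stg	0,0(%0)\n"	/* store it at address *p */
		: /* no outputs */
		: "a" (p)
		: "cc", "0", "memory");
}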
@@ -213,18 +213,14 @@ static inline int stcctm5(u64 num, u64 *val)
 /* Query sampling information */
 static inline int qsi(struct hws_qsi_info_block *info)
 {
-	int cc;
-	cc = 1;
+	int cc = 1;
 	asm volatile(
-		"0:	.insn	s,0xb2860000,0(%1)\n"
+		"0:	.insn	s,0xb2860000,%1\n"
 		"1:	lhi	%0,0\n"
 		"2:\n"
 		EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
-		: "=d" (cc), "+a" (info)
-		: "m" (*info)
-		: "cc", "memory");
+		: "+d" (cc), "+Q" (*info));
 	return cc ? -EINVAL : 0;
 }
...
@@ -133,6 +133,7 @@ struct zpci_dev {
 	/* Function measurement block */
 	struct zpci_fmb *fmb;
 	u16		fmb_update;	/* update interval */
+	u16		fmb_length;
 	/* software counters */
 	atomic64_t allocated_pages;
 	atomic64_t mapped_pages;
...
@@ -87,7 +87,8 @@ struct clp_rsp_query_pci {
 	u16 pchid;
 	u32 bar[PCI_BAR_COUNT];
 	u8 pfip[CLP_PFIP_NR_SEGMENTS];	/* pci function internal path */
-	u32 : 24;
+	u32 : 16;
+	u8 fmb_len;
 	u8 pft;			/* pci function type */
 	u64 sdma;		/* start dma as */
 	u64 edma;		/* end dma as */
...
@@ -62,7 +62,7 @@ static inline void *memchr(const void * s, int c, size_t n)
 		"	jl	1f\n"
 		"	la	%0,0\n"
 		"1:"
-		: "+a" (ret), "+&a" (s) : "d" (r0) : "cc");
+		: "+a" (ret), "+&a" (s) : "d" (r0) : "cc", "memory");
 	return (void *) ret;
 }
@@ -74,7 +74,7 @@ static inline void *memscan(void *s, int c, size_t n)
 	asm volatile(
 		"0:	srst	%0,%1\n"
 		"	jo	0b\n"
-		: "+a" (ret), "+&a" (s) : "d" (r0) : "cc");
+		: "+a" (ret), "+&a" (s) : "d" (r0) : "cc", "memory");
 	return (void *) ret;
 }
@@ -115,7 +115,7 @@ static inline size_t strlen(const char *s)
 	asm volatile(
 		"0:	srst	%0,%1\n"
 		"	jo	0b"
-		: "+d" (r0), "+a" (tmp) : : "cc");
+		: "+d" (r0), "+a" (tmp) : : "cc", "memory");
 	return r0 - (unsigned long) s;
 }
@@ -128,7 +128,7 @@ static inline size_t strnlen(const char * s, size_t n)
 	asm volatile(
 		"0:	srst	%0,%1\n"
 		"	jo	0b"
-		: "+a" (end), "+a" (tmp) : "d" (r0) : "cc");
+		: "+a" (end), "+a" (tmp) : "d" (r0) : "cc", "memory");
 	return end - s;
 }
 #else /* IN_ARCH_STRING_C */
...
/*
* Tracepoint definitions for the s390 zcrypt device driver
*
* Copyright IBM Corp. 2016
* Author(s): Harald Freudenberger <freude@de.ibm.com>
*
* Currently there are two tracepoint events defined here.
* An s390_zcrypt_req request event occurs as soon as the request is
* recognized by the zcrypt ioctl function. This event may act as some kind
* of request-processing-starts-now indication.
* As late as possible within the zcrypt ioctl function there occurs the
* s390_zcrypt_rep event which may act as the point in time where the
* request has been processed by the kernel and the result is about to be
* transferred back to userspace.
* The glue which binds together request and reply event is the ptr
* parameter, which is the local buffer address where the request from
* userspace has been stored by the ioctl function.
*
* The main purpose of this zcrypt tracepoint api is to get some data for
* performance measurements together with information about on which card
* and queue the request has been processed. It is not an ffdc interface as
* there is already code in the zcrypt device driver to serve the s390
* debug feature interface.
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM s390
#if !defined(_TRACE_S390_ZCRYPT_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_S390_ZCRYPT_H
#include <linux/tracepoint.h>
#define TP_ICARSAMODEXPO 0x0001
#define TP_ICARSACRT 0x0002
#define TB_ZSECSENDCPRB 0x0003
#define TP_ZSENDEP11CPRB 0x0004
#define TP_HWRNGCPRB 0x0005
#define show_zcrypt_tp_type(type) \
__print_symbolic(type, \
{ TP_ICARSAMODEXPO, "ICARSAMODEXPO" }, \
{ TP_ICARSACRT, "ICARSACRT" }, \
{ TB_ZSECSENDCPRB, "ZSECSENDCPRB" }, \
{ TP_ZSENDEP11CPRB, "ZSENDEP11CPRB" }, \
{ TP_HWRNGCPRB, "HWRNGCPRB" })
/**
* trace_s390_zcrypt_req - zcrypt request tracepoint function
* @ptr: Address of the local buffer where the request from userspace
* is stored. Can be used as a unique id to relate together
* request and reply.
* @type: One of the TP_ defines above.
*
* Called when a request from userspace is recognised within the ioctl
* function of the zcrypt device driver and may act as an entry
* timestamp.
*/
TRACE_EVENT(s390_zcrypt_req,
TP_PROTO(void *ptr, u32 type),
TP_ARGS(ptr, type),
TP_STRUCT__entry(
__field(void *, ptr)
__field(u32, type)),
TP_fast_assign(
__entry->ptr = ptr;
__entry->type = type;),
TP_printk("ptr=%p type=%s",
__entry->ptr,
show_zcrypt_tp_type(__entry->type))
);
/**
* trace_s390_zcrypt_rep - zcrypt reply tracepoint function
* @ptr: Address of the local buffer where the request from userspace
* is stored. Can be used as a unique id to match together
* request and reply.
* @fc: Function code.
* @rc: The bare returncode as returned by the device driver ioctl
* function.
* @dev: The adapter nr where this request was actually processed.
* @dom: Domain id of the device where this request was processed.
*
* Called upon recognising the reply from the crypto adapter. This
* message may act as the exit timestamp for the request but also
* carries some info about on which adapter the request was processed
* and the returncode from the device driver.
*/
TRACE_EVENT(s390_zcrypt_rep,
TP_PROTO(void *ptr, u32 fc, u32 rc, u16 dev, u16 dom),
TP_ARGS(ptr, fc, rc, dev, dom),
TP_STRUCT__entry(
__field(void *, ptr)
__field(u32, fc)
__field(u32, rc)
__field(u16, device)
__field(u16, domain)),
TP_fast_assign(
__entry->ptr = ptr;
__entry->fc = fc;
__entry->rc = rc;
__entry->device = dev;
__entry->domain = dom;),
TP_printk("ptr=%p fc=0x%04x rc=%d dev=0x%02hx domain=0x%04hx",
__entry->ptr,
(unsigned int) __entry->fc,
(int) __entry->rc,
(unsigned short) __entry->device,
(unsigned short) __entry->domain)
);
#endif /* _TRACE_S390_ZCRYPT_H */
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH asm/trace
#define TRACE_INCLUDE_FILE zcrypt
#include <trace/define_trace.h>
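Editor's note: to make the intended use of the two events above concrete, here is a hypothetical call-site sketch (the real hooks live in the zcrypt ioctl path; the function name and the card/domain values below are made up for illustration, and the events must be instantiated once in a .c file with CREATE_TRACE_POINTS):

#include <asm/trace/zcrypt.h>

/* Hypothetical sketch: fire the request event when the ioctl starts
 * processing and the reply event just before the result goes back to
 * userspace. The buffer address serves as the correlation id. */
static long example_icarsamodexpo(struct ica_rsa_modexpo *mex)
{
	long rc;

	trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);
	rc = 0;	/* ... hand the request to some card/domain ... */
	trace_s390_zcrypt_rep(mex, TP_ICARSAMODEXPO, rc, 0x01, 0x000f);
	return rc;
}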
@@ -215,6 +215,42 @@ struct ep11_urb {
 	uint64_t resp;
 } __attribute__((packed));
/**
* struct zcrypt_device_status
* @hwtype: raw hardware type
* @qid: 6 bit device index, 8 bit domain
* @functions: AP device function bit field 'abcdef'
* a, b, c = reserved
* d = CCA coprocessor
* e = Accelerator
* f = EP11 coprocessor
* @online online status
* @reserved reserved
*/
struct zcrypt_device_status {
unsigned int hwtype:8;
unsigned int qid:14;
unsigned int online:1;
unsigned int functions:6;
unsigned int reserved:3;
};
#define MAX_ZDEV_CARDIDS 64
#define MAX_ZDEV_DOMAINS 256
/**
* Maximum number of zcrypt devices
*/
#define MAX_ZDEV_ENTRIES (MAX_ZDEV_CARDIDS * MAX_ZDEV_DOMAINS)
/**
* zcrypt_device_matrix
* Device matrix of all zcrypt devices
*/
struct zcrypt_device_matrix {
struct zcrypt_device_status device[MAX_ZDEV_ENTRIES];
};
 #define AUTOSELECT ((unsigned int)0xFFFFFFFF)
 #define ZCRYPT_IOCTL_MAGIC 'z'
@@ -321,6 +357,7 @@ struct ep11_urb {
 #define ICARSACRT	_IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x06, 0)
 #define ZSECSENDCPRB	_IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x81, 0)
 #define ZSENDEP11CPRB	_IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x04, 0)
+#define ZDEVICESTATUS	_IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x4f, 0)
 /* New status calls */
 #define Z90STAT_TOTALCOUNT	_IOR(ZCRYPT_IOCTL_MAGIC, 0x40, int)
...
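Editor's note: a minimal userspace sketch of the new status call, assuming the usual /dev/z90crypt node and the UAPI header above (error handling trimmed; illustrative only, not part of the patch):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/zcrypt.h>

int main(void)
{
	/* 64 cards x 256 domains = 16384 entries; keep it off the stack */
	static struct zcrypt_device_matrix matrix;
	int i, fd;

	fd = open("/dev/z90crypt", O_RDWR);
	if (fd < 0 || ioctl(fd, ZDEVICESTATUS, &matrix) != 0)
		return 1;
	for (i = 0; i < MAX_ZDEV_ENTRIES; i++)
		if (matrix.device[i].online)
			printf("qid 0x%04x hwtype %u functions 0x%02x\n",
			       (unsigned int) matrix.device[i].qid,
			       (unsigned int) matrix.device[i].hwtype,
			       (unsigned int) matrix.device[i].functions);
	close(fd);
	return 0;
}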
@@ -417,7 +417,7 @@ static __init void memmove_early(void *dst, const void *src, size_t n)
 		"	brctg	%[n],0b\n"
 		"1:\n"
 		: [addr] "=&d" (addr),
-		  [psw_pgm_addr] "=&Q" (S390_lowcore.program_new_psw.addr),
+		  [psw_pgm_addr] "=Q" (S390_lowcore.program_new_psw.addr),
 		  [dst] "+&a" (dst), [src] "+&a" (src), [n] "+d" (n)
 		: [incr] "d" (incr)
 		: "cc", "memory");
...
@@ -102,7 +102,7 @@ static int notrace s390_validate_registers(union mci mci, int umode)
 {
 	int kill_task;
 	u64 zero;
-	void *fpt_save_area, *fpt_creg_save_area;
+	void *fpt_save_area;
 	kill_task = 0;
 	zero = 0;
@@ -130,7 +130,6 @@ static int notrace s390_validate_registers(union mci mci, int umode)
 		kill_task = 1;
 	}
 	fpt_save_area = &S390_lowcore.floating_pt_save_area;
-	fpt_creg_save_area = &S390_lowcore.fpt_creg_save_area;
 	if (!mci.fc) {
 		/*
 		 * Floating point control register can't be restored.
@@ -142,11 +141,13 @@ static int notrace s390_validate_registers(union mci mci, int umode)
 		 */
 		if (S390_lowcore.fpu_flags & KERNEL_FPC)
 			s390_handle_damage();
-		asm volatile("lfpc 0(%0)" : : "a" (&zero), "m" (zero));
+		asm volatile("lfpc %0" : : "Q" (zero));
 		if (!test_cpu_flag(CIF_FPU))
 			kill_task = 1;
-	} else
-		asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area));
+	} else {
+		asm volatile("lfpc %0"
+			     : : "Q" (S390_lowcore.fpt_creg_save_area));
+	}
 	if (!MACHINE_HAS_VX) {
 		/* Validate floating point registers */
@@ -167,7 +168,7 @@ static int notrace s390_validate_registers(union mci mci, int umode)
 			"	ld	13,104(%0)\n"
 			"	ld	14,112(%0)\n"
 			"	ld	15,120(%0)\n"
-			: : "a" (fpt_save_area));
+			: : "a" (fpt_save_area) : "memory");
 	} else {
 		/* Validate vector registers */
 		union ctlreg0 cr0;
@@ -217,7 +218,7 @@ static int notrace s390_validate_registers(union mci mci, int umode)
 	} else {
 		asm volatile(
 			"	lctlg	0,15,0(%0)"
-			: : "a" (&S390_lowcore.cregs_save_area));
+			: : "a" (&S390_lowcore.cregs_save_area) : "memory");
 	}
 	/*
 	 * We don't even try to validate the TOD register, since we simply
@@ -234,9 +235,9 @@ static int notrace s390_validate_registers(union mci mci, int umode)
 			: : : "0", "cc");
 	else
 		asm volatile(
-			"	l	0,0(%0)\n"
+			"	l	0,%0\n"
 			"	sckpf"
-			: : "a" (&S390_lowcore.tod_progreg_save_area)
+			: : "Q" (S390_lowcore.tod_progreg_save_area)
 			: "0", "cc");
 	/* Validate clock comparator register */
 	set_clock_comparator(S390_lowcore.clock_comparator);
...
@@ -485,7 +485,7 @@ static void __init setup_memory_end(void)
 	max_pfn = max_low_pfn = PFN_DOWN(memory_end);
 	memblock_remove(memory_end, ULONG_MAX);
-	pr_notice("Max memory size: %luMB\n", memory_end >> 20);
+	pr_notice("The maximum memory size is %luMB\n", memory_end >> 20);
 }
 static void __init setup_vmcoreinfo(void)
@@ -650,7 +650,7 @@ static void __init check_initrd(void)
 #ifdef CONFIG_BLK_DEV_INITRD
 	if (INITRD_START && INITRD_SIZE &&
 	    !memblock_is_region_memory(INITRD_START, INITRD_SIZE)) {
-		pr_err("initrd does not fit memory.\n");
+		pr_err("The initial RAM disk does not fit into the memory\n");
 		memblock_free(INITRD_START, INITRD_SIZE);
 		initrd_start = initrd_end = 0;
 	}
...
@@ -20,7 +20,7 @@ static inline char *__strend(const char *s)
 	asm volatile ("0: srst	%0,%1\n"
 		      "   jo	0b"
-		      : "+d" (r0), "+a" (s) : : "cc");
+		      : "+d" (r0), "+a" (s) : : "cc", "memory");
 	return (char *) r0;
 }
@@ -31,7 +31,7 @@ static inline char *__strnend(const char *s, size_t n)
 	asm volatile ("0: srst	%0,%1\n"
 		      "   jo	0b"
-		      : "+d" (p), "+a" (s) : "d" (r0) : "cc");
+		      : "+d" (p), "+a" (s) : "d" (r0) : "cc", "memory");
 	return (char *) p;
 }
@@ -213,7 +213,7 @@ int strcmp(const char *cs, const char *ct)
 		      "   sr	%0,%1\n"
 		      "1:"
 		      : "+d" (ret), "+d" (r0), "+a" (cs), "+a" (ct)
-		      : : "cc");
+		      : : "cc", "memory");
 	return ret;
 }
 EXPORT_SYMBOL(strcmp);
@@ -250,7 +250,7 @@ static inline int clcle(const char *s1, unsigned long l1,
 		      "   ipm	%0\n"
 		      "   srl	%0,28"
 		      : "=&d" (cc), "+a" (r2), "+a" (r3),
-			"+a" (r4), "+a" (r5) : : "cc");
+			"+a" (r4), "+a" (r5) : : "cc", "memory");
 	return cc;
 }
@@ -298,7 +298,7 @@ void *memchr(const void *s, int c, size_t n)
 		      "   jl	1f\n"
 		      "   la	%0,0\n"
 		      "1:"
-		      : "+a" (ret), "+&a" (s) : "d" (r0) : "cc");
+		      : "+a" (ret), "+&a" (s) : "d" (r0) : "cc", "memory");
 	return (void *) ret;
 }
 EXPORT_SYMBOL(memchr);
@@ -336,7 +336,7 @@ void *memscan(void *s, int c, size_t n)
 	asm volatile ("0: srst	%0,%1\n"
 		      "   jo	0b\n"
-		      : "+a" (ret), "+&a" (s) : "d" (r0) : "cc");
+		      : "+a" (ret), "+&a" (s) : "d" (r0) : "cc", "memory");
 	return (void *) ret;
 }
 EXPORT_SYMBOL(memscan);
@@ -122,7 +122,7 @@ dcss_set_subcodes(void)
 		"1:	la	%2,3\n"
 		"2:\n"
 		EX_TABLE(0b, 1b)
-		: "+d" (rx), "+d" (ry), "=d" (rc) : : "cc");
+		: "+d" (rx), "+d" (ry), "=d" (rc) : : "cc", "memory");
 	kfree(name);
 	/* Diag x'64' new subcodes are supported, set to new subcodes */
...
@@ -180,7 +180,7 @@ int zpci_fmb_enable_device(struct zpci_dev *zdev)
 {
 	struct mod_pci_args args = { 0, 0, 0, 0 };
-	if (zdev->fmb)
+	if (zdev->fmb || sizeof(*zdev->fmb) < zdev->fmb_length)
 		return -EINVAL;
 	zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL);
...
@@ -148,6 +148,7 @@ static int clp_store_query_pci_fn(struct zpci_dev *zdev,
 	zdev->pft = response->pft;
 	zdev->vfn = response->vfn;
 	zdev->uid = response->uid;
+	zdev->fmb_length = sizeof(u32) * response->fmb_len;
 	memcpy(zdev->pfip, response->pfip, sizeof(zdev->pfip));
 	if (response->util_str_avail) {
...
@@ -674,7 +674,7 @@ dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense)
 		break;
 	case 0x0D:
 		dev_warn(&device->cdev->dev,
-			 "FORMAT 4 - No syn byte in count "
+			 "FORMAT 4 - No sync byte in count "
 			 "address area; offset active\n");
 		break;
 	case 0x0E:
@@ -684,7 +684,7 @@ dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense)
 		break;
 	case 0x0F:
 		dev_warn(&device->cdev->dev,
-			 "FORMAT 4 - No syn byte in data area; "
+			 "FORMAT 4 - No sync byte in data area; "
 			 "offset active\n");
 		break;
 	default:
@@ -999,7 +999,7 @@ dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense)
 		break;
 	default:
 		dev_warn(&device->cdev->dev,
-			 "FORMAT D - Reserved\n");
+			 "FORMAT F - Reserved\n");
 	}
 	break;
...
@@ -2,10 +2,11 @@
 # S/390 crypto devices
 #
-ap-objs := ap_bus.o
-# zcrypt_api depends on ap
-obj-$(CONFIG_ZCRYPT) += ap.o zcrypt_api.o
-# msgtype* depend on zcrypt_api
-obj-$(CONFIG_ZCRYPT) += zcrypt_msgtype6.o zcrypt_msgtype50.o
-# adapter drivers depend on ap, zcrypt_api and msgtype*
+ap-objs := ap_bus.o ap_card.o ap_queue.o
+obj-$(subst m,y,$(CONFIG_ZCRYPT)) += ap.o
+# zcrypt_api.o and zcrypt_msgtype*.o depend on ap.o
+zcrypt-objs := zcrypt_api.o zcrypt_card.o zcrypt_queue.o
+zcrypt-objs += zcrypt_msgtype6.o zcrypt_msgtype50.o
+obj-$(CONFIG_ZCRYPT) += zcrypt.o
+# adapter drivers depend on ap.o and zcrypt.o
 obj-$(CONFIG_ZCRYPT) += zcrypt_pcixcc.o zcrypt_cex2a.o zcrypt_cex4.o
/*
* Copyright IBM Corp. 2016
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*
* Adjunct processor bus inline assemblies.
*/
#ifndef _AP_ASM_H_
#define _AP_ASM_H_
#include <asm/isc.h>
/**
* ap_intructions_available() - Test if AP instructions are available.
*
* Returns 0 if the AP instructions are installed.
*/
static inline int ap_instructions_available(void)
{
register unsigned long reg0 asm ("0") = AP_MKQID(0, 0);
register unsigned long reg1 asm ("1") = -ENODEV;
register unsigned long reg2 asm ("2") = 0UL;
asm volatile(
" .long 0xb2af0000\n" /* PQAP(TAPQ) */
"0: la %1,0\n"
"1:\n"
EX_TABLE(0b, 1b)
: "+d" (reg0), "+d" (reg1), "+d" (reg2) : : "cc");
return reg1;
}
/**
* ap_tapq(): Test adjunct processor queue.
* @qid: The AP queue number
* @info: Pointer to queue descriptor
*
* Returns AP queue status structure.
*/
static inline struct ap_queue_status ap_tapq(ap_qid_t qid, unsigned long *info)
{
register unsigned long reg0 asm ("0") = qid;
register struct ap_queue_status reg1 asm ("1");
register unsigned long reg2 asm ("2") = 0UL;
asm volatile(".long 0xb2af0000" /* PQAP(TAPQ) */
: "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
if (info)
*info = reg2;
return reg1;
}
/**
* ap_pqap_rapq(): Reset adjunct processor queue.
* @qid: The AP queue number
*
* Returns AP queue status structure.
*/
static inline struct ap_queue_status ap_rapq(ap_qid_t qid)
{
register unsigned long reg0 asm ("0") = qid | 0x01000000UL;
register struct ap_queue_status reg1 asm ("1");
register unsigned long reg2 asm ("2") = 0UL;
asm volatile(
".long 0xb2af0000" /* PQAP(RAPQ) */
: "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
return reg1;
}
/**
* ap_aqic(): Enable interruption for a specific AP.
* @qid: The AP queue number
* @ind: The notification indicator byte
*
* Returns AP queue status.
*/
static inline struct ap_queue_status ap_aqic(ap_qid_t qid, void *ind)
{
register unsigned long reg0 asm ("0") = qid | (3UL << 24);
register unsigned long reg1_in asm ("1") = (8UL << 44) | AP_ISC;
register struct ap_queue_status reg1_out asm ("1");
register void *reg2 asm ("2") = ind;
asm volatile(
".long 0xb2af0000" /* PQAP(AQIC) */
: "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2)
:
: "cc");
return reg1_out;
}
/**
* ap_qci(): Get AP configuration data
*
* Returns 0 on success, or -EOPNOTSUPP.
*/
static inline int ap_qci(void *config)
{
register unsigned long reg0 asm ("0") = 0x04000000UL;
register unsigned long reg1 asm ("1") = -EINVAL;
register void *reg2 asm ("2") = (void *) config;
asm volatile(
".long 0xb2af0000\n" /* PQAP(QCI) */
"0: la %1,0\n"
"1:\n"
EX_TABLE(0b, 1b)
: "+d" (reg0), "+d" (reg1), "+d" (reg2)
:
: "cc", "memory");
return reg1;
}
/**
* ap_nqap(): Send message to adjunct processor queue.
* @qid: The AP queue number
* @psmid: The program supplied message identifier
* @msg: The message text
* @length: The message length
*
* Returns AP queue status structure.
* Condition code 1 on NQAP can't happen because the L bit is 1.
* Condition code 2 on NQAP also means the send is incomplete,
* because a segment boundary was reached. The NQAP is repeated.
*/
static inline struct ap_queue_status ap_nqap(ap_qid_t qid,
unsigned long long psmid,
void *msg, size_t length)
{
struct msgblock { char _[length]; };
register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
register struct ap_queue_status reg1 asm ("1");
register unsigned long reg2 asm ("2") = (unsigned long) msg;
register unsigned long reg3 asm ("3") = (unsigned long) length;
register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
register unsigned long reg5 asm ("5") = psmid & 0xffffffff;
asm volatile (
"0: .long 0xb2ad0042\n" /* NQAP */
" brc 2,0b"
: "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
: "d" (reg4), "d" (reg5), "m" (*(struct msgblock *) msg)
: "cc");
return reg1;
}
/**
* ap_dqap(): Receive message from adjunct processor queue.
* @qid: The AP queue number
* @psmid: Pointer to program supplied message identifier
* @msg: The message text
* @length: The message length
*
* Returns AP queue status structure.
* Condition code 1 on DQAP means the receive has taken place
* but only partially. The response is incomplete, hence the
* DQAP is repeated.
* Condition code 2 on DQAP also means the receive is incomplete,
* this time because a segment boundary was reached. Again, the
* DQAP is repeated.
* Note that gpr2 is used by the DQAP instruction to keep track of
* any 'residual' length, in case the instruction gets interrupted.
* Hence it gets zeroed before the instruction.
*/
static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
unsigned long long *psmid,
void *msg, size_t length)
{
struct msgblock { char _[length]; };
register unsigned long reg0 asm("0") = qid | 0x80000000UL;
register struct ap_queue_status reg1 asm ("1");
register unsigned long reg2 asm("2") = 0UL;
register unsigned long reg4 asm("4") = (unsigned long) msg;
register unsigned long reg5 asm("5") = (unsigned long) length;
register unsigned long reg6 asm("6") = 0UL;
register unsigned long reg7 asm("7") = 0UL;
asm volatile(
"0: .long 0xb2ae0064\n" /* DQAP */
" brc 6,0b\n"
: "+d" (reg0), "=d" (reg1), "+d" (reg2),
"+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7),
"=m" (*(struct msgblock *) msg) : : "cc");
*psmid = (((unsigned long long) reg6) << 32) + reg7;
return reg1;
}
#endif /* _AP_ASM_H_ */
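Editor's note: a hypothetical caller sketch for the helpers above (assumes AP_MKQID() and AP_RESPONSE_NORMAL from ap_bus.h; the probe function itself is made up for illustration):

/* Hypothetical example: probe one AP queue and return the maximum domain
 * index reported in the TAPQ info word (bits 16-23), or -ENODEV. */
static int example_probe_queue(int card, int domain)
{
	struct ap_queue_status status;
	unsigned long info;

	if (ap_instructions_available() != 0)
		return -ENODEV;		/* AP instructions not installed */
	status = ap_tapq(AP_MKQID(card, domain), &info);
	if (status.response_code != AP_RESPONSE_NORMAL)
		return -ENODEV;
	return (info >> 16) & 0xff;	/* see ap_query_queue() below */
}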
@@ -46,8 +46,12 @@
 #include <linux/ktime.h>
 #include <asm/facility.h>
 #include <linux/crypto.h>
+#include <linux/mod_devicetable.h>
+#include <linux/debugfs.h>
 #include "ap_bus.h"
+#include "ap_asm.h"
+#include "ap_debug.h"
 /*
  * Module description.
@@ -62,6 +66,7 @@ MODULE_ALIAS_CRYPTO("z90crypt");
  * Module parameter
  */
 int ap_domain_index = -1;	/* Adjunct Processor Domain Index */
+static DEFINE_SPINLOCK(ap_domain_lock);
 module_param_named(domain, ap_domain_index, int, S_IRUSR|S_IRGRP);
 MODULE_PARM_DESC(domain, "domain index for ap devices");
 EXPORT_SYMBOL(ap_domain_index);
@@ -70,12 +75,20 @@ static int ap_thread_flag = 0;
 module_param_named(poll_thread, ap_thread_flag, int, S_IRUSR|S_IRGRP);
 MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off).");
-static struct device *ap_root_device = NULL;
+static struct device *ap_root_device;
+
+DEFINE_SPINLOCK(ap_list_lock);
+LIST_HEAD(ap_card_list);
 static struct ap_config_info *ap_configuration;
-static DEFINE_SPINLOCK(ap_device_list_lock);
-static LIST_HEAD(ap_device_list);
 static bool initialised;
+/*
+ * AP bus related debug feature things.
+ */
+static struct dentry *ap_dbf_root;
+debug_info_t *ap_dbf_info;
 /*
  * Workqueue timer for bus rescan.
  */
@@ -89,7 +102,6 @@ static DECLARE_WORK(ap_scan_work, ap_scan_bus);
  */
 static void ap_tasklet_fn(unsigned long);
 static DECLARE_TASKLET(ap_tasklet, ap_tasklet_fn, 0);
-static atomic_t ap_poll_requests = ATOMIC_INIT(0);
 static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
 static struct task_struct *ap_poll_kthread = NULL;
 static DEFINE_MUTEX(ap_poll_thread_mutex);
@@ -129,23 +141,17 @@ static inline int ap_using_interrupts(void)
 }
 /**
- * ap_intructions_available() - Test if AP instructions are available.
+ * ap_airq_ptr() - Get the address of the adapter interrupt indicator
  *
- * Returns 0 if the AP instructions are installed.
+ * Returns the address of the local-summary-indicator of the adapter
+ * interrupt handler for AP, or NULL if adapter interrupts are not
+ * available.
  */
-static inline int ap_instructions_available(void)
+void *ap_airq_ptr(void)
 {
-	register unsigned long reg0 asm ("0") = AP_MKQID(0,0);
-	register unsigned long reg1 asm ("1") = -ENODEV;
-	register unsigned long reg2 asm ("2") = 0UL;
-	asm volatile(
-		"   .long 0xb2af0000\n"		/* PQAP(TAPQ) */
-		"0: la    %1,0\n"
-		"1:\n"
-		EX_TABLE(0b, 1b)
-		: "+d" (reg0), "+d" (reg1), "+d" (reg2) : : "cc");
-	return reg1;
+	if (ap_using_interrupts())
+		return ap_airq.lsi_ptr;
+	return NULL;
 }
 /**
@@ -169,19 +175,6 @@ static int ap_configuration_available(void)
 	return test_facility(12);
 }
-static inline struct ap_queue_status
-__pqap_tapq(ap_qid_t qid, unsigned long *info)
-{
-	register unsigned long reg0 asm ("0") = qid;
-	register struct ap_queue_status reg1 asm ("1");
-	register unsigned long reg2 asm ("2") = 0UL;
-	asm volatile(".long 0xb2af0000"		/* PQAP(TAPQ) */
-		     : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
-	*info = reg2;
-	return reg1;
-}
 /**
  * ap_test_queue(): Test adjunct processor queue.
  * @qid: The AP queue number
@@ -192,85 +185,16 @@ __pqap_tapq(ap_qid_t qid, unsigned long *info)
 static inline struct ap_queue_status
 ap_test_queue(ap_qid_t qid, unsigned long *info)
 {
-	struct ap_queue_status aqs;
-	unsigned long _info;
 	if (test_facility(15))
 		qid |= 1UL << 23;		/* set APFT T bit*/
-	aqs = __pqap_tapq(qid, &_info);
-	if (info)
-		*info = _info;
-	return aqs;
+	return ap_tapq(qid, info);
 }
/**
* ap_reset_queue(): Reset adjunct processor queue.
* @qid: The AP queue number
*
* Returns AP queue status structure.
*/
static inline struct ap_queue_status ap_reset_queue(ap_qid_t qid)
{
register unsigned long reg0 asm ("0") = qid | 0x01000000UL;
register struct ap_queue_status reg1 asm ("1");
register unsigned long reg2 asm ("2") = 0UL;
asm volatile(
".long 0xb2af0000" /* PQAP(RAPQ) */
: "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
return reg1;
}
/**
* ap_queue_interruption_control(): Enable interruption for a specific AP.
* @qid: The AP queue number
* @ind: The notification indicator byte
*
* Returns AP queue status.
*/
static inline struct ap_queue_status
ap_queue_interruption_control(ap_qid_t qid, void *ind)
{
register unsigned long reg0 asm ("0") = qid | 0x03000000UL;
register unsigned long reg1_in asm ("1") = 0x0000800000000000UL | AP_ISC;
register struct ap_queue_status reg1_out asm ("1");
register void *reg2 asm ("2") = ind;
asm volatile(
".long 0xb2af0000" /* PQAP(AQIC) */
: "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2)
:
: "cc" );
return reg1_out;
}
/**
* ap_query_configuration(): Get AP configuration data
*
* Returns 0 on success, or -EOPNOTSUPP.
*/
static inline int __ap_query_configuration(void)
{
register unsigned long reg0 asm ("0") = 0x04000000UL;
register unsigned long reg1 asm ("1") = -EINVAL;
register void *reg2 asm ("2") = (void *) ap_configuration;
asm volatile(
".long 0xb2af0000\n" /* PQAP(QCI) */
"0: la %1,0\n"
"1:\n"
EX_TABLE(0b, 1b)
: "+d" (reg0), "+d" (reg1), "+d" (reg2)
:
: "cc");
return reg1;
} }
 static inline int ap_query_configuration(void)
 {
 	if (!ap_configuration)
 		return -EOPNOTSUPP;
-	return __ap_query_configuration();
+	return ap_qci(ap_configuration);
 }
 /**
@@ -330,162 +254,6 @@ static inline int ap_test_config_domain(unsigned int domain)
 	return ap_test_config(ap_configuration->aqm, domain);
 }
/**
* ap_queue_enable_interruption(): Enable interruption on an AP.
* @qid: The AP queue number
* @ind: the notification indicator byte
*
* Enables interruption on AP queue via ap_queue_interruption_control(). Based
* on the return value it waits a while and tests the AP queue if interrupts
* have been switched on using ap_test_queue().
*/
static int ap_queue_enable_interruption(struct ap_device *ap_dev, void *ind)
{
struct ap_queue_status status;
status = ap_queue_interruption_control(ap_dev->qid, ind);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
case AP_RESPONSE_OTHERWISE_CHANGED:
return 0;
case AP_RESPONSE_Q_NOT_AVAIL:
case AP_RESPONSE_DECONFIGURED:
case AP_RESPONSE_CHECKSTOPPED:
case AP_RESPONSE_INVALID_ADDRESS:
pr_err("Registering adapter interrupts for AP %d failed\n",
AP_QID_DEVICE(ap_dev->qid));
return -EOPNOTSUPP;
case AP_RESPONSE_RESET_IN_PROGRESS:
case AP_RESPONSE_BUSY:
default:
return -EBUSY;
}
}
static inline struct ap_queue_status
__nqap(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
{
typedef struct { char _[length]; } msgblock;
register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
register struct ap_queue_status reg1 asm ("1");
register unsigned long reg2 asm ("2") = (unsigned long) msg;
register unsigned long reg3 asm ("3") = (unsigned long) length;
register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
register unsigned long reg5 asm ("5") = psmid & 0xffffffff;
asm volatile (
"0: .long 0xb2ad0042\n" /* NQAP */
" brc 2,0b"
: "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
: "d" (reg4), "d" (reg5), "m" (*(msgblock *) msg)
: "cc");
return reg1;
}
/**
* __ap_send(): Send message to adjunct processor queue.
* @qid: The AP queue number
* @psmid: The program supplied message identifier
* @msg: The message text
* @length: The message length
* @special: Special Bit
*
* Returns AP queue status structure.
* Condition code 1 on NQAP can't happen because the L bit is 1.
* Condition code 2 on NQAP also means the send is incomplete,
* because a segment boundary was reached. The NQAP is repeated.
*/
static inline struct ap_queue_status
__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
unsigned int special)
{
if (special == 1)
qid |= 0x400000UL;
return __nqap(qid, psmid, msg, length);
}
int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
{
struct ap_queue_status status;
status = __ap_send(qid, psmid, msg, length, 0);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
return 0;
case AP_RESPONSE_Q_FULL:
case AP_RESPONSE_RESET_IN_PROGRESS:
return -EBUSY;
case AP_RESPONSE_REQ_FAC_NOT_INST:
return -EINVAL;
default: /* Device is gone. */
return -ENODEV;
}
}
EXPORT_SYMBOL(ap_send);
/**
* __ap_recv(): Receive message from adjunct processor queue.
* @qid: The AP queue number
* @psmid: Pointer to program supplied message identifier
* @msg: The message text
* @length: The message length
*
* Returns AP queue status structure.
* Condition code 1 on DQAP means the receive has taken place
* but only partially. The response is incomplete, hence the
* DQAP is repeated.
* Condition code 2 on DQAP also means the receive is incomplete,
* this time because a segment boundary was reached. Again, the
* DQAP is repeated.
* Note that gpr2 is used by the DQAP instruction to keep track of
* any 'residual' length, in case the instruction gets interrupted.
* Hence it gets zeroed before the instruction.
*/
static inline struct ap_queue_status
__ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
{
typedef struct { char _[length]; } msgblock;
register unsigned long reg0 asm("0") = qid | 0x80000000UL;
register struct ap_queue_status reg1 asm ("1");
register unsigned long reg2 asm("2") = 0UL;
register unsigned long reg4 asm("4") = (unsigned long) msg;
register unsigned long reg5 asm("5") = (unsigned long) length;
register unsigned long reg6 asm("6") = 0UL;
register unsigned long reg7 asm("7") = 0UL;
asm volatile(
"0: .long 0xb2ae0064\n" /* DQAP */
" brc 6,0b\n"
: "+d" (reg0), "=d" (reg1), "+d" (reg2),
"+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7),
"=m" (*(msgblock *) msg) : : "cc" );
*psmid = (((unsigned long long) reg6) << 32) + reg7;
return reg1;
}
int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
{
struct ap_queue_status status;
if (msg == NULL)
return -EINVAL;
status = __ap_recv(qid, psmid, msg, length);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
return 0;
case AP_RESPONSE_NO_PENDING_REPLY:
if (status.queue_empty)
return -ENOENT;
return -EBUSY;
case AP_RESPONSE_RESET_IN_PROGRESS:
return -EBUSY;
default:
return -ENODEV;
}
}
EXPORT_SYMBOL(ap_recv);
 /**
  * ap_query_queue(): Check if an AP queue is available.
  * @qid: The AP queue number
@@ -500,7 +268,7 @@ static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type,
 	unsigned long info;
 	int nd;
-	if (!ap_test_config_card_id(AP_QID_DEVICE(qid)))
+	if (!ap_test_config_card_id(AP_QID_CARD(qid)))
 		return -ENODEV;
 	status = ap_test_queue(qid, &info);
@@ -511,8 +279,28 @@ static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type,
 		*facilities = (unsigned int)(info >> 32);
 		/* Update maximum domain id */
 		nd = (info >> 16) & 0xff;
+		/* if N bit is available, z13 and newer */
 		if ((info & (1UL << 57)) && nd > 0)
 			ap_max_domain_id = nd;
+		else /* older machine types */
+			ap_max_domain_id = 15;
+		switch (*device_type) {
+			/* For CEX2 and CEX3 the available functions
+			 * are not reflected by the facilities bits.
+			 * Instead it is coded into the type. So here
+			 * modify the function bits based on the type.
+			 */
+		case AP_DEVICE_TYPE_CEX2A:
+		case AP_DEVICE_TYPE_CEX3A:
+			*facilities |= 0x08000000;
+			break;
+		case AP_DEVICE_TYPE_CEX2C:
+		case AP_DEVICE_TYPE_CEX3C:
+			*facilities |= 0x10000000;
+			break;
+		default:
+			break;
+		}
 		return 0;
 	case AP_RESPONSE_Q_NOT_AVAIL:
 	case AP_RESPONSE_DECONFIGURED:
@@ -528,9 +316,7 @@ static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type,
 	}
 }
-/* State machine definitions and helpers */
-
-static void ap_sm_wait(enum ap_wait wait)
+void ap_wait(enum ap_wait wait)
 {
 	ktime_t hr_time;
@@ -559,350 +345,21 @@ static void ap_sm_wait(enum ap_wait wait)
 	}
 }
static enum ap_wait ap_sm_nop(struct ap_device *ap_dev)
{
return AP_WAIT_NONE;
}
/**
* ap_sm_recv(): Receive pending reply messages from an AP device but do
* not change the state of the device.
* @ap_dev: pointer to the AP device
*
* Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
*/
static struct ap_queue_status ap_sm_recv(struct ap_device *ap_dev)
{
struct ap_queue_status status;
struct ap_message *ap_msg;
status = __ap_recv(ap_dev->qid, &ap_dev->reply->psmid,
ap_dev->reply->message, ap_dev->reply->length);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
atomic_dec(&ap_poll_requests);
ap_dev->queue_count--;
if (ap_dev->queue_count > 0)
mod_timer(&ap_dev->timeout,
jiffies + ap_dev->drv->request_timeout);
list_for_each_entry(ap_msg, &ap_dev->pendingq, list) {
if (ap_msg->psmid != ap_dev->reply->psmid)
continue;
list_del_init(&ap_msg->list);
ap_dev->pendingq_count--;
ap_msg->receive(ap_dev, ap_msg, ap_dev->reply);
break;
}
case AP_RESPONSE_NO_PENDING_REPLY:
if (!status.queue_empty || ap_dev->queue_count <= 0)
break;
/* The card shouldn't forget requests but who knows. */
atomic_sub(ap_dev->queue_count, &ap_poll_requests);
ap_dev->queue_count = 0;
list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
ap_dev->requestq_count += ap_dev->pendingq_count;
ap_dev->pendingq_count = 0;
break;
default:
break;
}
return status;
}
/**
* ap_sm_read(): Receive pending reply messages from an AP device.
* @ap_dev: pointer to the AP device
*
* Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
*/
static enum ap_wait ap_sm_read(struct ap_device *ap_dev)
{
struct ap_queue_status status;
if (!ap_dev->reply)
return AP_WAIT_NONE;
status = ap_sm_recv(ap_dev);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
if (ap_dev->queue_count > 0) {
ap_dev->state = AP_STATE_WORKING;
return AP_WAIT_AGAIN;
}
ap_dev->state = AP_STATE_IDLE;
return AP_WAIT_NONE;
case AP_RESPONSE_NO_PENDING_REPLY:
if (ap_dev->queue_count > 0)
return AP_WAIT_INTERRUPT;
ap_dev->state = AP_STATE_IDLE;
return AP_WAIT_NONE;
default:
ap_dev->state = AP_STATE_BORKED;
return AP_WAIT_NONE;
}
}
/**
* ap_sm_suspend_read(): Receive pending reply messages from an AP device
* without changing the device state in between. In suspend mode we don't
* allow sending new requests, therefore just fetch pending replies.
* @ap_dev: pointer to the AP device
*
* Returns AP_WAIT_NONE or AP_WAIT_AGAIN
*/
static enum ap_wait ap_sm_suspend_read(struct ap_device *ap_dev)
{
struct ap_queue_status status;
if (!ap_dev->reply)
return AP_WAIT_NONE;
status = ap_sm_recv(ap_dev);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
if (ap_dev->queue_count > 0)
return AP_WAIT_AGAIN;
/* fall through */
default:
return AP_WAIT_NONE;
}
}
/**
* ap_sm_write(): Send messages from the request queue to an AP device.
* @ap_dev: pointer to the AP device
*
* Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
*/
static enum ap_wait ap_sm_write(struct ap_device *ap_dev)
{
struct ap_queue_status status;
struct ap_message *ap_msg;
if (ap_dev->requestq_count <= 0)
return AP_WAIT_NONE;
/* Start the next request on the queue. */
ap_msg = list_entry(ap_dev->requestq.next, struct ap_message, list);
status = __ap_send(ap_dev->qid, ap_msg->psmid,
ap_msg->message, ap_msg->length, ap_msg->special);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
atomic_inc(&ap_poll_requests);
ap_dev->queue_count++;
if (ap_dev->queue_count == 1)
mod_timer(&ap_dev->timeout,
jiffies + ap_dev->drv->request_timeout);
list_move_tail(&ap_msg->list, &ap_dev->pendingq);
ap_dev->requestq_count--;
ap_dev->pendingq_count++;
if (ap_dev->queue_count < ap_dev->queue_depth) {
ap_dev->state = AP_STATE_WORKING;
return AP_WAIT_AGAIN;
}
/* fall through */
case AP_RESPONSE_Q_FULL:
ap_dev->state = AP_STATE_QUEUE_FULL;
return AP_WAIT_INTERRUPT;
case AP_RESPONSE_RESET_IN_PROGRESS:
ap_dev->state = AP_STATE_RESET_WAIT;
return AP_WAIT_TIMEOUT;
case AP_RESPONSE_MESSAGE_TOO_BIG:
case AP_RESPONSE_REQ_FAC_NOT_INST:
list_del_init(&ap_msg->list);
ap_dev->requestq_count--;
ap_msg->rc = -EINVAL;
ap_msg->receive(ap_dev, ap_msg, NULL);
return AP_WAIT_AGAIN;
default:
ap_dev->state = AP_STATE_BORKED;
return AP_WAIT_NONE;
}
}
/**
* ap_sm_read_write(): Send and receive messages to/from an AP device.
* @ap_dev: pointer to the AP device
*
* Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
*/
static enum ap_wait ap_sm_read_write(struct ap_device *ap_dev)
{
return min(ap_sm_read(ap_dev), ap_sm_write(ap_dev));
}
/**
* ap_sm_reset(): Reset an AP queue.
* @qid: The AP queue number
*
* Submit the Reset command to an AP queue.
*/
static enum ap_wait ap_sm_reset(struct ap_device *ap_dev)
{
struct ap_queue_status status;
status = ap_reset_queue(ap_dev->qid);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
case AP_RESPONSE_RESET_IN_PROGRESS:
ap_dev->state = AP_STATE_RESET_WAIT;
ap_dev->interrupt = AP_INTR_DISABLED;
return AP_WAIT_TIMEOUT;
case AP_RESPONSE_BUSY:
return AP_WAIT_TIMEOUT;
case AP_RESPONSE_Q_NOT_AVAIL:
case AP_RESPONSE_DECONFIGURED:
case AP_RESPONSE_CHECKSTOPPED:
default:
ap_dev->state = AP_STATE_BORKED;
return AP_WAIT_NONE;
}
}
/**
* ap_sm_reset_wait(): Test queue for completion of the reset operation
* @ap_dev: pointer to the AP device
*
* Returns AP_POLL_IMMEDIATELY, AP_POLL_AFTER_TIMEROUT or 0.
*/
static enum ap_wait ap_sm_reset_wait(struct ap_device *ap_dev)
{
struct ap_queue_status status;
unsigned long info;
if (ap_dev->queue_count > 0 && ap_dev->reply)
/* Try to read a completed message and get the status */
status = ap_sm_recv(ap_dev);
else
/* Get the status with TAPQ */
status = ap_test_queue(ap_dev->qid, &info);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
if (ap_using_interrupts() &&
ap_queue_enable_interruption(ap_dev,
ap_airq.lsi_ptr) == 0)
ap_dev->state = AP_STATE_SETIRQ_WAIT;
else
ap_dev->state = (ap_dev->queue_count > 0) ?
AP_STATE_WORKING : AP_STATE_IDLE;
return AP_WAIT_AGAIN;
case AP_RESPONSE_BUSY:
case AP_RESPONSE_RESET_IN_PROGRESS:
return AP_WAIT_TIMEOUT;
case AP_RESPONSE_Q_NOT_AVAIL:
case AP_RESPONSE_DECONFIGURED:
case AP_RESPONSE_CHECKSTOPPED:
default:
ap_dev->state = AP_STATE_BORKED;
return AP_WAIT_NONE;
}
}
/**
* ap_sm_setirq_wait(): Test queue for completion of the irq enablement
* @ap_dev: pointer to the AP device
*
* Returns AP_POLL_IMMEDIATELY, AP_POLL_AFTER_TIMEROUT or 0.
*/
static enum ap_wait ap_sm_setirq_wait(struct ap_device *ap_dev)
{
struct ap_queue_status status;
unsigned long info;
if (ap_dev->queue_count > 0 && ap_dev->reply)
/* Try to read a completed message and get the status */
status = ap_sm_recv(ap_dev);
else
/* Get the status with TAPQ */
status = ap_test_queue(ap_dev->qid, &info);
if (status.int_enabled == 1) {
/* Irqs are now enabled */
ap_dev->interrupt = AP_INTR_ENABLED;
ap_dev->state = (ap_dev->queue_count > 0) ?
AP_STATE_WORKING : AP_STATE_IDLE;
}
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
if (ap_dev->queue_count > 0)
return AP_WAIT_AGAIN;
/* fallthrough */
case AP_RESPONSE_NO_PENDING_REPLY:
return AP_WAIT_TIMEOUT;
default:
ap_dev->state = AP_STATE_BORKED;
return AP_WAIT_NONE;
}
}
/*
* AP state machine jump table
*/
static ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = {
[AP_STATE_RESET_START] = {
[AP_EVENT_POLL] = ap_sm_reset,
[AP_EVENT_TIMEOUT] = ap_sm_nop,
},
[AP_STATE_RESET_WAIT] = {
[AP_EVENT_POLL] = ap_sm_reset_wait,
[AP_EVENT_TIMEOUT] = ap_sm_nop,
},
[AP_STATE_SETIRQ_WAIT] = {
[AP_EVENT_POLL] = ap_sm_setirq_wait,
[AP_EVENT_TIMEOUT] = ap_sm_nop,
},
[AP_STATE_IDLE] = {
[AP_EVENT_POLL] = ap_sm_write,
[AP_EVENT_TIMEOUT] = ap_sm_nop,
},
[AP_STATE_WORKING] = {
[AP_EVENT_POLL] = ap_sm_read_write,
[AP_EVENT_TIMEOUT] = ap_sm_reset,
},
[AP_STATE_QUEUE_FULL] = {
[AP_EVENT_POLL] = ap_sm_read,
[AP_EVENT_TIMEOUT] = ap_sm_reset,
},
[AP_STATE_SUSPEND_WAIT] = {
[AP_EVENT_POLL] = ap_sm_suspend_read,
[AP_EVENT_TIMEOUT] = ap_sm_nop,
},
[AP_STATE_BORKED] = {
[AP_EVENT_POLL] = ap_sm_nop,
[AP_EVENT_TIMEOUT] = ap_sm_nop,
},
};
static inline enum ap_wait ap_sm_event(struct ap_device *ap_dev,
enum ap_event event)
{
return ap_jumptable[ap_dev->state][event](ap_dev);
}
static inline enum ap_wait ap_sm_event_loop(struct ap_device *ap_dev,
enum ap_event event)
{
enum ap_wait wait;
while ((wait = ap_sm_event(ap_dev, event)) == AP_WAIT_AGAIN)
;
return wait;
}
 /**
  * ap_request_timeout(): Handling of request timeouts
  * @data: Holds the AP device.
  *
  * Handles request timeouts.
  */
-static void ap_request_timeout(unsigned long data)
+void ap_request_timeout(unsigned long data)
 {
-	struct ap_device *ap_dev = (struct ap_device *) data;
+	struct ap_queue *aq = (struct ap_queue *) data;
 	if (ap_suspend_flag)
 		return;
-	spin_lock_bh(&ap_dev->lock);
-	ap_sm_wait(ap_sm_event(ap_dev, AP_EVENT_TIMEOUT));
-	spin_unlock_bh(&ap_dev->lock);
+	spin_lock_bh(&aq->lock);
+	ap_wait(ap_sm_event(aq, AP_EVENT_TIMEOUT));
+	spin_unlock_bh(&aq->lock);
 }
 /**
@@ -937,7 +394,8 @@ static void ap_interrupt_handler(struct airq_struct *airq)
  */
 static void ap_tasklet_fn(unsigned long dummy)
 {
-	struct ap_device *ap_dev;
+	struct ap_card *ac;
+	struct ap_queue *aq;
 	enum ap_wait wait = AP_WAIT_NONE;
 	/* Reset the indicator if interrupts are used. Thus new interrupts can
@@ -947,14 +405,35 @@ static void ap_tasklet_fn(unsigned long dummy)
 	if (ap_using_interrupts())
 		xchg(ap_airq.lsi_ptr, 0);
-	spin_lock(&ap_device_list_lock);
-	list_for_each_entry(ap_dev, &ap_device_list, list) {
-		spin_lock_bh(&ap_dev->lock);
-		wait = min(wait, ap_sm_event_loop(ap_dev, AP_EVENT_POLL));
-		spin_unlock_bh(&ap_dev->lock);
+	spin_lock_bh(&ap_list_lock);
+	for_each_ap_card(ac) {
+		for_each_ap_queue(aq, ac) {
+			spin_lock_bh(&aq->lock);
+			wait = min(wait, ap_sm_event_loop(aq, AP_EVENT_POLL));
+			spin_unlock_bh(&aq->lock);
+		}
 	}
-	spin_unlock(&ap_device_list_lock);
-	ap_sm_wait(wait);
+	spin_unlock_bh(&ap_list_lock);
+	ap_wait(wait);
+}
+
+static int ap_pending_requests(void)
+{
+	struct ap_card *ac;
+	struct ap_queue *aq;
+
+	spin_lock_bh(&ap_list_lock);
+	for_each_ap_card(ac) {
+		for_each_ap_queue(aq, ac) {
+			if (aq->queue_count == 0)
+				continue;
+			spin_unlock_bh(&ap_list_lock);
+			return 1;
+		}
+	}
+	spin_unlock_bh(&ap_list_lock);
+	return 0;
 }
 /**
@@ -976,8 +455,7 @@ static int ap_poll_thread(void *data)
 	while (!kthread_should_stop()) {
 		add_wait_queue(&ap_poll_wait, &wait);
 		set_current_state(TASK_INTERRUPTIBLE);
-		if (ap_suspend_flag ||
-		    atomic_read(&ap_poll_requests) <= 0) {
+		if (ap_suspend_flag || !ap_pending_requests()) {
 			schedule();
 			try_to_freeze();
 		}
@@ -989,7 +467,8 @@ static int ap_poll_thread(void *data)
 			continue;
 		}
 		ap_tasklet_fn(0);
-	} while (!kthread_should_stop());
+	}
+
 	return 0;
 }
@@ -1018,207 +497,8 @@ static void ap_poll_thread_stop(void)
 	mutex_unlock(&ap_poll_thread_mutex);
 }
+#define is_card_dev(x) ((x)->parent == ap_root_device)
+#define is_queue_dev(x) ((x)->parent != ap_root_device)
 /**
  * ap_queue_message(): Queue a request to an AP device.
* @ap_dev: The AP device to queue the message to
* @ap_msg: The message that is to be added
*/
void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
{
/* For asynchronous message handling a valid receive-callback
* is required. */
BUG_ON(!ap_msg->receive);
spin_lock_bh(&ap_dev->lock);
/* Queue the message. */
list_add_tail(&ap_msg->list, &ap_dev->requestq);
ap_dev->requestq_count++;
ap_dev->total_request_count++;
/* Send/receive as many request from the queue as possible. */
ap_sm_wait(ap_sm_event_loop(ap_dev, AP_EVENT_POLL));
spin_unlock_bh(&ap_dev->lock);
}
EXPORT_SYMBOL(ap_queue_message);
/**
* ap_cancel_message(): Cancel a crypto request.
* @ap_dev: The AP device that has the message queued
* @ap_msg: The message that is to be removed
*
* Cancel a crypto request. This is done by removing the request
* from the device pending or request queue. Note that the
* request stays on the AP queue. When it finishes the message
* reply will be discarded because the psmid can't be found.
*/
void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
{
struct ap_message *tmp;
spin_lock_bh(&ap_dev->lock);
if (!list_empty(&ap_msg->list)) {
list_for_each_entry(tmp, &ap_dev->pendingq, list)
if (tmp->psmid == ap_msg->psmid) {
ap_dev->pendingq_count--;
goto found;
}
ap_dev->requestq_count--;
found:
list_del_init(&ap_msg->list);
}
spin_unlock_bh(&ap_dev->lock);
}
EXPORT_SYMBOL(ap_cancel_message);
/*
* AP device related attributes.
*/
static ssize_t ap_hwtype_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ap_device *ap_dev = to_ap_dev(dev);
return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->device_type);
}
static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL);
static ssize_t ap_raw_hwtype_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ap_device *ap_dev = to_ap_dev(dev);
return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->raw_hwtype);
}
static DEVICE_ATTR(raw_hwtype, 0444, ap_raw_hwtype_show, NULL);
static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct ap_device *ap_dev = to_ap_dev(dev);
return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->queue_depth);
}
static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL);
static ssize_t ap_request_count_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct ap_device *ap_dev = to_ap_dev(dev);
int rc;
spin_lock_bh(&ap_dev->lock);
rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->total_request_count);
spin_unlock_bh(&ap_dev->lock);
return rc;
}
static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL);
static ssize_t ap_requestq_count_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ap_device *ap_dev = to_ap_dev(dev);
int rc;
spin_lock_bh(&ap_dev->lock);
rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->requestq_count);
spin_unlock_bh(&ap_dev->lock);
return rc;
}
static DEVICE_ATTR(requestq_count, 0444, ap_requestq_count_show, NULL);
static ssize_t ap_pendingq_count_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ap_device *ap_dev = to_ap_dev(dev);
int rc;
spin_lock_bh(&ap_dev->lock);
rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->pendingq_count);
spin_unlock_bh(&ap_dev->lock);
return rc;
}
static DEVICE_ATTR(pendingq_count, 0444, ap_pendingq_count_show, NULL);
static ssize_t ap_reset_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ap_device *ap_dev = to_ap_dev(dev);
int rc = 0;
spin_lock_bh(&ap_dev->lock);
switch (ap_dev->state) {
case AP_STATE_RESET_START:
case AP_STATE_RESET_WAIT:
rc = snprintf(buf, PAGE_SIZE, "Reset in progress.\n");
break;
case AP_STATE_WORKING:
case AP_STATE_QUEUE_FULL:
rc = snprintf(buf, PAGE_SIZE, "Reset Timer armed.\n");
break;
default:
rc = snprintf(buf, PAGE_SIZE, "No Reset Timer set.\n");
}
spin_unlock_bh(&ap_dev->lock);
return rc;
}
static DEVICE_ATTR(reset, 0444, ap_reset_show, NULL);
static ssize_t ap_interrupt_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ap_device *ap_dev = to_ap_dev(dev);
int rc = 0;
spin_lock_bh(&ap_dev->lock);
if (ap_dev->state == AP_STATE_SETIRQ_WAIT)
rc = snprintf(buf, PAGE_SIZE, "Enable Interrupt pending.\n");
else if (ap_dev->interrupt == AP_INTR_ENABLED)
rc = snprintf(buf, PAGE_SIZE, "Interrupts enabled.\n");
else
rc = snprintf(buf, PAGE_SIZE, "Interrupts disabled.\n");
spin_unlock_bh(&ap_dev->lock);
return rc;
}
static DEVICE_ATTR(interrupt, 0444, ap_interrupt_show, NULL);
static ssize_t ap_modalias_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "ap:t%02X\n", to_ap_dev(dev)->device_type);
}
static DEVICE_ATTR(modalias, 0444, ap_modalias_show, NULL);
static ssize_t ap_functions_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ap_device *ap_dev = to_ap_dev(dev);
return snprintf(buf, PAGE_SIZE, "0x%08X\n", ap_dev->functions);
}
static DEVICE_ATTR(ap_functions, 0444, ap_functions_show, NULL);
static struct attribute *ap_dev_attrs[] = {
&dev_attr_hwtype.attr,
&dev_attr_raw_hwtype.attr,
&dev_attr_depth.attr,
&dev_attr_request_count.attr,
&dev_attr_requestq_count.attr,
&dev_attr_pendingq_count.attr,
&dev_attr_reset.attr,
&dev_attr_interrupt.attr,
&dev_attr_modalias.attr,
&dev_attr_ap_functions.attr,
NULL
};
static struct attribute_group ap_dev_attr_group = {
.attrs = ap_dev_attrs
};
/**
 * ap_bus_match()
...@@ -1229,7 +509,6 @@ static struct attribute_group ap_dev_attr_group = {
 */
static int ap_bus_match(struct device *dev, struct device_driver *drv)
{
	struct ap_driver *ap_drv = to_ap_drv(drv);
	struct ap_device_id *id;
...@@ -1238,10 +517,14 @@ static int ap_bus_match(struct device *dev, struct device_driver *drv)
	 * supported types of the device_driver.
	 */
	for (id = ap_drv->ids; id->match_flags; id++) {
		if (is_card_dev(dev) &&
		    id->match_flags & AP_DEVICE_ID_MATCH_CARD_TYPE &&
		    id->dev_type == to_ap_dev(dev)->device_type)
			return 1;
		if (is_queue_dev(dev) &&
		    id->match_flags & AP_DEVICE_ID_MATCH_QUEUE_TYPE &&
		    id->dev_type == to_ap_dev(dev)->device_type)
			return 1;
	}
	return 0;
}
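/*
 * Illustration (editor's sketch, not part of this patch): with the bus
 * split into card and queue devices, a driver matches both kinds by
 * listing separate entries with AP_DEVICE_ID_MATCH_CARD_TYPE and
 * AP_DEVICE_ID_MATCH_QUEUE_TYPE.  The CEX5 table below is hypothetical.
 */
static struct ap_device_id example_cex5_ids[] = {
	{ .dev_type = AP_DEVICE_TYPE_CEX5,
	  .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
	{ .dev_type = AP_DEVICE_TYPE_CEX5,
	  .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
	{ /* end of list */ },
};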
...@@ -1277,18 +560,24 @@ static int ap_dev_suspend(struct device *dev)
{
	struct ap_device *ap_dev = to_ap_dev(dev);

	if (ap_dev->drv && ap_dev->drv->suspend)
		ap_dev->drv->suspend(ap_dev);
	return 0;
}

static int ap_dev_resume(struct device *dev)
{
	struct ap_device *ap_dev = to_ap_dev(dev);

	if (ap_dev->drv && ap_dev->drv->resume)
		ap_dev->drv->resume(ap_dev);
	return 0;
}
static void ap_bus_suspend(void)
{
	AP_DBF(DBF_DEBUG, "ap_bus_suspend running\n");

	ap_suspend_flag = 1;
	/*
	 * Disable scanning for devices, thus we do not want to scan
...@@ -1298,9 +587,25 @@ static void ap_bus_suspend(void)
	tasklet_disable(&ap_tasklet);
}

static int __ap_card_devices_unregister(struct device *dev, void *dummy)
{
	if (is_card_dev(dev))
		device_unregister(dev);
	return 0;
}

static int __ap_queue_devices_unregister(struct device *dev, void *dummy)
{
	if (is_queue_dev(dev))
		device_unregister(dev);
	return 0;
}

static int __ap_queue_devices_with_id_unregister(struct device *dev, void *data)
{
	if (is_queue_dev(dev) &&
	    AP_QID_CARD(to_ap_queue(dev)->qid) == (int)(long) data)
		device_unregister(dev);
	return 0;
}
...@@ -1308,8 +613,15 @@ static void ap_bus_resume(void)
{
	int rc;

	AP_DBF(DBF_DEBUG, "ap_bus_resume running\n");

	/* remove all queue devices */
	bus_for_each_dev(&ap_bus_type, NULL, NULL,
			 __ap_queue_devices_unregister);
	/* remove all card devices */
	bus_for_each_dev(&ap_bus_type, NULL, NULL,
			 __ap_card_devices_unregister);

	/* Reset thin interrupt setting */
	if (ap_interrupts_available() && !ap_using_interrupts()) {
		rc = register_adapter_interrupt(&ap_airq);
...@@ -1351,7 +663,7 @@ static struct notifier_block ap_power_notifier = {
	.notifier_call = ap_power_event,
};

static SIMPLE_DEV_PM_OPS(ap_bus_pm_ops, ap_dev_suspend, ap_dev_resume);

static struct bus_type ap_bus_type = {
	.name = "ap",
...@@ -1360,17 +672,6 @@ static struct bus_type ap_bus_type = {
	.pm = &ap_bus_pm_ops,
};
void ap_device_init_reply(struct ap_device *ap_dev,
struct ap_message *reply)
{
ap_dev->reply = reply;
spin_lock_bh(&ap_dev->lock);
ap_sm_wait(ap_sm_event(ap_dev, AP_EVENT_POLL));
spin_unlock_bh(&ap_dev->lock);
}
EXPORT_SYMBOL(ap_device_init_reply);
static int ap_device_probe(struct device *dev)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
...@@ -1384,61 +685,22 @@ static int ap_device_probe(struct device *dev)
	return rc;
}
/**
* __ap_flush_queue(): Flush requests.
* @ap_dev: Pointer to the AP device
*
* Flush all requests from the request/pending queue of an AP device.
*/
static void __ap_flush_queue(struct ap_device *ap_dev)
{
struct ap_message *ap_msg, *next;
list_for_each_entry_safe(ap_msg, next, &ap_dev->pendingq, list) {
list_del_init(&ap_msg->list);
ap_dev->pendingq_count--;
ap_msg->rc = -EAGAIN;
ap_msg->receive(ap_dev, ap_msg, NULL);
}
list_for_each_entry_safe(ap_msg, next, &ap_dev->requestq, list) {
list_del_init(&ap_msg->list);
ap_dev->requestq_count--;
ap_msg->rc = -EAGAIN;
ap_msg->receive(ap_dev, ap_msg, NULL);
}
}
void ap_flush_queue(struct ap_device *ap_dev)
{
spin_lock_bh(&ap_dev->lock);
__ap_flush_queue(ap_dev);
spin_unlock_bh(&ap_dev->lock);
}
EXPORT_SYMBOL(ap_flush_queue);
static int ap_device_remove(struct device *dev)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	struct ap_driver *ap_drv = ap_dev->drv;

	spin_lock_bh(&ap_list_lock);
	if (is_card_dev(dev))
		list_del_init(&to_ap_card(dev)->list);
	else
		list_del_init(&to_ap_queue(dev)->list);
	spin_unlock_bh(&ap_list_lock);

	if (ap_drv->remove)
		ap_drv->remove(ap_dev);

	return 0;
}
static void ap_device_release(struct device *dev)
{
kfree(to_ap_dev(dev));
}
int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
		       char *name)
{
...@@ -1481,18 +743,30 @@ static ssize_t ap_domain_show(struct bus_type *bus, char *buf)
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index);
}

static ssize_t ap_domain_store(struct bus_type *bus,
const char *buf, size_t count)
{
int domain;
if (sscanf(buf, "%i\n", &domain) != 1 ||
domain < 0 || domain > ap_max_domain_id)
return -EINVAL;
spin_lock_bh(&ap_domain_lock);
ap_domain_index = domain;
spin_unlock_bh(&ap_domain_lock);
AP_DBF(DBF_DEBUG, "store new default domain=%d\n", domain);
return count;
}
static BUS_ATTR(ap_domain, 0644, ap_domain_show, ap_domain_store);
static ssize_t ap_control_domain_mask_show(struct bus_type *bus, char *buf)
{
	if (!ap_configuration)	/* QCI not supported */
		return snprintf(buf, PAGE_SIZE, "not supported\n");

	return snprintf(buf, PAGE_SIZE,
			"0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
			ap_configuration->adm[0], ap_configuration->adm[1],
...@@ -1504,6 +778,22 @@ static ssize_t ap_control_domain_mask_show(struct bus_type *bus, char *buf)
static BUS_ATTR(ap_control_domain_mask, 0444,
		ap_control_domain_mask_show, NULL);
static ssize_t ap_usage_domain_mask_show(struct bus_type *bus, char *buf)
{
if (!ap_configuration) /* QCI not supported */
return snprintf(buf, PAGE_SIZE, "not supported\n");
return snprintf(buf, PAGE_SIZE,
"0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
ap_configuration->aqm[0], ap_configuration->aqm[1],
ap_configuration->aqm[2], ap_configuration->aqm[3],
ap_configuration->aqm[4], ap_configuration->aqm[5],
ap_configuration->aqm[6], ap_configuration->aqm[7]);
}
static BUS_ATTR(ap_usage_domain_mask, 0444,
ap_usage_domain_mask_show, NULL);
static ssize_t ap_config_time_show(struct bus_type *bus, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time);
...@@ -1599,6 +889,7 @@ static BUS_ATTR(ap_max_domain_id, 0444, ap_max_domain_id_show, NULL);
static struct bus_attribute *const ap_bus_attrs[] = {
	&bus_attr_ap_domain,
	&bus_attr_ap_control_domain_mask,
	&bus_attr_ap_usage_domain_mask,
	&bus_attr_config_time,
	&bus_attr_poll_thread,
	&bus_attr_ap_interrupts,
...@@ -1623,9 +914,12 @@ static int ap_select_domain(void)
	 * the "domain=" parameter or the domain with the maximum number
	 * of devices.
	 */
	spin_lock_bh(&ap_domain_lock);
	if (ap_domain_index >= 0) {
		/* Domain has already been selected. */
		spin_unlock_bh(&ap_domain_lock);
		return 0;
	}
	best_domain = -1;
	max_count = 0;
	for (i = 0; i < AP_DOMAINS; i++) {
...@@ -1647,109 +941,171 @@ static int ap_select_domain(void)
	}
	if (best_domain >= 0) {
		ap_domain_index = best_domain;
		spin_unlock_bh(&ap_domain_lock);
		return 0;
	}
	spin_unlock_bh(&ap_domain_lock);
	return -ENODEV;
}
/*
 * helper function to be used with bus_find_dev
 * matches for the card device with the given id
 */
static int __match_card_device_with_id(struct device *dev, void *data)
{
	return is_card_dev(dev) && to_ap_card(dev)->id == (int)(long) data;
}
/* helper function to be used with bus_find_dev
* matches for the queue device with a given qid
*/
static int __match_queue_device_with_qid(struct device *dev, void *data)
{
return is_queue_dev(dev) && to_ap_queue(dev)->qid == (int)(long) data;
}
/**
* ap_scan_bus(): Scan the AP bus for new devices
* Runs periodically, workqueue timer (ap_config_time)
*/
static void ap_scan_bus(struct work_struct *unused)
{
	struct ap_queue *aq;
	struct ap_card *ac;
	struct device *dev;
	ap_qid_t qid;
	int depth = 0, type = 0;
	unsigned int functions = 0;
	int rc, id, dom, borked, domains;

	AP_DBF(DBF_DEBUG, "ap_scan_bus running\n");

	ap_query_configuration();
	if (ap_select_domain() != 0)
		goto out;

	for (id = 0; id < AP_DEVICES; id++) {
		/* check if device is registered */
		dev = bus_find_device(&ap_bus_type, NULL,
				      (void *)(long) id,
				      __match_card_device_with_id);
		ac = dev ? to_ap_card(dev) : NULL;
		if (!ap_test_config_card_id(id)) {
			if (dev) {
				/* Card device has been removed from
				 * configuration, remove the belonging
				 * queue devices.
				 */
				bus_for_each_dev(&ap_bus_type, NULL,
					(void *)(long) id,
					__ap_queue_devices_with_id_unregister);
				/* now remove the card device */
				device_unregister(dev);
				put_device(dev);
			}
			continue;
		}
		/* According to the configuration there should be a card
		 * device, so check if there is at least one valid queue
		 * and maybe create queue devices and the card device.
		 */
		domains = 0;
		for (dom = 0; dom < AP_DOMAINS; dom++) {
			qid = AP_MKQID(id, dom);
			dev = bus_find_device(&ap_bus_type, NULL,
					      (void *)(long) qid,
					      __match_queue_device_with_qid);
			aq = dev ? to_ap_queue(dev) : NULL;
			if (!ap_test_config_domain(dom)) {
				if (dev) {
					/* Queue device exists but has been
					 * removed from configuration.
					 */
					device_unregister(dev);
					put_device(dev);
				}
				continue;
			}
			rc = ap_query_queue(qid, &depth, &type, &functions);
			if (dev) {
				spin_lock_bh(&aq->lock);
				if (rc == -ENODEV ||
				    /* adapter reconfiguration */
				    (ac && ac->functions != functions))
					aq->state = AP_STATE_BORKED;
				borked = aq->state == AP_STATE_BORKED;
				spin_unlock_bh(&aq->lock);
				if (borked)	/* Remove broken device */
					device_unregister(dev);
				put_device(dev);
				if (!borked) {
					domains++;
					continue;
				}
			}
			if (rc)
				continue;
			/* new queue device needed */
			if (!ac) {
				/* but first create the card device */
				ac = ap_card_create(id, depth,
						    type, functions);
				if (!ac)
					continue;
				ac->ap_dev.device.bus = &ap_bus_type;
				ac->ap_dev.device.parent = ap_root_device;
				dev_set_name(&ac->ap_dev.device,
					     "card%02x", id);
				/* Register card with AP bus */
				rc = device_register(&ac->ap_dev.device);
				if (rc) {
					put_device(&ac->ap_dev.device);
					ac = NULL;
					break;
				}
				/* get it and thus adjust reference counter */
				get_device(&ac->ap_dev.device);
				/* Add card device to card list */
				spin_lock_bh(&ap_list_lock);
				list_add(&ac->list, &ap_card_list);
				spin_unlock_bh(&ap_list_lock);
			}
			/* now create the new queue device */
			aq = ap_queue_create(qid, type);
			if (!aq)
				continue;
			aq->card = ac;
			aq->ap_dev.device.bus = &ap_bus_type;
			aq->ap_dev.device.parent = &ac->ap_dev.device;
			dev_set_name(&aq->ap_dev.device,
				     "%02x.%04x", id, dom);
			/* Add queue device to card queue list */
			spin_lock_bh(&ap_list_lock);
			list_add(&aq->list, &ac->queues);
			spin_unlock_bh(&ap_list_lock);
			/* Start with a device reset */
			spin_lock_bh(&aq->lock);
			ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
			spin_unlock_bh(&aq->lock);
			/* Register device */
			rc = device_register(&aq->ap_dev.device);
			if (rc) {
				spin_lock_bh(&ap_list_lock);
				list_del_init(&aq->list);
				spin_unlock_bh(&ap_list_lock);
				put_device(&aq->ap_dev.device);
				continue;
			}
			domains++;
		} /* end domain loop */
		if (ac) {
			/* remove card dev if there are no queue devices */
			if (!domains)
				device_unregister(&ac->ap_dev.device);
			put_device(&ac->ap_dev.device);
		}
	} /* end device loop */
out:
	mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ);
}
...@@ -1768,7 +1124,7 @@ static void ap_reset_domain(void)
	if (ap_domain_index == -1 || !ap_test_config_domain(ap_domain_index))
		return;
	for (i = 0; i < AP_DEVICES; i++)
		ap_rapq(AP_MKQID(i, ap_domain_index));
}

static void ap_reset_all(void)
...@@ -1781,7 +1137,7 @@ static void ap_reset_all(void)
		for (j = 0; j < AP_DEVICES; j++) {
			if (!ap_test_config_card_id(j))
				continue;
			ap_rapq(AP_MKQID(j, i));
		}
	}
}
...@@ -1790,6 +1146,23 @@ static struct reset_call ap_reset_call = {
	.fn = ap_reset_all,
};
int __init ap_debug_init(void)
{
ap_dbf_root = debugfs_create_dir("ap", NULL);
ap_dbf_info = debug_register("ap", 1, 1,
DBF_MAX_SPRINTF_ARGS * sizeof(long));
debug_register_view(ap_dbf_info, &debug_sprintf_view);
debug_set_level(ap_dbf_info, DBF_ERR);
return 0;
}
void ap_debug_exit(void)
{
debugfs_remove(ap_dbf_root);
debug_unregister(ap_dbf_info);
}
/**
 * ap_module_init(): The module initialization code.
 *
...@@ -1800,6 +1173,10 @@ int __init ap_module_init(void)
	int max_domain_id;
	int rc, i;
rc = ap_debug_init();
if (rc)
return rc;
	if (ap_instructions_available() != 0) {
		pr_warn("The hardware system does not support AP instructions\n");
		return -ENODEV;
...@@ -1909,7 +1286,15 @@ void ap_module_exit(void)
	del_timer_sync(&ap_config_timer);
	hrtimer_cancel(&ap_poll_timer);
	tasklet_kill(&ap_tasklet);
/* first remove queue devices */
bus_for_each_dev(&ap_bus_type, NULL, NULL,
__ap_queue_devices_unregister);
/* now remove the card devices */
bus_for_each_dev(&ap_bus_type, NULL, NULL,
__ap_card_devices_unregister);
/* remove bus attributes */
	for (i = 0; ap_bus_attrs[i]; i++)
		bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
	unregister_pm_notifier(&ap_power_notifier);
...@@ -1919,6 +1304,8 @@ void ap_module_exit(void)
	unregister_reset_call(&ap_reset_call);
	if (ap_using_interrupts())
		unregister_adapter_interrupt(&ap_airq);
ap_debug_exit();
}

module_init(ap_module_init);
...
...@@ -27,7 +27,6 @@
#define _AP_BUS_H_

#include <linux/device.h>
#include <linux/types.h>

#define AP_DEVICES 64	/* Number of AP devices. */
...@@ -38,14 +37,17 @@
extern int ap_domain_index;

extern spinlock_t ap_list_lock;
extern struct list_head ap_card_list;

/**
 * The ap_qid_t identifier of an ap queue. It contains a
 * 6 bit card index and a 4 bit queue index (domain).
 */
typedef unsigned int ap_qid_t;

#define AP_MKQID(_card, _queue) (((_card) & 63) << 8 | ((_queue) & 255))
#define AP_QID_CARD(_qid) (((_qid) >> 8) & 63)
#define AP_QID_QUEUE(_qid) ((_qid) & 255)
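/*
 * Worked example (editor's note): for card 0x0a and domain 0x04 the macros
 * above yield AP_MKQID(0x0a, 0x04) == 0x0a04, AP_QID_CARD(0x0a04) == 0x0a
 * and AP_QID_QUEUE(0x0a04) == 0x04, which matches the "<card>.<domain>"
 * device names (e.g. 0a.0004) used for the queue devices on the AP bus.
 */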
/**
...@@ -55,7 +57,7 @@ typedef unsigned int ap_qid_t;
 * @queue_full: Is 1 if the queue is full
 * @pad: A 4 bit pad
 * @int_enabled: Shows if interrupts are enabled for the AP
 * @response_code: Holds the 8 bit response code
 * @pad2: A 16 bit pad
 *
 * The ap queue status word is returned by all three AP functions
...@@ -105,6 +107,7 @@ static inline int ap_test_bit(unsigned int *ptr, unsigned int nr)
#define AP_DEVICE_TYPE_CEX3C 9
#define AP_DEVICE_TYPE_CEX4 10
#define AP_DEVICE_TYPE_CEX5 11
#define AP_DEVICE_TYPE_CEX6 12

/*
 * Known function facilities
...@@ -166,7 +169,8 @@ struct ap_driver {
	int (*probe)(struct ap_device *);
	void (*remove)(struct ap_device *);
	void (*suspend)(struct ap_device *);
	void (*resume)(struct ap_device *);
};

#define to_ap_drv(x) container_of((x), struct ap_driver, driver)
...@@ -174,38 +178,51 @@ struct ap_driver {
int ap_driver_register(struct ap_driver *, struct module *, char *);
void ap_driver_unregister(struct ap_driver *);
struct ap_device {
	struct device device;
	struct ap_driver *drv;		/* Pointer to AP device driver. */
	int device_type;		/* AP device type. */
};

#define to_ap_dev(x) container_of((x), struct ap_device, device)

struct ap_card {
	struct ap_device ap_dev;
	struct list_head list;		/* Private list of AP cards. */
	struct list_head queues;	/* List of assoc. AP queues */
	void *private;			/* ap driver private pointer. */
	int raw_hwtype;			/* AP raw hardware type. */
	unsigned int functions;		/* AP device function bitfield. */
	int queue_depth;		/* AP queue depth.*/
	int id;				/* AP card number. */
	atomic_t total_request_count;	/* # requests ever for this AP device.*/
};

#define to_ap_card(x) container_of((x), struct ap_card, ap_dev.device)

struct ap_queue {
	struct ap_device ap_dev;
	struct list_head list;		/* Private list of AP queues. */
	struct ap_card *card;		/* Ptr to assoc. AP card. */
	spinlock_t lock;		/* Per device lock. */
	void *private;			/* ap driver private pointer. */
	ap_qid_t qid;			/* AP queue id. */
	int interrupt;			/* indicate if interrupts are enabled */
	int queue_count;		/* # messages currently on AP queue. */
	enum ap_state state;		/* State of the AP queue. */
	int pendingq_count;		/* # requests on pendingq list. */
	int requestq_count;		/* # requests on requestq list. */
	int total_request_count;	/* # requests ever for this AP device.*/
	int request_timeout;		/* Request timeout in jiffies. */
	struct timer_list timeout;	/* Timer for request timeouts. */
	struct list_head pendingq;	/* List of message sent to AP queue. */
	struct list_head requestq;	/* List of message yet to be sent. */
	struct ap_message *reply;	/* Per device reply message. */
};

#define to_ap_queue(x) container_of((x), struct ap_queue, ap_dev.device)

typedef enum ap_wait (ap_func_t)(struct ap_queue *queue);
struct ap_message {
	struct list_head list;		/* Request queueing. */
...@@ -217,7 +234,7 @@ struct ap_message {
	void *private;			/* ap driver private pointer. */
	unsigned int special:1;		/* Used for special commands. */
	/* receive is called from tasklet context */
	void (*receive)(struct ap_queue *, struct ap_message *,
			struct ap_message *);
};
...@@ -232,10 +249,6 @@ struct ap_config_info {
	unsigned char reserved4[16];
} __packed;
#define AP_DEVICE(dt) \
.dev_type=(dt), \
.match_flags=AP_DEVICE_ID_MATCH_DEVICE_TYPE,
/**
 * ap_init_message() - Initialize ap_message.
 * Initialize a message before using. Otherwise this might result in
...@@ -250,6 +263,12 @@ static inline void ap_init_message(struct ap_message *ap_msg)
	ap_msg->receive = NULL;
}
#define for_each_ap_card(_ac) \
list_for_each_entry(_ac, &ap_card_list, list)
#define for_each_ap_queue(_aq, _ac) \
list_for_each_entry(_aq, &(_ac)->queues, list)
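/*
 * Usage sketch (editor's illustration, not from this patch): walking all
 * queues of all cards.  ap_list_lock protects both the card list and the
 * per-card queue lists, as the sysfs show functions in ap_card.c do it.
 *
 *	struct ap_card *ac;
 *	struct ap_queue *aq;
 *
 *	spin_lock_bh(&ap_list_lock);
 *	for_each_ap_card(ac)
 *		for_each_ap_queue(aq, ac)
 *			pr_debug("queue %04x\n", aq->qid);
 *	spin_unlock_bh(&ap_list_lock);
 */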
/*
 * Note: don't use ap_send/ap_recv after using ap_queue_message
 * for the first time. Otherwise the ap message queue will get
...@@ -258,11 +277,26 @@ static inline void ap_init_message(struct ap_message *ap_msg)
int ap_send(ap_qid_t, unsigned long long, void *, size_t);
int ap_recv(ap_qid_t, unsigned long long *, void *, size_t);

enum ap_wait ap_sm_event(struct ap_queue *aq, enum ap_event event);
enum ap_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_event event);

void ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg);
void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg);
void ap_flush_queue(struct ap_queue *aq);

void *ap_airq_ptr(void);
void ap_wait(enum ap_wait wait);
void ap_request_timeout(unsigned long data);
void ap_bus_force_rescan(void);

void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *ap_msg);
struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type);
void ap_queue_remove(struct ap_queue *aq);
void ap_queue_suspend(struct ap_device *ap_dev);
void ap_queue_resume(struct ap_device *ap_dev);

struct ap_card *ap_card_create(int id, int queue_depth, int device_type,
			       unsigned int device_functions);

int ap_module_init(void);
void ap_module_exit(void);
...
/*
* Copyright IBM Corp. 2016
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*
* Adjunct processor bus, card related code.
*/
#define KMSG_COMPONENT "ap"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/facility.h>
#include "ap_bus.h"
#include "ap_asm.h"
/*
* AP card related attributes.
*/
static ssize_t ap_hwtype_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ap_card *ac = to_ap_card(dev);
return snprintf(buf, PAGE_SIZE, "%d\n", ac->ap_dev.device_type);
}
static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL);
static ssize_t ap_raw_hwtype_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ap_card *ac = to_ap_card(dev);
return snprintf(buf, PAGE_SIZE, "%d\n", ac->raw_hwtype);
}
static DEVICE_ATTR(raw_hwtype, 0444, ap_raw_hwtype_show, NULL);
static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct ap_card *ac = to_ap_card(dev);
return snprintf(buf, PAGE_SIZE, "%d\n", ac->queue_depth);
}
static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL);
static ssize_t ap_functions_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ap_card *ac = to_ap_card(dev);
return snprintf(buf, PAGE_SIZE, "0x%08X\n", ac->functions);
}
static DEVICE_ATTR(ap_functions, 0444, ap_functions_show, NULL);
static ssize_t ap_request_count_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct ap_card *ac = to_ap_card(dev);
unsigned int req_cnt;
req_cnt = 0;
spin_lock_bh(&ap_list_lock);
req_cnt = atomic_read(&ac->total_request_count);
spin_unlock_bh(&ap_list_lock);
return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt);
}
static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL);
static ssize_t ap_requestq_count_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ap_card *ac = to_ap_card(dev);
struct ap_queue *aq;
unsigned int reqq_cnt;
reqq_cnt = 0;
spin_lock_bh(&ap_list_lock);
for_each_ap_queue(aq, ac)
reqq_cnt += aq->requestq_count;
spin_unlock_bh(&ap_list_lock);
return snprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt);
}
static DEVICE_ATTR(requestq_count, 0444, ap_requestq_count_show, NULL);
static ssize_t ap_pendingq_count_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ap_card *ac = to_ap_card(dev);
struct ap_queue *aq;
unsigned int penq_cnt;
penq_cnt = 0;
spin_lock_bh(&ap_list_lock);
for_each_ap_queue(aq, ac)
penq_cnt += aq->pendingq_count;
spin_unlock_bh(&ap_list_lock);
return snprintf(buf, PAGE_SIZE, "%d\n", penq_cnt);
}
static DEVICE_ATTR(pendingq_count, 0444, ap_pendingq_count_show, NULL);
static ssize_t ap_modalias_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "ap:t%02X\n", to_ap_dev(dev)->device_type);
}
static DEVICE_ATTR(modalias, 0444, ap_modalias_show, NULL);
static struct attribute *ap_card_dev_attrs[] = {
&dev_attr_hwtype.attr,
&dev_attr_raw_hwtype.attr,
&dev_attr_depth.attr,
&dev_attr_ap_functions.attr,
&dev_attr_request_count.attr,
&dev_attr_requestq_count.attr,
&dev_attr_pendingq_count.attr,
&dev_attr_modalias.attr,
NULL
};
static struct attribute_group ap_card_dev_attr_group = {
.attrs = ap_card_dev_attrs
};
static const struct attribute_group *ap_card_dev_attr_groups[] = {
&ap_card_dev_attr_group,
NULL
};
struct device_type ap_card_type = {
.name = "ap_card",
.groups = ap_card_dev_attr_groups,
};
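/*
 * Editor's note: the is_card_dev()/is_queue_dev() helpers used by
 * ap_bus_match() and the bus scan code are not visible in this excerpt.
 * They presumably distinguish the two device flavours by their device
 * type, roughly as sketched below (assumption, not code from this patch):
 *
 *	static inline int is_card_dev(struct device *dev)
 *	{
 *		return dev->type == &ap_card_type;
 *	}
 *
 *	static inline int is_queue_dev(struct device *dev)
 *	{
 *		return dev->type == &ap_queue_type;
 *	}
 */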
static void ap_card_device_release(struct device *dev)
{
kfree(to_ap_card(dev));
}
struct ap_card *ap_card_create(int id, int queue_depth, int device_type,
unsigned int functions)
{
struct ap_card *ac;
ac = kzalloc(sizeof(*ac), GFP_KERNEL);
if (!ac)
return NULL;
INIT_LIST_HEAD(&ac->queues);
ac->ap_dev.device.release = ap_card_device_release;
ac->ap_dev.device.type = &ap_card_type;
ac->ap_dev.device_type = device_type;
/* CEX6 toleration: map to CEX5 */
if (device_type == AP_DEVICE_TYPE_CEX6)
ac->ap_dev.device_type = AP_DEVICE_TYPE_CEX5;
ac->raw_hwtype = device_type;
ac->queue_depth = queue_depth;
ac->functions = functions;
ac->id = id;
return ac;
}
/*
* Copyright IBM Corp. 2016
* Author(s): Harald Freudenberger <freude@de.ibm.com>
*/
#ifndef AP_DEBUG_H
#define AP_DEBUG_H
#include <asm/debug.h>
#define DBF_ERR 3 /* error conditions */
#define DBF_WARN 4 /* warning conditions */
#define DBF_INFO 5 /* informational */
#define DBF_DEBUG 6 /* for debugging only */
#define RC2ERR(rc) ((rc) ? DBF_ERR : DBF_INFO)
#define RC2WARN(rc) ((rc) ? DBF_WARN : DBF_INFO)
#define DBF_MAX_SPRINTF_ARGS 5
#define AP_DBF(...) \
debug_sprintf_event(ap_dbf_info, ##__VA_ARGS__)
extern debug_info_t *ap_dbf_info;
int ap_debug_init(void);
void ap_debug_exit(void);
#endif /* AP_DEBUG_H */
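/*
 * Usage sketch (editor's illustration): ap_debug_init() registers the "ap"
 * debug feature with an initial level of DBF_ERR (3); an event is only
 * stored if its level is numerically less than or equal to the current
 * level, so a call like
 *
 *	AP_DBF(DBF_WARN, "no queue found for qid %04x\n", qid);
 *
 * is recorded once the level has been raised via the debugfs "level" file.
 * At most DBF_MAX_SPRINTF_ARGS arguments are captured per event.
 */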
/*
* Copyright IBM Corp. 2016
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*
* Adjunct processor bus, queue related code.
*/
#define KMSG_COMPONENT "ap"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/facility.h>
#include "ap_bus.h"
#include "ap_asm.h"
/**
* ap_queue_enable_interruption(): Enable interruption on an AP queue.
 * @aq: pointer to the AP queue
 * @ind: the notification indicator byte
 *
 * Enables interruption on an AP queue via ap_aqic(). Returns 0 on success,
 * -EOPNOTSUPP if the queue does not support interrupts and -EBUSY if the
 * enablement could not be completed because the queue is busy.
*/
static int ap_queue_enable_interruption(struct ap_queue *aq, void *ind)
{
struct ap_queue_status status;
status = ap_aqic(aq->qid, ind);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
case AP_RESPONSE_OTHERWISE_CHANGED:
return 0;
case AP_RESPONSE_Q_NOT_AVAIL:
case AP_RESPONSE_DECONFIGURED:
case AP_RESPONSE_CHECKSTOPPED:
case AP_RESPONSE_INVALID_ADDRESS:
pr_err("Registering adapter interrupts for AP device %02x.%04x failed\n",
AP_QID_CARD(aq->qid),
AP_QID_QUEUE(aq->qid));
return -EOPNOTSUPP;
case AP_RESPONSE_RESET_IN_PROGRESS:
case AP_RESPONSE_BUSY:
default:
return -EBUSY;
}
}
/**
* __ap_send(): Send message to adjunct processor queue.
* @qid: The AP queue number
* @psmid: The program supplied message identifier
* @msg: The message text
* @length: The message length
* @special: Special Bit
*
* Returns AP queue status structure.
* Condition code 1 on NQAP can't happen because the L bit is 1.
* Condition code 2 on NQAP also means the send is incomplete,
* because a segment boundary was reached. The NQAP is repeated.
*/
static inline struct ap_queue_status
__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
unsigned int special)
{
if (special == 1)
qid |= 0x400000UL;
return ap_nqap(qid, psmid, msg, length);
}
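/*
 * Editor's note (worked example): with special == 1 the queue id is or-ed
 * with 0x400000 before it is handed to NQAP, so qid 0x0a04 is presented to
 * the instruction as 0x400a04.
 */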
int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
{
struct ap_queue_status status;
status = __ap_send(qid, psmid, msg, length, 0);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
return 0;
case AP_RESPONSE_Q_FULL:
case AP_RESPONSE_RESET_IN_PROGRESS:
return -EBUSY;
case AP_RESPONSE_REQ_FAC_NOT_INST:
return -EINVAL;
default: /* Device is gone. */
return -ENODEV;
}
}
EXPORT_SYMBOL(ap_send);
int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
{
struct ap_queue_status status;
if (msg == NULL)
return -EINVAL;
status = ap_dqap(qid, psmid, msg, length);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
return 0;
case AP_RESPONSE_NO_PENDING_REPLY:
if (status.queue_empty)
return -ENOENT;
return -EBUSY;
case AP_RESPONSE_RESET_IN_PROGRESS:
return -EBUSY;
default:
return -ENODEV;
}
}
EXPORT_SYMBOL(ap_recv);
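/*
 * Usage sketch (editor's illustration, not from this patch): the
 * synchronous ap_send()/ap_recv() pair used without the message queue.
 * The qid, msg and msglen values are assumed to exist in the caller.
 *
 *	unsigned long long psmid = 0x1234;
 *	int rc;
 *
 *	rc = ap_send(qid, psmid, msg, msglen);
 *	if (rc)
 *		return rc;
 *	do {
 *		rc = ap_recv(qid, &psmid, msg, msglen);
 *	} while (rc == -EBUSY || rc == -ENOENT);
 *	return rc;
 *
 * A real caller would delay between retries, and - as the comment in
 * ap_bus.h warns - must not mix this with ap_queue_message() on the
 * same queue.
 */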
/* State machine definitions and helpers */
static enum ap_wait ap_sm_nop(struct ap_queue *aq)
{
return AP_WAIT_NONE;
}
/**
* ap_sm_recv(): Receive pending reply messages from an AP queue but do
* not change the state of the device.
* @aq: pointer to the AP queue
*
 * Returns the ap queue status returned by the DQAP operation.
*/
static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
{
struct ap_queue_status status;
struct ap_message *ap_msg;
status = ap_dqap(aq->qid, &aq->reply->psmid,
aq->reply->message, aq->reply->length);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
aq->queue_count--;
if (aq->queue_count > 0)
mod_timer(&aq->timeout,
jiffies + aq->request_timeout);
list_for_each_entry(ap_msg, &aq->pendingq, list) {
if (ap_msg->psmid != aq->reply->psmid)
continue;
list_del_init(&ap_msg->list);
aq->pendingq_count--;
ap_msg->receive(aq, ap_msg, aq->reply);
break;
}
case AP_RESPONSE_NO_PENDING_REPLY:
if (!status.queue_empty || aq->queue_count <= 0)
break;
/* The card shouldn't forget requests but who knows. */
aq->queue_count = 0;
list_splice_init(&aq->pendingq, &aq->requestq);
aq->requestq_count += aq->pendingq_count;
aq->pendingq_count = 0;
break;
default:
break;
}
return status;
}
/**
* ap_sm_read(): Receive pending reply messages from an AP queue.
* @aq: pointer to the AP queue
*
* Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
*/
static enum ap_wait ap_sm_read(struct ap_queue *aq)
{
struct ap_queue_status status;
if (!aq->reply)
return AP_WAIT_NONE;
status = ap_sm_recv(aq);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
if (aq->queue_count > 0) {
aq->state = AP_STATE_WORKING;
return AP_WAIT_AGAIN;
}
aq->state = AP_STATE_IDLE;
return AP_WAIT_NONE;
case AP_RESPONSE_NO_PENDING_REPLY:
if (aq->queue_count > 0)
return AP_WAIT_INTERRUPT;
aq->state = AP_STATE_IDLE;
return AP_WAIT_NONE;
default:
aq->state = AP_STATE_BORKED;
return AP_WAIT_NONE;
}
}
/**
* ap_sm_suspend_read(): Receive pending reply messages from an AP queue
* without changing the device state in between. In suspend mode we don't
* allow sending new requests, therefore just fetch pending replies.
* @aq: pointer to the AP queue
*
* Returns AP_WAIT_NONE or AP_WAIT_AGAIN
*/
static enum ap_wait ap_sm_suspend_read(struct ap_queue *aq)
{
struct ap_queue_status status;
if (!aq->reply)
return AP_WAIT_NONE;
status = ap_sm_recv(aq);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
if (aq->queue_count > 0)
return AP_WAIT_AGAIN;
/* fall through */
default:
return AP_WAIT_NONE;
}
}
/**
* ap_sm_write(): Send messages from the request queue to an AP queue.
* @aq: pointer to the AP queue
*
* Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
*/
static enum ap_wait ap_sm_write(struct ap_queue *aq)
{
struct ap_queue_status status;
struct ap_message *ap_msg;
if (aq->requestq_count <= 0)
return AP_WAIT_NONE;
/* Start the next request on the queue. */
ap_msg = list_entry(aq->requestq.next, struct ap_message, list);
status = __ap_send(aq->qid, ap_msg->psmid,
ap_msg->message, ap_msg->length, ap_msg->special);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
aq->queue_count++;
if (aq->queue_count == 1)
mod_timer(&aq->timeout, jiffies + aq->request_timeout);
list_move_tail(&ap_msg->list, &aq->pendingq);
aq->requestq_count--;
aq->pendingq_count++;
if (aq->queue_count < aq->card->queue_depth) {
aq->state = AP_STATE_WORKING;
return AP_WAIT_AGAIN;
}
/* fall through */
case AP_RESPONSE_Q_FULL:
aq->state = AP_STATE_QUEUE_FULL;
return AP_WAIT_INTERRUPT;
case AP_RESPONSE_RESET_IN_PROGRESS:
aq->state = AP_STATE_RESET_WAIT;
return AP_WAIT_TIMEOUT;
case AP_RESPONSE_MESSAGE_TOO_BIG:
case AP_RESPONSE_REQ_FAC_NOT_INST:
list_del_init(&ap_msg->list);
aq->requestq_count--;
ap_msg->rc = -EINVAL;
ap_msg->receive(aq, ap_msg, NULL);
return AP_WAIT_AGAIN;
default:
aq->state = AP_STATE_BORKED;
return AP_WAIT_NONE;
}
}
/**
* ap_sm_read_write(): Send and receive messages to/from an AP queue.
* @aq: pointer to the AP queue
*
* Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
*/
static enum ap_wait ap_sm_read_write(struct ap_queue *aq)
{
return min(ap_sm_read(aq), ap_sm_write(aq));
}
/**
* ap_sm_reset(): Reset an AP queue.
 * @aq: pointer to the AP queue
*
* Submit the Reset command to an AP queue.
*/
static enum ap_wait ap_sm_reset(struct ap_queue *aq)
{
struct ap_queue_status status;
status = ap_rapq(aq->qid);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
case AP_RESPONSE_RESET_IN_PROGRESS:
aq->state = AP_STATE_RESET_WAIT;
aq->interrupt = AP_INTR_DISABLED;
return AP_WAIT_TIMEOUT;
case AP_RESPONSE_BUSY:
return AP_WAIT_TIMEOUT;
case AP_RESPONSE_Q_NOT_AVAIL:
case AP_RESPONSE_DECONFIGURED:
case AP_RESPONSE_CHECKSTOPPED:
default:
aq->state = AP_STATE_BORKED;
return AP_WAIT_NONE;
}
}
/**
* ap_sm_reset_wait(): Test queue for completion of the reset operation
* @aq: pointer to the AP queue
*
 * Returns AP_WAIT_NONE, AP_WAIT_TIMEOUT or AP_WAIT_AGAIN.
*/
static enum ap_wait ap_sm_reset_wait(struct ap_queue *aq)
{
struct ap_queue_status status;
void *lsi_ptr;
if (aq->queue_count > 0 && aq->reply)
/* Try to read a completed message and get the status */
status = ap_sm_recv(aq);
else
/* Get the status with TAPQ */
status = ap_tapq(aq->qid, NULL);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
lsi_ptr = ap_airq_ptr();
if (lsi_ptr && ap_queue_enable_interruption(aq, lsi_ptr) == 0)
aq->state = AP_STATE_SETIRQ_WAIT;
else
aq->state = (aq->queue_count > 0) ?
AP_STATE_WORKING : AP_STATE_IDLE;
return AP_WAIT_AGAIN;
case AP_RESPONSE_BUSY:
case AP_RESPONSE_RESET_IN_PROGRESS:
return AP_WAIT_TIMEOUT;
case AP_RESPONSE_Q_NOT_AVAIL:
case AP_RESPONSE_DECONFIGURED:
case AP_RESPONSE_CHECKSTOPPED:
default:
aq->state = AP_STATE_BORKED;
return AP_WAIT_NONE;
}
}
/**
* ap_sm_setirq_wait(): Test queue for completion of the irq enablement
* @aq: pointer to the AP queue
*
 * Returns AP_WAIT_NONE, AP_WAIT_TIMEOUT or AP_WAIT_AGAIN.
*/
static enum ap_wait ap_sm_setirq_wait(struct ap_queue *aq)
{
struct ap_queue_status status;
if (aq->queue_count > 0 && aq->reply)
/* Try to read a completed message and get the status */
status = ap_sm_recv(aq);
else
/* Get the status with TAPQ */
status = ap_tapq(aq->qid, NULL);
if (status.int_enabled == 1) {
/* Irqs are now enabled */
aq->interrupt = AP_INTR_ENABLED;
aq->state = (aq->queue_count > 0) ?
AP_STATE_WORKING : AP_STATE_IDLE;
}
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
if (aq->queue_count > 0)
return AP_WAIT_AGAIN;
/* fallthrough */
case AP_RESPONSE_NO_PENDING_REPLY:
return AP_WAIT_TIMEOUT;
default:
aq->state = AP_STATE_BORKED;
return AP_WAIT_NONE;
}
}
/*
* AP state machine jump table
*/
static ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = {
[AP_STATE_RESET_START] = {
[AP_EVENT_POLL] = ap_sm_reset,
[AP_EVENT_TIMEOUT] = ap_sm_nop,
},
[AP_STATE_RESET_WAIT] = {
[AP_EVENT_POLL] = ap_sm_reset_wait,
[AP_EVENT_TIMEOUT] = ap_sm_nop,
},
[AP_STATE_SETIRQ_WAIT] = {
[AP_EVENT_POLL] = ap_sm_setirq_wait,
[AP_EVENT_TIMEOUT] = ap_sm_nop,
},
[AP_STATE_IDLE] = {
[AP_EVENT_POLL] = ap_sm_write,
[AP_EVENT_TIMEOUT] = ap_sm_nop,
},
[AP_STATE_WORKING] = {
[AP_EVENT_POLL] = ap_sm_read_write,
[AP_EVENT_TIMEOUT] = ap_sm_reset,
},
[AP_STATE_QUEUE_FULL] = {
[AP_EVENT_POLL] = ap_sm_read,
[AP_EVENT_TIMEOUT] = ap_sm_reset,
},
[AP_STATE_SUSPEND_WAIT] = {
[AP_EVENT_POLL] = ap_sm_suspend_read,
[AP_EVENT_TIMEOUT] = ap_sm_nop,
},
[AP_STATE_BORKED] = {
[AP_EVENT_POLL] = ap_sm_nop,
[AP_EVENT_TIMEOUT] = ap_sm_nop,
},
};
enum ap_wait ap_sm_event(struct ap_queue *aq, enum ap_event event)
{
return ap_jumptable[aq->state][event](aq);
}
enum ap_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_event event)
{
enum ap_wait wait;
while ((wait = ap_sm_event(aq, event)) == AP_WAIT_AGAIN)
;
return wait;
}
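/*
 * Editor's illustration (sketch, not from this patch): every caller of the
 * state machine follows the same pattern - take the queue lock, feed an
 * event into ap_sm_event()/ap_sm_event_loop() and translate the returned
 * enum ap_wait into a timer or interrupt wait via ap_wait():
 *
 *	spin_lock_bh(&aq->lock);
 *	ap_wait(ap_sm_event_loop(aq, AP_EVENT_POLL));
 *	spin_unlock_bh(&aq->lock);
 *
 * ap_queue_init_reply() and ap_queue_message() below are concrete
 * instances of this pattern.
 */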
/*
* Power management for queue devices
*/
void ap_queue_suspend(struct ap_device *ap_dev)
{
struct ap_queue *aq = to_ap_queue(&ap_dev->device);
/* Poll on the device until all requests are finished. */
spin_lock_bh(&aq->lock);
aq->state = AP_STATE_SUSPEND_WAIT;
while (ap_sm_event(aq, AP_EVENT_POLL) != AP_WAIT_NONE)
;
aq->state = AP_STATE_BORKED;
spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_suspend);
void ap_queue_resume(struct ap_device *ap_dev)
{
}
EXPORT_SYMBOL(ap_queue_resume);
/*
* AP queue related attributes.
*/
static ssize_t ap_request_count_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct ap_queue *aq = to_ap_queue(dev);
unsigned int req_cnt;
spin_lock_bh(&aq->lock);
req_cnt = aq->total_request_count;
spin_unlock_bh(&aq->lock);
return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt);
}
static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL);
static ssize_t ap_requestq_count_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ap_queue *aq = to_ap_queue(dev);
unsigned int reqq_cnt = 0;
spin_lock_bh(&aq->lock);
reqq_cnt = aq->requestq_count;
spin_unlock_bh(&aq->lock);
return snprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt);
}
static DEVICE_ATTR(requestq_count, 0444, ap_requestq_count_show, NULL);
static ssize_t ap_pendingq_count_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ap_queue *aq = to_ap_queue(dev);
unsigned int penq_cnt = 0;
spin_lock_bh(&aq->lock);
penq_cnt = aq->pendingq_count;
spin_unlock_bh(&aq->lock);
return snprintf(buf, PAGE_SIZE, "%d\n", penq_cnt);
}
static DEVICE_ATTR(pendingq_count, 0444, ap_pendingq_count_show, NULL);
static ssize_t ap_reset_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ap_queue *aq = to_ap_queue(dev);
int rc = 0;
spin_lock_bh(&aq->lock);
switch (aq->state) {
case AP_STATE_RESET_START:
case AP_STATE_RESET_WAIT:
rc = snprintf(buf, PAGE_SIZE, "Reset in progress.\n");
break;
case AP_STATE_WORKING:
case AP_STATE_QUEUE_FULL:
rc = snprintf(buf, PAGE_SIZE, "Reset Timer armed.\n");
break;
default:
rc = snprintf(buf, PAGE_SIZE, "No Reset Timer set.\n");
}
spin_unlock_bh(&aq->lock);
return rc;
}
static DEVICE_ATTR(reset, 0444, ap_reset_show, NULL);
static ssize_t ap_interrupt_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ap_queue *aq = to_ap_queue(dev);
int rc = 0;
spin_lock_bh(&aq->lock);
if (aq->state == AP_STATE_SETIRQ_WAIT)
rc = snprintf(buf, PAGE_SIZE, "Enable Interrupt pending.\n");
else if (aq->interrupt == AP_INTR_ENABLED)
rc = snprintf(buf, PAGE_SIZE, "Interrupts enabled.\n");
else
rc = snprintf(buf, PAGE_SIZE, "Interrupts disabled.\n");
spin_unlock_bh(&aq->lock);
return rc;
}
static DEVICE_ATTR(interrupt, 0444, ap_interrupt_show, NULL);
static struct attribute *ap_queue_dev_attrs[] = {
&dev_attr_request_count.attr,
&dev_attr_requestq_count.attr,
&dev_attr_pendingq_count.attr,
&dev_attr_reset.attr,
&dev_attr_interrupt.attr,
NULL
};
static struct attribute_group ap_queue_dev_attr_group = {
.attrs = ap_queue_dev_attrs
};
static const struct attribute_group *ap_queue_dev_attr_groups[] = {
&ap_queue_dev_attr_group,
NULL
};
struct device_type ap_queue_type = {
.name = "ap_queue",
.groups = ap_queue_dev_attr_groups,
};
static void ap_queue_device_release(struct device *dev)
{
kfree(to_ap_queue(dev));
}
struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
{
struct ap_queue *aq;
aq = kzalloc(sizeof(*aq), GFP_KERNEL);
if (!aq)
return NULL;
aq->ap_dev.device.release = ap_queue_device_release;
aq->ap_dev.device.type = &ap_queue_type;
aq->ap_dev.device_type = device_type;
/* CEX6 toleration: map to CEX5 */
if (device_type == AP_DEVICE_TYPE_CEX6)
aq->ap_dev.device_type = AP_DEVICE_TYPE_CEX5;
aq->qid = qid;
aq->state = AP_STATE_RESET_START;
aq->interrupt = AP_INTR_DISABLED;
spin_lock_init(&aq->lock);
INIT_LIST_HEAD(&aq->pendingq);
INIT_LIST_HEAD(&aq->requestq);
setup_timer(&aq->timeout, ap_request_timeout, (unsigned long) aq);
return aq;
}
void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *reply)
{
aq->reply = reply;
spin_lock_bh(&aq->lock);
ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_init_reply);
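/*
 * Usage sketch (editor's illustration): a queue driver typically owns a
 * preallocated reply buffer and announces it once during probe; the zq
 * and MAX_RESPONSE_SIZE names below are hypothetical, not from this patch.
 *
 *	zq->reply.message = kmalloc(MAX_RESPONSE_SIZE, GFP_KERNEL);
 *	zq->reply.length = MAX_RESPONSE_SIZE;
 *	ap_queue_init_reply(aq, &zq->reply);
 *
 * Announcing the reply buffer also polls the state machine, so the initial
 * queue reset sequence (the queue starts in AP_STATE_RESET_START) begins
 * immediately.
 */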
/**
* ap_queue_message(): Queue a request to an AP device.
* @aq: The AP device to queue the message to
* @ap_msg: The message that is to be added
*/
void ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
{
/* For asynchronous message handling a valid receive-callback
* is required.
*/
BUG_ON(!ap_msg->receive);
spin_lock_bh(&aq->lock);
/* Queue the message. */
list_add_tail(&ap_msg->list, &aq->requestq);
aq->requestq_count++;
aq->total_request_count++;
atomic_inc(&aq->card->total_request_count);
	/* Send/receive as many requests from the queue as possible. */
ap_wait(ap_sm_event_loop(aq, AP_EVENT_POLL));
spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_message);
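/*
 * Usage sketch (editor's illustration, not from this patch): an
 * asynchronous request is an ap_message with a filled receive callback.
 * The example_receive(), buffer and counter names are hypothetical.
 *
 *	struct ap_message ap_msg;
 *
 *	ap_init_message(&ap_msg);
 *	ap_msg.psmid = (((unsigned long long) current->pid) << 32) + counter++;
 *	ap_msg.message = buffer;
 *	ap_msg.length = buflen;
 *	ap_msg.receive = example_receive;
 *	ap_queue_message(aq, &ap_msg);
 *
 * The receive callback runs in tasklet context once ap_sm_recv() dequeues
 * a reply with the matching psmid.
 */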
/**
* ap_cancel_message(): Cancel a crypto request.
* @aq: The AP device that has the message queued
* @ap_msg: The message that is to be removed
*
* Cancel a crypto request. This is done by removing the request
* from the device pending or request queue. Note that the
* request stays on the AP queue. When it finishes the message
* reply will be discarded because the psmid can't be found.
*/
void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg)
{
struct ap_message *tmp;
spin_lock_bh(&aq->lock);
if (!list_empty(&ap_msg->list)) {
list_for_each_entry(tmp, &aq->pendingq, list)
if (tmp->psmid == ap_msg->psmid) {
aq->pendingq_count--;
goto found;
}
aq->requestq_count--;
found:
list_del_init(&ap_msg->list);
}
spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_cancel_message);
/**
* __ap_flush_queue(): Flush requests.
* @aq: Pointer to the AP queue
*
* Flush all requests from the request/pending queue of an AP device.
*/
static void __ap_flush_queue(struct ap_queue *aq)
{
struct ap_message *ap_msg, *next;
list_for_each_entry_safe(ap_msg, next, &aq->pendingq, list) {
list_del_init(&ap_msg->list);
aq->pendingq_count--;
ap_msg->rc = -EAGAIN;
ap_msg->receive(aq, ap_msg, NULL);
}
list_for_each_entry_safe(ap_msg, next, &aq->requestq, list) {
list_del_init(&ap_msg->list);
aq->requestq_count--;
ap_msg->rc = -EAGAIN;
ap_msg->receive(aq, ap_msg, NULL);
}
}
void ap_flush_queue(struct ap_queue *aq)
{
spin_lock_bh(&aq->lock);
__ap_flush_queue(aq);
spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_flush_queue);
void ap_queue_remove(struct ap_queue *aq)
{
ap_flush_queue(aq);
del_timer_sync(&aq->timeout);
}
EXPORT_SYMBOL(ap_queue_remove);
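/*
 * Usage sketch (editor's illustration): a queue driver's remove callback
 * would typically flush outstanding requests and stop the timeout timer
 * through this helper before freeing its private data.  The
 * example_queue_remove() name is hypothetical, not from this patch.
 *
 *	static void example_queue_remove(struct ap_device *ap_dev)
 *	{
 *		struct ap_queue *aq = to_ap_queue(&ap_dev->device);
 *
 *		ap_queue_remove(aq);
 *		kfree(aq->private);
 *	}
 */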
...@@ -41,10 +41,14 @@
#include <linux/debugfs.h>
#include <asm/debug.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/zcrypt.h>

#include "zcrypt_api.h"
#include "zcrypt_debug.h"

#include "zcrypt_msgtype6.h"
#include "zcrypt_msgtype50.h"
/*
 * Module description.
...@@ -54,76 +58,31 @@ MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \
		   "Copyright IBM Corp. 2001, 2012");
MODULE_LICENSE("GPL");
/*
* zcrypt tracepoint functions
*/
EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_req);
EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_rep);
static int zcrypt_hwrng_seed = 1;
module_param_named(hwrng_seed, zcrypt_hwrng_seed, int, S_IRUSR|S_IRGRP);
MODULE_PARM_DESC(hwrng_seed, "Turn on/off hwrng auto seed, default is 1 (on).");
DEFINE_SPINLOCK(zcrypt_list_lock);
LIST_HEAD(zcrypt_card_list);
int zcrypt_device_count;

static atomic_t zcrypt_open_count = ATOMIC_INIT(0);
static atomic_t zcrypt_rescan_count = ATOMIC_INIT(0);

atomic_t zcrypt_rescan_req = ATOMIC_INIT(0);
EXPORT_SYMBOL(zcrypt_rescan_req);

static LIST_HEAD(zcrypt_ops_list);

/* Zcrypt related debug feature stuff. */
static struct dentry *zcrypt_dbf_root;
debug_info_t *zcrypt_dbf_info;
/*
* Device attributes common for all crypto devices.
*/
static ssize_t zcrypt_type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct zcrypt_device *zdev = to_ap_dev(dev)->private;
return snprintf(buf, PAGE_SIZE, "%s\n", zdev->type_string);
}
static DEVICE_ATTR(type, 0444, zcrypt_type_show, NULL);
static ssize_t zcrypt_online_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct zcrypt_device *zdev = to_ap_dev(dev)->private;
return snprintf(buf, PAGE_SIZE, "%d\n", zdev->online);
}
static ssize_t zcrypt_online_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct zcrypt_device *zdev = to_ap_dev(dev)->private;
int online;
if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1)
return -EINVAL;
zdev->online = online;
ZCRYPT_DBF_DEV(DBF_INFO, zdev, "dev%04xo%dman", zdev->ap_dev->qid,
zdev->online);
if (!online)
ap_flush_queue(zdev->ap_dev);
return count;
}
static DEVICE_ATTR(online, 0644, zcrypt_online_show, zcrypt_online_store);
static struct attribute * zcrypt_device_attrs[] = {
&dev_attr_type.attr,
&dev_attr_online.attr,
NULL,
};
static struct attribute_group zcrypt_device_attr_group = {
.attrs = zcrypt_device_attrs,
};
/** /**
* Process a rescan of the transport layer. * Process a rescan of the transport layer.
...@@ -136,242 +95,34 @@ static inline int zcrypt_process_rescan(void) ...@@ -136,242 +95,34 @@ static inline int zcrypt_process_rescan(void)
atomic_set(&zcrypt_rescan_req, 0); atomic_set(&zcrypt_rescan_req, 0);
atomic_inc(&zcrypt_rescan_count); atomic_inc(&zcrypt_rescan_count);
ap_bus_force_rescan(); ap_bus_force_rescan();
ZCRYPT_DBF_COMMON(DBF_INFO, "rescan%07d", ZCRYPT_DBF(DBF_INFO, "rescan count=%07d",
atomic_inc_return(&zcrypt_rescan_count)); atomic_inc_return(&zcrypt_rescan_count));
return 1; return 1;
} }
return 0; return 0;
} }
/**
* __zcrypt_increase_preference(): Increase preference of a crypto device.
* @zdev: Pointer the crypto device
*
* Move the device towards the head of the device list.
* Need to be called while holding the zcrypt device list lock.
* Note: cards with speed_rating of 0 are kept at the end of the list.
*/
static void __zcrypt_increase_preference(struct zcrypt_device *zdev)
{
struct zcrypt_device *tmp;
struct list_head *l;
if (zdev->speed_rating == 0)
return;
for (l = zdev->list.prev; l != &zcrypt_device_list; l = l->prev) {
tmp = list_entry(l, struct zcrypt_device, list);
if ((tmp->request_count + 1) * tmp->speed_rating <=
(zdev->request_count + 1) * zdev->speed_rating &&
tmp->speed_rating != 0)
break;
}
if (l == zdev->list.prev)
return;
/* Move zdev behind l */
list_move(&zdev->list, l);
}
/**
* __zcrypt_decrease_preference(): Decrease preference of a crypto device.
* @zdev: Pointer to a crypto device.
*
* Move the device towards the tail of the device list.
* Need to be called while holding the zcrypt device list lock.
* Note: cards with speed_rating of 0 are kept at the end of the list.
*/
static void __zcrypt_decrease_preference(struct zcrypt_device *zdev)
{
struct zcrypt_device *tmp;
struct list_head *l;
if (zdev->speed_rating == 0)
return;
for (l = zdev->list.next; l != &zcrypt_device_list; l = l->next) {
tmp = list_entry(l, struct zcrypt_device, list);
if ((tmp->request_count + 1) * tmp->speed_rating >
(zdev->request_count + 1) * zdev->speed_rating ||
tmp->speed_rating == 0)
break;
}
if (l == zdev->list.next)
return;
/* Move zdev before l */
list_move_tail(&zdev->list, l);
}
static void zcrypt_device_release(struct kref *kref)
{
struct zcrypt_device *zdev =
container_of(kref, struct zcrypt_device, refcount);
zcrypt_device_free(zdev);
}
void zcrypt_device_get(struct zcrypt_device *zdev)
{
kref_get(&zdev->refcount);
}
EXPORT_SYMBOL(zcrypt_device_get);
int zcrypt_device_put(struct zcrypt_device *zdev)
{
return kref_put(&zdev->refcount, zcrypt_device_release);
}
EXPORT_SYMBOL(zcrypt_device_put);
struct zcrypt_device *zcrypt_device_alloc(size_t max_response_size)
{
struct zcrypt_device *zdev;
zdev = kzalloc(sizeof(struct zcrypt_device), GFP_KERNEL);
if (!zdev)
return NULL;
zdev->reply.message = kmalloc(max_response_size, GFP_KERNEL);
if (!zdev->reply.message)
goto out_free;
zdev->reply.length = max_response_size;
spin_lock_init(&zdev->lock);
INIT_LIST_HEAD(&zdev->list);
zdev->dbf_area = zcrypt_dbf_devices;
return zdev;
out_free:
kfree(zdev);
return NULL;
}
EXPORT_SYMBOL(zcrypt_device_alloc);
void zcrypt_device_free(struct zcrypt_device *zdev)
{
kfree(zdev->reply.message);
kfree(zdev);
}
EXPORT_SYMBOL(zcrypt_device_free);
/**
* zcrypt_device_register() - Register a crypto device.
* @zdev: Pointer to a crypto device
*
* Register a crypto device. Returns 0 if successful.
*/
int zcrypt_device_register(struct zcrypt_device *zdev)
{
int rc;
if (!zdev->ops)
return -ENODEV;
rc = sysfs_create_group(&zdev->ap_dev->device.kobj,
&zcrypt_device_attr_group);
if (rc)
goto out;
get_device(&zdev->ap_dev->device);
kref_init(&zdev->refcount);
spin_lock_bh(&zcrypt_device_lock);
zdev->online = 1; /* New devices are online by default. */
ZCRYPT_DBF_DEV(DBF_INFO, zdev, "dev%04xo%dreg", zdev->ap_dev->qid,
zdev->online);
list_add_tail(&zdev->list, &zcrypt_device_list);
__zcrypt_increase_preference(zdev);
zcrypt_device_count++;
spin_unlock_bh(&zcrypt_device_lock);
if (zdev->ops->rng) {
rc = zcrypt_rng_device_add();
if (rc)
goto out_unregister;
}
return 0;
out_unregister:
spin_lock_bh(&zcrypt_device_lock);
zcrypt_device_count--;
list_del_init(&zdev->list);
spin_unlock_bh(&zcrypt_device_lock);
sysfs_remove_group(&zdev->ap_dev->device.kobj,
&zcrypt_device_attr_group);
put_device(&zdev->ap_dev->device);
zcrypt_device_put(zdev);
out:
return rc;
}
EXPORT_SYMBOL(zcrypt_device_register);
/**
* zcrypt_device_unregister(): Unregister a crypto device.
* @zdev: Pointer to crypto device
*
* Unregister a crypto device.
*/
void zcrypt_device_unregister(struct zcrypt_device *zdev)
{
if (zdev->ops->rng)
zcrypt_rng_device_remove();
spin_lock_bh(&zcrypt_device_lock);
zcrypt_device_count--;
list_del_init(&zdev->list);
spin_unlock_bh(&zcrypt_device_lock);
sysfs_remove_group(&zdev->ap_dev->device.kobj,
&zcrypt_device_attr_group);
put_device(&zdev->ap_dev->device);
zcrypt_device_put(zdev);
}
EXPORT_SYMBOL(zcrypt_device_unregister);
void zcrypt_msgtype_register(struct zcrypt_ops *zops) void zcrypt_msgtype_register(struct zcrypt_ops *zops)
{ {
spin_lock_bh(&zcrypt_ops_list_lock);
list_add_tail(&zops->list, &zcrypt_ops_list); list_add_tail(&zops->list, &zcrypt_ops_list);
spin_unlock_bh(&zcrypt_ops_list_lock);
} }
EXPORT_SYMBOL(zcrypt_msgtype_register);
void zcrypt_msgtype_unregister(struct zcrypt_ops *zops) void zcrypt_msgtype_unregister(struct zcrypt_ops *zops)
{ {
spin_lock_bh(&zcrypt_ops_list_lock);
list_del_init(&zops->list); list_del_init(&zops->list);
spin_unlock_bh(&zcrypt_ops_list_lock);
} }
EXPORT_SYMBOL(zcrypt_msgtype_unregister);
static inline struct zcrypt_ops *zcrypt_msgtype(unsigned char *name, int variant)
struct zcrypt_ops *__ops_lookup(unsigned char *name, int variant)
{ {
struct zcrypt_ops *zops; struct zcrypt_ops *zops;
int found = 0;
spin_lock_bh(&zcrypt_ops_list_lock); list_for_each_entry(zops, &zcrypt_ops_list, list)
list_for_each_entry(zops, &zcrypt_ops_list, list) {
if ((zops->variant == variant) && if ((zops->variant == variant) &&
(!strncmp(zops->name, name, sizeof(zops->name)))) { (!strncmp(zops->name, name, sizeof(zops->name))))
found = 1; return zops;
break; return NULL;
}
}
if (!found || !try_module_get(zops->owner))
zops = NULL;
spin_unlock_bh(&zcrypt_ops_list_lock);
return zops;
}
struct zcrypt_ops *zcrypt_msgtype_request(unsigned char *name, int variant)
{
struct zcrypt_ops *zops = NULL;
zops = __ops_lookup(name, variant);
if (!zops) {
request_module("%s", name);
zops = __ops_lookup(name, variant);
}
return zops;
}
EXPORT_SYMBOL(zcrypt_msgtype_request);
void zcrypt_msgtype_release(struct zcrypt_ops *zops)
{
if (zops)
module_put(zops->owner);
} }
EXPORT_SYMBOL(zcrypt_msgtype_release); EXPORT_SYMBOL(zcrypt_msgtype);
/** /**
* zcrypt_read (): Not supported beyond zcrypt 1.3.1. * zcrypt_read (): Not supported beyond zcrypt 1.3.1.
...@@ -417,16 +168,80 @@ static int zcrypt_release(struct inode *inode, struct file *filp) ...@@ -417,16 +168,80 @@ static int zcrypt_release(struct inode *inode, struct file *filp)
return 0; return 0;
} }
static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
struct zcrypt_queue *zq,
unsigned int weight)
{
if (!zq || !try_module_get(zq->queue->ap_dev.drv->driver.owner))
return NULL;
zcrypt_queue_get(zq);
get_device(&zq->queue->ap_dev.device);
atomic_add(weight, &zc->load);
atomic_add(weight, &zq->load);
zq->request_count++;
return zq;
}
static inline void zcrypt_drop_queue(struct zcrypt_card *zc,
struct zcrypt_queue *zq,
unsigned int weight)
{
struct module *mod = zq->queue->ap_dev.drv->driver.owner;
zq->request_count--;
atomic_sub(weight, &zc->load);
atomic_sub(weight, &zq->load);
put_device(&zq->queue->ap_dev.device);
zcrypt_queue_put(zq);
module_put(mod);
}
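The helpers above bracket every request: zcrypt_pick_queue() takes the queue/module/device references and charges the request weight to both the card and the queue, and zcrypt_drop_queue() reverses all of it once the reply is in. A minimal standalone sketch (not driver code; plain ints stand in for atomic_t and the reference counting is omitted):

#include <stdio.h>

struct card  { int load; };
struct queue { struct card *card; int load; int request_count; };

/* charge a request of the given weight to card and queue */
static void pick_queue(struct queue *q, unsigned int weight)
{
	q->request_count++;
	q->card->load += weight;
	q->load += weight;
}

/* undo the accounting once the reply has been received */
static void drop_queue(struct queue *q, unsigned int weight)
{
	q->request_count--;
	q->card->load -= weight;
	q->load -= weight;
}

int main(void)
{
	struct card c = { 0 };
	struct queue q = { &c, 0, 0 };

	pick_queue(&q, 10);	/* weight comes from the card's speed_rating[] */
	printf("in flight: card load=%d queue load=%d\n", c.load, q.load);
	drop_queue(&q, 10);
	printf("finished:  card load=%d queue load=%d\n", c.load, q.load);
	return 0;
}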
static inline bool zcrypt_card_compare(struct zcrypt_card *zc,
struct zcrypt_card *pref_zc,
unsigned weight, unsigned pref_weight)
{
if (!pref_zc)
return 0;
weight += atomic_read(&zc->load);
pref_weight += atomic_read(&pref_zc->load);
if (weight == pref_weight)
return atomic_read(&zc->card->total_request_count) >
atomic_read(&pref_zc->card->total_request_count);
return weight > pref_weight;
}
static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq,
struct zcrypt_queue *pref_zq,
unsigned weight, unsigned pref_weight)
{
if (!pref_zq)
return 0;
weight += atomic_read(&zq->load);
pref_weight += atomic_read(&pref_zq->load);
if (weight == pref_weight)
return zq->queue->total_request_count >
pref_zq->queue->total_request_count;
return weight > pref_weight;
}
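These two compare helpers encode the balancing rule used by all request paths below: a candidate replaces the preferred card or queue only if its speed rating plus current load is lower, with the lifetime request count breaking ties. A standalone sketch of that rule with simplified fields and made-up numbers (no locking, illustration only):

#include <stdio.h>

struct cand { const char *name; int speed; int load; long total_requests; };

/* nonzero when the already preferred candidate should be kept */
static int keep_pref(const struct cand *c, const struct cand *pref)
{
	int w, pw;

	if (!pref)
		return 0;
	w  = c->speed + c->load;
	pw = pref->speed + pref->load;
	if (w == pw)
		return c->total_requests > pref->total_requests;
	return w > pw;
}

int main(void)
{
	struct cand cards[] = {
		{ "card 0", 3, 40, 1000 },
		{ "card 1", 3, 10, 2000 },
		{ "card 2", 5, 10,  500 },
	};
	const struct cand *pref = NULL;
	unsigned int i;

	for (i = 0; i < sizeof(cards) / sizeof(cards[0]); i++) {
		if (keep_pref(&cards[i], pref))
			continue;
		pref = &cards[i];
	}
	printf("selected %s\n", pref->name);	/* card 1: lowest speed+load */
	return 0;
}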
/* /*
* zcrypt ioctls. * zcrypt ioctls.
*/ */
static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex) static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex)
{ {
struct zcrypt_device *zdev; struct zcrypt_card *zc, *pref_zc;
int rc; struct zcrypt_queue *zq, *pref_zq;
unsigned int weight, pref_weight;
unsigned int func_code;
int qid = 0, rc = -ENODEV;
trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);
if (mex->outputdatalength < mex->inputdatalength) {
rc = -EINVAL;
goto out;
}
if (mex->outputdatalength < mex->inputdatalength)
return -EINVAL;
/* /*
* As long as outputdatalength is big enough, we can set the * As long as outputdatalength is big enough, we can set the
* outputdatalength equal to the inputdatalength, since that is the * outputdatalength equal to the inputdatalength, since that is the
...@@ -434,44 +249,73 @@ static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex) ...@@ -434,44 +249,73 @@ static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex)
*/ */
mex->outputdatalength = mex->inputdatalength; mex->outputdatalength = mex->inputdatalength;
spin_lock_bh(&zcrypt_device_lock); rc = get_rsa_modex_fc(mex, &func_code);
list_for_each_entry(zdev, &zcrypt_device_list, list) { if (rc)
if (!zdev->online || goto out;
!zdev->ops->rsa_modexpo ||
zdev->min_mod_size > mex->inputdatalength || pref_zc = NULL;
zdev->max_mod_size < mex->inputdatalength) pref_zq = NULL;
spin_lock(&zcrypt_list_lock);
for_each_zcrypt_card(zc) {
/* Check for online accelerator and CCA cards */
if (!zc->online || !(zc->card->functions & 0x18000000))
continue;
/* Check for size limits */
if (zc->min_mod_size > mex->inputdatalength ||
zc->max_mod_size < mex->inputdatalength)
continue;
/* get weight index of the card device */
weight = zc->speed_rating[func_code];
if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
continue; continue;
zcrypt_device_get(zdev); for_each_zcrypt_queue(zq, zc) {
get_device(&zdev->ap_dev->device); /* check if device is online and eligible */
zdev->request_count++; if (!zq->online || !zq->ops->rsa_modexpo)
__zcrypt_decrease_preference(zdev); continue;
if (try_module_get(zdev->ap_dev->drv->driver.owner)) { if (zcrypt_queue_compare(zq, pref_zq,
spin_unlock_bh(&zcrypt_device_lock); weight, pref_weight))
rc = zdev->ops->rsa_modexpo(zdev, mex); continue;
spin_lock_bh(&zcrypt_device_lock); pref_zc = zc;
module_put(zdev->ap_dev->drv->driver.owner); pref_zq = zq;
pref_weight = weight;
} }
else
rc = -EAGAIN;
zdev->request_count--;
__zcrypt_increase_preference(zdev);
put_device(&zdev->ap_dev->device);
zcrypt_device_put(zdev);
spin_unlock_bh(&zcrypt_device_lock);
return rc;
} }
spin_unlock_bh(&zcrypt_device_lock); pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
return -ENODEV; spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
rc = -ENODEV;
goto out;
}
qid = pref_zq->queue->qid;
rc = pref_zq->ops->rsa_modexpo(pref_zq, mex);
spin_lock(&zcrypt_list_lock);
zcrypt_drop_queue(pref_zc, pref_zq, weight);
spin_unlock(&zcrypt_list_lock);
out:
trace_s390_zcrypt_rep(mex, func_code, rc,
AP_QID_CARD(qid), AP_QID_QUEUE(qid));
return rc;
} }
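Note the locking shape all the reworked ioctl paths share: zcrypt_list_lock is held only while walking the card/queue lists and while adjusting the load accounting, never across the crypto operation itself. A rough user-space analogy, with a pthread mutex standing in for the spinlock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static int load;			/* stands in for zq->load */

static void slow_crypto_op(void)	/* placeholder for ->rsa_modexpo() */
{
	printf("running request, load=%d\n", load);
}

int main(void)
{
	int weight = 10;

	pthread_mutex_lock(&list_lock);		/* select + zcrypt_pick_queue() */
	load += weight;
	pthread_mutex_unlock(&list_lock);

	slow_crypto_op();			/* no lock held here */

	pthread_mutex_lock(&list_lock);		/* zcrypt_drop_queue() */
	load -= weight;
	pthread_mutex_unlock(&list_lock);
	return 0;
}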
static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt) static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
{ {
struct zcrypt_device *zdev; struct zcrypt_card *zc, *pref_zc;
unsigned long long z1, z2, z3; struct zcrypt_queue *zq, *pref_zq;
int rc, copied; unsigned int weight, pref_weight;
unsigned int func_code;
int qid = 0, rc = -ENODEV;
trace_s390_zcrypt_req(crt, TP_ICARSACRT);
if (crt->outputdatalength < crt->inputdatalength) {
rc = -EINVAL;
goto out;
}
if (crt->outputdatalength < crt->inputdatalength)
return -EINVAL;
/* /*
* As long as outputdatalength is big enough, we can set the * As long as outputdatalength is big enough, we can set the
* outputdatalength equal to the inputdatalength, since that is the * outputdatalength equal to the inputdatalength, since that is the
...@@ -479,308 +323,445 @@ static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt) ...@@ -479,308 +323,445 @@ static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
*/ */
crt->outputdatalength = crt->inputdatalength; crt->outputdatalength = crt->inputdatalength;
copied = 0; rc = get_rsa_crt_fc(crt, &func_code);
restart: if (rc)
spin_lock_bh(&zcrypt_device_lock); goto out;
list_for_each_entry(zdev, &zcrypt_device_list, list) {
if (!zdev->online || pref_zc = NULL;
!zdev->ops->rsa_modexpo_crt || pref_zq = NULL;
zdev->min_mod_size > crt->inputdatalength || spin_lock(&zcrypt_list_lock);
zdev->max_mod_size < crt->inputdatalength) for_each_zcrypt_card(zc) {
/* Check for online accelerator and CCA cards */
if (!zc->online || !(zc->card->functions & 0x18000000))
continue;
/* Check for size limits */
if (zc->min_mod_size > crt->inputdatalength ||
zc->max_mod_size < crt->inputdatalength)
continue; continue;
if (zdev->short_crt && crt->inputdatalength > 240) { /* get weight index of the card device */
/* weight = zc->speed_rating[func_code];
* Check inputdata for leading zeros for cards if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
* that can't handle np_prime, bp_key, or continue;
* u_mult_inv > 128 bytes. for_each_zcrypt_queue(zq, zc) {
*/ /* check if device is online and eligible */
if (copied == 0) { if (!zq->online || !zq->ops->rsa_modexpo_crt)
unsigned int len;
spin_unlock_bh(&zcrypt_device_lock);
/* len is max 256 / 2 - 120 = 8
* For bigger device just assume len of leading
* 0s is 8 as stated in the requirements for
* ica_rsa_modexpo_crt struct in zcrypt.h.
*/
if (crt->inputdatalength <= 256)
len = crt->inputdatalength / 2 - 120;
else
len = 8;
if (len > sizeof(z1))
return -EFAULT;
z1 = z2 = z3 = 0;
if (copy_from_user(&z1, crt->np_prime, len) ||
copy_from_user(&z2, crt->bp_key, len) ||
copy_from_user(&z3, crt->u_mult_inv, len))
return -EFAULT;
z1 = z2 = z3 = 0;
copied = 1;
/*
* We have to restart device lookup -
* the device list may have changed by now.
*/
goto restart;
}
if (z1 != 0ULL || z2 != 0ULL || z3 != 0ULL)
/* The device can't handle this request. */
continue; continue;
if (zcrypt_queue_compare(zq, pref_zq,
weight, pref_weight))
continue;
pref_zc = zc;
pref_zq = zq;
pref_weight = weight;
} }
zcrypt_device_get(zdev);
get_device(&zdev->ap_dev->device);
zdev->request_count++;
__zcrypt_decrease_preference(zdev);
if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
spin_unlock_bh(&zcrypt_device_lock);
rc = zdev->ops->rsa_modexpo_crt(zdev, crt);
spin_lock_bh(&zcrypt_device_lock);
module_put(zdev->ap_dev->drv->driver.owner);
}
else
rc = -EAGAIN;
zdev->request_count--;
__zcrypt_increase_preference(zdev);
put_device(&zdev->ap_dev->device);
zcrypt_device_put(zdev);
spin_unlock_bh(&zcrypt_device_lock);
return rc;
} }
spin_unlock_bh(&zcrypt_device_lock); pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
return -ENODEV; spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
rc = -ENODEV;
goto out;
}
qid = pref_zq->queue->qid;
rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt);
spin_lock(&zcrypt_list_lock);
zcrypt_drop_queue(pref_zc, pref_zq, weight);
spin_unlock(&zcrypt_list_lock);
out:
trace_s390_zcrypt_rep(crt, func_code, rc,
AP_QID_CARD(qid), AP_QID_QUEUE(qid));
return rc;
} }
static long zcrypt_send_cprb(struct ica_xcRB *xcRB) static long zcrypt_send_cprb(struct ica_xcRB *xcRB)
{ {
struct zcrypt_device *zdev; struct zcrypt_card *zc, *pref_zc;
int rc; struct zcrypt_queue *zq, *pref_zq;
struct ap_message ap_msg;
unsigned int weight, pref_weight;
unsigned int func_code;
unsigned short *domain;
int qid = 0, rc = -ENODEV;
trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB);
spin_lock_bh(&zcrypt_device_lock); rc = get_cprb_fc(xcRB, &ap_msg, &func_code, &domain);
list_for_each_entry(zdev, &zcrypt_device_list, list) { if (rc)
if (!zdev->online || !zdev->ops->send_cprb || goto out;
(zdev->ops->variant == MSGTYPE06_VARIANT_EP11) ||
(xcRB->user_defined != AUTOSELECT && pref_zc = NULL;
AP_QID_DEVICE(zdev->ap_dev->qid) != xcRB->user_defined)) pref_zq = NULL;
spin_lock(&zcrypt_list_lock);
for_each_zcrypt_card(zc) {
/* Check for online CCA cards */
if (!zc->online || !(zc->card->functions & 0x10000000))
continue;
/* Check for user selected CCA card */
if (xcRB->user_defined != AUTOSELECT &&
xcRB->user_defined != zc->card->id)
continue; continue;
zcrypt_device_get(zdev); /* get weight index of the card device */
get_device(&zdev->ap_dev->device); weight = speed_idx_cca(func_code) * zc->speed_rating[SECKEY];
zdev->request_count++; if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
__zcrypt_decrease_preference(zdev); continue;
if (try_module_get(zdev->ap_dev->drv->driver.owner)) { for_each_zcrypt_queue(zq, zc) {
spin_unlock_bh(&zcrypt_device_lock); /* check if device is online and eligible */
rc = zdev->ops->send_cprb(zdev, xcRB); if (!zq->online ||
spin_lock_bh(&zcrypt_device_lock); !zq->ops->send_cprb ||
module_put(zdev->ap_dev->drv->driver.owner); ((*domain != (unsigned short) AUTOSELECT) &&
(*domain != AP_QID_QUEUE(zq->queue->qid))))
continue;
if (zcrypt_queue_compare(zq, pref_zq,
weight, pref_weight))
continue;
pref_zc = zc;
pref_zq = zq;
pref_weight = weight;
} }
else
rc = -EAGAIN;
zdev->request_count--;
__zcrypt_increase_preference(zdev);
put_device(&zdev->ap_dev->device);
zcrypt_device_put(zdev);
spin_unlock_bh(&zcrypt_device_lock);
return rc;
} }
spin_unlock_bh(&zcrypt_device_lock); pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
return -ENODEV; spin_unlock(&zcrypt_list_lock);
}
struct ep11_target_dev_list { if (!pref_zq) {
unsigned short targets_num; rc = -ENODEV;
struct ep11_target_dev *targets; goto out;
}; }
/* in case of auto select, provide the correct domain */
qid = pref_zq->queue->qid;
if (*domain == (unsigned short) AUTOSELECT)
*domain = AP_QID_QUEUE(qid);
static bool is_desired_ep11dev(unsigned int dev_qid, rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg);
struct ep11_target_dev_list dev_list)
spin_lock(&zcrypt_list_lock);
zcrypt_drop_queue(pref_zc, pref_zq, weight);
spin_unlock(&zcrypt_list_lock);
out:
trace_s390_zcrypt_rep(xcRB, func_code, rc,
AP_QID_CARD(qid), AP_QID_QUEUE(qid));
return rc;
}
static bool is_desired_ep11_card(unsigned int dev_id,
unsigned short target_num,
struct ep11_target_dev *targets)
{ {
int n; while (target_num-- > 0) {
if (dev_id == targets->ap_id)
return true;
targets++;
}
return false;
}
for (n = 0; n < dev_list.targets_num; n++, dev_list.targets++) { static bool is_desired_ep11_queue(unsigned int dev_qid,
if ((AP_QID_DEVICE(dev_qid) == dev_list.targets->ap_id) && unsigned short target_num,
(AP_QID_QUEUE(dev_qid) == dev_list.targets->dom_id)) { struct ep11_target_dev *targets)
{
while (target_num-- > 0) {
if (AP_MKQID(targets->ap_id, targets->dom_id) == dev_qid)
return true; return true;
} targets++;
} }
return false; return false;
} }
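The two helpers above match the user-supplied (ap_id, dom_id) pairs against the kernel's packed AP queue id. The real AP_MKQID/AP_QID_CARD/AP_QID_QUEUE macros come from the AP bus header; the sketch below uses illustrative 8-bit fields just to show the matching logic:

#include <stdbool.h>
#include <stdio.h>

#define QID(card, dom)	(((card) & 0xff) << 8 | ((dom) & 0xff))
#define QID_CARD(qid)	(((qid) >> 8) & 0xff)
#define QID_DOM(qid)	((qid) & 0xff)

struct target { unsigned int ap_id; unsigned int dom_id; };

static bool queue_is_targeted(unsigned int qid,
			      const struct target *t, unsigned int n)
{
	while (n-- > 0) {
		if (QID(t->ap_id, t->dom_id) == qid)
			return true;
		t++;
	}
	return false;
}

int main(void)
{
	struct target list[] = { { 0x04, 0x11 }, { 0x07, 0x11 } };
	unsigned int qid = QID(0x07, 0x11);

	printf("card=%02x domain=%02x targeted=%d\n",
	       QID_CARD(qid), QID_DOM(qid),
	       queue_is_targeted(qid, list, 2));
	return 0;
}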
static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb) static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
{ {
struct zcrypt_device *zdev; struct zcrypt_card *zc, *pref_zc;
bool autoselect = false; struct zcrypt_queue *zq, *pref_zq;
int rc; struct ep11_target_dev *targets;
struct ep11_target_dev_list ep11_dev_list = { unsigned short target_num;
.targets_num = 0x00, unsigned int weight, pref_weight;
.targets = NULL, unsigned int func_code;
}; struct ap_message ap_msg;
int qid = 0, rc = -ENODEV;
trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB);
ep11_dev_list.targets_num = (unsigned short) xcrb->targets_num; target_num = (unsigned short) xcrb->targets_num;
/* empty list indicates autoselect (all available targets) */ /* empty list indicates autoselect (all available targets) */
if (ep11_dev_list.targets_num == 0) targets = NULL;
autoselect = true; if (target_num != 0) {
else { struct ep11_target_dev __user *uptr;
ep11_dev_list.targets = kcalloc((unsigned short)
xcrb->targets_num,
sizeof(struct ep11_target_dev),
GFP_KERNEL);
if (!ep11_dev_list.targets)
return -ENOMEM;
if (copy_from_user(ep11_dev_list.targets, targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL);
(struct ep11_target_dev __force __user *) if (!targets) {
xcrb->targets, xcrb->targets_num * rc = -ENOMEM;
sizeof(struct ep11_target_dev))) goto out;
return -EFAULT; }
uptr = (struct ep11_target_dev __force __user *) xcrb->targets;
if (copy_from_user(targets, uptr,
target_num * sizeof(*targets))) {
rc = -EFAULT;
goto out;
}
} }
spin_lock_bh(&zcrypt_device_lock); rc = get_ep11cprb_fc(xcrb, &ap_msg, &func_code);
list_for_each_entry(zdev, &zcrypt_device_list, list) { if (rc)
/* check if device is eligible */ goto out_free;
if (!zdev->online ||
zdev->ops->variant != MSGTYPE06_VARIANT_EP11)
continue;
/* check if device is selected as valid target */ pref_zc = NULL;
if (!is_desired_ep11dev(zdev->ap_dev->qid, ep11_dev_list) && pref_zq = NULL;
!autoselect) spin_lock(&zcrypt_list_lock);
for_each_zcrypt_card(zc) {
/* Check for online EP11 cards */
if (!zc->online || !(zc->card->functions & 0x04000000))
continue;
/* Check for user selected EP11 card */
if (targets &&
!is_desired_ep11_card(zc->card->id, target_num, targets))
continue; continue;
/* get weight index of the card device */
weight = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY];
if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
continue;
for_each_zcrypt_queue(zq, zc) {
/* check if device is online and eligible */
if (!zq->online ||
!zq->ops->send_ep11_cprb ||
(targets &&
!is_desired_ep11_queue(zq->queue->qid,
target_num, targets)))
continue;
if (zcrypt_queue_compare(zq, pref_zq,
weight, pref_weight))
continue;
pref_zc = zc;
pref_zq = zq;
pref_weight = weight;
}
}
pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
spin_unlock(&zcrypt_list_lock);
zcrypt_device_get(zdev); if (!pref_zq) {
get_device(&zdev->ap_dev->device); rc = -ENODEV;
zdev->request_count++; goto out_free;
__zcrypt_decrease_preference(zdev);
if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
spin_unlock_bh(&zcrypt_device_lock);
rc = zdev->ops->send_ep11_cprb(zdev, xcrb);
spin_lock_bh(&zcrypt_device_lock);
module_put(zdev->ap_dev->drv->driver.owner);
} else {
rc = -EAGAIN;
}
zdev->request_count--;
__zcrypt_increase_preference(zdev);
put_device(&zdev->ap_dev->device);
zcrypt_device_put(zdev);
spin_unlock_bh(&zcrypt_device_lock);
return rc;
} }
spin_unlock_bh(&zcrypt_device_lock);
return -ENODEV; qid = pref_zq->queue->qid;
rc = pref_zq->ops->send_ep11_cprb(pref_zq, xcrb, &ap_msg);
spin_lock(&zcrypt_list_lock);
zcrypt_drop_queue(pref_zc, pref_zq, weight);
spin_unlock(&zcrypt_list_lock);
out_free:
kfree(targets);
out:
trace_s390_zcrypt_rep(xcrb, func_code, rc,
AP_QID_CARD(qid), AP_QID_QUEUE(qid));
return rc;
} }
static long zcrypt_rng(char *buffer) static long zcrypt_rng(char *buffer)
{ {
struct zcrypt_device *zdev; struct zcrypt_card *zc, *pref_zc;
int rc; struct zcrypt_queue *zq, *pref_zq;
unsigned int weight, pref_weight;
unsigned int func_code;
struct ap_message ap_msg;
unsigned int domain;
int qid = 0, rc = -ENODEV;
trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB);
spin_lock_bh(&zcrypt_device_lock); rc = get_rng_fc(&ap_msg, &func_code, &domain);
list_for_each_entry(zdev, &zcrypt_device_list, list) { if (rc)
if (!zdev->online || !zdev->ops->rng) goto out;
pref_zc = NULL;
pref_zq = NULL;
spin_lock(&zcrypt_list_lock);
for_each_zcrypt_card(zc) {
/* Check for online CCA cards */
if (!zc->online || !(zc->card->functions & 0x10000000))
continue; continue;
zcrypt_device_get(zdev); /* get weight index of the card device */
get_device(&zdev->ap_dev->device); weight = zc->speed_rating[func_code];
zdev->request_count++; if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
__zcrypt_decrease_preference(zdev); continue;
if (try_module_get(zdev->ap_dev->drv->driver.owner)) { for_each_zcrypt_queue(zq, zc) {
spin_unlock_bh(&zcrypt_device_lock); /* check if device is online and eligible */
rc = zdev->ops->rng(zdev, buffer); if (!zq->online || !zq->ops->rng)
spin_lock_bh(&zcrypt_device_lock); continue;
module_put(zdev->ap_dev->drv->driver.owner); if (zcrypt_queue_compare(zq, pref_zq,
} else weight, pref_weight))
rc = -EAGAIN; continue;
zdev->request_count--; pref_zc = zc;
__zcrypt_increase_preference(zdev); pref_zq = zq;
put_device(&zdev->ap_dev->device); pref_weight = weight;
zcrypt_device_put(zdev); }
spin_unlock_bh(&zcrypt_device_lock); }
return rc; pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
spin_unlock(&zcrypt_list_lock);
if (!pref_zq)
return -ENODEV;
qid = pref_zq->queue->qid;
rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg);
spin_lock(&zcrypt_list_lock);
zcrypt_drop_queue(pref_zc, pref_zq, weight);
spin_unlock(&zcrypt_list_lock);
out:
trace_s390_zcrypt_rep(buffer, func_code, rc,
AP_QID_CARD(qid), AP_QID_QUEUE(qid));
return rc;
}
static void zcrypt_device_status_mask(struct zcrypt_device_matrix *matrix)
{
struct zcrypt_card *zc;
struct zcrypt_queue *zq;
struct zcrypt_device_status *stat;
memset(matrix, 0, sizeof(*matrix));
spin_lock(&zcrypt_list_lock);
for_each_zcrypt_card(zc) {
for_each_zcrypt_queue(zq, zc) {
stat = matrix->device;
stat += AP_QID_CARD(zq->queue->qid) * MAX_ZDEV_DOMAINS;
stat += AP_QID_QUEUE(zq->queue->qid);
stat->hwtype = zc->card->ap_dev.device_type;
stat->functions = zc->card->functions >> 26;
stat->qid = zq->queue->qid;
stat->online = zq->online ? 0x01 : 0x00;
}
} }
spin_unlock_bh(&zcrypt_device_lock); spin_unlock(&zcrypt_list_lock);
return -ENODEV;
} }
EXPORT_SYMBOL(zcrypt_device_status_mask);
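A hypothetical user-space sketch of the new device status API that zcrypt_device_status_mask() serves. It assumes the updated uapi <asm/zcrypt.h> exports ZDEVICESTATUS, struct zcrypt_device_matrix and MAX_ZDEV_DOMAINS, and that the matrix is laid out card-major exactly as in the loop above; error handling is minimal.

#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/zcrypt.h>		/* assumed to provide the ioctl and structs */

int main(void)
{
	struct zcrypt_device_matrix *m = calloc(1, sizeof(*m));
	int fd = open("/dev/z90crypt", O_RDWR);

	if (!m || fd < 0 || ioctl(fd, ZDEVICESTATUS, m) != 0) {
		perror("ZDEVICESTATUS");
		return 1;
	}
	/* card-major indexing: entry = card * MAX_ZDEV_DOMAINS + domain */
	printf("card 0 / domain 6: online=%d hwtype=%d\n",
	       m->device[0 * MAX_ZDEV_DOMAINS + 6].online,
	       m->device[0 * MAX_ZDEV_DOMAINS + 6].hwtype);
	close(fd);
	free(m);
	return 0;
}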
static void zcrypt_status_mask(char status[AP_DEVICES]) static void zcrypt_status_mask(char status[AP_DEVICES])
{ {
struct zcrypt_device *zdev; struct zcrypt_card *zc;
struct zcrypt_queue *zq;
memset(status, 0, sizeof(char) * AP_DEVICES); memset(status, 0, sizeof(char) * AP_DEVICES);
spin_lock_bh(&zcrypt_device_lock); spin_lock(&zcrypt_list_lock);
list_for_each_entry(zdev, &zcrypt_device_list, list) for_each_zcrypt_card(zc) {
status[AP_QID_DEVICE(zdev->ap_dev->qid)] = for_each_zcrypt_queue(zq, zc) {
zdev->online ? zdev->user_space_type : 0x0d; if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
spin_unlock_bh(&zcrypt_device_lock); continue;
status[AP_QID_CARD(zq->queue->qid)] =
zc->online ? zc->user_space_type : 0x0d;
}
}
spin_unlock(&zcrypt_list_lock);
} }
static void zcrypt_qdepth_mask(char qdepth[AP_DEVICES]) static void zcrypt_qdepth_mask(char qdepth[AP_DEVICES])
{ {
struct zcrypt_device *zdev; struct zcrypt_card *zc;
struct zcrypt_queue *zq;
memset(qdepth, 0, sizeof(char) * AP_DEVICES); memset(qdepth, 0, sizeof(char) * AP_DEVICES);
spin_lock_bh(&zcrypt_device_lock); spin_lock(&zcrypt_list_lock);
list_for_each_entry(zdev, &zcrypt_device_list, list) { for_each_zcrypt_card(zc) {
spin_lock(&zdev->ap_dev->lock); for_each_zcrypt_queue(zq, zc) {
qdepth[AP_QID_DEVICE(zdev->ap_dev->qid)] = if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
zdev->ap_dev->pendingq_count + continue;
zdev->ap_dev->requestq_count; spin_lock(&zq->queue->lock);
spin_unlock(&zdev->ap_dev->lock); qdepth[AP_QID_CARD(zq->queue->qid)] =
zq->queue->pendingq_count +
zq->queue->requestq_count;
spin_unlock(&zq->queue->lock);
}
} }
spin_unlock_bh(&zcrypt_device_lock); spin_unlock(&zcrypt_list_lock);
} }
static void zcrypt_perdev_reqcnt(int reqcnt[AP_DEVICES]) static void zcrypt_perdev_reqcnt(int reqcnt[AP_DEVICES])
{ {
struct zcrypt_device *zdev; struct zcrypt_card *zc;
struct zcrypt_queue *zq;
memset(reqcnt, 0, sizeof(int) * AP_DEVICES); memset(reqcnt, 0, sizeof(int) * AP_DEVICES);
spin_lock_bh(&zcrypt_device_lock); spin_lock(&zcrypt_list_lock);
list_for_each_entry(zdev, &zcrypt_device_list, list) { for_each_zcrypt_card(zc) {
spin_lock(&zdev->ap_dev->lock); for_each_zcrypt_queue(zq, zc) {
reqcnt[AP_QID_DEVICE(zdev->ap_dev->qid)] = if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
zdev->ap_dev->total_request_count; continue;
spin_unlock(&zdev->ap_dev->lock); spin_lock(&zq->queue->lock);
reqcnt[AP_QID_CARD(zq->queue->qid)] =
zq->queue->total_request_count;
spin_unlock(&zq->queue->lock);
}
} }
spin_unlock_bh(&zcrypt_device_lock); spin_unlock(&zcrypt_list_lock);
} }
static int zcrypt_pendingq_count(void) static int zcrypt_pendingq_count(void)
{ {
struct zcrypt_device *zdev; struct zcrypt_card *zc;
int pendingq_count = 0; struct zcrypt_queue *zq;
int pendingq_count;
spin_lock_bh(&zcrypt_device_lock);
list_for_each_entry(zdev, &zcrypt_device_list, list) { pendingq_count = 0;
spin_lock(&zdev->ap_dev->lock); spin_lock(&zcrypt_list_lock);
pendingq_count += zdev->ap_dev->pendingq_count; for_each_zcrypt_card(zc) {
spin_unlock(&zdev->ap_dev->lock); for_each_zcrypt_queue(zq, zc) {
if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
continue;
spin_lock(&zq->queue->lock);
pendingq_count += zq->queue->pendingq_count;
spin_unlock(&zq->queue->lock);
}
} }
spin_unlock_bh(&zcrypt_device_lock); spin_unlock(&zcrypt_list_lock);
return pendingq_count; return pendingq_count;
} }
static int zcrypt_requestq_count(void) static int zcrypt_requestq_count(void)
{ {
struct zcrypt_device *zdev; struct zcrypt_card *zc;
int requestq_count = 0; struct zcrypt_queue *zq;
int requestq_count;
spin_lock_bh(&zcrypt_device_lock);
list_for_each_entry(zdev, &zcrypt_device_list, list) { requestq_count = 0;
spin_lock(&zdev->ap_dev->lock); spin_lock(&zcrypt_list_lock);
requestq_count += zdev->ap_dev->requestq_count; for_each_zcrypt_card(zc) {
spin_unlock(&zdev->ap_dev->lock); for_each_zcrypt_queue(zq, zc) {
if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
continue;
spin_lock(&zq->queue->lock);
requestq_count += zq->queue->requestq_count;
spin_unlock(&zq->queue->lock);
}
} }
spin_unlock_bh(&zcrypt_device_lock); spin_unlock(&zcrypt_list_lock);
return requestq_count; return requestq_count;
} }
static int zcrypt_count_type(int type) static int zcrypt_count_type(int type)
{ {
struct zcrypt_device *zdev; struct zcrypt_card *zc;
int device_count = 0; struct zcrypt_queue *zq;
int device_count;
spin_lock_bh(&zcrypt_device_lock);
list_for_each_entry(zdev, &zcrypt_device_list, list) device_count = 0;
if (zdev->user_space_type == type) spin_lock(&zcrypt_list_lock);
for_each_zcrypt_card(zc) {
if (zc->card->id != type)
continue;
for_each_zcrypt_queue(zq, zc) {
if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
continue;
device_count++; device_count++;
spin_unlock_bh(&zcrypt_device_lock); }
}
spin_unlock(&zcrypt_list_lock);
return device_count; return device_count;
} }
...@@ -887,6 +868,25 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd, ...@@ -887,6 +868,25 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
return -EFAULT; return -EFAULT;
return rc; return rc;
} }
case ZDEVICESTATUS: {
struct zcrypt_device_matrix *device_status;
device_status = kzalloc(sizeof(struct zcrypt_device_matrix),
GFP_KERNEL);
if (!device_status)
return -ENOMEM;
zcrypt_device_status_mask(device_status);
if (copy_to_user((char __user *) arg, device_status,
sizeof(struct zcrypt_device_matrix))) {
kfree(device_status);
return -EFAULT;
}
kfree(device_status);
return 0;
}
case Z90STAT_STATUS_MASK: { case Z90STAT_STATUS_MASK: {
char status[AP_DEVICES]; char status[AP_DEVICES];
zcrypt_status_mask(status); zcrypt_status_mask(status);
...@@ -1249,29 +1249,36 @@ static int zcrypt_proc_open(struct inode *inode, struct file *file) ...@@ -1249,29 +1249,36 @@ static int zcrypt_proc_open(struct inode *inode, struct file *file)
static void zcrypt_disable_card(int index) static void zcrypt_disable_card(int index)
{ {
struct zcrypt_device *zdev; struct zcrypt_card *zc;
struct zcrypt_queue *zq;
spin_lock_bh(&zcrypt_device_lock); spin_lock(&zcrypt_list_lock);
list_for_each_entry(zdev, &zcrypt_device_list, list) for_each_zcrypt_card(zc) {
if (AP_QID_DEVICE(zdev->ap_dev->qid) == index) { for_each_zcrypt_queue(zq, zc) {
zdev->online = 0; if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
ap_flush_queue(zdev->ap_dev); continue;
break; zq->online = 0;
ap_flush_queue(zq->queue);
} }
spin_unlock_bh(&zcrypt_device_lock); }
spin_unlock(&zcrypt_list_lock);
} }
static void zcrypt_enable_card(int index) static void zcrypt_enable_card(int index)
{ {
struct zcrypt_device *zdev; struct zcrypt_card *zc;
struct zcrypt_queue *zq;
spin_lock_bh(&zcrypt_device_lock); spin_lock(&zcrypt_list_lock);
list_for_each_entry(zdev, &zcrypt_device_list, list) for_each_zcrypt_card(zc) {
if (AP_QID_DEVICE(zdev->ap_dev->qid) == index) { for_each_zcrypt_queue(zq, zc) {
zdev->online = 1; if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
break; continue;
zq->online = 1;
ap_flush_queue(zq->queue);
} }
spin_unlock_bh(&zcrypt_device_lock); }
spin_unlock(&zcrypt_list_lock);
} }
static ssize_t zcrypt_proc_write(struct file *file, const char __user *buffer, static ssize_t zcrypt_proc_write(struct file *file, const char __user *buffer,
...@@ -1369,7 +1376,7 @@ static struct hwrng zcrypt_rng_dev = { ...@@ -1369,7 +1376,7 @@ static struct hwrng zcrypt_rng_dev = {
.quality = 990, .quality = 990,
}; };
static int zcrypt_rng_device_add(void) int zcrypt_rng_device_add(void)
{ {
int rc = 0; int rc = 0;
...@@ -1399,7 +1406,7 @@ static int zcrypt_rng_device_add(void) ...@@ -1399,7 +1406,7 @@ static int zcrypt_rng_device_add(void)
return rc; return rc;
} }
static void zcrypt_rng_device_remove(void) void zcrypt_rng_device_remove(void)
{ {
mutex_lock(&zcrypt_rng_mutex); mutex_lock(&zcrypt_rng_mutex);
zcrypt_rng_device_count--; zcrypt_rng_device_count--;
...@@ -1412,24 +1419,19 @@ static void zcrypt_rng_device_remove(void) ...@@ -1412,24 +1419,19 @@ static void zcrypt_rng_device_remove(void)
int __init zcrypt_debug_init(void) int __init zcrypt_debug_init(void)
{ {
debugfs_root = debugfs_create_dir("zcrypt", NULL); zcrypt_dbf_root = debugfs_create_dir("zcrypt", NULL);
zcrypt_dbf_info = debug_register("zcrypt", 1, 1,
zcrypt_dbf_common = debug_register("zcrypt_common", 1, 1, 16); DBF_MAX_SPRINTF_ARGS * sizeof(long));
debug_register_view(zcrypt_dbf_common, &debug_hex_ascii_view); debug_register_view(zcrypt_dbf_info, &debug_sprintf_view);
debug_set_level(zcrypt_dbf_common, DBF_ERR); debug_set_level(zcrypt_dbf_info, DBF_ERR);
zcrypt_dbf_devices = debug_register("zcrypt_devices", 1, 1, 16);
debug_register_view(zcrypt_dbf_devices, &debug_hex_ascii_view);
debug_set_level(zcrypt_dbf_devices, DBF_ERR);
return 0; return 0;
} }
void zcrypt_debug_exit(void) void zcrypt_debug_exit(void)
{ {
debugfs_remove(debugfs_root); debugfs_remove(zcrypt_dbf_root);
debug_unregister(zcrypt_dbf_common); debug_unregister(zcrypt_dbf_info);
debug_unregister(zcrypt_dbf_devices);
} }
/** /**
...@@ -1453,12 +1455,15 @@ int __init zcrypt_api_init(void) ...@@ -1453,12 +1455,15 @@ int __init zcrypt_api_init(void)
goto out; goto out;
/* Set up the proc file system */ /* Set up the proc file system */
zcrypt_entry = proc_create("driver/z90crypt", 0644, NULL, &zcrypt_proc_fops); zcrypt_entry = proc_create("driver/z90crypt", 0644, NULL,
&zcrypt_proc_fops);
if (!zcrypt_entry) { if (!zcrypt_entry) {
rc = -ENOMEM; rc = -ENOMEM;
goto out_misc; goto out_misc;
} }
zcrypt_msgtype6_init();
zcrypt_msgtype50_init();
return 0; return 0;
out_misc: out_misc:
...@@ -1472,10 +1477,12 @@ int __init zcrypt_api_init(void) ...@@ -1472,10 +1477,12 @@ int __init zcrypt_api_init(void)
* *
* The module termination code. * The module termination code.
*/ */
void zcrypt_api_exit(void) void __exit zcrypt_api_exit(void)
{ {
remove_proc_entry("driver/z90crypt", NULL); remove_proc_entry("driver/z90crypt", NULL);
misc_deregister(&zcrypt_misc_device); misc_deregister(&zcrypt_misc_device);
zcrypt_msgtype6_exit();
zcrypt_msgtype50_exit();
zcrypt_debug_exit(); zcrypt_debug_exit();
} }
......
...@@ -84,57 +84,110 @@ struct ica_z90_status { ...@@ -84,57 +84,110 @@ struct ica_z90_status {
*/ */
#define ZCRYPT_RNG_BUFFER_SIZE 4096 #define ZCRYPT_RNG_BUFFER_SIZE 4096
struct zcrypt_device; /*
* Identifier for Crypto Request Performance Index
*/
enum crypto_ops {
MEX_1K,
MEX_2K,
MEX_4K,
CRT_1K,
CRT_2K,
CRT_4K,
HWRNG,
SECKEY,
NUM_OPS
};
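The enum gives each request class a column in the per-card speed_rating[] table; a lower value means a faster card for that operation, and the value doubles as the request weight used by the load balancer. A standalone sketch of the lookup, reusing the CEX4A numbers that appear further down in this diff:

#include <stdio.h>

enum op { MEX_1K, MEX_2K, MEX_4K, CRT_1K, CRT_2K, CRT_4K, HWRNG, SECKEY, NUM_OPS };

int main(void)
{
	/* values copied from CEX4A_SPEED_IDX in the cex4 probe code */
	static const int speed_idx[NUM_OPS] = { 5, 6, 59, 20, 115, 581, 0, 0 };
	enum op func_code = CRT_4K;

	printf("weight for a CRT_4K request on this card: %d\n",
	       speed_idx[func_code]);
	return 0;
}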
struct zcrypt_queue;
struct zcrypt_ops { struct zcrypt_ops {
long (*rsa_modexpo)(struct zcrypt_device *, struct ica_rsa_modexpo *); long (*rsa_modexpo)(struct zcrypt_queue *, struct ica_rsa_modexpo *);
long (*rsa_modexpo_crt)(struct zcrypt_device *, long (*rsa_modexpo_crt)(struct zcrypt_queue *,
struct ica_rsa_modexpo_crt *); struct ica_rsa_modexpo_crt *);
long (*send_cprb)(struct zcrypt_device *, struct ica_xcRB *); long (*send_cprb)(struct zcrypt_queue *, struct ica_xcRB *,
long (*send_ep11_cprb)(struct zcrypt_device *, struct ep11_urb *); struct ap_message *);
long (*rng)(struct zcrypt_device *, char *); long (*send_ep11_cprb)(struct zcrypt_queue *, struct ep11_urb *,
struct ap_message *);
long (*rng)(struct zcrypt_queue *, char *, struct ap_message *);
struct list_head list; /* zcrypt ops list. */ struct list_head list; /* zcrypt ops list. */
struct module *owner; struct module *owner;
int variant; int variant;
char name[128]; char name[128];
}; };
struct zcrypt_device { struct zcrypt_card {
struct list_head list; /* Device list. */ struct list_head list; /* Device list. */
spinlock_t lock; /* Per device lock. */ struct list_head zqueues; /* List of zcrypt queues */
struct kref refcount; /* device refcounting */ struct kref refcount; /* device refcounting */
struct ap_device *ap_dev; /* The "real" ap device. */ struct ap_card *card; /* The "real" ap card device. */
struct zcrypt_ops *ops; /* Crypto operations. */
int online; /* User online/offline */ int online; /* User online/offline */
int user_space_type; /* User space device id. */ int user_space_type; /* User space device id. */
char *type_string; /* User space device name. */ char *type_string; /* User space device name. */
int min_mod_size; /* Min number of bits. */ int min_mod_size; /* Min number of bits. */
int max_mod_size; /* Max number of bits. */ int max_mod_size; /* Max number of bits. */
int short_crt; /* Card has crt length restriction. */ int max_exp_bit_length;
int speed_rating; /* Speed of the crypto device. */ int speed_rating[NUM_OPS]; /* Speed idx of crypto ops. */
atomic_t load; /* Utilization of the crypto device */
int request_count; /* # current requests. */ int request_count; /* # current requests. */
};
struct ap_message reply; /* Per-device reply structure. */ struct zcrypt_queue {
int max_exp_bit_length; struct list_head list; /* Device list. */
struct kref refcount; /* device refcounting */
struct zcrypt_card *zcard;
struct zcrypt_ops *ops; /* Crypto operations. */
struct ap_queue *queue; /* The "real" ap queue device. */
int online; /* User online/offline */
atomic_t load; /* Utilization of the crypto device */
debug_info_t *dbf_area; /* debugging */ int request_count; /* # current requests. */
struct ap_message reply; /* Per-device reply structure. */
}; };
/* transport layer rescanning */ /* transport layer rescanning */
extern atomic_t zcrypt_rescan_req; extern atomic_t zcrypt_rescan_req;
struct zcrypt_device *zcrypt_device_alloc(size_t); extern spinlock_t zcrypt_list_lock;
void zcrypt_device_free(struct zcrypt_device *); extern int zcrypt_device_count;
void zcrypt_device_get(struct zcrypt_device *); extern struct list_head zcrypt_card_list;
int zcrypt_device_put(struct zcrypt_device *);
int zcrypt_device_register(struct zcrypt_device *); #define for_each_zcrypt_card(_zc) \
void zcrypt_device_unregister(struct zcrypt_device *); list_for_each_entry(_zc, &zcrypt_card_list, list)
#define for_each_zcrypt_queue(_zq, _zc) \
list_for_each_entry(_zq, &(_zc)->zqueues, list)
struct zcrypt_card *zcrypt_card_alloc(void);
void zcrypt_card_free(struct zcrypt_card *);
void zcrypt_card_get(struct zcrypt_card *);
int zcrypt_card_put(struct zcrypt_card *);
int zcrypt_card_register(struct zcrypt_card *);
void zcrypt_card_unregister(struct zcrypt_card *);
struct zcrypt_card *zcrypt_card_get_best(unsigned int *,
unsigned int, unsigned int);
void zcrypt_card_put_best(struct zcrypt_card *, unsigned int);
struct zcrypt_queue *zcrypt_queue_alloc(size_t);
void zcrypt_queue_free(struct zcrypt_queue *);
void zcrypt_queue_get(struct zcrypt_queue *);
int zcrypt_queue_put(struct zcrypt_queue *);
int zcrypt_queue_register(struct zcrypt_queue *);
void zcrypt_queue_unregister(struct zcrypt_queue *);
void zcrypt_queue_force_online(struct zcrypt_queue *, int);
struct zcrypt_queue *zcrypt_queue_get_best(unsigned int, unsigned int);
void zcrypt_queue_put_best(struct zcrypt_queue *, unsigned int);
int zcrypt_rng_device_add(void);
void zcrypt_rng_device_remove(void);
void zcrypt_msgtype_register(struct zcrypt_ops *); void zcrypt_msgtype_register(struct zcrypt_ops *);
void zcrypt_msgtype_unregister(struct zcrypt_ops *); void zcrypt_msgtype_unregister(struct zcrypt_ops *);
struct zcrypt_ops *zcrypt_msgtype_request(unsigned char *, int); struct zcrypt_ops *zcrypt_msgtype(unsigned char *, int);
void zcrypt_msgtype_release(struct zcrypt_ops *);
int zcrypt_api_init(void); int zcrypt_api_init(void);
void zcrypt_api_exit(void); void zcrypt_api_exit(void);
......
/*
* zcrypt 2.1.0
*
* Copyright IBM Corp. 2001, 2012
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
* Cornelia Huck <cornelia.huck@de.ibm.com>
*
* Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
* Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
* Ralph Wuerthner <rwuerthn@de.ibm.com>
* MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
#include <linux/hw_random.h>
#include <linux/debugfs.h>
#include <asm/debug.h>
#include "zcrypt_debug.h"
#include "zcrypt_api.h"
#include "zcrypt_msgtype6.h"
#include "zcrypt_msgtype50.h"
/*
* Device attributes common for all crypto card devices.
*/
static ssize_t zcrypt_card_type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct zcrypt_card *zc = to_ap_card(dev)->private;
return snprintf(buf, PAGE_SIZE, "%s\n", zc->type_string);
}
static DEVICE_ATTR(type, 0444, zcrypt_card_type_show, NULL);
static ssize_t zcrypt_card_online_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct zcrypt_card *zc = to_ap_card(dev)->private;
return snprintf(buf, PAGE_SIZE, "%d\n", zc->online);
}
static ssize_t zcrypt_card_online_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct zcrypt_card *zc = to_ap_card(dev)->private;
struct zcrypt_queue *zq;
int online, id;
if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1)
return -EINVAL;
zc->online = online;
id = zc->card->id;
ZCRYPT_DBF(DBF_INFO, "card=%02x online=%d\n", id, online);
spin_lock(&zcrypt_list_lock);
list_for_each_entry(zq, &zc->zqueues, list)
zcrypt_queue_force_online(zq, online);
spin_unlock(&zcrypt_list_lock);
return count;
}
static DEVICE_ATTR(online, 0644, zcrypt_card_online_show,
zcrypt_card_online_store);
static struct attribute *zcrypt_card_attrs[] = {
&dev_attr_type.attr,
&dev_attr_online.attr,
NULL,
};
static struct attribute_group zcrypt_card_attr_group = {
.attrs = zcrypt_card_attrs,
};
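Since the attribute group is registered on the AP card device, the new per-card online switch is reachable through sysfs. A hypothetical user-space sketch; the path assumes the AP bus names card devices "card%02x", which may differ on a given system:

#include <stdio.h>

int main(void)
{
	/* assumed path; check /sys/bus/ap/devices/ for the real names */
	FILE *f = fopen("/sys/bus/ap/devices/card01/online", "w");

	if (!f) {
		perror("open online attribute");
		return 1;
	}
	fputs("0\n", f);	/* 0 = offline, 1 = online (see the store handler) */
	return fclose(f) ? 1 : 0;
}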
struct zcrypt_card *zcrypt_card_alloc(void)
{
struct zcrypt_card *zc;
zc = kzalloc(sizeof(struct zcrypt_card), GFP_KERNEL);
if (!zc)
return NULL;
INIT_LIST_HEAD(&zc->list);
INIT_LIST_HEAD(&zc->zqueues);
kref_init(&zc->refcount);
return zc;
}
EXPORT_SYMBOL(zcrypt_card_alloc);
void zcrypt_card_free(struct zcrypt_card *zc)
{
kfree(zc);
}
EXPORT_SYMBOL(zcrypt_card_free);
static void zcrypt_card_release(struct kref *kref)
{
struct zcrypt_card *zdev =
container_of(kref, struct zcrypt_card, refcount);
zcrypt_card_free(zdev);
}
void zcrypt_card_get(struct zcrypt_card *zc)
{
kref_get(&zc->refcount);
}
EXPORT_SYMBOL(zcrypt_card_get);
int zcrypt_card_put(struct zcrypt_card *zc)
{
return kref_put(&zc->refcount, zcrypt_card_release);
}
EXPORT_SYMBOL(zcrypt_card_put);
/**
* zcrypt_card_register() - Register a crypto card device.
* @zc: Pointer to a crypto card device
*
* Register a crypto card device. Returns 0 if successful.
*/
int zcrypt_card_register(struct zcrypt_card *zc)
{
int rc;
rc = sysfs_create_group(&zc->card->ap_dev.device.kobj,
&zcrypt_card_attr_group);
if (rc)
return rc;
spin_lock(&zcrypt_list_lock);
list_add_tail(&zc->list, &zcrypt_card_list);
spin_unlock(&zcrypt_list_lock);
zc->online = 1;
ZCRYPT_DBF(DBF_INFO, "card=%02x register online=1\n", zc->card->id);
return rc;
}
EXPORT_SYMBOL(zcrypt_card_register);
/**
* zcrypt_card_unregister(): Unregister a crypto card device.
* @zc: Pointer to crypto card device
*
* Unregister a crypto card device.
*/
void zcrypt_card_unregister(struct zcrypt_card *zc)
{
ZCRYPT_DBF(DBF_INFO, "card=%02x unregister\n", zc->card->id);
spin_lock(&zcrypt_list_lock);
list_del_init(&zc->list);
spin_unlock(&zcrypt_list_lock);
sysfs_remove_group(&zc->card->ap_dev.device.kobj,
&zcrypt_card_attr_group);
}
EXPORT_SYMBOL(zcrypt_card_unregister);
...@@ -31,6 +31,7 @@ ...@@ -31,6 +31,7 @@
#include <linux/err.h> #include <linux/err.h>
#include <linux/atomic.h> #include <linux/atomic.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <linux/mod_devicetable.h>
#include "ap_bus.h" #include "ap_bus.h"
#include "zcrypt_api.h" #include "zcrypt_api.h"
...@@ -43,9 +44,6 @@ ...@@ -43,9 +44,6 @@
#define CEX3A_MIN_MOD_SIZE CEX2A_MIN_MOD_SIZE #define CEX3A_MIN_MOD_SIZE CEX2A_MIN_MOD_SIZE
#define CEX3A_MAX_MOD_SIZE 512 /* 4096 bits */ #define CEX3A_MAX_MOD_SIZE 512 /* 4096 bits */
#define CEX2A_SPEED_RATING 970
#define CEX3A_SPEED_RATING 900 /* Fixme: Needs finetuning */
#define CEX2A_MAX_MESSAGE_SIZE 0x390 /* sizeof(struct type50_crb2_msg) */ #define CEX2A_MAX_MESSAGE_SIZE 0x390 /* sizeof(struct type50_crb2_msg) */
#define CEX2A_MAX_RESPONSE_SIZE 0x110 /* max outputdatalength + type80_hdr */ #define CEX2A_MAX_RESPONSE_SIZE 0x110 /* max outputdatalength + type80_hdr */
...@@ -57,107 +55,195 @@ ...@@ -57,107 +55,195 @@
#define CEX2A_CLEANUP_TIME (15*HZ) #define CEX2A_CLEANUP_TIME (15*HZ)
#define CEX3A_CLEANUP_TIME CEX2A_CLEANUP_TIME #define CEX3A_CLEANUP_TIME CEX2A_CLEANUP_TIME
static struct ap_device_id zcrypt_cex2a_ids[] = {
{ AP_DEVICE(AP_DEVICE_TYPE_CEX2A) },
{ AP_DEVICE(AP_DEVICE_TYPE_CEX3A) },
{ /* end of list */ },
};
MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_ids);
MODULE_AUTHOR("IBM Corporation"); MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("CEX2A Cryptographic Coprocessor device driver, " \ MODULE_DESCRIPTION("CEX2A Cryptographic Coprocessor device driver, " \
"Copyright IBM Corp. 2001, 2012"); "Copyright IBM Corp. 2001, 2012");
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
static int zcrypt_cex2a_probe(struct ap_device *ap_dev); static struct ap_device_id zcrypt_cex2a_card_ids[] = {
static void zcrypt_cex2a_remove(struct ap_device *ap_dev); { .dev_type = AP_DEVICE_TYPE_CEX2A,
.match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
{ .dev_type = AP_DEVICE_TYPE_CEX3A,
.match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
{ /* end of list */ },
};
MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_card_ids);
static struct ap_driver zcrypt_cex2a_driver = { static struct ap_device_id zcrypt_cex2a_queue_ids[] = {
.probe = zcrypt_cex2a_probe, { .dev_type = AP_DEVICE_TYPE_CEX2A,
.remove = zcrypt_cex2a_remove, .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
.ids = zcrypt_cex2a_ids, { .dev_type = AP_DEVICE_TYPE_CEX3A,
.request_timeout = CEX2A_CLEANUP_TIME, .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
{ /* end of list */ },
}; };
MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_queue_ids);
/** /**
* Probe function for CEX2A cards. It always accepts the AP device * Probe function for CEX2A card devices. It always accepts the AP device
* since the bus_match already checked the hardware type. * since the bus_match already checked the card type.
* @ap_dev: pointer to the AP device. * @ap_dev: pointer to the AP device.
*/ */
static int zcrypt_cex2a_probe(struct ap_device *ap_dev) static int zcrypt_cex2a_card_probe(struct ap_device *ap_dev)
{ {
struct zcrypt_device *zdev = NULL; /*
* Normalized speed ratings per crypto adapter
* MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY
*/
static const int CEX2A_SPEED_IDX[] = {
800, 1000, 2000, 900, 1200, 2400, 0, 0};
static const int CEX3A_SPEED_IDX[] = {
400, 500, 1000, 450, 550, 1200, 0, 0};
struct ap_card *ac = to_ap_card(&ap_dev->device);
struct zcrypt_card *zc;
int rc = 0; int rc = 0;
zc = zcrypt_card_alloc();
if (!zc)
return -ENOMEM;
zc->card = ac;
ac->private = zc;
if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX2A) {
zc->min_mod_size = CEX2A_MIN_MOD_SIZE;
zc->max_mod_size = CEX2A_MAX_MOD_SIZE;
memcpy(zc->speed_rating, CEX2A_SPEED_IDX,
sizeof(CEX2A_SPEED_IDX));
zc->max_exp_bit_length = CEX2A_MAX_MOD_SIZE;
zc->type_string = "CEX2A";
zc->user_space_type = ZCRYPT_CEX2A;
} else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX3A) {
zc->min_mod_size = CEX2A_MIN_MOD_SIZE;
zc->max_mod_size = CEX2A_MAX_MOD_SIZE;
zc->max_exp_bit_length = CEX2A_MAX_MOD_SIZE;
if (ap_test_bit(&ac->functions, AP_FUNC_MEX4K) &&
ap_test_bit(&ac->functions, AP_FUNC_CRT4K)) {
zc->max_mod_size = CEX3A_MAX_MOD_SIZE;
zc->max_exp_bit_length = CEX3A_MAX_MOD_SIZE;
}
memcpy(zc->speed_rating, CEX3A_SPEED_IDX,
sizeof(CEX3A_SPEED_IDX));
zc->type_string = "CEX3A";
zc->user_space_type = ZCRYPT_CEX3A;
} else {
zcrypt_card_free(zc);
return -ENODEV;
}
zc->online = 1;
rc = zcrypt_card_register(zc);
if (rc) {
ac->private = NULL;
zcrypt_card_free(zc);
}
return rc;
}
/**
* This is called to remove the CEX2A card driver information
* if an AP card device is removed.
*/
static void zcrypt_cex2a_card_remove(struct ap_device *ap_dev)
{
struct zcrypt_card *zc = to_ap_card(&ap_dev->device)->private;
if (zc)
zcrypt_card_unregister(zc);
}
static struct ap_driver zcrypt_cex2a_card_driver = {
.probe = zcrypt_cex2a_card_probe,
.remove = zcrypt_cex2a_card_remove,
.ids = zcrypt_cex2a_card_ids,
};
/**
* Probe function for CEX2A queue devices. It always accepts the AP device
* since the bus_match already checked the queue type.
* @ap_dev: pointer to the AP device.
*/
static int zcrypt_cex2a_queue_probe(struct ap_device *ap_dev)
{
struct ap_queue *aq = to_ap_queue(&ap_dev->device);
struct zcrypt_queue *zq = NULL;
int rc;
switch (ap_dev->device_type) { switch (ap_dev->device_type) {
case AP_DEVICE_TYPE_CEX2A: case AP_DEVICE_TYPE_CEX2A:
zdev = zcrypt_device_alloc(CEX2A_MAX_RESPONSE_SIZE); zq = zcrypt_queue_alloc(CEX2A_MAX_RESPONSE_SIZE);
if (!zdev) if (!zq)
return -ENOMEM; return -ENOMEM;
zdev->user_space_type = ZCRYPT_CEX2A;
zdev->type_string = "CEX2A";
zdev->min_mod_size = CEX2A_MIN_MOD_SIZE;
zdev->max_mod_size = CEX2A_MAX_MOD_SIZE;
zdev->short_crt = 1;
zdev->speed_rating = CEX2A_SPEED_RATING;
zdev->max_exp_bit_length = CEX2A_MAX_MOD_SIZE;
break; break;
case AP_DEVICE_TYPE_CEX3A: case AP_DEVICE_TYPE_CEX3A:
zdev = zcrypt_device_alloc(CEX3A_MAX_RESPONSE_SIZE); zq = zcrypt_queue_alloc(CEX3A_MAX_RESPONSE_SIZE);
if (!zdev) if (!zq)
return -ENOMEM; return -ENOMEM;
zdev->user_space_type = ZCRYPT_CEX3A;
zdev->type_string = "CEX3A";
zdev->min_mod_size = CEX2A_MIN_MOD_SIZE;
zdev->max_mod_size = CEX2A_MAX_MOD_SIZE;
zdev->max_exp_bit_length = CEX2A_MAX_MOD_SIZE;
if (ap_test_bit(&ap_dev->functions, AP_FUNC_MEX4K) &&
ap_test_bit(&ap_dev->functions, AP_FUNC_CRT4K)) {
zdev->max_mod_size = CEX3A_MAX_MOD_SIZE;
zdev->max_exp_bit_length = CEX3A_MAX_MOD_SIZE;
}
zdev->short_crt = 1;
zdev->speed_rating = CEX3A_SPEED_RATING;
break; break;
} }
if (!zdev) if (!zq)
return -ENODEV; return -ENODEV;
zdev->ops = zcrypt_msgtype_request(MSGTYPE50_NAME, zq->ops = zcrypt_msgtype(MSGTYPE50_NAME, MSGTYPE50_VARIANT_DEFAULT);
MSGTYPE50_VARIANT_DEFAULT); zq->queue = aq;
zdev->ap_dev = ap_dev; zq->online = 1;
zdev->online = 1; atomic_set(&zq->load, 0);
ap_device_init_reply(ap_dev, &zdev->reply); ap_queue_init_reply(aq, &zq->reply);
ap_dev->private = zdev; aq->request_timeout = CEX2A_CLEANUP_TIME;
rc = zcrypt_device_register(zdev); aq->private = zq;
rc = zcrypt_queue_register(zq);
if (rc) { if (rc) {
ap_dev->private = NULL; aq->private = NULL;
zcrypt_msgtype_release(zdev->ops); zcrypt_queue_free(zq);
zcrypt_device_free(zdev);
} }
return rc; return rc;
} }
/** /**
* This is called to remove the extended CEX2A driver information * This is called to remove the CEX2A queue driver information
* if an AP device is removed. * if an AP queue device is removed.
*/ */
static void zcrypt_cex2a_remove(struct ap_device *ap_dev) static void zcrypt_cex2a_queue_remove(struct ap_device *ap_dev)
{ {
struct zcrypt_device *zdev = ap_dev->private; struct ap_queue *aq = to_ap_queue(&ap_dev->device);
struct zcrypt_ops *zops = zdev->ops; struct zcrypt_queue *zq = aq->private;
zcrypt_device_unregister(zdev); ap_queue_remove(aq);
zcrypt_msgtype_release(zops); if (zq)
zcrypt_queue_unregister(zq);
} }
static struct ap_driver zcrypt_cex2a_queue_driver = {
.probe = zcrypt_cex2a_queue_probe,
.remove = zcrypt_cex2a_queue_remove,
.suspend = ap_queue_suspend,
.resume = ap_queue_resume,
.ids = zcrypt_cex2a_queue_ids,
};
int __init zcrypt_cex2a_init(void) int __init zcrypt_cex2a_init(void)
{ {
return ap_driver_register(&zcrypt_cex2a_driver, THIS_MODULE, "cex2a"); int rc;
rc = ap_driver_register(&zcrypt_cex2a_card_driver,
THIS_MODULE, "cex2acard");
if (rc)
return rc;
rc = ap_driver_register(&zcrypt_cex2a_queue_driver,
THIS_MODULE, "cex2aqueue");
if (rc)
ap_driver_unregister(&zcrypt_cex2a_card_driver);
return rc;
} }
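With the card/queue split each device driver now registers two ap_driver structures, so the init routine has to unwind the first registration when the second one fails. A generic standalone sketch of that unwind pattern (stand-in functions, not the AP bus API):

#include <stdio.h>

static int  register_card(void)   { puts("register card driver");   return 0; }
static void unregister_card(void) { puts("unregister card driver"); }
static int  register_queue(void)  { puts("register queue driver");  return -1; }

static int init_both(void)
{
	int rc;

	rc = register_card();
	if (rc)
		return rc;
	rc = register_queue();
	if (rc)
		unregister_card();	/* roll back the first registration */
	return rc;
}

int main(void)
{
	printf("init rc=%d\n", init_both());
	return 0;
}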
void __exit zcrypt_cex2a_exit(void) void __exit zcrypt_cex2a_exit(void)
{ {
ap_driver_unregister(&zcrypt_cex2a_driver); ap_driver_unregister(&zcrypt_cex2a_queue_driver);
ap_driver_unregister(&zcrypt_cex2a_card_driver);
} }
module_init(zcrypt_cex2a_init); module_init(zcrypt_cex2a_init);
......
...@@ -9,6 +9,7 @@ ...@@ -9,6 +9,7 @@
#include <linux/err.h> #include <linux/err.h>
#include <linux/atomic.h> #include <linux/atomic.h>
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <linux/mod_devicetable.h>
#include "ap_bus.h" #include "ap_bus.h"
#include "zcrypt_api.h" #include "zcrypt_api.h"
...@@ -24,13 +25,6 @@ ...@@ -24,13 +25,6 @@
#define CEX4C_MIN_MOD_SIZE 16 /* 256 bits */ #define CEX4C_MIN_MOD_SIZE 16 /* 256 bits */
#define CEX4C_MAX_MOD_SIZE 512 /* 4096 bits */ #define CEX4C_MAX_MOD_SIZE 512 /* 4096 bits */
#define CEX4A_SPEED_RATING 900 /* TODO new card, new speed rating */
#define CEX4C_SPEED_RATING 6500 /* TODO new card, new speed rating */
#define CEX4P_SPEED_RATING 7000 /* TODO new card, new speed rating */
#define CEX5A_SPEED_RATING 450 /* TODO new card, new speed rating */
#define CEX5C_SPEED_RATING 3250 /* TODO new card, new speed rating */
#define CEX5P_SPEED_RATING 3500 /* TODO new card, new speed rating */
#define CEX4A_MAX_MESSAGE_SIZE MSGTYPE50_CRB3_MAX_MSG_SIZE #define CEX4A_MAX_MESSAGE_SIZE MSGTYPE50_CRB3_MAX_MSG_SIZE
#define CEX4C_MAX_MESSAGE_SIZE MSGTYPE06_MAX_MSG_SIZE #define CEX4C_MAX_MESSAGE_SIZE MSGTYPE06_MAX_MSG_SIZE
...@@ -41,147 +35,246 @@ ...@@ -41,147 +35,246 @@
*/ */
#define CEX4_CLEANUP_TIME (900*HZ) #define CEX4_CLEANUP_TIME (900*HZ)
-static struct ap_device_id zcrypt_cex4_ids[] = {
-	{ AP_DEVICE(AP_DEVICE_TYPE_CEX4) },
-	{ AP_DEVICE(AP_DEVICE_TYPE_CEX5) },
-	{ /* end of list */ },
-};
-MODULE_DEVICE_TABLE(ap, zcrypt_cex4_ids);
-
 MODULE_AUTHOR("IBM Corporation");
 MODULE_DESCRIPTION("CEX4 Cryptographic Card device driver, " \
 		   "Copyright IBM Corp. 2012");
 MODULE_LICENSE("GPL");
 
-static int zcrypt_cex4_probe(struct ap_device *ap_dev);
-static void zcrypt_cex4_remove(struct ap_device *ap_dev);
-
-static struct ap_driver zcrypt_cex4_driver = {
-	.probe = zcrypt_cex4_probe,
-	.remove = zcrypt_cex4_remove,
-	.ids = zcrypt_cex4_ids,
-	.request_timeout = CEX4_CLEANUP_TIME,
-};
+static struct ap_device_id zcrypt_cex4_card_ids[] = {
+	{ .dev_type = AP_DEVICE_TYPE_CEX4,
+	  .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
+	{ .dev_type = AP_DEVICE_TYPE_CEX5,
+	  .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
+	{ /* end of list */ },
+};
+MODULE_DEVICE_TABLE(ap, zcrypt_cex4_card_ids);
+
+static struct ap_device_id zcrypt_cex4_queue_ids[] = {
+	{ .dev_type = AP_DEVICE_TYPE_CEX4,
+	  .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+	{ .dev_type = AP_DEVICE_TYPE_CEX5,
+	  .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+	{ /* end of list */ },
+};
+MODULE_DEVICE_TABLE(ap, zcrypt_cex4_queue_ids);
 
 /**
- * Probe function for CEX4 cards. It always accepts the AP device
+ * Probe function for CEX4 card device. It always accepts the AP device
  * since the bus_match already checked the hardware type.
  * @ap_dev: pointer to the AP device.
  */
static int zcrypt_cex4_probe(struct ap_device *ap_dev) static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
{ {
struct zcrypt_device *zdev = NULL; /*
* Normalized speed ratings per crypto adapter
* MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY
*/
static const int CEX4A_SPEED_IDX[] = {
5, 6, 59, 20, 115, 581, 0, 0};
static const int CEX5A_SPEED_IDX[] = {
3, 3, 6, 8, 32, 218, 0, 0};
static const int CEX4C_SPEED_IDX[] = {
24, 25, 82, 41, 138, 1111, 79, 8};
static const int CEX5C_SPEED_IDX[] = {
10, 14, 23, 17, 45, 242, 63, 4};
static const int CEX4P_SPEED_IDX[] = {
142, 198, 1852, 203, 331, 1563, 0, 8};
static const int CEX5P_SPEED_IDX[] = {
49, 67, 131, 52, 85, 287, 0, 4};
struct ap_card *ac = to_ap_card(&ap_dev->device);
struct zcrypt_card *zc;
int rc = 0; int rc = 0;
switch (ap_dev->device_type) { zc = zcrypt_card_alloc();
case AP_DEVICE_TYPE_CEX4: if (!zc)
case AP_DEVICE_TYPE_CEX5: return -ENOMEM;
if (ap_test_bit(&ap_dev->functions, AP_FUNC_ACCEL)) { zc->card = ac;
zdev = zcrypt_device_alloc(CEX4A_MAX_MESSAGE_SIZE); ac->private = zc;
if (!zdev) if (ap_test_bit(&ac->functions, AP_FUNC_ACCEL)) {
return -ENOMEM; if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) {
if (ap_dev->device_type == AP_DEVICE_TYPE_CEX4) { zc->type_string = "CEX4A";
zdev->type_string = "CEX4A"; zc->user_space_type = ZCRYPT_CEX4;
zdev->speed_rating = CEX4A_SPEED_RATING; memcpy(zc->speed_rating, CEX4A_SPEED_IDX,
} else { sizeof(CEX4A_SPEED_IDX));
zdev->type_string = "CEX5A"; } else {
zdev->speed_rating = CEX5A_SPEED_RATING; zc->type_string = "CEX5A";
} zc->user_space_type = ZCRYPT_CEX5;
zdev->user_space_type = ZCRYPT_CEX3A; memcpy(zc->speed_rating, CEX5A_SPEED_IDX,
zdev->min_mod_size = CEX4A_MIN_MOD_SIZE; sizeof(CEX5A_SPEED_IDX));
if (ap_test_bit(&ap_dev->functions, AP_FUNC_MEX4K) &&
ap_test_bit(&ap_dev->functions, AP_FUNC_CRT4K)) {
zdev->max_mod_size =
CEX4A_MAX_MOD_SIZE_4K;
zdev->max_exp_bit_length =
CEX4A_MAX_MOD_SIZE_4K;
} else {
zdev->max_mod_size =
CEX4A_MAX_MOD_SIZE_2K;
zdev->max_exp_bit_length =
CEX4A_MAX_MOD_SIZE_2K;
}
zdev->short_crt = 1;
zdev->ops = zcrypt_msgtype_request(MSGTYPE50_NAME,
MSGTYPE50_VARIANT_DEFAULT);
} else if (ap_test_bit(&ap_dev->functions, AP_FUNC_COPRO)) {
zdev = zcrypt_device_alloc(CEX4C_MAX_MESSAGE_SIZE);
if (!zdev)
return -ENOMEM;
if (ap_dev->device_type == AP_DEVICE_TYPE_CEX4) {
zdev->type_string = "CEX4C";
zdev->speed_rating = CEX4C_SPEED_RATING;
} else {
zdev->type_string = "CEX5C";
zdev->speed_rating = CEX5C_SPEED_RATING;
}
zdev->user_space_type = ZCRYPT_CEX3C;
zdev->min_mod_size = CEX4C_MIN_MOD_SIZE;
zdev->max_mod_size = CEX4C_MAX_MOD_SIZE;
zdev->max_exp_bit_length = CEX4C_MAX_MOD_SIZE;
zdev->short_crt = 0;
zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME,
MSGTYPE06_VARIANT_DEFAULT);
} else if (ap_test_bit(&ap_dev->functions, AP_FUNC_EP11)) {
zdev = zcrypt_device_alloc(CEX4C_MAX_MESSAGE_SIZE);
if (!zdev)
return -ENOMEM;
if (ap_dev->device_type == AP_DEVICE_TYPE_CEX4) {
zdev->type_string = "CEX4P";
zdev->speed_rating = CEX4P_SPEED_RATING;
} else {
zdev->type_string = "CEX5P";
zdev->speed_rating = CEX5P_SPEED_RATING;
}
zdev->user_space_type = ZCRYPT_CEX4;
zdev->min_mod_size = CEX4C_MIN_MOD_SIZE;
zdev->max_mod_size = CEX4C_MAX_MOD_SIZE;
zdev->max_exp_bit_length = CEX4C_MAX_MOD_SIZE;
zdev->short_crt = 0;
zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME,
MSGTYPE06_VARIANT_EP11);
} }
break; zc->min_mod_size = CEX4A_MIN_MOD_SIZE;
} if (ap_test_bit(&ac->functions, AP_FUNC_MEX4K) &&
if (!zdev) ap_test_bit(&ac->functions, AP_FUNC_CRT4K)) {
zc->max_mod_size = CEX4A_MAX_MOD_SIZE_4K;
zc->max_exp_bit_length =
CEX4A_MAX_MOD_SIZE_4K;
} else {
zc->max_mod_size = CEX4A_MAX_MOD_SIZE_2K;
zc->max_exp_bit_length =
CEX4A_MAX_MOD_SIZE_2K;
}
} else if (ap_test_bit(&ac->functions, AP_FUNC_COPRO)) {
if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) {
zc->type_string = "CEX4C";
/* wrong user space type, must be CEX4
* just keep it for cca compatibility
*/
zc->user_space_type = ZCRYPT_CEX3C;
memcpy(zc->speed_rating, CEX4C_SPEED_IDX,
sizeof(CEX4C_SPEED_IDX));
} else {
zc->type_string = "CEX5C";
/* wrong user space type, must be CEX5
* just keep it for cca compatibility
*/
zc->user_space_type = ZCRYPT_CEX3C;
memcpy(zc->speed_rating, CEX5C_SPEED_IDX,
sizeof(CEX5C_SPEED_IDX));
}
zc->min_mod_size = CEX4C_MIN_MOD_SIZE;
zc->max_mod_size = CEX4C_MAX_MOD_SIZE;
zc->max_exp_bit_length = CEX4C_MAX_MOD_SIZE;
} else if (ap_test_bit(&ac->functions, AP_FUNC_EP11)) {
if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) {
zc->type_string = "CEX4P";
zc->user_space_type = ZCRYPT_CEX4;
memcpy(zc->speed_rating, CEX4P_SPEED_IDX,
sizeof(CEX4P_SPEED_IDX));
} else {
zc->type_string = "CEX5P";
zc->user_space_type = ZCRYPT_CEX5;
memcpy(zc->speed_rating, CEX5P_SPEED_IDX,
sizeof(CEX5P_SPEED_IDX));
}
zc->min_mod_size = CEX4C_MIN_MOD_SIZE;
zc->max_mod_size = CEX4C_MAX_MOD_SIZE;
zc->max_exp_bit_length = CEX4C_MAX_MOD_SIZE;
} else {
zcrypt_card_free(zc);
return -ENODEV; return -ENODEV;
zdev->ap_dev = ap_dev; }
zdev->online = 1; zc->online = 1;
ap_device_init_reply(ap_dev, &zdev->reply);
ap_dev->private = zdev; rc = zcrypt_card_register(zc);
rc = zcrypt_device_register(zdev);
if (rc) { if (rc) {
zcrypt_msgtype_release(zdev->ops); ac->private = NULL;
ap_dev->private = NULL; zcrypt_card_free(zc);
zcrypt_device_free(zdev);
} }
return rc; return rc;
} }
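The scalar *_SPEED_RATING defines are replaced above by eight-entry speed index arrays, one slot per request class (MEX_1k through SECKEY). How the request distributor combines these indices with the per-queue load counter is not part of this hunk; the sketch below is only a guess at the general idea (weight each online queue by (load + 1) times the speed index of the requested class and pick the smallest), and every name in it is invented for the example.

/* Illustration only: possible use of the per-class speed indices. */
#include <stdio.h>

#define NUM_OPS 8	/* MEX_1k .. SECKEY, as in the speed index arrays */

struct demo_queue {
	const char *name;
	int speed[NUM_OPS];	/* lower is faster, like CEX4A_SPEED_IDX */
	int load;		/* outstanding requests */
};

static const struct demo_queue *pick_queue(const struct demo_queue *q,
					   int n, int op)
{
	const struct demo_queue *best = NULL;
	long best_w = -1;

	for (int i = 0; i < n; i++) {
		/* weight = relative cost of one more request on this queue */
		long w = (long)(q[i].load + 1) * q[i].speed[op];

		if (!best || w < best_w) {
			best = &q[i];
			best_w = w;
		}
	}
	return best;
}

int main(void)
{
	struct demo_queue queues[] = {
		{ "CEX4A", { 5, 6, 59, 20, 115, 581, 0, 0 }, 3 },
		{ "CEX5A", { 3, 3,  6,  8,  32, 218, 0, 0 }, 9 },
	};

	/* op 2 == MEX_4k in the ordering documented above the arrays */
	printf("picked %s\n", pick_queue(queues, 2, 2)->name);
	return 0;
}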
/** /**
* This is called to remove the extended CEX4 driver information * This is called to remove the CEX4 card driver information
* if an AP device is removed. * if an AP card device is removed.
*/
static void zcrypt_cex4_card_remove(struct ap_device *ap_dev)
{
struct zcrypt_card *zc = to_ap_card(&ap_dev->device)->private;
if (zc)
zcrypt_card_unregister(zc);
}
static struct ap_driver zcrypt_cex4_card_driver = {
.probe = zcrypt_cex4_card_probe,
.remove = zcrypt_cex4_card_remove,
.ids = zcrypt_cex4_card_ids,
};
/**
* Probe function for CEX4 queue device. It always accepts the AP device
* since the bus_match already checked the hardware type.
* @ap_dev: pointer to the AP device.
*/ */
static void zcrypt_cex4_remove(struct ap_device *ap_dev) static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
{ {
struct zcrypt_device *zdev = ap_dev->private; struct ap_queue *aq = to_ap_queue(&ap_dev->device);
struct zcrypt_ops *zops; struct zcrypt_queue *zq;
int rc;
if (zdev) { if (ap_test_bit(&aq->card->functions, AP_FUNC_ACCEL)) {
zops = zdev->ops; zq = zcrypt_queue_alloc(CEX4A_MAX_MESSAGE_SIZE);
zcrypt_device_unregister(zdev); if (!zq)
zcrypt_msgtype_release(zops); return -ENOMEM;
zq->ops = zcrypt_msgtype(MSGTYPE50_NAME,
MSGTYPE50_VARIANT_DEFAULT);
} else if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO)) {
zq = zcrypt_queue_alloc(CEX4C_MAX_MESSAGE_SIZE);
if (!zq)
return -ENOMEM;
zq->ops = zcrypt_msgtype(MSGTYPE06_NAME,
MSGTYPE06_VARIANT_DEFAULT);
} else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11)) {
zq = zcrypt_queue_alloc(CEX4C_MAX_MESSAGE_SIZE);
if (!zq)
return -ENOMEM;
zq->ops = zcrypt_msgtype(MSGTYPE06_NAME,
MSGTYPE06_VARIANT_EP11);
} else {
return -ENODEV;
} }
zq->queue = aq;
zq->online = 1;
atomic_set(&zq->load, 0);
ap_queue_init_reply(aq, &zq->reply);
aq->request_timeout = CEX4_CLEANUP_TIME,
aq->private = zq;
rc = zcrypt_queue_register(zq);
if (rc) {
aq->private = NULL;
zcrypt_queue_free(zq);
}
return rc;
}
/**
* This is called to remove the CEX4 queue driver information
* if an AP queue device is removed.
*/
static void zcrypt_cex4_queue_remove(struct ap_device *ap_dev)
{
struct ap_queue *aq = to_ap_queue(&ap_dev->device);
struct zcrypt_queue *zq = aq->private;
ap_queue_remove(aq);
if (zq)
zcrypt_queue_unregister(zq);
} }
static struct ap_driver zcrypt_cex4_queue_driver = {
.probe = zcrypt_cex4_queue_probe,
.remove = zcrypt_cex4_queue_remove,
.suspend = ap_queue_suspend,
.resume = ap_queue_resume,
.ids = zcrypt_cex4_queue_ids,
};
int __init zcrypt_cex4_init(void) int __init zcrypt_cex4_init(void)
{ {
return ap_driver_register(&zcrypt_cex4_driver, THIS_MODULE, "cex4"); int rc;
rc = ap_driver_register(&zcrypt_cex4_card_driver,
THIS_MODULE, "cex4card");
if (rc)
return rc;
rc = ap_driver_register(&zcrypt_cex4_queue_driver,
THIS_MODULE, "cex4queue");
if (rc)
ap_driver_unregister(&zcrypt_cex4_card_driver);
return rc;
} }
void __exit zcrypt_cex4_exit(void) void __exit zcrypt_cex4_exit(void)
{ {
ap_driver_unregister(&zcrypt_cex4_driver); ap_driver_unregister(&zcrypt_cex4_queue_driver);
ap_driver_unregister(&zcrypt_cex4_card_driver);
} }
module_init(zcrypt_cex4_init); module_init(zcrypt_cex4_init);
......
 /*
- * Copyright IBM Corp. 2012
+ * Copyright IBM Corp. 2016
  * Author(s): Holger Dengler (hd@linux.vnet.ibm.com)
+ *	      Harald Freudenberger <freude@de.ibm.com>
  */
 #ifndef ZCRYPT_DEBUG_H
 #define ZCRYPT_DEBUG_H
 
 #include <asm/debug.h>
+#include "zcrypt_api.h"
 
-/* that gives us 15 characters in the text event views */
-#define ZCRYPT_DBF_LEN	16
-
-#define DBF_ERR		3	/* error conditions	*/
-#define DBF_WARN	4	/* warning conditions	*/
-#define DBF_INFO	6	/* informational	*/
+#define DBF_ERR		3	/* error conditions	*/
+#define DBF_WARN	4	/* warning conditions	*/
+#define DBF_INFO	5	/* informational	*/
+#define DBF_DEBUG	6	/* for debugging only	*/
 
-#define RC2ERR(rc) ((rc) ? DBF_ERR : DBF_INFO)
 #define RC2WARN(rc) ((rc) ? DBF_WARN : DBF_INFO)
 
-#define ZCRYPT_DBF_COMMON(level, text...) \
-	do { \
-		if (debug_level_enabled(zcrypt_dbf_common, level)) { \
-			char debug_buffer[ZCRYPT_DBF_LEN]; \
-			snprintf(debug_buffer, ZCRYPT_DBF_LEN, text); \
-			debug_text_event(zcrypt_dbf_common, level, \
-					 debug_buffer); \
-		} \
-	} while (0)
-
-#define ZCRYPT_DBF_DEVICES(level, text...) \
-	do { \
-		if (debug_level_enabled(zcrypt_dbf_devices, level)) { \
-			char debug_buffer[ZCRYPT_DBF_LEN]; \
-			snprintf(debug_buffer, ZCRYPT_DBF_LEN, text); \
-			debug_text_event(zcrypt_dbf_devices, level, \
-					 debug_buffer); \
-		} \
-	} while (0)
-
-#define ZCRYPT_DBF_DEV(level, device, text...) \
-	do { \
-		if (debug_level_enabled(device->dbf_area, level)) { \
-			char debug_buffer[ZCRYPT_DBF_LEN]; \
-			snprintf(debug_buffer, ZCRYPT_DBF_LEN, text); \
-			debug_text_event(device->dbf_area, level, \
-					 debug_buffer); \
-		} \
-	} while (0)
+#define DBF_MAX_SPRINTF_ARGS 5
+
+#define ZCRYPT_DBF(...)					\
+	debug_sprintf_event(zcrypt_dbf_info, ##__VA_ARGS__)
+
+extern debug_info_t *zcrypt_dbf_info;
 
 int zcrypt_debug_init(void);
 void zcrypt_debug_exit(void);
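The per-device text debug areas (ZCRYPT_DBF_COMMON/_DEVICES/_DEV with fixed 16-byte records) collapse into a single sprintf-based area written through ZCRYPT_DBF(). A sketch of how such an area is typically created with the s390 debug feature follows; the page count, default level and record size are assumptions made for the example, not values taken from this patch.

/* Sketch: creating the debug area that ZCRYPT_DBF() writes to. */
#include <linux/errno.h>
#include <asm/debug.h>

debug_info_t *zcrypt_dbf_info;

int zcrypt_debug_init(void)
{
	/* 1 page, 1 area, records sized for DBF_MAX_SPRINTF_ARGS arguments */
	zcrypt_dbf_info = debug_register("zcrypt", 1, 1,
					 DBF_MAX_SPRINTF_ARGS * sizeof(long));
	if (!zcrypt_dbf_info)
		return -ENOMEM;
	debug_register_view(zcrypt_dbf_info, &debug_sprintf_view);
	debug_set_level(zcrypt_dbf_info, DBF_ERR);
	return 0;
}

void zcrypt_debug_exit(void)
{
	debug_unregister(zcrypt_dbf_info);
}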
......
...@@ -55,52 +55,61 @@ struct error_hdr { ...@@ -55,52 +55,61 @@ struct error_hdr {
#define TYPE82_RSP_CODE 0x82 #define TYPE82_RSP_CODE 0x82
#define TYPE88_RSP_CODE 0x88 #define TYPE88_RSP_CODE 0x88
 #define REP82_ERROR_MACHINE_FAILURE	0x10
 #define REP82_ERROR_PREEMPT_FAILURE	0x12
 #define REP82_ERROR_CHECKPT_FAILURE	0x14
 #define REP82_ERROR_MESSAGE_TYPE	0x20
 #define REP82_ERROR_INVALID_COMM_CD	0x21	/* Type 84	*/
 #define REP82_ERROR_INVALID_MSG_LEN	0x23
 #define REP82_ERROR_RESERVD_FIELD	0x24	/* was 0x50	*/
 #define REP82_ERROR_FORMAT_FIELD	0x29
 #define REP82_ERROR_INVALID_COMMAND	0x30
 #define REP82_ERROR_MALFORMED_MSG	0x40
+#define REP82_ERROR_INVALID_DOMAIN_PRECHECK	0x42
 #define REP82_ERROR_RESERVED_FIELDO	0x50	/* old value	*/
 #define REP82_ERROR_WORD_ALIGNMENT	0x60
 #define REP82_ERROR_MESSAGE_LENGTH	0x80
 #define REP82_ERROR_OPERAND_INVALID	0x82
 #define REP82_ERROR_OPERAND_SIZE	0x84
 #define REP82_ERROR_EVEN_MOD_IN_OPND	0x85
 #define REP82_ERROR_RESERVED_FIELD	0x88
+#define REP82_ERROR_INVALID_DOMAIN_PENDING	0x8A
 #define REP82_ERROR_TRANSPORT_FAIL	0x90
 #define REP82_ERROR_PACKET_TRUNCATED	0xA0
 #define REP82_ERROR_ZERO_BUFFER_LEN	0xB0
 
 #define REP88_ERROR_MODULE_FAILURE	0x10
 #define REP88_ERROR_MESSAGE_TYPE	0x20
 #define REP88_ERROR_MESSAGE_MALFORMD	0x22
 #define REP88_ERROR_MESSAGE_LENGTH	0x23
 #define REP88_ERROR_RESERVED_FIELD	0x24
 #define REP88_ERROR_KEY_TYPE		0x34
 #define REP88_ERROR_INVALID_KEY	0x82	/* CEX2A	*/
 #define REP88_ERROR_OPERAND		0x84	/* CEX2A	*/
 #define REP88_ERROR_OPERAND_EVEN_MOD	0x85	/* CEX2A	*/
-static inline int convert_error(struct zcrypt_device *zdev,
+static inline int convert_error(struct zcrypt_queue *zq,
 				struct ap_message *reply)
 {
 	struct error_hdr *ehdr = reply->message;
+	int card = AP_QID_CARD(zq->queue->qid);
+	int queue = AP_QID_QUEUE(zq->queue->qid);
 
 	switch (ehdr->reply_code) {
 	case REP82_ERROR_OPERAND_INVALID:
 	case REP82_ERROR_OPERAND_SIZE:
 	case REP82_ERROR_EVEN_MOD_IN_OPND:
 	case REP88_ERROR_MESSAGE_MALFORMD:
+	case REP82_ERROR_INVALID_DOMAIN_PRECHECK:
+	case REP82_ERROR_INVALID_DOMAIN_PENDING:
 	//   REP88_ERROR_INVALID_KEY		// '82' CEX2A
 	//   REP88_ERROR_OPERAND		// '84' CEX2A
 	//   REP88_ERROR_OPERAND_EVEN_MOD	// '85' CEX2A
 		/* Invalid input data. */
+		ZCRYPT_DBF(DBF_WARN,
+			   "device=%02x.%04x reply=0x%02x => rc=EINVAL\n",
+			   card, queue, ehdr->reply_code);
 		return -EINVAL;
case REP82_ERROR_MESSAGE_TYPE: case REP82_ERROR_MESSAGE_TYPE:
// REP88_ERROR_MESSAGE_TYPE // '20' CEX2A // REP88_ERROR_MESSAGE_TYPE // '20' CEX2A
...@@ -110,32 +119,32 @@ static inline int convert_error(struct zcrypt_device *zdev, ...@@ -110,32 +119,32 @@ static inline int convert_error(struct zcrypt_device *zdev,
* and then repeat the request. * and then repeat the request.
*/ */
atomic_set(&zcrypt_rescan_req, 1); atomic_set(&zcrypt_rescan_req, 1);
zdev->online = 0; zq->online = 0;
pr_err("Cryptographic device %x failed and was set offline\n", pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
AP_QID_DEVICE(zdev->ap_dev->qid)); card, queue);
ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", ZCRYPT_DBF(DBF_ERR,
AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online, "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n",
ehdr->reply_code); card, queue, ehdr->reply_code);
return -EAGAIN; return -EAGAIN;
case REP82_ERROR_TRANSPORT_FAIL: case REP82_ERROR_TRANSPORT_FAIL:
case REP82_ERROR_MACHINE_FAILURE: case REP82_ERROR_MACHINE_FAILURE:
// REP88_ERROR_MODULE_FAILURE // '10' CEX2A // REP88_ERROR_MODULE_FAILURE // '10' CEX2A
/* If a card fails disable it and repeat the request. */ /* If a card fails disable it and repeat the request. */
atomic_set(&zcrypt_rescan_req, 1); atomic_set(&zcrypt_rescan_req, 1);
zdev->online = 0; zq->online = 0;
pr_err("Cryptographic device %x failed and was set offline\n", pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
AP_QID_DEVICE(zdev->ap_dev->qid)); card, queue);
ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", ZCRYPT_DBF(DBF_ERR,
AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online, "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n",
ehdr->reply_code); card, queue, ehdr->reply_code);
return -EAGAIN; return -EAGAIN;
default: default:
zdev->online = 0; zq->online = 0;
pr_err("Cryptographic device %x failed and was set offline\n", pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
AP_QID_DEVICE(zdev->ap_dev->qid)); card, queue);
ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", ZCRYPT_DBF(DBF_ERR,
AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online, "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n",
ehdr->reply_code); card, queue, ehdr->reply_code);
return -EAGAIN; /* repeat the request on a different device. */ return -EAGAIN; /* repeat the request on a different device. */
} }
} }
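convert_error() distinguishes two outcomes: -EINVAL for requests the caller got wrong (now including the two new invalid-domain reply codes), and -EAGAIN after the queue has been flagged offline, so the same request can be retried on another device. The retry itself lives in the request distributor in zcrypt_api.c, which is outside this hunk; the standalone loop below is only an illustration of that policy, with all types and helper names invented.

/* Illustration only: how -EAGAIN vs. -EINVAL is typically acted upon. */
#include <errno.h>
#include <stdio.h>

struct demo_queue { int online; };

/* stand-in for a msgtype backend call such as a modexpo request */
static long demo_backend(struct demo_queue *zq, int attempt)
{
	if (attempt == 0) {
		zq->online = 0;		/* device failed: convert_error() */
		return -EAGAIN;		/* says "retry on another device" */
	}
	return 0;			/* second device succeeds */
}

int main(void)
{
	struct demo_queue queues[2] = { { 1 }, { 1 } };
	long rc = -EAGAIN;
	int i;

	for (i = 0; i < 2 && rc == -EAGAIN; i++) {
		if (!queues[i].online)
			continue;
		rc = demo_backend(&queues[i], i);
	}
	/* -EINVAL (bad input) would go back to the caller unchanged */
	printf("rc=%ld after %d attempt(s)\n", rc, i);
	return 0;
}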
......
...@@ -53,9 +53,6 @@ MODULE_DESCRIPTION("Cryptographic Accelerator (message type 50), " \ ...@@ -53,9 +53,6 @@ MODULE_DESCRIPTION("Cryptographic Accelerator (message type 50), " \
"Copyright IBM Corp. 2001, 2012"); "Copyright IBM Corp. 2001, 2012");
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
static void zcrypt_cex2a_receive(struct ap_device *, struct ap_message *,
struct ap_message *);
/** /**
* The type 50 message family is associated with a CEX2A card. * The type 50 message family is associated with a CEX2A card.
* *
...@@ -173,16 +170,48 @@ struct type80_hdr { ...@@ -173,16 +170,48 @@ struct type80_hdr {
unsigned char reserved3[8]; unsigned char reserved3[8];
} __packed; } __packed;
unsigned int get_rsa_modex_fc(struct ica_rsa_modexpo *mex, int *fcode)
{
if (!mex->inputdatalength)
return -EINVAL;
if (mex->inputdatalength <= 128) /* 1024 bit */
*fcode = MEX_1K;
else if (mex->inputdatalength <= 256) /* 2048 bit */
*fcode = MEX_2K;
else /* 4096 bit */
*fcode = MEX_4K;
return 0;
}
unsigned int get_rsa_crt_fc(struct ica_rsa_modexpo_crt *crt, int *fcode)
{
if (!crt->inputdatalength)
return -EINVAL;
if (crt->inputdatalength <= 128) /* 1024 bit */
*fcode = CRT_1K;
else if (crt->inputdatalength <= 256) /* 2048 bit */
*fcode = CRT_2K;
else /* 4096 bit */
*fcode = CRT_4K;
return 0;
}
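get_rsa_modex_fc() and get_rsa_crt_fc() translate the modulus length of a request into a function code (MEX_1K/2K/4K, CRT_1K/2K/4K). Presumably these codes index the eight-entry speed arrays set up by the CEX4 card probe earlier; the enum values, their ordering and the lookup in the sketch below are assumptions made for the example.

/* Illustration only: key size to function code to speed index lookup. */
#include <stdio.h>

/* assumed ordering: MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY */
enum { MEX_1K, MEX_2K, MEX_4K, CRT_1K, CRT_2K, CRT_4K, RNG_FC, SECKEY_FC };

static int demo_rsa_modex_fc(unsigned int inputdatalength)
{
	if (inputdatalength <= 128)		/* 1024 bit */
		return MEX_1K;
	if (inputdatalength <= 256)		/* 2048 bit */
		return MEX_2K;
	return MEX_4K;				/* 4096 bit */
}

int main(void)
{
	/* CEX5A row from the card probe function above */
	static const int cex5a_speed[] = { 3, 3, 6, 8, 32, 218, 0, 0 };
	int fc = demo_rsa_modex_fc(512);	/* 4096-bit modulus */

	printf("fc=%d speed=%d\n", fc, cex5a_speed[fc]);
	return 0;
}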
/** /**
* Convert a ICAMEX message to a type50 MEX message. * Convert a ICAMEX message to a type50 MEX message.
* *
* @zdev: crypto device pointer * @zq: crypto queue pointer
* @zreq: crypto request pointer * @ap_msg: crypto request pointer
* @mex: pointer to user input data * @mex: pointer to user input data
* *
* Returns 0 on success or -EFAULT. * Returns 0 on success or -EFAULT.
*/ */
static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_device *zdev, static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_queue *zq,
struct ap_message *ap_msg, struct ap_message *ap_msg,
struct ica_rsa_modexpo *mex) struct ica_rsa_modexpo *mex)
{ {
...@@ -234,13 +263,13 @@ static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_device *zdev, ...@@ -234,13 +263,13 @@ static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_device *zdev,
/** /**
* Convert a ICACRT message to a type50 CRT message. * Convert a ICACRT message to a type50 CRT message.
* *
* @zdev: crypto device pointer * @zq: crypto queue pointer
* @zreq: crypto request pointer * @ap_msg: crypto request pointer
* @crt: pointer to user input data * @crt: pointer to user input data
* *
* Returns 0 on success or -EFAULT. * Returns 0 on success or -EFAULT.
*/ */
static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev, static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_queue *zq,
struct ap_message *ap_msg, struct ap_message *ap_msg,
struct ica_rsa_modexpo_crt *crt) struct ica_rsa_modexpo_crt *crt)
{ {
...@@ -283,7 +312,7 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev, ...@@ -283,7 +312,7 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev,
u = crb2->u + sizeof(crb2->u) - short_len; u = crb2->u + sizeof(crb2->u) - short_len;
inp = crb2->message + sizeof(crb2->message) - mod_len; inp = crb2->message + sizeof(crb2->message) - mod_len;
} else if ((mod_len <= 512) && /* up to 4096 bit key size */ } else if ((mod_len <= 512) && /* up to 4096 bit key size */
(zdev->max_mod_size == CEX3A_MAX_MOD_SIZE)) { /* >= CEX3A */ (zq->zcard->max_mod_size == CEX3A_MAX_MOD_SIZE)) {
struct type50_crb3_msg *crb3 = ap_msg->message; struct type50_crb3_msg *crb3 = ap_msg->message;
memset(crb3, 0, sizeof(*crb3)); memset(crb3, 0, sizeof(*crb3));
ap_msg->length = sizeof(*crb3); ap_msg->length = sizeof(*crb3);
...@@ -317,14 +346,14 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev, ...@@ -317,14 +346,14 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev,
/** /**
* Copy results from a type 80 reply message back to user space. * Copy results from a type 80 reply message back to user space.
* *
* @zdev: crypto device pointer * @zq: crypto device pointer
* @reply: reply AP message. * @reply: reply AP message.
* @data: pointer to user output data * @data: pointer to user output data
* @length: size of user output data * @length: size of user output data
* *
* Returns 0 on success or -EFAULT. * Returns 0 on success or -EFAULT.
*/ */
static int convert_type80(struct zcrypt_device *zdev, static int convert_type80(struct zcrypt_queue *zq,
struct ap_message *reply, struct ap_message *reply,
char __user *outputdata, char __user *outputdata,
unsigned int outputdatalength) unsigned int outputdatalength)
...@@ -334,16 +363,18 @@ static int convert_type80(struct zcrypt_device *zdev, ...@@ -334,16 +363,18 @@ static int convert_type80(struct zcrypt_device *zdev,
if (t80h->len < sizeof(*t80h) + outputdatalength) { if (t80h->len < sizeof(*t80h) + outputdatalength) {
/* The result is too short, the CEX2A card may not do that.. */ /* The result is too short, the CEX2A card may not do that.. */
zdev->online = 0; zq->online = 0;
pr_err("Cryptographic device %x failed and was set offline\n", pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
AP_QID_DEVICE(zdev->ap_dev->qid)); AP_QID_CARD(zq->queue->qid),
ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", AP_QID_QUEUE(zq->queue->qid));
AP_QID_DEVICE(zdev->ap_dev->qid), ZCRYPT_DBF(DBF_ERR,
zdev->online, t80h->code); "device=%02x.%04x code=0x%02x => online=0 rc=EAGAIN\n",
AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid),
t80h->code);
return -EAGAIN; /* repeat the request on a different device. */ return -EAGAIN; /* repeat the request on a different device. */
} }
if (zdev->user_space_type == ZCRYPT_CEX2A) if (zq->zcard->user_space_type == ZCRYPT_CEX2A)
BUG_ON(t80h->len > CEX2A_MAX_RESPONSE_SIZE); BUG_ON(t80h->len > CEX2A_MAX_RESPONSE_SIZE);
else else
BUG_ON(t80h->len > CEX3A_MAX_RESPONSE_SIZE); BUG_ON(t80h->len > CEX3A_MAX_RESPONSE_SIZE);
...@@ -353,25 +384,31 @@ static int convert_type80(struct zcrypt_device *zdev, ...@@ -353,25 +384,31 @@ static int convert_type80(struct zcrypt_device *zdev,
return 0; return 0;
} }
static int convert_response(struct zcrypt_device *zdev, static int convert_response(struct zcrypt_queue *zq,
struct ap_message *reply, struct ap_message *reply,
char __user *outputdata, char __user *outputdata,
unsigned int outputdatalength) unsigned int outputdatalength)
{ {
/* Response type byte is the second byte in the response. */ /* Response type byte is the second byte in the response. */
switch (((unsigned char *) reply->message)[1]) { unsigned char rtype = ((unsigned char *) reply->message)[1];
switch (rtype) {
case TYPE82_RSP_CODE: case TYPE82_RSP_CODE:
case TYPE88_RSP_CODE: case TYPE88_RSP_CODE:
return convert_error(zdev, reply); return convert_error(zq, reply);
case TYPE80_RSP_CODE: case TYPE80_RSP_CODE:
return convert_type80(zdev, reply, return convert_type80(zq, reply,
outputdata, outputdatalength); outputdata, outputdatalength);
default: /* Unknown response type, this should NEVER EVER happen */ default: /* Unknown response type, this should NEVER EVER happen */
zdev->online = 0; zq->online = 0;
pr_err("Cryptographic device %x failed and was set offline\n", pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
AP_QID_DEVICE(zdev->ap_dev->qid)); AP_QID_CARD(zq->queue->qid),
ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", AP_QID_QUEUE(zq->queue->qid));
AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online); ZCRYPT_DBF(DBF_ERR,
"device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid),
(unsigned int) rtype);
return -EAGAIN; /* repeat the request on a different device. */ return -EAGAIN; /* repeat the request on a different device. */
} }
} }
...@@ -380,11 +417,11 @@ static int convert_response(struct zcrypt_device *zdev, ...@@ -380,11 +417,11 @@ static int convert_response(struct zcrypt_device *zdev,
* This function is called from the AP bus code after a crypto request * This function is called from the AP bus code after a crypto request
* "msg" has finished with the reply message "reply". * "msg" has finished with the reply message "reply".
* It is called from tasklet context. * It is called from tasklet context.
* @ap_dev: pointer to the AP device * @aq: pointer to the AP device
* @msg: pointer to the AP message * @msg: pointer to the AP message
* @reply: pointer to the AP reply message * @reply: pointer to the AP reply message
*/ */
static void zcrypt_cex2a_receive(struct ap_device *ap_dev, static void zcrypt_cex2a_receive(struct ap_queue *aq,
struct ap_message *msg, struct ap_message *msg,
struct ap_message *reply) struct ap_message *reply)
{ {
...@@ -400,7 +437,7 @@ static void zcrypt_cex2a_receive(struct ap_device *ap_dev, ...@@ -400,7 +437,7 @@ static void zcrypt_cex2a_receive(struct ap_device *ap_dev,
goto out; /* ap_msg->rc indicates the error */ goto out; /* ap_msg->rc indicates the error */
t80h = reply->message; t80h = reply->message;
if (t80h->type == TYPE80_RSP_CODE) { if (t80h->type == TYPE80_RSP_CODE) {
if (ap_dev->device_type == AP_DEVICE_TYPE_CEX2A) if (aq->ap_dev.device_type == AP_DEVICE_TYPE_CEX2A)
length = min_t(int, length = min_t(int,
CEX2A_MAX_RESPONSE_SIZE, t80h->len); CEX2A_MAX_RESPONSE_SIZE, t80h->len);
else else
...@@ -418,11 +455,11 @@ static atomic_t zcrypt_step = ATOMIC_INIT(0); ...@@ -418,11 +455,11 @@ static atomic_t zcrypt_step = ATOMIC_INIT(0);
/** /**
* The request distributor calls this function if it picked the CEX2A * The request distributor calls this function if it picked the CEX2A
* device to handle a modexpo request. * device to handle a modexpo request.
* @zdev: pointer to zcrypt_device structure that identifies the * @zq: pointer to zcrypt_queue structure that identifies the
* CEX2A device to the request distributor * CEX2A device to the request distributor
* @mex: pointer to the modexpo request buffer * @mex: pointer to the modexpo request buffer
*/ */
static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev, static long zcrypt_cex2a_modexpo(struct zcrypt_queue *zq,
struct ica_rsa_modexpo *mex) struct ica_rsa_modexpo *mex)
{ {
struct ap_message ap_msg; struct ap_message ap_msg;
...@@ -430,7 +467,7 @@ static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev, ...@@ -430,7 +467,7 @@ static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev,
int rc; int rc;
ap_init_message(&ap_msg); ap_init_message(&ap_msg);
if (zdev->user_space_type == ZCRYPT_CEX2A) if (zq->zcard->user_space_type == ZCRYPT_CEX2A)
ap_msg.message = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE, ap_msg.message = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE,
GFP_KERNEL); GFP_KERNEL);
else else
...@@ -442,20 +479,20 @@ static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev, ...@@ -442,20 +479,20 @@ static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev,
ap_msg.psmid = (((unsigned long long) current->pid) << 32) + ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
atomic_inc_return(&zcrypt_step); atomic_inc_return(&zcrypt_step);
ap_msg.private = &work; ap_msg.private = &work;
rc = ICAMEX_msg_to_type50MEX_msg(zdev, &ap_msg, mex); rc = ICAMEX_msg_to_type50MEX_msg(zq, &ap_msg, mex);
if (rc) if (rc)
goto out_free; goto out_free;
init_completion(&work); init_completion(&work);
ap_queue_message(zdev->ap_dev, &ap_msg); ap_queue_message(zq->queue, &ap_msg);
rc = wait_for_completion_interruptible(&work); rc = wait_for_completion_interruptible(&work);
if (rc == 0) { if (rc == 0) {
rc = ap_msg.rc; rc = ap_msg.rc;
if (rc == 0) if (rc == 0)
rc = convert_response(zdev, &ap_msg, mex->outputdata, rc = convert_response(zq, &ap_msg, mex->outputdata,
mex->outputdatalength); mex->outputdatalength);
} else } else
/* Signal pending. */ /* Signal pending. */
ap_cancel_message(zdev->ap_dev, &ap_msg); ap_cancel_message(zq->queue, &ap_msg);
out_free: out_free:
kfree(ap_msg.message); kfree(ap_msg.message);
return rc; return rc;
...@@ -464,11 +501,11 @@ static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev, ...@@ -464,11 +501,11 @@ static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev,
/** /**
* The request distributor calls this function if it picked the CEX2A * The request distributor calls this function if it picked the CEX2A
* device to handle a modexpo_crt request. * device to handle a modexpo_crt request.
* @zdev: pointer to zcrypt_device structure that identifies the * @zq: pointer to zcrypt_queue structure that identifies the
* CEX2A device to the request distributor * CEX2A device to the request distributor
* @crt: pointer to the modexpoc_crt request buffer * @crt: pointer to the modexpoc_crt request buffer
*/ */
static long zcrypt_cex2a_modexpo_crt(struct zcrypt_device *zdev, static long zcrypt_cex2a_modexpo_crt(struct zcrypt_queue *zq,
struct ica_rsa_modexpo_crt *crt) struct ica_rsa_modexpo_crt *crt)
{ {
struct ap_message ap_msg; struct ap_message ap_msg;
...@@ -476,7 +513,7 @@ static long zcrypt_cex2a_modexpo_crt(struct zcrypt_device *zdev, ...@@ -476,7 +513,7 @@ static long zcrypt_cex2a_modexpo_crt(struct zcrypt_device *zdev,
int rc; int rc;
ap_init_message(&ap_msg); ap_init_message(&ap_msg);
if (zdev->user_space_type == ZCRYPT_CEX2A) if (zq->zcard->user_space_type == ZCRYPT_CEX2A)
ap_msg.message = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE, ap_msg.message = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE,
GFP_KERNEL); GFP_KERNEL);
else else
...@@ -488,20 +525,20 @@ static long zcrypt_cex2a_modexpo_crt(struct zcrypt_device *zdev, ...@@ -488,20 +525,20 @@ static long zcrypt_cex2a_modexpo_crt(struct zcrypt_device *zdev,
ap_msg.psmid = (((unsigned long long) current->pid) << 32) + ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
atomic_inc_return(&zcrypt_step); atomic_inc_return(&zcrypt_step);
ap_msg.private = &work; ap_msg.private = &work;
rc = ICACRT_msg_to_type50CRT_msg(zdev, &ap_msg, crt); rc = ICACRT_msg_to_type50CRT_msg(zq, &ap_msg, crt);
if (rc) if (rc)
goto out_free; goto out_free;
init_completion(&work); init_completion(&work);
ap_queue_message(zdev->ap_dev, &ap_msg); ap_queue_message(zq->queue, &ap_msg);
rc = wait_for_completion_interruptible(&work); rc = wait_for_completion_interruptible(&work);
if (rc == 0) { if (rc == 0) {
rc = ap_msg.rc; rc = ap_msg.rc;
if (rc == 0) if (rc == 0)
rc = convert_response(zdev, &ap_msg, crt->outputdata, rc = convert_response(zq, &ap_msg, crt->outputdata,
crt->outputdatalength); crt->outputdatalength);
} else } else
/* Signal pending. */ /* Signal pending. */
ap_cancel_message(zdev->ap_dev, &ap_msg); ap_cancel_message(zq->queue, &ap_msg);
out_free: out_free:
kfree(ap_msg.message); kfree(ap_msg.message);
return rc; return rc;
...@@ -518,16 +555,12 @@ static struct zcrypt_ops zcrypt_msgtype50_ops = { ...@@ -518,16 +555,12 @@ static struct zcrypt_ops zcrypt_msgtype50_ops = {
.variant = MSGTYPE50_VARIANT_DEFAULT, .variant = MSGTYPE50_VARIANT_DEFAULT,
}; };
-int __init zcrypt_msgtype50_init(void)
+void __init zcrypt_msgtype50_init(void)
 {
 	zcrypt_msgtype_register(&zcrypt_msgtype50_ops);
-	return 0;
 }
 
 void __exit zcrypt_msgtype50_exit(void)
 {
 	zcrypt_msgtype_unregister(&zcrypt_msgtype50_ops);
 }
-
-module_init(zcrypt_msgtype50_init);
-module_exit(zcrypt_msgtype50_exit);
...@@ -35,7 +35,10 @@ ...@@ -35,7 +35,10 @@
 #define MSGTYPE_ADJUSTMENT	0x08  /*type04 extension (not needed in type50)*/
 
-int zcrypt_msgtype50_init(void);
+unsigned int get_rsa_modex_fc(struct ica_rsa_modexpo *, int *);
+unsigned int get_rsa_crt_fc(struct ica_rsa_modexpo_crt *, int *);
+
+void zcrypt_msgtype50_init(void);
 void zcrypt_msgtype50_exit(void);
#endif /* _ZCRYPT_MSGTYPE50_H_ */ #endif /* _ZCRYPT_MSGTYPE50_H_ */
...@@ -60,9 +60,6 @@ MODULE_DESCRIPTION("Cryptographic Coprocessor (message type 6), " \ ...@@ -60,9 +60,6 @@ MODULE_DESCRIPTION("Cryptographic Coprocessor (message type 6), " \
"Copyright IBM Corp. 2001, 2012"); "Copyright IBM Corp. 2001, 2012");
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
static void zcrypt_msgtype6_receive(struct ap_device *, struct ap_message *,
struct ap_message *);
/** /**
* CPRB * CPRB
* Note that all shorts, ints and longs are little-endian. * Note that all shorts, ints and longs are little-endian.
...@@ -149,16 +146,122 @@ static struct CPRBX static_cprbx = { ...@@ -149,16 +146,122 @@ static struct CPRBX static_cprbx = {
.func_id = {0x54, 0x32}, .func_id = {0x54, 0x32},
}; };
int speed_idx_cca(int req_type)
{
switch (req_type) {
case 0x4142:
case 0x4149:
case 0x414D:
case 0x4341:
case 0x4344:
case 0x4354:
case 0x4358:
case 0x444B:
case 0x4558:
case 0x4643:
case 0x4651:
case 0x4C47:
case 0x4C4B:
case 0x4C51:
case 0x4F48:
case 0x504F:
case 0x5053:
case 0x5058:
case 0x5343:
case 0x5344:
case 0x5345:
case 0x5350:
return LOW;
case 0x414B:
case 0x4345:
case 0x4349:
case 0x434D:
case 0x4847:
case 0x4849:
case 0x484D:
case 0x4850:
case 0x4851:
case 0x4954:
case 0x4958:
case 0x4B43:
case 0x4B44:
case 0x4B45:
case 0x4B47:
case 0x4B48:
case 0x4B49:
case 0x4B4E:
case 0x4B50:
case 0x4B52:
case 0x4B54:
case 0x4B58:
case 0x4D50:
case 0x4D53:
case 0x4D56:
case 0x4D58:
case 0x5044:
case 0x5045:
case 0x5046:
case 0x5047:
case 0x5049:
case 0x504B:
case 0x504D:
case 0x5254:
case 0x5347:
case 0x5349:
case 0x534B:
case 0x534D:
case 0x5356:
case 0x5358:
case 0x5443:
case 0x544B:
case 0x5647:
return HIGH;
default:
return MEDIUM;
}
}
int speed_idx_ep11(int req_type)
{
switch (req_type) {
case 1:
case 2:
case 36:
case 37:
case 38:
case 39:
case 40:
return LOW;
case 17:
case 18:
case 19:
case 20:
case 21:
case 22:
case 26:
case 30:
case 31:
case 32:
case 33:
case 34:
case 35:
return HIGH;
default:
return MEDIUM;
}
}
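The constants in speed_idx_cca() become readable once you notice that the type6 path further down derives the function code as (function_code[0] << 8) | function_code[1], that is, two ASCII characters packed into one integer: 0x5044 is "PD", 0x504B is "PK", and so on. The small program below only demonstrates the packing; the meaning of individual two-letter verbs is an assumption and not spelled out in the patch.

/* Illustration only: two ASCII characters packed into a function code. */
#include <stdio.h>

static int pack_fc(const unsigned char fc[2])
{
	return (fc[0] << 8) | fc[1];
}

int main(void)
{
	const unsigned char pd[2] = { 'P', 'D' };	/* example verb */
	const unsigned char pk[2] = { 'P', 'K' };	/* example verb */

	printf("PD=0x%04X PK=0x%04X\n", pack_fc(pd), pack_fc(pk));
	/* prints PD=0x5044 PK=0x504B, both of which appear in the tables above */
	return 0;
}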
/** /**
* Convert a ICAMEX message to a type6 MEX message. * Convert a ICAMEX message to a type6 MEX message.
* *
* @zdev: crypto device pointer * @zq: crypto device pointer
* @ap_msg: pointer to AP message * @ap_msg: pointer to AP message
* @mex: pointer to user input data * @mex: pointer to user input data
* *
* Returns 0 on success or -EFAULT. * Returns 0 on success or -EFAULT.
*/ */
static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_device *zdev, static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_queue *zq,
struct ap_message *ap_msg, struct ap_message *ap_msg,
struct ica_rsa_modexpo *mex) struct ica_rsa_modexpo *mex)
{ {
...@@ -173,11 +276,6 @@ static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_device *zdev, ...@@ -173,11 +276,6 @@ static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_device *zdev,
.ulen = 10, .ulen = 10,
.only_rule = {'M', 'R', 'P', ' ', ' ', ' ', ' ', ' '} .only_rule = {'M', 'R', 'P', ' ', ' ', ' ', ' ', ' '}
}; };
static struct function_and_rules_block static_pke_fnr_MCL2 = {
.function_code = {'P', 'K'},
.ulen = 10,
.only_rule = {'Z', 'E', 'R', 'O', '-', 'P', 'A', 'D'}
};
struct { struct {
struct type6_hdr hdr; struct type6_hdr hdr;
struct CPRBX cprbx; struct CPRBX cprbx;
...@@ -204,11 +302,10 @@ static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_device *zdev, ...@@ -204,11 +302,10 @@ static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_device *zdev,
msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr); msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr);
msg->cprbx = static_cprbx; msg->cprbx = static_cprbx;
msg->cprbx.domain = AP_QID_QUEUE(zdev->ap_dev->qid); msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid);
msg->cprbx.rpl_msgbl = msg->hdr.FromCardLen1; msg->cprbx.rpl_msgbl = msg->hdr.FromCardLen1;
msg->fr = (zdev->user_space_type == ZCRYPT_PCIXCC_MCL2) ? msg->fr = static_pke_fnr;
static_pke_fnr_MCL2 : static_pke_fnr;
msg->cprbx.req_parml = size - sizeof(msg->hdr) - sizeof(msg->cprbx); msg->cprbx.req_parml = size - sizeof(msg->hdr) - sizeof(msg->cprbx);
...@@ -219,13 +316,13 @@ static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_device *zdev, ...@@ -219,13 +316,13 @@ static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_device *zdev,
/** /**
* Convert a ICACRT message to a type6 CRT message. * Convert a ICACRT message to a type6 CRT message.
* *
* @zdev: crypto device pointer * @zq: crypto device pointer
* @ap_msg: pointer to AP message * @ap_msg: pointer to AP message
* @crt: pointer to user input data * @crt: pointer to user input data
* *
* Returns 0 on success or -EFAULT. * Returns 0 on success or -EFAULT.
*/ */
static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_device *zdev, static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_queue *zq,
struct ap_message *ap_msg, struct ap_message *ap_msg,
struct ica_rsa_modexpo_crt *crt) struct ica_rsa_modexpo_crt *crt)
{ {
...@@ -241,11 +338,6 @@ static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_device *zdev, ...@@ -241,11 +338,6 @@ static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_device *zdev,
.only_rule = {'Z', 'E', 'R', 'O', '-', 'P', 'A', 'D'} .only_rule = {'Z', 'E', 'R', 'O', '-', 'P', 'A', 'D'}
}; };
static struct function_and_rules_block static_pkd_fnr_MCL2 = {
.function_code = {'P', 'D'},
.ulen = 10,
.only_rule = {'P', 'K', 'C', 'S', '-', '1', '.', '2'}
};
struct { struct {
struct type6_hdr hdr; struct type6_hdr hdr;
struct CPRBX cprbx; struct CPRBX cprbx;
...@@ -272,12 +364,11 @@ static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_device *zdev, ...@@ -272,12 +364,11 @@ static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_device *zdev,
msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr); msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr);
msg->cprbx = static_cprbx; msg->cprbx = static_cprbx;
msg->cprbx.domain = AP_QID_QUEUE(zdev->ap_dev->qid); msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid);
msg->cprbx.req_parml = msg->cprbx.rpl_msgbl = msg->cprbx.req_parml = msg->cprbx.rpl_msgbl =
size - sizeof(msg->hdr) - sizeof(msg->cprbx); size - sizeof(msg->hdr) - sizeof(msg->cprbx);
msg->fr = (zdev->user_space_type == ZCRYPT_PCIXCC_MCL2) ? msg->fr = static_pkd_fnr;
static_pkd_fnr_MCL2 : static_pkd_fnr;
ap_msg->length = size; ap_msg->length = size;
return 0; return 0;
...@@ -286,7 +377,7 @@ static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_device *zdev, ...@@ -286,7 +377,7 @@ static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_device *zdev,
/** /**
* Convert a XCRB message to a type6 CPRB message. * Convert a XCRB message to a type6 CPRB message.
* *
* @zdev: crypto device pointer * @zq: crypto device pointer
* @ap_msg: pointer to AP message * @ap_msg: pointer to AP message
* @xcRB: pointer to user input data * @xcRB: pointer to user input data
* *
...@@ -297,9 +388,10 @@ struct type86_fmt2_msg { ...@@ -297,9 +388,10 @@ struct type86_fmt2_msg {
struct type86_fmt2_ext fmt2; struct type86_fmt2_ext fmt2;
} __packed; } __packed;
static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev, static int XCRB_msg_to_type6CPRB_msgX(struct ap_message *ap_msg,
struct ap_message *ap_msg, struct ica_xcRB *xcRB,
struct ica_xcRB *xcRB) unsigned int *fcode,
unsigned short **dom)
{ {
static struct type6_hdr static_type6_hdrX = { static struct type6_hdr static_type6_hdrX = {
.type = 0x06, .type = 0x06,
...@@ -379,6 +471,9 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev, ...@@ -379,6 +471,9 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
memcpy(msg->hdr.function_code, function_code, memcpy(msg->hdr.function_code, function_code,
sizeof(msg->hdr.function_code)); sizeof(msg->hdr.function_code));
*fcode = (msg->hdr.function_code[0] << 8) | msg->hdr.function_code[1];
*dom = (unsigned short *)&msg->cprbx.domain;
if (memcmp(function_code, "US", 2) == 0) if (memcmp(function_code, "US", 2) == 0)
ap_msg->special = 1; ap_msg->special = 1;
else else
...@@ -389,15 +484,15 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev, ...@@ -389,15 +484,15 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
copy_from_user(req_data, xcRB->request_data_address, copy_from_user(req_data, xcRB->request_data_address,
xcRB->request_data_length)) xcRB->request_data_length))
return -EFAULT; return -EFAULT;
return 0; return 0;
} }
static int xcrb_msg_to_type6_ep11cprb_msgx(struct zcrypt_device *zdev, static int xcrb_msg_to_type6_ep11cprb_msgx(struct ap_message *ap_msg,
struct ap_message *ap_msg, struct ep11_urb *xcRB,
struct ep11_urb *xcRB) unsigned int *fcode)
{ {
unsigned int lfmt; unsigned int lfmt;
static struct type6_hdr static_type6_ep11_hdr = { static struct type6_hdr static_type6_ep11_hdr = {
.type = 0x06, .type = 0x06,
.rqid = {0x00, 0x01}, .rqid = {0x00, 0x01},
...@@ -421,7 +516,7 @@ static int xcrb_msg_to_type6_ep11cprb_msgx(struct zcrypt_device *zdev, ...@@ -421,7 +516,7 @@ static int xcrb_msg_to_type6_ep11cprb_msgx(struct zcrypt_device *zdev,
unsigned char dom_tag; /* fixed value 0x4 */ unsigned char dom_tag; /* fixed value 0x4 */
unsigned char dom_len; /* fixed value 0x4 */ unsigned char dom_len; /* fixed value 0x4 */
unsigned int dom_val; /* domain id */ unsigned int dom_val; /* domain id */
} __packed * payload_hdr; } __packed * payload_hdr = NULL;
if (CEIL4(xcRB->req_len) < xcRB->req_len) if (CEIL4(xcRB->req_len) < xcRB->req_len)
return -EINVAL; /* overflow after alignment*/ return -EINVAL; /* overflow after alignment*/
...@@ -450,43 +545,30 @@ static int xcrb_msg_to_type6_ep11cprb_msgx(struct zcrypt_device *zdev, ...@@ -450,43 +545,30 @@ static int xcrb_msg_to_type6_ep11cprb_msgx(struct zcrypt_device *zdev,
return -EFAULT; return -EFAULT;
} }
/* if ((msg->pld_lenfmt & 0x80) == 0x80) { /*ext.len.fmt 2 or 3*/
The target domain field within the cprb body/payload block will be switch (msg->pld_lenfmt & 0x03) {
replaced by the usage domain for non-management commands only. case 1:
Therefore we check the first bit of the 'flags' parameter for lfmt = 2;
management command indication. break;
0 - non management command case 2:
1 - management command lfmt = 3;
*/ break;
if (!((msg->cprbx.flags & 0x80) == 0x80)) { default:
msg->cprbx.target_id = (unsigned int) return -EINVAL;
AP_QID_QUEUE(zdev->ap_dev->qid); }
} else {
if ((msg->pld_lenfmt & 0x80) == 0x80) { /*ext.len.fmt 2 or 3*/ lfmt = 1; /* length format #1 */
switch (msg->pld_lenfmt & 0x03) {
case 1:
lfmt = 2;
break;
case 2:
lfmt = 3;
break;
default:
return -EINVAL;
}
} else {
lfmt = 1; /* length format #1 */
}
payload_hdr = (struct pld_hdr *)((&(msg->pld_lenfmt))+lfmt);
payload_hdr->dom_val = (unsigned int)
AP_QID_QUEUE(zdev->ap_dev->qid);
} }
payload_hdr = (struct pld_hdr *)((&(msg->pld_lenfmt))+lfmt);
*fcode = payload_hdr->func_val & 0xFFFF;
return 0; return 0;
} }
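The pld_lenfmt handling above appears to follow a BER-style length encoding: the short form keeps the length in the format byte itself, while 0x81 and 0x82 announce one or two extra length bytes, so the payload header starts 1, 2 or 3 bytes past pld_lenfmt. That reading is an inference from the code, not something stated in the patch; the standalone sketch below reproduces the offset calculation.

/* Illustration only: offset of the EP11 payload header behind pld_lenfmt. */
#include <stdio.h>

static int demo_lfmt(unsigned char pld_lenfmt)
{
	if (pld_lenfmt & 0x80) {		/* extended length format */
		switch (pld_lenfmt & 0x03) {
		case 1:
			return 2;		/* format byte + 1 length byte */
		case 2:
			return 3;		/* format byte + 2 length bytes */
		default:
			return -1;		/* -EINVAL in the driver */
		}
	}
	return 1;				/* short form: format byte only */
}

int main(void)
{
	unsigned char fmts[] = { 0x10, 0x81, 0x82, 0x83 };

	for (unsigned i = 0; i < sizeof(fmts); i++)
		printf("pld_lenfmt=0x%02x -> payload offset %d\n",
		       fmts[i], demo_lfmt(fmts[i]));
	return 0;
}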
/** /**
* Copy results from a type 86 ICA reply message back to user space. * Copy results from a type 86 ICA reply message back to user space.
* *
* @zdev: crypto device pointer * @zq: crypto device pointer
* @reply: reply AP message. * @reply: reply AP message.
* @data: pointer to user output data * @data: pointer to user output data
* @length: size of user output data * @length: size of user output data
...@@ -508,7 +590,7 @@ struct type86_ep11_reply { ...@@ -508,7 +590,7 @@ struct type86_ep11_reply {
struct ep11_cprb cprbx; struct ep11_cprb cprbx;
} __packed; } __packed;
static int convert_type86_ica(struct zcrypt_device *zdev, static int convert_type86_ica(struct zcrypt_queue *zq,
struct ap_message *reply, struct ap_message *reply,
char __user *outputdata, char __user *outputdata,
unsigned int outputdatalength) unsigned int outputdatalength)
...@@ -556,26 +638,37 @@ static int convert_type86_ica(struct zcrypt_device *zdev, ...@@ -556,26 +638,37 @@ static int convert_type86_ica(struct zcrypt_device *zdev,
service_rc = msg->cprbx.ccp_rtcode; service_rc = msg->cprbx.ccp_rtcode;
if (unlikely(service_rc != 0)) { if (unlikely(service_rc != 0)) {
service_rs = msg->cprbx.ccp_rscode; service_rs = msg->cprbx.ccp_rscode;
if (service_rc == 8 && service_rs == 66) if ((service_rc == 8 && service_rs == 66) ||
return -EINVAL; (service_rc == 8 && service_rs == 65) ||
if (service_rc == 8 && service_rs == 65) (service_rc == 8 && service_rs == 72) ||
return -EINVAL; (service_rc == 8 && service_rs == 770) ||
if (service_rc == 8 && service_rs == 770) (service_rc == 12 && service_rs == 769)) {
ZCRYPT_DBF(DBF_DEBUG,
"device=%02x.%04x rc/rs=%d/%d => rc=EINVAL\n",
AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid),
(int) service_rc, (int) service_rs);
return -EINVAL; return -EINVAL;
}
if (service_rc == 8 && service_rs == 783) { if (service_rc == 8 && service_rs == 783) {
zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD; zq->zcard->min_mod_size =
PCIXCC_MIN_MOD_SIZE_OLD;
ZCRYPT_DBF(DBF_DEBUG,
"device=%02x.%04x rc/rs=%d/%d => rc=EAGAIN\n",
AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid),
(int) service_rc, (int) service_rs);
return -EAGAIN; return -EAGAIN;
} }
if (service_rc == 12 && service_rs == 769) zq->online = 0;
return -EINVAL; pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
if (service_rc == 8 && service_rs == 72) AP_QID_CARD(zq->queue->qid),
return -EINVAL; AP_QID_QUEUE(zq->queue->qid));
zdev->online = 0; ZCRYPT_DBF(DBF_ERR,
pr_err("Cryptographic device %x failed and was set offline\n", "device=%02x.%04x rc/rs=%d/%d => online=0 rc=EAGAIN\n",
AP_QID_DEVICE(zdev->ap_dev->qid)); AP_QID_CARD(zq->queue->qid),
ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", AP_QID_QUEUE(zq->queue->qid),
AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online, (int) service_rc, (int) service_rs);
msg->hdr.reply_code);
return -EAGAIN; /* repeat the request on a different device. */ return -EAGAIN; /* repeat the request on a different device. */
} }
data = msg->text; data = msg->text;
...@@ -611,13 +704,13 @@ static int convert_type86_ica(struct zcrypt_device *zdev, ...@@ -611,13 +704,13 @@ static int convert_type86_ica(struct zcrypt_device *zdev,
/** /**
* Copy results from a type 86 XCRB reply message back to user space. * Copy results from a type 86 XCRB reply message back to user space.
* *
* @zdev: crypto device pointer * @zq: crypto device pointer
* @reply: reply AP message. * @reply: reply AP message.
* @xcRB: pointer to XCRB * @xcRB: pointer to XCRB
* *
* Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error. * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
*/ */
static int convert_type86_xcrb(struct zcrypt_device *zdev, static int convert_type86_xcrb(struct zcrypt_queue *zq,
struct ap_message *reply, struct ap_message *reply,
struct ica_xcRB *xcRB) struct ica_xcRB *xcRB)
{ {
...@@ -642,13 +735,13 @@ static int convert_type86_xcrb(struct zcrypt_device *zdev, ...@@ -642,13 +735,13 @@ static int convert_type86_xcrb(struct zcrypt_device *zdev,
/** /**
* Copy results from a type 86 EP11 XCRB reply message back to user space. * Copy results from a type 86 EP11 XCRB reply message back to user space.
* *
* @zdev: crypto device pointer * @zq: crypto device pointer
* @reply: reply AP message. * @reply: reply AP message.
* @xcRB: pointer to EP11 user request block * @xcRB: pointer to EP11 user request block
* *
* Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error. * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
*/ */
static int convert_type86_ep11_xcrb(struct zcrypt_device *zdev, static int convert_type86_ep11_xcrb(struct zcrypt_queue *zq,
struct ap_message *reply, struct ap_message *reply,
struct ep11_urb *xcRB) struct ep11_urb *xcRB)
{ {
...@@ -666,7 +759,7 @@ static int convert_type86_ep11_xcrb(struct zcrypt_device *zdev, ...@@ -666,7 +759,7 @@ static int convert_type86_ep11_xcrb(struct zcrypt_device *zdev,
return 0; return 0;
} }
static int convert_type86_rng(struct zcrypt_device *zdev, static int convert_type86_rng(struct zcrypt_queue *zq,
struct ap_message *reply, struct ap_message *reply,
char *buffer) char *buffer)
{ {
...@@ -683,104 +776,113 @@ static int convert_type86_rng(struct zcrypt_device *zdev, ...@@ -683,104 +776,113 @@ static int convert_type86_rng(struct zcrypt_device *zdev,
return msg->fmt2.count2; return msg->fmt2.count2;
} }
static int convert_response_ica(struct zcrypt_device *zdev, static int convert_response_ica(struct zcrypt_queue *zq,
struct ap_message *reply, struct ap_message *reply,
char __user *outputdata, char __user *outputdata,
unsigned int outputdatalength) unsigned int outputdatalength)
{ {
struct type86x_reply *msg = reply->message; struct type86x_reply *msg = reply->message;
/* Response type byte is the second byte in the response. */ switch (msg->hdr.type) {
switch (((unsigned char *) reply->message)[1]) {
case TYPE82_RSP_CODE: case TYPE82_RSP_CODE:
case TYPE88_RSP_CODE: case TYPE88_RSP_CODE:
return convert_error(zdev, reply); return convert_error(zq, reply);
case TYPE86_RSP_CODE: case TYPE86_RSP_CODE:
if (msg->cprbx.ccp_rtcode && if (msg->cprbx.ccp_rtcode &&
(msg->cprbx.ccp_rscode == 0x14f) && (msg->cprbx.ccp_rscode == 0x14f) &&
(outputdatalength > 256)) { (outputdatalength > 256)) {
if (zdev->max_exp_bit_length <= 17) { if (zq->zcard->max_exp_bit_length <= 17) {
zdev->max_exp_bit_length = 17; zq->zcard->max_exp_bit_length = 17;
return -EAGAIN; return -EAGAIN;
} else } else
return -EINVAL; return -EINVAL;
} }
if (msg->hdr.reply_code) if (msg->hdr.reply_code)
return convert_error(zdev, reply); return convert_error(zq, reply);
if (msg->cprbx.cprb_ver_id == 0x02) if (msg->cprbx.cprb_ver_id == 0x02)
return convert_type86_ica(zdev, reply, return convert_type86_ica(zq, reply,
outputdata, outputdatalength); outputdata, outputdatalength);
/* Fall through, no break, incorrect cprb version is an unknown /* Fall through, no break, incorrect cprb version is an unknown
* response */ * response */
default: /* Unknown response type, this should NEVER EVER happen */ default: /* Unknown response type, this should NEVER EVER happen */
zdev->online = 0; zq->online = 0;
pr_err("Cryptographic device %x failed and was set offline\n", pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
AP_QID_DEVICE(zdev->ap_dev->qid)); AP_QID_CARD(zq->queue->qid),
ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", AP_QID_QUEUE(zq->queue->qid));
AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online); ZCRYPT_DBF(DBF_ERR,
"device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid),
(int) msg->hdr.type);
return -EAGAIN; /* repeat the request on a different device. */ return -EAGAIN; /* repeat the request on a different device. */
} }
} }
static int convert_response_xcrb(struct zcrypt_device *zdev, static int convert_response_xcrb(struct zcrypt_queue *zq,
struct ap_message *reply, struct ap_message *reply,
struct ica_xcRB *xcRB) struct ica_xcRB *xcRB)
{ {
struct type86x_reply *msg = reply->message; struct type86x_reply *msg = reply->message;
/* Response type byte is the second byte in the response. */ switch (msg->hdr.type) {
switch (((unsigned char *) reply->message)[1]) {
case TYPE82_RSP_CODE: case TYPE82_RSP_CODE:
case TYPE88_RSP_CODE: case TYPE88_RSP_CODE:
xcRB->status = 0x0008044DL; /* HDD_InvalidParm */ xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
return convert_error(zdev, reply); return convert_error(zq, reply);
case TYPE86_RSP_CODE: case TYPE86_RSP_CODE:
if (msg->hdr.reply_code) { if (msg->hdr.reply_code) {
memcpy(&(xcRB->status), msg->fmt2.apfs, sizeof(u32)); memcpy(&(xcRB->status), msg->fmt2.apfs, sizeof(u32));
return convert_error(zdev, reply); return convert_error(zq, reply);
} }
if (msg->cprbx.cprb_ver_id == 0x02) if (msg->cprbx.cprb_ver_id == 0x02)
return convert_type86_xcrb(zdev, reply, xcRB); return convert_type86_xcrb(zq, reply, xcRB);
/* Fall through, no break, incorrect cprb version is an unknown /* Fall through, no break, incorrect cprb version is an unknown
* response */ * response */
default: /* Unknown response type, this should NEVER EVER happen */ default: /* Unknown response type, this should NEVER EVER happen */
xcRB->status = 0x0008044DL; /* HDD_InvalidParm */ xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
zdev->online = 0; zq->online = 0;
pr_err("Cryptographic device %x failed and was set offline\n", pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
AP_QID_DEVICE(zdev->ap_dev->qid)); AP_QID_CARD(zq->queue->qid),
ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", AP_QID_QUEUE(zq->queue->qid));
AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online); ZCRYPT_DBF(DBF_ERR,
"device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid),
(int) msg->hdr.type);
return -EAGAIN; /* repeat the request on a different device. */ return -EAGAIN; /* repeat the request on a different device. */
} }
} }
static int convert_response_ep11_xcrb(struct zcrypt_device *zdev, static int convert_response_ep11_xcrb(struct zcrypt_queue *zq,
struct ap_message *reply, struct ep11_urb *xcRB) struct ap_message *reply, struct ep11_urb *xcRB)
{ {
struct type86_ep11_reply *msg = reply->message; struct type86_ep11_reply *msg = reply->message;
/* Response type byte is the second byte in the response. */ switch (msg->hdr.type) {
switch (((unsigned char *)reply->message)[1]) {
case TYPE82_RSP_CODE: case TYPE82_RSP_CODE:
case TYPE87_RSP_CODE: case TYPE87_RSP_CODE:
return convert_error(zdev, reply); return convert_error(zq, reply);
case TYPE86_RSP_CODE: case TYPE86_RSP_CODE:
if (msg->hdr.reply_code) if (msg->hdr.reply_code)
return convert_error(zdev, reply); return convert_error(zq, reply);
if (msg->cprbx.cprb_ver_id == 0x04) if (msg->cprbx.cprb_ver_id == 0x04)
return convert_type86_ep11_xcrb(zdev, reply, xcRB); return convert_type86_ep11_xcrb(zq, reply, xcRB);
/* Fall through, no break, incorrect cprb version is an unknown resp.*/ /* Fall through, no break, incorrect cprb version is an unknown resp.*/
default: /* Unknown response type, this should NEVER EVER happen */ default: /* Unknown response type, this should NEVER EVER happen */
zdev->online = 0; zq->online = 0;
pr_err("Cryptographic device %x failed and was set offline\n", pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
AP_QID_DEVICE(zdev->ap_dev->qid)); AP_QID_CARD(zq->queue->qid),
ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", AP_QID_QUEUE(zq->queue->qid));
AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online); ZCRYPT_DBF(DBF_ERR,
"device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid),
(int) msg->hdr.type);
return -EAGAIN; /* repeat the request on a different device. */ return -EAGAIN; /* repeat the request on a different device. */
} }
} }
static int convert_response_rng(struct zcrypt_device *zdev, static int convert_response_rng(struct zcrypt_queue *zq,
struct ap_message *reply, struct ap_message *reply,
char *data) char *data)
{ {
...@@ -794,15 +896,19 @@ static int convert_response_rng(struct zcrypt_device *zdev, ...@@ -794,15 +896,19 @@ static int convert_response_rng(struct zcrypt_device *zdev,
if (msg->hdr.reply_code) if (msg->hdr.reply_code)
return -EINVAL; return -EINVAL;
if (msg->cprbx.cprb_ver_id == 0x02) if (msg->cprbx.cprb_ver_id == 0x02)
return convert_type86_rng(zdev, reply, data); return convert_type86_rng(zq, reply, data);
/* Fall through, no break, incorrect cprb version is an unknown /* Fall through, no break, incorrect cprb version is an unknown
* response */ * response */
default: /* Unknown response type, this should NEVER EVER happen */ default: /* Unknown response type, this should NEVER EVER happen */
zdev->online = 0; zq->online = 0;
pr_err("Cryptographic device %x failed and was set offline\n", pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
AP_QID_DEVICE(zdev->ap_dev->qid)); AP_QID_CARD(zq->queue->qid),
ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", AP_QID_QUEUE(zq->queue->qid));
AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online); ZCRYPT_DBF(DBF_ERR,
"device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid),
(int) msg->hdr.type);
return -EAGAIN; /* repeat the request on a different device. */ return -EAGAIN; /* repeat the request on a different device. */
} }
} }
...@@ -811,11 +917,11 @@ static int convert_response_rng(struct zcrypt_device *zdev, ...@@ -811,11 +917,11 @@ static int convert_response_rng(struct zcrypt_device *zdev,
* This function is called from the AP bus code after a crypto request * This function is called from the AP bus code after a crypto request
* "msg" has finished with the reply message "reply". * "msg" has finished with the reply message "reply".
* It is called from tasklet context. * It is called from tasklet context.
* @ap_dev: pointer to the AP device * @aq: pointer to the AP queue
* @msg: pointer to the AP message * @msg: pointer to the AP message
* @reply: pointer to the AP reply message * @reply: pointer to the AP reply message
*/ */
static void zcrypt_msgtype6_receive(struct ap_device *ap_dev, static void zcrypt_msgtype6_receive(struct ap_queue *aq,
struct ap_message *msg, struct ap_message *msg,
struct ap_message *reply) struct ap_message *reply)
{ {
...@@ -860,11 +966,11 @@ static void zcrypt_msgtype6_receive(struct ap_device *ap_dev, ...@@ -860,11 +966,11 @@ static void zcrypt_msgtype6_receive(struct ap_device *ap_dev,
* This function is called from the AP bus code after a crypto request * This function is called from the AP bus code after a crypto request
* "msg" has finished with the reply message "reply". * "msg" has finished with the reply message "reply".
* It is called from tasklet context. * It is called from tasklet context.
* @ap_dev: pointer to the AP device * @aq: pointer to the AP queue
* @msg: pointer to the AP message * @msg: pointer to the AP message
* @reply: pointer to the AP reply message * @reply: pointer to the AP reply message
*/ */
static void zcrypt_msgtype6_receive_ep11(struct ap_device *ap_dev, static void zcrypt_msgtype6_receive_ep11(struct ap_queue *aq,
struct ap_message *msg, struct ap_message *msg,
struct ap_message *reply) struct ap_message *reply)
{ {
...@@ -904,11 +1010,11 @@ static atomic_t zcrypt_step = ATOMIC_INIT(0); ...@@ -904,11 +1010,11 @@ static atomic_t zcrypt_step = ATOMIC_INIT(0);
/** /**
* The request distributor calls this function if it picked the PCIXCC/CEX2C * The request distributor calls this function if it picked the PCIXCC/CEX2C
* device to handle a modexpo request. * device to handle a modexpo request.
* @zdev: pointer to zcrypt_device structure that identifies the * @zq: pointer to zcrypt_queue structure that identifies the
* PCIXCC/CEX2C device to the request distributor * PCIXCC/CEX2C device to the request distributor
* @mex: pointer to the modexpo request buffer * @mex: pointer to the modexpo request buffer
*/ */
static long zcrypt_msgtype6_modexpo(struct zcrypt_device *zdev, static long zcrypt_msgtype6_modexpo(struct zcrypt_queue *zq,
struct ica_rsa_modexpo *mex) struct ica_rsa_modexpo *mex)
{ {
struct ap_message ap_msg; struct ap_message ap_msg;
...@@ -925,21 +1031,21 @@ static long zcrypt_msgtype6_modexpo(struct zcrypt_device *zdev, ...@@ -925,21 +1031,21 @@ static long zcrypt_msgtype6_modexpo(struct zcrypt_device *zdev,
ap_msg.psmid = (((unsigned long long) current->pid) << 32) + ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
atomic_inc_return(&zcrypt_step); atomic_inc_return(&zcrypt_step);
ap_msg.private = &resp_type; ap_msg.private = &resp_type;
rc = ICAMEX_msg_to_type6MEX_msgX(zdev, &ap_msg, mex); rc = ICAMEX_msg_to_type6MEX_msgX(zq, &ap_msg, mex);
if (rc) if (rc)
goto out_free; goto out_free;
init_completion(&resp_type.work); init_completion(&resp_type.work);
ap_queue_message(zdev->ap_dev, &ap_msg); ap_queue_message(zq->queue, &ap_msg);
rc = wait_for_completion_interruptible(&resp_type.work); rc = wait_for_completion_interruptible(&resp_type.work);
if (rc == 0) { if (rc == 0) {
rc = ap_msg.rc; rc = ap_msg.rc;
if (rc == 0) if (rc == 0)
rc = convert_response_ica(zdev, &ap_msg, rc = convert_response_ica(zq, &ap_msg,
mex->outputdata, mex->outputdata,
mex->outputdatalength); mex->outputdatalength);
} else } else
/* Signal pending. */ /* Signal pending. */
ap_cancel_message(zdev->ap_dev, &ap_msg); ap_cancel_message(zq->queue, &ap_msg);
out_free: out_free:
free_page((unsigned long) ap_msg.message); free_page((unsigned long) ap_msg.message);
return rc; return rc;
...@@ -948,11 +1054,11 @@ static long zcrypt_msgtype6_modexpo(struct zcrypt_device *zdev, ...@@ -948,11 +1054,11 @@ static long zcrypt_msgtype6_modexpo(struct zcrypt_device *zdev,
/** /**
* The request distributor calls this function if it picked the PCIXCC/CEX2C * The request distributor calls this function if it picked the PCIXCC/CEX2C
* device to handle a modexpo_crt request. * device to handle a modexpo_crt request.
* @zdev: pointer to zcrypt_device structure that identifies the * @zq: pointer to zcrypt_queue structure that identifies the
* PCIXCC/CEX2C device to the request distributor * PCIXCC/CEX2C device to the request distributor
* @crt: pointer to the modexpoc_crt request buffer * @crt: pointer to the modexpoc_crt request buffer
*/ */
static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_device *zdev, static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_queue *zq,
struct ica_rsa_modexpo_crt *crt) struct ica_rsa_modexpo_crt *crt)
{ {
struct ap_message ap_msg; struct ap_message ap_msg;
...@@ -969,148 +1075,258 @@ static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_device *zdev, ...@@ -969,148 +1075,258 @@ static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_device *zdev,
ap_msg.psmid = (((unsigned long long) current->pid) << 32) + ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
atomic_inc_return(&zcrypt_step); atomic_inc_return(&zcrypt_step);
ap_msg.private = &resp_type; ap_msg.private = &resp_type;
rc = ICACRT_msg_to_type6CRT_msgX(zdev, &ap_msg, crt); rc = ICACRT_msg_to_type6CRT_msgX(zq, &ap_msg, crt);
if (rc) if (rc)
goto out_free; goto out_free;
init_completion(&resp_type.work); init_completion(&resp_type.work);
ap_queue_message(zdev->ap_dev, &ap_msg); ap_queue_message(zq->queue, &ap_msg);
rc = wait_for_completion_interruptible(&resp_type.work); rc = wait_for_completion_interruptible(&resp_type.work);
if (rc == 0) { if (rc == 0) {
rc = ap_msg.rc; rc = ap_msg.rc;
if (rc == 0) if (rc == 0)
rc = convert_response_ica(zdev, &ap_msg, rc = convert_response_ica(zq, &ap_msg,
crt->outputdata, crt->outputdata,
crt->outputdatalength); crt->outputdatalength);
} else } else {
/* Signal pending. */ /* Signal pending. */
ap_cancel_message(zdev->ap_dev, &ap_msg); ap_cancel_message(zq->queue, &ap_msg);
}
out_free: out_free:
free_page((unsigned long) ap_msg.message); free_page((unsigned long) ap_msg.message);
return rc; return rc;
} }
unsigned int get_cprb_fc(struct ica_xcRB *xcRB,
struct ap_message *ap_msg,
unsigned int *func_code, unsigned short **dom)
{
struct response_type resp_type = {
.type = PCIXCC_RESPONSE_TYPE_XCRB,
};
int rc;
ap_init_message(ap_msg);
ap_msg->message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
if (!ap_msg->message)
return -ENOMEM;
ap_msg->receive = zcrypt_msgtype6_receive;
ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
ap_msg->private = kmalloc(sizeof(resp_type), GFP_KERNEL);
if (!ap_msg->private) {
kzfree(ap_msg->message);
return -ENOMEM;
}
memcpy(ap_msg->private, &resp_type, sizeof(resp_type));
rc = XCRB_msg_to_type6CPRB_msgX(ap_msg, xcRB, func_code, dom);
if (rc) {
kzfree(ap_msg->message);
kzfree(ap_msg->private);
}
return rc;
}
/** /**
* The request distributor calls this function if it picked the PCIXCC/CEX2C * The request distributor calls this function if it picked the PCIXCC/CEX2C
* device to handle a send_cprb request. * device to handle a send_cprb request.
* @zdev: pointer to zcrypt_device structure that identifies the * @zq: pointer to zcrypt_queue structure that identifies the
* PCIXCC/CEX2C device to the request distributor * PCIXCC/CEX2C device to the request distributor
* @xcRB: pointer to the send_cprb request buffer * @xcRB: pointer to the send_cprb request buffer
*/ */
static long zcrypt_msgtype6_send_cprb(struct zcrypt_device *zdev, static long zcrypt_msgtype6_send_cprb(struct zcrypt_queue *zq,
struct ica_xcRB *xcRB) struct ica_xcRB *xcRB,
struct ap_message *ap_msg)
{ {
struct ap_message ap_msg;
struct response_type resp_type = {
.type = PCIXCC_RESPONSE_TYPE_XCRB,
};
int rc; int rc;
struct response_type *rtype = (struct response_type *)(ap_msg->private);
ap_init_message(&ap_msg); init_completion(&rtype->work);
ap_msg.message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL); ap_queue_message(zq->queue, ap_msg);
if (!ap_msg.message) rc = wait_for_completion_interruptible(&rtype->work);
return -ENOMEM;
ap_msg.receive = zcrypt_msgtype6_receive;
ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
ap_msg.private = &resp_type;
rc = XCRB_msg_to_type6CPRB_msgX(zdev, &ap_msg, xcRB);
if (rc)
goto out_free;
init_completion(&resp_type.work);
ap_queue_message(zdev->ap_dev, &ap_msg);
rc = wait_for_completion_interruptible(&resp_type.work);
if (rc == 0) { if (rc == 0) {
rc = ap_msg.rc; rc = ap_msg->rc;
if (rc == 0) if (rc == 0)
rc = convert_response_xcrb(zdev, &ap_msg, xcRB); rc = convert_response_xcrb(zq, ap_msg, xcRB);
} else } else
/* Signal pending. */ /* Signal pending. */
ap_cancel_message(zdev->ap_dev, &ap_msg); ap_cancel_message(zq->queue, ap_msg);
out_free:
kzfree(ap_msg.message); kzfree(ap_msg->message);
kzfree(ap_msg->private);
return rc;
}
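Editor's note: the two hunks above split CPRB handling into a prepare step (get_cprb_fc(), which allocates the request buffer and a kmalloc'ed response context in ap_msg->private before any queue is chosen) and a send step (zcrypt_msgtype6_send_cprb(), which queues the prepared message on the selected queue and releases both buffers with kzfree()). The user-space model below only sketches that ownership hand-off; all sketch_* names are hypothetical stand-ins, not kernel API.

/* Model of the prepare-then-send ownership hand-off introduced above. */
#include <stdio.h>
#include <stdlib.h>

struct sketch_ap_message {
	void *message;	/* request buffer, allocated by the prepare step */
	void *private;	/* response context, also allocated up front */
};

static int sketch_prepare(struct sketch_ap_message *msg)
{
	msg->message = malloc(64);
	msg->private = malloc(16);
	if (!msg->message || !msg->private) {
		free(msg->message);
		free(msg->private);
		return -1;
	}
	return 0;
}

static void sketch_send_on_queue(int queue, struct sketch_ap_message *msg)
{
	printf("sending prepared message on queue %d\n", queue);
	free(msg->message);	/* the send step owns and releases the buffers, */
	free(msg->private);	/* mirroring the kzfree() calls in send_cprb     */
}

int main(void)
{
	struct sketch_ap_message msg;

	if (sketch_prepare(&msg))
		return 1;
	sketch_send_on_queue(3, &msg);
	return 0;
}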
unsigned int get_ep11cprb_fc(struct ep11_urb *xcrb,
struct ap_message *ap_msg,
unsigned int *func_code)
{
struct response_type resp_type = {
.type = PCIXCC_RESPONSE_TYPE_EP11,
};
int rc;
ap_init_message(ap_msg);
ap_msg->message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
if (!ap_msg->message)
return -ENOMEM;
ap_msg->receive = zcrypt_msgtype6_receive_ep11;
ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
ap_msg->private = kmalloc(sizeof(resp_type), GFP_KERNEL);
if (!ap_msg->private) {
kzfree(ap_msg->message);
return -ENOMEM;
}
memcpy(ap_msg->private, &resp_type, sizeof(resp_type));
rc = xcrb_msg_to_type6_ep11cprb_msgx(ap_msg, xcrb, func_code);
if (rc) {
kzfree(ap_msg->message);
kzfree(ap_msg->private);
}
return rc; return rc;
} }
/** /**
* The request distributor calls this function if it picked the CEX4P * The request distributor calls this function if it picked the CEX4P
* device to handle a send_ep11_cprb request. * device to handle a send_ep11_cprb request.
* @zdev: pointer to zcrypt_device structure that identifies the * @zq: pointer to zcrypt_queue structure that identifies the
* CEX4P device to the request distributor * CEX4P device to the request distributor
* @xcRB: pointer to the ep11 user request block * @xcRB: pointer to the ep11 user request block
*/ */
static long zcrypt_msgtype6_send_ep11_cprb(struct zcrypt_device *zdev, static long zcrypt_msgtype6_send_ep11_cprb(struct zcrypt_queue *zq,
struct ep11_urb *xcrb) struct ep11_urb *xcrb,
struct ap_message *ap_msg)
{ {
struct ap_message ap_msg;
struct response_type resp_type = {
.type = PCIXCC_RESPONSE_TYPE_EP11,
};
int rc; int rc;
unsigned int lfmt;
struct response_type *rtype = (struct response_type *)(ap_msg->private);
struct {
struct type6_hdr hdr;
struct ep11_cprb cprbx;
unsigned char pld_tag; /* fixed value 0x30 */
unsigned char pld_lenfmt; /* payload length format */
} __packed * msg = ap_msg->message;
struct pld_hdr {
unsigned char func_tag; /* fixed value 0x4 */
unsigned char func_len; /* fixed value 0x4 */
unsigned int func_val; /* function ID */
unsigned char dom_tag; /* fixed value 0x4 */
unsigned char dom_len; /* fixed value 0x4 */
unsigned int dom_val; /* domain id */
} __packed * payload_hdr = NULL;
ap_init_message(&ap_msg);
ap_msg.message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL); /**
if (!ap_msg.message) * The target domain field within the cprb body/payload block will be
return -ENOMEM; * replaced by the usage domain for non-management commands only.
ap_msg.receive = zcrypt_msgtype6_receive_ep11; * Therefore we check the first bit of the 'flags' parameter for
ap_msg.psmid = (((unsigned long long) current->pid) << 32) + * management command indication.
atomic_inc_return(&zcrypt_step); * 0 - non management command
ap_msg.private = &resp_type; * 1 - management command
rc = xcrb_msg_to_type6_ep11cprb_msgx(zdev, &ap_msg, xcrb); */
if (rc) if (!((msg->cprbx.flags & 0x80) == 0x80)) {
goto out_free; msg->cprbx.target_id = (unsigned int)
init_completion(&resp_type.work); AP_QID_QUEUE(zq->queue->qid);
ap_queue_message(zdev->ap_dev, &ap_msg);
rc = wait_for_completion_interruptible(&resp_type.work); if ((msg->pld_lenfmt & 0x80) == 0x80) { /*ext.len.fmt 2 or 3*/
switch (msg->pld_lenfmt & 0x03) {
case 1:
lfmt = 2;
break;
case 2:
lfmt = 3;
break;
default:
return -EINVAL;
}
} else {
lfmt = 1; /* length format #1 */
}
payload_hdr = (struct pld_hdr *)((&(msg->pld_lenfmt))+lfmt);
payload_hdr->dom_val = (unsigned int)
AP_QID_QUEUE(zq->queue->qid);
}
init_completion(&rtype->work);
ap_queue_message(zq->queue, ap_msg);
rc = wait_for_completion_interruptible(&rtype->work);
if (rc == 0) { if (rc == 0) {
rc = ap_msg.rc; rc = ap_msg->rc;
if (rc == 0) if (rc == 0)
rc = convert_response_ep11_xcrb(zdev, &ap_msg, xcrb); rc = convert_response_ep11_xcrb(zq, ap_msg, xcrb);
} else } else
/* Signal pending. */ /* Signal pending. */
ap_cancel_message(zdev->ap_dev, &ap_msg); ap_cancel_message(zq->queue, ap_msg);
out_free: kzfree(ap_msg->message);
kzfree(ap_msg.message); kzfree(ap_msg->private);
return rc; return rc;
} }
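Editor's note: the EP11 hunk above patches the usage domain into the CPRB payload, which requires decoding the payload length format first. As a standalone illustration, the sketch below reproduces the same decision on the pld_lenfmt byte: bit 0x80 selects the extended formats, and the low two bits choose how many extra length bytes sit between pld_lenfmt and the function/domain header. It is a reading aid for the hunk, not driver code.

/* Standalone model of the pld_lenfmt handling in the EP11 send path above. */
#include <stdio.h>

static int sketch_lenfmt_offset(unsigned char pld_lenfmt)
{
	if (pld_lenfmt & 0x80) {		/* extended length format 2 or 3 */
		switch (pld_lenfmt & 0x03) {
		case 1:
			return 2;
		case 2:
			return 3;
		default:
			return -1;		/* -EINVAL in the driver */
		}
	}
	return 1;				/* length format 1 */
}

int main(void)
{
	unsigned char samples[] = { 0x30, 0x81, 0x82, 0x83 };

	for (unsigned int i = 0; i < sizeof(samples); i++)
		printf("pld_lenfmt=0x%02x -> header offset %d\n",
		       samples[i], sketch_lenfmt_offset(samples[i]));
	return 0;
}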
unsigned int get_rng_fc(struct ap_message *ap_msg, int *func_code,
unsigned int *domain)
{
struct response_type resp_type = {
.type = PCIXCC_RESPONSE_TYPE_XCRB,
};
ap_init_message(ap_msg);
ap_msg->message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
if (!ap_msg->message)
return -ENOMEM;
ap_msg->receive = zcrypt_msgtype6_receive;
ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
ap_msg->private = kmalloc(sizeof(resp_type), GFP_KERNEL);
if (!ap_msg->private) {
kzfree(ap_msg->message);
return -ENOMEM;
}
memcpy(ap_msg->private, &resp_type, sizeof(resp_type));
rng_type6CPRB_msgX(ap_msg, ZCRYPT_RNG_BUFFER_SIZE, domain);
*func_code = HWRNG;
return 0;
}
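Editor's note: get_rng_fc() above reports the domain that was compiled into the CPRB through its out-parameter, while the per-queue send path (zcrypt_msgtype6_rng() below) overwrites cprbx.domain with the domain of the queue that was actually selected. A tiny standalone model of that hand-off, with hypothetical sketch_* names:

/* Model of the default-domain vs. chosen-queue-domain hand-off. */
#include <stdio.h>

struct sketch_cprb { unsigned short domain; };

static void sketch_prepare_rng(struct sketch_cprb *cprb, unsigned int *domain)
{
	cprb->domain = 6;	/* stand-in for the compiled-in default domain */
	*domain = cprb->domain;
}

int main(void)
{
	struct sketch_cprb cprb;
	unsigned int domain;
	unsigned short chosen_queue = 11;	/* AP_QID_QUEUE() of the picked queue */

	sketch_prepare_rng(&cprb, &domain);
	cprb.domain = chosen_queue;		/* as zcrypt_msgtype6_rng() does */
	printf("default domain %u, request sent to domain %u\n",
	       domain, cprb.domain);
	return 0;
}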
/** /**
* The request distributor calls this function if it picked the PCIXCC/CEX2C * The request distributor calls this function if it picked the PCIXCC/CEX2C
* device to generate random data. * device to generate random data.
* @zdev: pointer to zcrypt_device structure that identifies the * @zq: pointer to zcrypt_queue structure that identifies the
* PCIXCC/CEX2C device to the request distributor * PCIXCC/CEX2C device to the request distributor
* @buffer: pointer to a memory page to return random data * @buffer: pointer to a memory page to return random data
*/ */
static long zcrypt_msgtype6_rng(struct zcrypt_queue *zq,
static long zcrypt_msgtype6_rng(struct zcrypt_device *zdev, char *buffer, struct ap_message *ap_msg)
char *buffer)
{ {
struct ap_message ap_msg; struct {
struct response_type resp_type = { struct type6_hdr hdr;
.type = PCIXCC_RESPONSE_TYPE_XCRB, struct CPRBX cprbx;
}; char function_code[2];
short int rule_length;
char rule[8];
short int verb_length;
short int key_length;
} __packed * msg = ap_msg->message;
struct response_type *rtype = (struct response_type *)(ap_msg->private);
int rc; int rc;
ap_init_message(&ap_msg); msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid);
ap_msg.message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
if (!ap_msg.message) init_completion(&rtype->work);
return -ENOMEM; ap_queue_message(zq->queue, ap_msg);
ap_msg.receive = zcrypt_msgtype6_receive; rc = wait_for_completion_interruptible(&rtype->work);
ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
ap_msg.private = &resp_type;
rng_type6CPRB_msgX(zdev->ap_dev, &ap_msg, ZCRYPT_RNG_BUFFER_SIZE);
init_completion(&resp_type.work);
ap_queue_message(zdev->ap_dev, &ap_msg);
rc = wait_for_completion_interruptible(&resp_type.work);
if (rc == 0) { if (rc == 0) {
rc = ap_msg.rc; rc = ap_msg->rc;
if (rc == 0) if (rc == 0)
rc = convert_response_rng(zdev, &ap_msg, buffer); rc = convert_response_rng(zq, ap_msg, buffer);
} else } else
/* Signal pending. */ /* Signal pending. */
ap_cancel_message(zdev->ap_dev, &ap_msg); ap_cancel_message(zq->queue, ap_msg);
kfree(ap_msg.message);
kzfree(ap_msg->message);
kzfree(ap_msg->private);
return rc; return rc;
} }
...@@ -1145,12 +1361,11 @@ static struct zcrypt_ops zcrypt_msgtype6_ep11_ops = { ...@@ -1145,12 +1361,11 @@ static struct zcrypt_ops zcrypt_msgtype6_ep11_ops = {
.send_ep11_cprb = zcrypt_msgtype6_send_ep11_cprb, .send_ep11_cprb = zcrypt_msgtype6_send_ep11_cprb,
}; };
int __init zcrypt_msgtype6_init(void) void __init zcrypt_msgtype6_init(void)
{ {
zcrypt_msgtype_register(&zcrypt_msgtype6_norng_ops); zcrypt_msgtype_register(&zcrypt_msgtype6_norng_ops);
zcrypt_msgtype_register(&zcrypt_msgtype6_ops); zcrypt_msgtype_register(&zcrypt_msgtype6_ops);
zcrypt_msgtype_register(&zcrypt_msgtype6_ep11_ops); zcrypt_msgtype_register(&zcrypt_msgtype6_ep11_ops);
return 0;
} }
void __exit zcrypt_msgtype6_exit(void) void __exit zcrypt_msgtype6_exit(void)
...@@ -1159,6 +1374,3 @@ void __exit zcrypt_msgtype6_exit(void) ...@@ -1159,6 +1374,3 @@ void __exit zcrypt_msgtype6_exit(void)
zcrypt_msgtype_unregister(&zcrypt_msgtype6_ops); zcrypt_msgtype_unregister(&zcrypt_msgtype6_ops);
zcrypt_msgtype_unregister(&zcrypt_msgtype6_ep11_ops); zcrypt_msgtype_unregister(&zcrypt_msgtype6_ep11_ops);
} }
module_init(zcrypt_msgtype6_init);
module_exit(zcrypt_msgtype6_exit);
...@@ -116,15 +116,28 @@ struct type86_fmt2_ext { ...@@ -116,15 +116,28 @@ struct type86_fmt2_ext {
unsigned int offset4; /* 0x00000000 */ unsigned int offset4; /* 0x00000000 */
} __packed; } __packed;
unsigned int get_cprb_fc(struct ica_xcRB *, struct ap_message *,
unsigned int *, unsigned short **);
unsigned int get_ep11cprb_fc(struct ep11_urb *, struct ap_message *,
unsigned int *);
unsigned int get_rng_fc(struct ap_message *, int *, unsigned int *);
#define LOW 10
#define MEDIUM 100
#define HIGH 500
int speed_idx_cca(int);
int speed_idx_ep11(int);
/** /**
* Prepare a type6 CPRB message for random number generation * Prepare a type6 CPRB message for random number generation
* *
* @ap_dev: AP device pointer * @ap_dev: AP device pointer
* @ap_msg: pointer to AP message * @ap_msg: pointer to AP message
*/ */
static inline void rng_type6CPRB_msgX(struct ap_device *ap_dev, static inline void rng_type6CPRB_msgX(struct ap_message *ap_msg,
struct ap_message *ap_msg, unsigned int random_number_length,
unsigned random_number_length) unsigned int *domain)
{ {
struct { struct {
struct type6_hdr hdr; struct type6_hdr hdr;
...@@ -156,16 +169,16 @@ static inline void rng_type6CPRB_msgX(struct ap_device *ap_dev, ...@@ -156,16 +169,16 @@ static inline void rng_type6CPRB_msgX(struct ap_device *ap_dev,
msg->hdr.FromCardLen2 = random_number_length, msg->hdr.FromCardLen2 = random_number_length,
msg->cprbx = local_cprbx; msg->cprbx = local_cprbx;
msg->cprbx.rpl_datal = random_number_length, msg->cprbx.rpl_datal = random_number_length,
msg->cprbx.domain = AP_QID_QUEUE(ap_dev->qid);
memcpy(msg->function_code, msg->hdr.function_code, 0x02); memcpy(msg->function_code, msg->hdr.function_code, 0x02);
msg->rule_length = 0x0a; msg->rule_length = 0x0a;
memcpy(msg->rule, "RANDOM ", 8); memcpy(msg->rule, "RANDOM ", 8);
msg->verb_length = 0x02; msg->verb_length = 0x02;
msg->key_length = 0x02; msg->key_length = 0x02;
ap_msg->length = sizeof(*msg); ap_msg->length = sizeof(*msg);
*domain = (unsigned short)msg->cprbx.domain;
} }
int zcrypt_msgtype6_init(void); void zcrypt_msgtype6_init(void);
void zcrypt_msgtype6_exit(void); void zcrypt_msgtype6_exit(void);
#endif /* _ZCRYPT_MSGTYPE6_H_ */ #endif /* _ZCRYPT_MSGTYPE6_H_ */
...@@ -32,6 +32,7 @@ ...@@ -32,6 +32,7 @@
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/atomic.h> #include <linux/atomic.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <linux/mod_devicetable.h>
#include "ap_bus.h" #include "ap_bus.h"
#include "zcrypt_api.h" #include "zcrypt_api.h"
...@@ -46,11 +47,6 @@ ...@@ -46,11 +47,6 @@
#define CEX3C_MIN_MOD_SIZE PCIXCC_MIN_MOD_SIZE #define CEX3C_MIN_MOD_SIZE PCIXCC_MIN_MOD_SIZE
#define CEX3C_MAX_MOD_SIZE 512 /* 4096 bits */ #define CEX3C_MAX_MOD_SIZE 512 /* 4096 bits */
#define PCIXCC_MCL2_SPEED_RATING 7870
#define PCIXCC_MCL3_SPEED_RATING 7870
#define CEX2C_SPEED_RATING 7000
#define CEX3C_SPEED_RATING 6500
#define PCIXCC_MAX_ICA_MESSAGE_SIZE 0x77c /* max size type6 v2 crt message */ #define PCIXCC_MAX_ICA_MESSAGE_SIZE 0x77c /* max size type6 v2 crt message */
#define PCIXCC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply */ #define PCIXCC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply */
...@@ -67,142 +63,34 @@ struct response_type { ...@@ -67,142 +63,34 @@ struct response_type {
#define PCIXCC_RESPONSE_TYPE_ICA 0 #define PCIXCC_RESPONSE_TYPE_ICA 0
#define PCIXCC_RESPONSE_TYPE_XCRB 1 #define PCIXCC_RESPONSE_TYPE_XCRB 1
static struct ap_device_id zcrypt_pcixcc_ids[] = {
{ AP_DEVICE(AP_DEVICE_TYPE_PCIXCC) },
{ AP_DEVICE(AP_DEVICE_TYPE_CEX2C) },
{ AP_DEVICE(AP_DEVICE_TYPE_CEX3C) },
{ /* end of list */ },
};
MODULE_DEVICE_TABLE(ap, zcrypt_pcixcc_ids);
MODULE_AUTHOR("IBM Corporation"); MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("PCIXCC Cryptographic Coprocessor device driver, " \ MODULE_DESCRIPTION("PCIXCC Cryptographic Coprocessor device driver, " \
"Copyright IBM Corp. 2001, 2012"); "Copyright IBM Corp. 2001, 2012");
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
static int zcrypt_pcixcc_probe(struct ap_device *ap_dev); static struct ap_device_id zcrypt_pcixcc_card_ids[] = {
static void zcrypt_pcixcc_remove(struct ap_device *ap_dev); { .dev_type = AP_DEVICE_TYPE_PCIXCC,
.match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
static struct ap_driver zcrypt_pcixcc_driver = { { .dev_type = AP_DEVICE_TYPE_CEX2C,
.probe = zcrypt_pcixcc_probe, .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
.remove = zcrypt_pcixcc_remove, { .dev_type = AP_DEVICE_TYPE_CEX3C,
.ids = zcrypt_pcixcc_ids, .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
.request_timeout = PCIXCC_CLEANUP_TIME, { /* end of list */ },
}; };
/** MODULE_DEVICE_TABLE(ap, zcrypt_pcixcc_card_ids);
* Micro-code detection function. It sends a message to a pcixcc card

* to find out the microcode level.
* @ap_dev: pointer to the AP device.
*/
static int zcrypt_pcixcc_mcl(struct ap_device *ap_dev)
{
static unsigned char msg[] = {
0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x43,0x41,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x00,
0x00,0x00,0x01,0xC4,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x07,0x24,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0xDC,0x02,0x00,0x00,0x00,0x54,0x32,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xE8,
0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x24,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x0A,
0x4D,0x52,0x50,0x20,0x20,0x20,0x20,0x20,
0x00,0x42,0x00,0x01,0x02,0x03,0x04,0x05,
0x06,0x07,0x08,0x09,0x0A,0x0B,0x0C,0x0D,
0x0E,0x0F,0x00,0x11,0x22,0x33,0x44,0x55,
0x66,0x77,0x88,0x99,0xAA,0xBB,0xCC,0xDD,
0xEE,0xFF,0xFF,0xEE,0xDD,0xCC,0xBB,0xAA,
0x99,0x88,0x77,0x66,0x55,0x44,0x33,0x22,
0x11,0x00,0x01,0x23,0x45,0x67,0x89,0xAB,
0xCD,0xEF,0xFE,0xDC,0xBA,0x98,0x76,0x54,
0x32,0x10,0x00,0x9A,0x00,0x98,0x00,0x00,
0x1E,0x00,0x00,0x94,0x00,0x00,0x00,0x00,
0x04,0x00,0x00,0x8C,0x00,0x00,0x00,0x40,
0x02,0x00,0x00,0x40,0xBA,0xE8,0x23,0x3C,
0x75,0xF3,0x91,0x61,0xD6,0x73,0x39,0xCF,
0x7B,0x6D,0x8E,0x61,0x97,0x63,0x9E,0xD9,
0x60,0x55,0xD6,0xC7,0xEF,0xF8,0x1E,0x63,
0x95,0x17,0xCC,0x28,0x45,0x60,0x11,0xC5,
0xC4,0x4E,0x66,0xC6,0xE6,0xC3,0xDE,0x8A,
0x19,0x30,0xCF,0x0E,0xD7,0xAA,0xDB,0x01,
0xD8,0x00,0xBB,0x8F,0x39,0x9F,0x64,0x28,
0xF5,0x7A,0x77,0x49,0xCC,0x6B,0xA3,0x91,
0x97,0x70,0xE7,0x60,0x1E,0x39,0xE1,0xE5,
0x33,0xE1,0x15,0x63,0x69,0x08,0x80,0x4C,
0x67,0xC4,0x41,0x8F,0x48,0xDF,0x26,0x98,
0xF1,0xD5,0x8D,0x88,0xD9,0x6A,0xA4,0x96,
0xC5,0x84,0xD9,0x30,0x49,0x67,0x7D,0x19,
0xB1,0xB3,0x45,0x4D,0xB2,0x53,0x9A,0x47,
0x3C,0x7C,0x55,0xBF,0xCC,0x85,0x00,0x36,
0xF1,0x3D,0x93,0x53
};
unsigned long long psmid;
struct CPRBX *cprbx;
char *reply;
int rc, i;
reply = (void *) get_zeroed_page(GFP_KERNEL);
if (!reply)
return -ENOMEM;
rc = ap_send(ap_dev->qid, 0x0102030405060708ULL, msg, sizeof(msg)); static struct ap_device_id zcrypt_pcixcc_queue_ids[] = {
if (rc) { .dev_type = AP_DEVICE_TYPE_PCIXCC,
goto out_free; .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
{ .dev_type = AP_DEVICE_TYPE_CEX2C,
/* Wait for the test message to complete. */ .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
for (i = 0; i < 6; i++) { { .dev_type = AP_DEVICE_TYPE_CEX3C,
msleep(300); .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
rc = ap_recv(ap_dev->qid, &psmid, reply, 4096); { /* end of list */ },
if (rc == 0 && psmid == 0x0102030405060708ULL) };
break;
}
if (i >= 6) {
/* Got no answer. */
rc = -ENODEV;
goto out_free;
}
cprbx = (struct CPRBX *) (reply + 48); MODULE_DEVICE_TABLE(ap, zcrypt_pcixcc_queue_ids);
if (cprbx->ccp_rtcode == 8 && cprbx->ccp_rscode == 33)
rc = ZCRYPT_PCIXCC_MCL2;
else
rc = ZCRYPT_PCIXCC_MCL3;
out_free:
free_page((unsigned long) reply);
return rc;
}
/** /**
* Large random number detection function. It sends a message to a pcixcc * Large random number detection function. It sends a message to a pcixcc
...@@ -211,15 +99,25 @@ static int zcrypt_pcixcc_mcl(struct ap_device *ap_dev) ...@@ -211,15 +99,25 @@ static int zcrypt_pcixcc_mcl(struct ap_device *ap_dev)
* *
* Returns 1 if large random numbers are supported, 0 if not and < 0 on error. * Returns 1 if large random numbers are supported, 0 if not and < 0 on error.
*/ */
static int zcrypt_pcixcc_rng_supported(struct ap_device *ap_dev) static int zcrypt_pcixcc_rng_supported(struct ap_queue *aq)
{ {
struct ap_message ap_msg; struct ap_message ap_msg;
unsigned long long psmid; unsigned long long psmid;
unsigned int domain;
struct { struct {
struct type86_hdr hdr; struct type86_hdr hdr;
struct type86_fmt2_ext fmt2; struct type86_fmt2_ext fmt2;
struct CPRBX cprbx; struct CPRBX cprbx;
} __attribute__((packed)) *reply; } __attribute__((packed)) *reply;
struct {
struct type6_hdr hdr;
struct CPRBX cprbx;
char function_code[2];
short int rule_length;
char rule[8];
short int verb_length;
short int key_length;
} __packed * msg;
int rc, i; int rc, i;
ap_init_message(&ap_msg); ap_init_message(&ap_msg);
...@@ -227,8 +125,12 @@ static int zcrypt_pcixcc_rng_supported(struct ap_device *ap_dev) ...@@ -227,8 +125,12 @@ static int zcrypt_pcixcc_rng_supported(struct ap_device *ap_dev)
if (!ap_msg.message) if (!ap_msg.message)
return -ENOMEM; return -ENOMEM;
rng_type6CPRB_msgX(ap_dev, &ap_msg, 4); rng_type6CPRB_msgX(&ap_msg, 4, &domain);
rc = ap_send(ap_dev->qid, 0x0102030405060708ULL, ap_msg.message,
msg = ap_msg.message;
msg->cprbx.domain = AP_QID_QUEUE(aq->qid);
rc = ap_send(aq->qid, 0x0102030405060708ULL, ap_msg.message,
ap_msg.length); ap_msg.length);
if (rc) if (rc)
goto out_free; goto out_free;
...@@ -236,7 +138,7 @@ static int zcrypt_pcixcc_rng_supported(struct ap_device *ap_dev) ...@@ -236,7 +138,7 @@ static int zcrypt_pcixcc_rng_supported(struct ap_device *ap_dev)
/* Wait for the test message to complete. */ /* Wait for the test message to complete. */
for (i = 0; i < 2 * HZ; i++) { for (i = 0; i < 2 * HZ; i++) {
msleep(1000 / HZ); msleep(1000 / HZ);
rc = ap_recv(ap_dev->qid, &psmid, ap_msg.message, 4096); rc = ap_recv(aq->qid, &psmid, ap_msg.message, 4096);
if (rc == 0 && psmid == 0x0102030405060708ULL) if (rc == 0 && psmid == 0x0102030405060708ULL)
break; break;
} }
...@@ -258,110 +160,168 @@ static int zcrypt_pcixcc_rng_supported(struct ap_device *ap_dev) ...@@ -258,110 +160,168 @@ static int zcrypt_pcixcc_rng_supported(struct ap_device *ap_dev)
} }
/** /**
* Probe function for PCIXCC/CEX2C cards. It always accepts the AP device * Probe function for PCIXCC/CEX2C card devices. It always accepts the
* since the bus_match already checked the hardware type. The PCIXCC * AP device since the bus_match already checked the hardware type. The
* cards come in two flavours: micro code level 2 and micro code level 3. * PCIXCC cards come in two flavours: micro code level 2 and micro code
* This is checked by sending a test message to the device. * level 3. This is checked by sending a test message to the device.
* @ap_dev: pointer to the AP device. * @ap_dev: pointer to the AP card device.
*/ */
static int zcrypt_pcixcc_probe(struct ap_device *ap_dev) static int zcrypt_pcixcc_card_probe(struct ap_device *ap_dev)
{ {
struct zcrypt_device *zdev; /*
* Normalized speed ratings per crypto adapter
* MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY
*/
static const int CEX2C_SPEED_IDX[] = {
1000, 1400, 2400, 1100, 1500, 2600, 100, 12};
static const int CEX3C_SPEED_IDX[] = {
500, 700, 1400, 550, 800, 1500, 80, 10};
struct ap_card *ac = to_ap_card(&ap_dev->device);
struct zcrypt_card *zc;
int rc = 0; int rc = 0;
zdev = zcrypt_device_alloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE); zc = zcrypt_card_alloc();
if (!zdev) if (!zc)
return -ENOMEM; return -ENOMEM;
zdev->ap_dev = ap_dev; zc->card = ac;
zdev->online = 1; ac->private = zc;
switch (ap_dev->device_type) { switch (ac->ap_dev.device_type) {
case AP_DEVICE_TYPE_PCIXCC:
rc = zcrypt_pcixcc_mcl(ap_dev);
if (rc < 0) {
zcrypt_device_free(zdev);
return rc;
}
zdev->user_space_type = rc;
if (rc == ZCRYPT_PCIXCC_MCL2) {
zdev->type_string = "PCIXCC_MCL2";
zdev->speed_rating = PCIXCC_MCL2_SPEED_RATING;
zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD;
zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
zdev->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE;
} else {
zdev->type_string = "PCIXCC_MCL3";
zdev->speed_rating = PCIXCC_MCL3_SPEED_RATING;
zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE;
zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
zdev->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE;
}
break;
case AP_DEVICE_TYPE_CEX2C: case AP_DEVICE_TYPE_CEX2C:
zdev->user_space_type = ZCRYPT_CEX2C; zc->user_space_type = ZCRYPT_CEX2C;
zdev->type_string = "CEX2C"; zc->type_string = "CEX2C";
zdev->speed_rating = CEX2C_SPEED_RATING; memcpy(zc->speed_rating, CEX2C_SPEED_IDX,
zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE; sizeof(CEX2C_SPEED_IDX));
zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE; zc->min_mod_size = PCIXCC_MIN_MOD_SIZE;
zdev->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE; zc->max_mod_size = PCIXCC_MAX_MOD_SIZE;
zc->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE;
break; break;
case AP_DEVICE_TYPE_CEX3C: case AP_DEVICE_TYPE_CEX3C:
zdev->user_space_type = ZCRYPT_CEX3C; zc->user_space_type = ZCRYPT_CEX3C;
zdev->type_string = "CEX3C"; zc->type_string = "CEX3C";
zdev->speed_rating = CEX3C_SPEED_RATING; memcpy(zc->speed_rating, CEX3C_SPEED_IDX,
zdev->min_mod_size = CEX3C_MIN_MOD_SIZE; sizeof(CEX3C_SPEED_IDX));
zdev->max_mod_size = CEX3C_MAX_MOD_SIZE; zc->min_mod_size = CEX3C_MIN_MOD_SIZE;
zdev->max_exp_bit_length = CEX3C_MAX_MOD_SIZE; zc->max_mod_size = CEX3C_MAX_MOD_SIZE;
zc->max_exp_bit_length = CEX3C_MAX_MOD_SIZE;
break; break;
default: default:
goto out_free; zcrypt_card_free(zc);
return -ENODEV;
} }
zc->online = 1;
rc = zcrypt_card_register(zc);
if (rc) {
ac->private = NULL;
zcrypt_card_free(zc);
}
return rc;
}
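Editor's note: the CEX2C_SPEED_IDX/CEX3C_SPEED_IDX arrays above give normalized per-function weights (lower is faster), and the queue probe below initializes a per-queue load counter with atomic_set(&zq->load, 0). The selection policy that combines the two lives in the zcrypt API layer and is not part of this hunk; the sketch below only illustrates one plausible weighting under that assumption, with hypothetical names.

/* Plausible (assumed) weighting of speed index vs. pending load. */
#include <stdio.h>

struct sketch_queue {
	const char *name;
	int speed_weight;	/* e.g. a CEXxC_SPEED_IDX entry: lower is faster */
	int load;		/* pending requests on the queue */
};

static const struct sketch_queue *pick_queue(const struct sketch_queue *qs, int n)
{
	const struct sketch_queue *best = NULL;

	for (int i = 0; i < n; i++) {
		long cost = (long)qs[i].speed_weight * (qs[i].load + 1);

		if (!best || cost < (long)best->speed_weight * (best->load + 1))
			best = &qs[i];
	}
	return best;
}

int main(void)
{
	struct sketch_queue qs[] = {
		{ "CEX2C 01.0004", 1100, 2 },	/* CRT_1k weight, lightly loaded */
		{ "CEX3C 02.0004",  550, 4 },	/* faster card, more pending work */
	};

	printf("picked %s\n", pick_queue(qs, 2)->name);
	return 0;
}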
rc = zcrypt_pcixcc_rng_supported(ap_dev); /**
* This is called to remove the PCIXCC/CEX2C card driver information
* if an AP card device is removed.
*/
static void zcrypt_pcixcc_card_remove(struct ap_device *ap_dev)
{
struct zcrypt_card *zc = to_ap_card(&ap_dev->device)->private;
if (zc)
zcrypt_card_unregister(zc);
}
static struct ap_driver zcrypt_pcixcc_card_driver = {
.probe = zcrypt_pcixcc_card_probe,
.remove = zcrypt_pcixcc_card_remove,
.ids = zcrypt_pcixcc_card_ids,
};
/**
* Probe function for PCIXCC/CEX2C queue devices. It always accepts the
* AP device since the bus_match already checked the hardware type. The
* PCIXCC cards come in two flavours: micro code level 2 and micro code
* level 3. This is checked by sending a test message to the device.
* @ap_dev: pointer to the AP queue device.
*/
static int zcrypt_pcixcc_queue_probe(struct ap_device *ap_dev)
{
struct ap_queue *aq = to_ap_queue(&ap_dev->device);
struct zcrypt_queue *zq;
int rc;
zq = zcrypt_queue_alloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE);
if (!zq)
return -ENOMEM;
zq->queue = aq;
zq->online = 1;
atomic_set(&zq->load, 0);
rc = zcrypt_pcixcc_rng_supported(aq);
if (rc < 0) { if (rc < 0) {
zcrypt_device_free(zdev); zcrypt_queue_free(zq);
return rc; return rc;
} }
if (rc) if (rc)
zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME, zq->ops = zcrypt_msgtype(MSGTYPE06_NAME,
MSGTYPE06_VARIANT_DEFAULT); MSGTYPE06_VARIANT_DEFAULT);
else else
zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME, zq->ops = zcrypt_msgtype(MSGTYPE06_NAME,
MSGTYPE06_VARIANT_NORNG); MSGTYPE06_VARIANT_NORNG);
ap_device_init_reply(ap_dev, &zdev->reply); ap_queue_init_reply(aq, &zq->reply);
ap_dev->private = zdev; aq->request_timeout = PCIXCC_CLEANUP_TIME,
rc = zcrypt_device_register(zdev); aq->private = zq;
if (rc) rc = zcrypt_queue_register(zq);
goto out_free; if (rc) {
return 0; aq->private = NULL;
zcrypt_queue_free(zq);
out_free: }
ap_dev->private = NULL;
zcrypt_msgtype_release(zdev->ops);
zcrypt_device_free(zdev);
return rc; return rc;
} }
/** /**
* This is called to remove the extended PCIXCC/CEX2C driver information * This is called to remove the PCIXCC/CEX2C queue driver information
* if an AP device is removed. * if an AP queue device is removed.
*/ */
static void zcrypt_pcixcc_remove(struct ap_device *ap_dev) static void zcrypt_pcixcc_queue_remove(struct ap_device *ap_dev)
{ {
struct zcrypt_device *zdev = ap_dev->private; struct ap_queue *aq = to_ap_queue(&ap_dev->device);
struct zcrypt_ops *zops = zdev->ops; struct zcrypt_queue *zq = aq->private;
zcrypt_device_unregister(zdev); ap_queue_remove(aq);
zcrypt_msgtype_release(zops); if (zq)
zcrypt_queue_unregister(zq);
} }
static struct ap_driver zcrypt_pcixcc_queue_driver = {
.probe = zcrypt_pcixcc_queue_probe,
.remove = zcrypt_pcixcc_queue_remove,
.suspend = ap_queue_suspend,
.resume = ap_queue_resume,
.ids = zcrypt_pcixcc_queue_ids,
};
int __init zcrypt_pcixcc_init(void) int __init zcrypt_pcixcc_init(void)
{ {
return ap_driver_register(&zcrypt_pcixcc_driver, THIS_MODULE, "pcixcc"); int rc;
rc = ap_driver_register(&zcrypt_pcixcc_card_driver,
THIS_MODULE, "pcixcccard");
if (rc)
return rc;
rc = ap_driver_register(&zcrypt_pcixcc_queue_driver,
THIS_MODULE, "pcixccqueue");
if (rc)
ap_driver_unregister(&zcrypt_pcixcc_card_driver);
return rc;
} }
void zcrypt_pcixcc_exit(void) void zcrypt_pcixcc_exit(void)
{ {
ap_driver_unregister(&zcrypt_pcixcc_driver); ap_driver_unregister(&zcrypt_pcixcc_queue_driver);
ap_driver_unregister(&zcrypt_pcixcc_card_driver);
} }
module_init(zcrypt_pcixcc_init); module_init(zcrypt_pcixcc_init);
......
/*
* zcrypt 2.1.0
*
* Copyright IBM Corp. 2001, 2012
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
* Cornelia Huck <cornelia.huck@de.ibm.com>
*
* Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
* Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
* Ralph Wuerthner <rwuerthn@de.ibm.com>
* MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
#include <linux/hw_random.h>
#include <linux/debugfs.h>
#include <asm/debug.h>
#include "zcrypt_debug.h"
#include "zcrypt_api.h"
#include "zcrypt_msgtype6.h"
#include "zcrypt_msgtype50.h"
/*
* Device attributes common for all crypto queue devices.
*/
static ssize_t zcrypt_queue_online_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct zcrypt_queue *zq = to_ap_queue(dev)->private;
return snprintf(buf, PAGE_SIZE, "%d\n", zq->online);
}
static ssize_t zcrypt_queue_online_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct zcrypt_queue *zq = to_ap_queue(dev)->private;
struct zcrypt_card *zc = zq->zcard;
int online;
if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1)
return -EINVAL;
if (online && !zc->online)
return -EINVAL;
zq->online = online;
ZCRYPT_DBF(DBF_INFO, "queue=%02x.%04x online=%d\n",
AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid),
online);
if (!online)
ap_flush_queue(zq->queue);
return count;
}
static DEVICE_ATTR(online, 0644, zcrypt_queue_online_show,
zcrypt_queue_online_store);
static struct attribute *zcrypt_queue_attrs[] = {
&dev_attr_online.attr,
NULL,
};
static struct attribute_group zcrypt_queue_attr_group = {
.attrs = zcrypt_queue_attrs,
};
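Editor's note: the online attribute above gives administrators a per-queue soft switch; writing 0 also flushes the queue. A minimal user-space sketch, assuming the queue device is exposed under /sys/bus/ap/devices/<card>.<domain>/ (the exact path is an assumption and must be adjusted to the system at hand):

/* Toggle a queue offline through the sysfs attribute defined above. */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/bus/ap/devices/01.0004/online"; /* assumed path */
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	fputs("0\n", f);	/* parsed by zcrypt_queue_online_store() */
	return fclose(f) ? 1 : 0;
}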
void zcrypt_queue_force_online(struct zcrypt_queue *zq, int online)
{
zq->online = online;
if (!online)
ap_flush_queue(zq->queue);
}
struct zcrypt_queue *zcrypt_queue_alloc(size_t max_response_size)
{
struct zcrypt_queue *zq;
zq = kzalloc(sizeof(struct zcrypt_queue), GFP_KERNEL);
if (!zq)
return NULL;
zq->reply.message = kmalloc(max_response_size, GFP_KERNEL);
if (!zq->reply.message)
goto out_free;
zq->reply.length = max_response_size;
INIT_LIST_HEAD(&zq->list);
kref_init(&zq->refcount);
return zq;
out_free:
kfree(zq);
return NULL;
}
EXPORT_SYMBOL(zcrypt_queue_alloc);
void zcrypt_queue_free(struct zcrypt_queue *zq)
{
kfree(zq->reply.message);
kfree(zq);
}
EXPORT_SYMBOL(zcrypt_queue_free);
static void zcrypt_queue_release(struct kref *kref)
{
struct zcrypt_queue *zq =
container_of(kref, struct zcrypt_queue, refcount);
zcrypt_queue_free(zq);
}
void zcrypt_queue_get(struct zcrypt_queue *zq)
{
kref_get(&zq->refcount);
}
EXPORT_SYMBOL(zcrypt_queue_get);
int zcrypt_queue_put(struct zcrypt_queue *zq)
{
return kref_put(&zq->refcount, zcrypt_queue_release);
}
EXPORT_SYMBOL(zcrypt_queue_put);
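Editor's note: queue objects are reference counted via kref: zcrypt_queue_alloc() starts with one reference, zcrypt_queue_get()/zcrypt_queue_put() bracket any use of a queue outside the list lock, and the release callback frees the object on the last put. The standalone model below mirrors that pairing with a plain integer standing in for struct kref; it is a sketch of the convention, not kernel code.

/* Model of the get/put pairing used by the zcrypt queue objects above. */
#include <stdio.h>
#include <stdlib.h>

struct sketch_queue {
	int refcount;
};

static struct sketch_queue *sketch_queue_alloc(void)
{
	struct sketch_queue *zq = calloc(1, sizeof(*zq));

	if (zq)
		zq->refcount = 1;	/* as kref_init() does */
	return zq;
}

static void sketch_queue_get(struct sketch_queue *zq)
{
	zq->refcount++;
}

static void sketch_queue_put(struct sketch_queue *zq)
{
	if (--zq->refcount == 0) {
		printf("last reference dropped, freeing queue\n");
		free(zq);
	}
}

int main(void)
{
	struct sketch_queue *zq = sketch_queue_alloc();

	if (!zq)
		return 1;
	sketch_queue_get(zq);	/* e.g. taken while holding the list lock */
	/* ... use the queue outside the lock ... */
	sketch_queue_put(zq);	/* drop the traversal reference */
	sketch_queue_put(zq);	/* drop the initial reference: frees the object */
	return 0;
}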
/**
* zcrypt_queue_register() - Register a crypto queue device.
* @zq: Pointer to a crypto queue device
*
* Register a crypto queue device. Returns 0 if successful.
*/
int zcrypt_queue_register(struct zcrypt_queue *zq)
{
struct zcrypt_card *zc;
int rc;
spin_lock(&zcrypt_list_lock);
zc = zq->queue->card->private;
zcrypt_card_get(zc);
zq->zcard = zc;
zq->online = 1; /* New devices are online by default. */
ZCRYPT_DBF(DBF_INFO, "queue=%02x.%04x register online=1\n",
AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid));
list_add_tail(&zq->list, &zc->zqueues);
zcrypt_device_count++;
spin_unlock(&zcrypt_list_lock);
rc = sysfs_create_group(&zq->queue->ap_dev.device.kobj,
&zcrypt_queue_attr_group);
if (rc)
goto out;
get_device(&zq->queue->ap_dev.device);
if (zq->ops->rng) {
rc = zcrypt_rng_device_add();
if (rc)
goto out_unregister;
}
return 0;
out_unregister:
sysfs_remove_group(&zq->queue->ap_dev.device.kobj,
&zcrypt_queue_attr_group);
put_device(&zq->queue->ap_dev.device);
out:
spin_lock(&zcrypt_list_lock);
list_del_init(&zq->list);
spin_unlock(&zcrypt_list_lock);
zcrypt_card_put(zc);
return rc;
}
EXPORT_SYMBOL(zcrypt_queue_register);
/**
* zcrypt_queue_unregister(): Unregister a crypto queue device.
* @zq: Pointer to crypto queue device
*
* Unregister a crypto queue device.
*/
void zcrypt_queue_unregister(struct zcrypt_queue *zq)
{
struct zcrypt_card *zc;
ZCRYPT_DBF(DBF_INFO, "queue=%02x.%04x unregister\n",
AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid));
zc = zq->zcard;
spin_lock(&zcrypt_list_lock);
list_del_init(&zq->list);
zcrypt_device_count--;
spin_unlock(&zcrypt_list_lock);
zcrypt_card_put(zc);
if (zq->ops->rng)
zcrypt_rng_device_remove();
sysfs_remove_group(&zq->queue->ap_dev.device.kobj,
&zcrypt_queue_attr_group);
put_device(&zq->queue->ap_dev.device);
zcrypt_queue_put(zq);
}
EXPORT_SYMBOL(zcrypt_queue_unregister);
...@@ -175,7 +175,8 @@ struct ap_device_id { ...@@ -175,7 +175,8 @@ struct ap_device_id {
kernel_ulong_t driver_info; kernel_ulong_t driver_info;
}; };
#define AP_DEVICE_ID_MATCH_DEVICE_TYPE 0x01 #define AP_DEVICE_ID_MATCH_CARD_TYPE 0x01
#define AP_DEVICE_ID_MATCH_QUEUE_TYPE 0x02
/* s390 css bus devices (subchannels) */ /* s390 css bus devices (subchannels) */
struct css_device_id { struct css_device_id {
......