Commit b7f80afa authored by Linus Torvalds

Merge branch 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6

* 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6: (71 commits)
  [S390] sclp_tty: Fix scheduling while atomic bug.
  [S390] sclp_tty: remove ioctl interface.
  [S390] Remove P390 support.
  [S390] Cleanup vmcp printk messages.
  [S390] Cleanup lcs printk messages.
  [S390] Cleanup kprobes printk messages.
  [S390] Cleanup vmwatch printk messages.
  [S390] Cleanup dcssblk printk messages.
  [S390] Cleanup zfcp dumper printk messages.
  [S390] Cleanup vmlogrdr printk messages.
  [S390] Cleanup s390 debug feature print messages.
  [S390] Cleanup monreader printk messages.
  [S390] Cleanup appldata printk messages.
  [S390] Cleanup smsgiucv printk messages.
  [S390] Cleanup cpacf printk messages.
  [S390] Cleanup qeth print messages.
  [S390] Cleanup netiucv printk messages.
  [S390] Cleanup iucv printk messages.
  [S390] Cleanup sclp printk messages.
  [S390] Cleanup zcrypt printk messages.
  ...
parents 42c59208 5e34599f
What: /sys/bus/css/devices/.../type
Date: March 2008
Contact: Cornelia Huck <cornelia.huck@de.ibm.com>
linux-s390@vger.kernel.org
Description: Contains the subchannel type, as reported by the hardware.
This attribute is present for all subchannel types.
What: /sys/bus/css/devices/.../modalias
Date: March 2008
Contact: Cornelia Huck <cornelia.huck@de.ibm.com>
linux-s390@vger.kernel.org
Description: Contains the module alias as reported with uevents.
It is of the format css:t<type> and present for all
subchannel types.
What: /sys/bus/css/drivers/io_subchannel/.../chpids
Date: December 2002
Contact: Cornelia Huck <cornelia.huck@de.ibm.com>
linux-s390@vger.kernel.org
Description: Contains the ids of the channel paths used by this
subchannel, as reported by the channel subsystem
during subchannel recognition.
Note: This is an I/O-subchannel specific attribute.
Users: s390-tools, HAL
What: /sys/bus/css/drivers/io_subchannel/.../pimpampom
Date: December 2002
Contact: Cornelia Huck <cornelia.huck@de.ibm.com>
linux-s390@vger.kernel.org
Description: Contains the PIM/PAM/POM values, as reported by the
channel subsystem when last queried by the common I/O
layer (this implies that this attribute is not necessarily
in sync with the values current in the channel subsystem).
Note: This is an I/O-subchannel specific attribute.
Users: s390-tools, HAL
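The attributes documented above are plain text files under sysfs and can be read with ordinary file I/O. A minimal user-space sketch follows; the subchannel bus ID 0.0.0000 is an illustrative assumption, not something taken from this commit:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical subchannel; substitute a bus ID that exists on the system. */
	const char *path = "/sys/bus/css/devices/0.0.0000/type";
	char buf[64];
	ssize_t n;
	int fd;

	fd = open(path, O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("subchannel type: %s", buf);
	}
	close(fd);
	return 0;
}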
...@@ -117,6 +117,7 @@ Code Seq# Include File Comments ...@@ -117,6 +117,7 @@ Code Seq# Include File Comments
<mailto:natalia@nikhefk.nikhef.nl> <mailto:natalia@nikhefk.nikhef.nl>
'c' 00-7F linux/comstats.h conflict! 'c' 00-7F linux/comstats.h conflict!
'c' 00-7F linux/coda.h conflict! 'c' 00-7F linux/coda.h conflict!
'c' 80-9F asm-s390/chsc.h
'd' 00-FF linux/char/drm/drm.h conflict! 'd' 00-FF linux/char/drm/drm.h conflict!
'd' 00-DF linux/video_decoder.h conflict! 'd' 00-DF linux/video_decoder.h conflict!
'd' F0-FF linux/digi1.h 'd' F0-FF linux/digi1.h
......
...@@ -146,6 +146,7 @@ config MATHEMU ...@@ -146,6 +146,7 @@ config MATHEMU
config COMPAT config COMPAT
bool "Kernel support for 31 bit emulation" bool "Kernel support for 31 bit emulation"
depends on 64BIT depends on 64BIT
select COMPAT_BINFMT_ELF
help help
Select this option if you want to enable your system kernel to Select this option if you want to enable your system kernel to
handle system-calls from ELF binaries for 31 bit ESA. This option handle system-calls from ELF binaries for 31 bit ESA. This option
...@@ -312,6 +313,10 @@ config ARCH_SPARSEMEM_DEFAULT ...@@ -312,6 +313,10 @@ config ARCH_SPARSEMEM_DEFAULT
config ARCH_SELECT_MEMORY_MODEL config ARCH_SELECT_MEMORY_MODEL
def_bool y def_bool y
config ARCH_ENABLE_MEMORY_HOTPLUG
def_bool y
depends on SPARSEMEM
source "mm/Kconfig" source "mm/Kconfig"
comment "I/O subsystem configuration" comment "I/O subsystem configuration"
...@@ -344,6 +349,22 @@ config QDIO_DEBUG ...@@ -344,6 +349,22 @@ config QDIO_DEBUG
If unsure, say N. If unsure, say N.
config CHSC_SCH
tristate "Support for CHSC subchannels"
help
This driver allows usage of CHSC subchannels. A CHSC subchannel
is usually present on LPAR only.
The driver creates a device /dev/chsc, which may be used to
obtain I/O configuration information about the machine and
to issue asynchronous chsc commands (DANGEROUS).
You will usually only want to use this interface on a special
LPAR designated for system management.
To compile this driver as a module, choose M here: the
module will be called chsc_sch.
If unsure, say N.
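As a rough illustration of the interface described in this help text, the sketch below merely opens the character device; the actual request codes live in asm-s390/chsc.h (registered as 'c' 80-9F in the ioctl number table above) and are deliberately not reproduced here:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/chsc", O_RDWR);

	if (fd < 0) {
		perror("open /dev/chsc");
		return 1;
	}
	/*
	 * Real users would issue ioctl() calls here with the request codes
	 * defined in asm-s390/chsc.h to query the I/O configuration; those
	 * codes are not shown to avoid guessing at the ABI.
	 */
	close(fd);
	return 0;
}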
comment "Misc" comment "Misc"
config IPL config IPL
......
...@@ -3,13 +3,11 @@ ...@@ -3,13 +3,11 @@
* *
* Definitions and interface for Linux - z/VM Monitor Stream. * Definitions and interface for Linux - z/VM Monitor Stream.
* *
* Copyright (C) 2003,2006 IBM Corporation, IBM Deutschland Entwicklung GmbH. * Copyright IBM Corp. 2003, 2008
* *
* Author: Gerald Schaefer <gerald.schaefer@de.ibm.com> * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
*/ */
//#define APPLDATA_DEBUG /* Debug messages on/off */
#define APPLDATA_MAX_REC_SIZE 4024 /* Maximum size of the */ #define APPLDATA_MAX_REC_SIZE 4024 /* Maximum size of the */
/* data buffer */ /* data buffer */
#define APPLDATA_MAX_PROCS 100 #define APPLDATA_MAX_PROCS 100
...@@ -32,12 +30,6 @@ ...@@ -32,12 +30,6 @@
#define P_ERROR(x...) printk(KERN_ERR MY_PRINT_NAME " error: " x) #define P_ERROR(x...) printk(KERN_ERR MY_PRINT_NAME " error: " x)
#define P_WARNING(x...) printk(KERN_WARNING MY_PRINT_NAME " status: " x) #define P_WARNING(x...) printk(KERN_WARNING MY_PRINT_NAME " status: " x)
#ifdef APPLDATA_DEBUG
#define P_DEBUG(x...) printk(KERN_DEBUG MY_PRINT_NAME " debug: " x)
#else
#define P_DEBUG(x...) do {} while (0)
#endif
struct appldata_ops { struct appldata_ops {
struct list_head list; struct list_head list;
struct ctl_table_header *sysctl_header; struct ctl_table_header *sysctl_header;
......
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
* Exports appldata_register_ops() and appldata_unregister_ops() for the * Exports appldata_register_ops() and appldata_unregister_ops() for the
* data gathering modules. * data gathering modules.
* *
* Copyright (C) 2003,2006 IBM Corporation, IBM Deutschland Entwicklung GmbH. * Copyright IBM Corp. 2003, 2008
* *
* Author: Gerald Schaefer <gerald.schaefer@de.ibm.com> * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
*/ */
...@@ -108,9 +108,6 @@ static LIST_HEAD(appldata_ops_list); ...@@ -108,9 +108,6 @@ static LIST_HEAD(appldata_ops_list);
*/ */
static void appldata_timer_function(unsigned long data) static void appldata_timer_function(unsigned long data)
{ {
P_DEBUG(" -= Timer =-\n");
P_DEBUG("CPU: %i, expire_count: %i\n", smp_processor_id(),
atomic_read(&appldata_expire_count));
if (atomic_dec_and_test(&appldata_expire_count)) { if (atomic_dec_and_test(&appldata_expire_count)) {
atomic_set(&appldata_expire_count, num_online_cpus()); atomic_set(&appldata_expire_count, num_online_cpus());
queue_work(appldata_wq, (struct work_struct *) data); queue_work(appldata_wq, (struct work_struct *) data);
...@@ -128,14 +125,11 @@ static void appldata_work_fn(struct work_struct *work) ...@@ -128,14 +125,11 @@ static void appldata_work_fn(struct work_struct *work)
struct appldata_ops *ops; struct appldata_ops *ops;
int i; int i;
P_DEBUG(" -= Work Queue =-\n");
i = 0; i = 0;
get_online_cpus(); get_online_cpus();
spin_lock(&appldata_ops_lock); spin_lock(&appldata_ops_lock);
list_for_each(lh, &appldata_ops_list) { list_for_each(lh, &appldata_ops_list) {
ops = list_entry(lh, struct appldata_ops, list); ops = list_entry(lh, struct appldata_ops, list);
P_DEBUG("list_for_each loop: %i) active = %u, name = %s\n",
++i, ops->active, ops->name);
if (ops->active == 1) { if (ops->active == 1) {
ops->callback(ops->data); ops->callback(ops->data);
} }
...@@ -212,7 +206,6 @@ __appldata_vtimer_setup(int cmd) ...@@ -212,7 +206,6 @@ __appldata_vtimer_setup(int cmd)
0, 1); 0, 1);
} }
appldata_timer_active = 1; appldata_timer_active = 1;
P_INFO("Monitoring timer started.\n");
break; break;
case APPLDATA_DEL_TIMER: case APPLDATA_DEL_TIMER:
for_each_online_cpu(i) for_each_online_cpu(i)
...@@ -221,7 +214,6 @@ __appldata_vtimer_setup(int cmd) ...@@ -221,7 +214,6 @@ __appldata_vtimer_setup(int cmd)
break; break;
appldata_timer_active = 0; appldata_timer_active = 0;
atomic_set(&appldata_expire_count, num_online_cpus()); atomic_set(&appldata_expire_count, num_online_cpus());
P_INFO("Monitoring timer stopped.\n");
break; break;
case APPLDATA_MOD_TIMER: case APPLDATA_MOD_TIMER:
per_cpu_interval = (u64) (appldata_interval*1000 / per_cpu_interval = (u64) (appldata_interval*1000 /
...@@ -313,10 +305,8 @@ appldata_interval_handler(ctl_table *ctl, int write, struct file *filp, ...@@ -313,10 +305,8 @@ appldata_interval_handler(ctl_table *ctl, int write, struct file *filp,
} }
interval = 0; interval = 0;
sscanf(buf, "%i", &interval); sscanf(buf, "%i", &interval);
if (interval <= 0) { if (interval <= 0)
P_ERROR("Timer CPU interval has to be > 0!\n");
return -EINVAL; return -EINVAL;
}
get_online_cpus(); get_online_cpus();
spin_lock(&appldata_timer_lock); spin_lock(&appldata_timer_lock);
...@@ -324,9 +314,6 @@ appldata_interval_handler(ctl_table *ctl, int write, struct file *filp, ...@@ -324,9 +314,6 @@ appldata_interval_handler(ctl_table *ctl, int write, struct file *filp,
__appldata_vtimer_setup(APPLDATA_MOD_TIMER); __appldata_vtimer_setup(APPLDATA_MOD_TIMER);
spin_unlock(&appldata_timer_lock); spin_unlock(&appldata_timer_lock);
put_online_cpus(); put_online_cpus();
P_INFO("Monitoring CPU interval set to %u milliseconds.\n",
interval);
out: out:
*lenp = len; *lenp = len;
*ppos += len; *ppos += len;
...@@ -406,23 +393,16 @@ appldata_generic_handler(ctl_table *ctl, int write, struct file *filp, ...@@ -406,23 +393,16 @@ appldata_generic_handler(ctl_table *ctl, int write, struct file *filp,
P_ERROR("START DIAG 0xDC for %s failed, " P_ERROR("START DIAG 0xDC for %s failed, "
"return code: %d\n", ops->name, rc); "return code: %d\n", ops->name, rc);
module_put(ops->owner); module_put(ops->owner);
} else { } else
P_INFO("Monitoring %s data enabled, "
"DIAG 0xDC started.\n", ops->name);
ops->active = 1; ops->active = 1;
}
} else if ((buf[0] == '0') && (ops->active == 1)) { } else if ((buf[0] == '0') && (ops->active == 1)) {
ops->active = 0; ops->active = 0;
rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC, rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC,
(unsigned long) ops->data, ops->size, (unsigned long) ops->data, ops->size,
ops->mod_lvl); ops->mod_lvl);
if (rc != 0) { if (rc != 0)
P_ERROR("STOP DIAG 0xDC for %s failed, " P_ERROR("STOP DIAG 0xDC for %s failed, "
"return code: %d\n", ops->name, rc); "return code: %d\n", ops->name, rc);
} else {
P_INFO("Monitoring %s data disabled, "
"DIAG 0xDC stopped.\n", ops->name);
}
module_put(ops->owner); module_put(ops->owner);
} }
spin_unlock(&appldata_ops_lock); spin_unlock(&appldata_ops_lock);
...@@ -468,7 +448,6 @@ int appldata_register_ops(struct appldata_ops *ops) ...@@ -468,7 +448,6 @@ int appldata_register_ops(struct appldata_ops *ops)
ops->sysctl_header = register_sysctl_table(ops->ctl_table); ops->sysctl_header = register_sysctl_table(ops->ctl_table);
if (!ops->sysctl_header) if (!ops->sysctl_header)
goto out; goto out;
P_INFO("%s-ops registered!\n", ops->name);
return 0; return 0;
out: out:
spin_lock(&appldata_ops_lock); spin_lock(&appldata_ops_lock);
...@@ -490,7 +469,6 @@ void appldata_unregister_ops(struct appldata_ops *ops) ...@@ -490,7 +469,6 @@ void appldata_unregister_ops(struct appldata_ops *ops)
spin_unlock(&appldata_ops_lock); spin_unlock(&appldata_ops_lock);
unregister_sysctl_table(ops->sysctl_header); unregister_sysctl_table(ops->sysctl_header);
kfree(ops->ctl_table); kfree(ops->ctl_table);
P_INFO("%s-ops unregistered!\n", ops->name);
} }
/********************** module-ops management <END> **************************/ /********************** module-ops management <END> **************************/
...@@ -553,14 +531,9 @@ static int __init appldata_init(void) ...@@ -553,14 +531,9 @@ static int __init appldata_init(void)
{ {
int i; int i;
P_DEBUG("sizeof(parameter_list) = %lu\n",
sizeof(struct appldata_parameter_list));
appldata_wq = create_singlethread_workqueue("appldata"); appldata_wq = create_singlethread_workqueue("appldata");
if (!appldata_wq) { if (!appldata_wq)
P_ERROR("Could not create work queue\n");
return -ENOMEM; return -ENOMEM;
}
get_online_cpus(); get_online_cpus();
for_each_online_cpu(i) for_each_online_cpu(i)
...@@ -571,8 +544,6 @@ static int __init appldata_init(void) ...@@ -571,8 +544,6 @@ static int __init appldata_init(void)
register_hotcpu_notifier(&appldata_nb); register_hotcpu_notifier(&appldata_nb);
appldata_sysctl_header = register_sysctl_table(appldata_dir_table); appldata_sysctl_header = register_sysctl_table(appldata_dir_table);
P_DEBUG("Base interface initialized.\n");
return 0; return 0;
} }
...@@ -584,7 +555,9 @@ EXPORT_SYMBOL_GPL(appldata_register_ops); ...@@ -584,7 +555,9 @@ EXPORT_SYMBOL_GPL(appldata_register_ops);
EXPORT_SYMBOL_GPL(appldata_unregister_ops); EXPORT_SYMBOL_GPL(appldata_unregister_ops);
EXPORT_SYMBOL_GPL(appldata_diag); EXPORT_SYMBOL_GPL(appldata_diag);
#ifdef CONFIG_SWAP
EXPORT_SYMBOL_GPL(si_swapinfo); EXPORT_SYMBOL_GPL(si_swapinfo);
#endif
EXPORT_SYMBOL_GPL(nr_threads); EXPORT_SYMBOL_GPL(nr_threads);
EXPORT_SYMBOL_GPL(nr_running); EXPORT_SYMBOL_GPL(nr_running);
EXPORT_SYMBOL_GPL(nr_iowait); EXPORT_SYMBOL_GPL(nr_iowait);
...@@ -14,14 +14,13 @@ ...@@ -14,14 +14,13 @@
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/kernel_stat.h> #include <linux/kernel_stat.h>
#include <asm/io.h>
#include <linux/pagemap.h> #include <linux/pagemap.h>
#include <linux/swap.h> #include <linux/swap.h>
#include <asm/io.h>
#include "appldata.h" #include "appldata.h"
#define MY_PRINT_NAME "appldata_mem" /* for debug messages, etc. */
#define P2K(x) ((x) << (PAGE_SHIFT - 10)) /* Converts #Pages to KB */ #define P2K(x) ((x) << (PAGE_SHIFT - 10)) /* Converts #Pages to KB */
/* /*
...@@ -70,30 +69,6 @@ static struct appldata_mem_data { ...@@ -70,30 +69,6 @@ static struct appldata_mem_data {
} __attribute__((packed)) appldata_mem_data; } __attribute__((packed)) appldata_mem_data;
static inline void appldata_debug_print(struct appldata_mem_data *mem_data)
{
P_DEBUG("--- MEM - RECORD ---\n");
P_DEBUG("pgpgin = %8lu KB\n", mem_data->pgpgin);
P_DEBUG("pgpgout = %8lu KB\n", mem_data->pgpgout);
P_DEBUG("pswpin = %8lu Pages\n", mem_data->pswpin);
P_DEBUG("pswpout = %8lu Pages\n", mem_data->pswpout);
P_DEBUG("pgalloc = %8lu \n", mem_data->pgalloc);
P_DEBUG("pgfault = %8lu \n", mem_data->pgfault);
P_DEBUG("pgmajfault = %8lu \n", mem_data->pgmajfault);
P_DEBUG("sharedram = %8lu KB\n", mem_data->sharedram);
P_DEBUG("totalram = %8lu KB\n", mem_data->totalram);
P_DEBUG("freeram = %8lu KB\n", mem_data->freeram);
P_DEBUG("totalhigh = %8lu KB\n", mem_data->totalhigh);
P_DEBUG("freehigh = %8lu KB\n", mem_data->freehigh);
P_DEBUG("bufferram = %8lu KB\n", mem_data->bufferram);
P_DEBUG("cached = %8lu KB\n", mem_data->cached);
P_DEBUG("totalswap = %8lu KB\n", mem_data->totalswap);
P_DEBUG("freeswap = %8lu KB\n", mem_data->freeswap);
P_DEBUG("sync_count_1 = %u\n", mem_data->sync_count_1);
P_DEBUG("sync_count_2 = %u\n", mem_data->sync_count_2);
P_DEBUG("timestamp = %lX\n", mem_data->timestamp);
}
/* /*
* appldata_get_mem_data() * appldata_get_mem_data()
* *
...@@ -140,9 +115,6 @@ static void appldata_get_mem_data(void *data) ...@@ -140,9 +115,6 @@ static void appldata_get_mem_data(void *data)
mem_data->timestamp = get_clock(); mem_data->timestamp = get_clock();
mem_data->sync_count_2++; mem_data->sync_count_2++;
#ifdef APPLDATA_DEBUG
appldata_debug_print(mem_data);
#endif
} }
...@@ -164,17 +136,7 @@ static struct appldata_ops ops = { ...@@ -164,17 +136,7 @@ static struct appldata_ops ops = {
*/ */
static int __init appldata_mem_init(void) static int __init appldata_mem_init(void)
{ {
int rc; return appldata_register_ops(&ops);
P_DEBUG("sizeof(mem) = %lu\n", sizeof(struct appldata_mem_data));
rc = appldata_register_ops(&ops);
if (rc != 0) {
P_ERROR("Error registering ops, rc = %i\n", rc);
} else {
P_DEBUG("%s-ops registered!\n", ops.name);
}
return rc;
} }
/* /*
...@@ -185,7 +147,6 @@ static int __init appldata_mem_init(void) ...@@ -185,7 +147,6 @@ static int __init appldata_mem_init(void)
static void __exit appldata_mem_exit(void) static void __exit appldata_mem_exit(void)
{ {
appldata_unregister_ops(&ops); appldata_unregister_ops(&ops);
P_DEBUG("%s-ops unregistered!\n", ops.name);
} }
......
...@@ -21,9 +21,6 @@ ...@@ -21,9 +21,6 @@
#include "appldata.h" #include "appldata.h"
#define MY_PRINT_NAME "appldata_net_sum" /* for debug messages, etc. */
/* /*
* Network data * Network data
* *
...@@ -60,26 +57,6 @@ static struct appldata_net_sum_data { ...@@ -60,26 +57,6 @@ static struct appldata_net_sum_data {
} __attribute__((packed)) appldata_net_sum_data; } __attribute__((packed)) appldata_net_sum_data;
static inline void appldata_print_debug(struct appldata_net_sum_data *net_data)
{
P_DEBUG("--- NET - RECORD ---\n");
P_DEBUG("nr_interfaces = %u\n", net_data->nr_interfaces);
P_DEBUG("rx_packets = %8lu\n", net_data->rx_packets);
P_DEBUG("tx_packets = %8lu\n", net_data->tx_packets);
P_DEBUG("rx_bytes = %8lu\n", net_data->rx_bytes);
P_DEBUG("tx_bytes = %8lu\n", net_data->tx_bytes);
P_DEBUG("rx_errors = %8lu\n", net_data->rx_errors);
P_DEBUG("tx_errors = %8lu\n", net_data->tx_errors);
P_DEBUG("rx_dropped = %8lu\n", net_data->rx_dropped);
P_DEBUG("tx_dropped = %8lu\n", net_data->tx_dropped);
P_DEBUG("collisions = %8lu\n", net_data->collisions);
P_DEBUG("sync_count_1 = %u\n", net_data->sync_count_1);
P_DEBUG("sync_count_2 = %u\n", net_data->sync_count_2);
P_DEBUG("timestamp = %lX\n", net_data->timestamp);
}
/* /*
* appldata_get_net_sum_data() * appldata_get_net_sum_data()
* *
...@@ -135,9 +112,6 @@ static void appldata_get_net_sum_data(void *data) ...@@ -135,9 +112,6 @@ static void appldata_get_net_sum_data(void *data)
net_data->timestamp = get_clock(); net_data->timestamp = get_clock();
net_data->sync_count_2++; net_data->sync_count_2++;
#ifdef APPLDATA_DEBUG
appldata_print_debug(net_data);
#endif
} }
...@@ -159,17 +133,7 @@ static struct appldata_ops ops = { ...@@ -159,17 +133,7 @@ static struct appldata_ops ops = {
*/ */
static int __init appldata_net_init(void) static int __init appldata_net_init(void)
{ {
int rc; return appldata_register_ops(&ops);
P_DEBUG("sizeof(net) = %lu\n", sizeof(struct appldata_net_sum_data));
rc = appldata_register_ops(&ops);
if (rc != 0) {
P_ERROR("Error registering ops, rc = %i\n", rc);
} else {
P_DEBUG("%s-ops registered!\n", ops.name);
}
return rc;
} }
/* /*
...@@ -180,7 +144,6 @@ static int __init appldata_net_init(void) ...@@ -180,7 +144,6 @@ static int __init appldata_net_init(void)
static void __exit appldata_net_exit(void) static void __exit appldata_net_exit(void)
{ {
appldata_unregister_ops(&ops); appldata_unregister_ops(&ops);
P_DEBUG("%s-ops unregistered!\n", ops.name);
} }
......
...@@ -89,44 +89,6 @@ static struct appldata_ops ops = { ...@@ -89,44 +89,6 @@ static struct appldata_ops ops = {
}; };
static inline void appldata_print_debug(struct appldata_os_data *os_data)
{
int a0, a1, a2, i;
P_DEBUG("--- OS - RECORD ---\n");
P_DEBUG("nr_threads = %u\n", os_data->nr_threads);
P_DEBUG("nr_running = %u\n", os_data->nr_running);
P_DEBUG("nr_iowait = %u\n", os_data->nr_iowait);
P_DEBUG("avenrun(int) = %8x / %8x / %8x\n", os_data->avenrun[0],
os_data->avenrun[1], os_data->avenrun[2]);
a0 = os_data->avenrun[0];
a1 = os_data->avenrun[1];
a2 = os_data->avenrun[2];
P_DEBUG("avenrun(float) = %d.%02d / %d.%02d / %d.%02d\n",
LOAD_INT(a0), LOAD_FRAC(a0), LOAD_INT(a1), LOAD_FRAC(a1),
LOAD_INT(a2), LOAD_FRAC(a2));
P_DEBUG("nr_cpus = %u\n", os_data->nr_cpus);
for (i = 0; i < os_data->nr_cpus; i++) {
P_DEBUG("cpu%u : user = %u, nice = %u, system = %u, "
"idle = %u, irq = %u, softirq = %u, iowait = %u, "
"steal = %u\n",
os_data->os_cpu[i].cpu_id,
os_data->os_cpu[i].per_cpu_user,
os_data->os_cpu[i].per_cpu_nice,
os_data->os_cpu[i].per_cpu_system,
os_data->os_cpu[i].per_cpu_idle,
os_data->os_cpu[i].per_cpu_irq,
os_data->os_cpu[i].per_cpu_softirq,
os_data->os_cpu[i].per_cpu_iowait,
os_data->os_cpu[i].per_cpu_steal);
}
P_DEBUG("sync_count_1 = %u\n", os_data->sync_count_1);
P_DEBUG("sync_count_2 = %u\n", os_data->sync_count_2);
P_DEBUG("timestamp = %lX\n", os_data->timestamp);
}
/* /*
* appldata_get_os_data() * appldata_get_os_data()
* *
...@@ -180,13 +142,10 @@ static void appldata_get_os_data(void *data) ...@@ -180,13 +142,10 @@ static void appldata_get_os_data(void *data)
APPLDATA_START_INTERVAL_REC, APPLDATA_START_INTERVAL_REC,
(unsigned long) ops.data, new_size, (unsigned long) ops.data, new_size,
ops.mod_lvl); ops.mod_lvl);
if (rc != 0) { if (rc != 0)
P_ERROR("os: START NEW DIAG 0xDC failed, " P_ERROR("os: START NEW DIAG 0xDC failed, "
"return code: %d, new size = %i\n", rc, "return code: %d, new size = %i\n", rc,
new_size); new_size);
P_INFO("os: stopping old record now\n");
} else
P_INFO("os: new record size = %i\n", new_size);
rc = appldata_diag(APPLDATA_RECORD_OS_ID, rc = appldata_diag(APPLDATA_RECORD_OS_ID,
APPLDATA_STOP_REC, APPLDATA_STOP_REC,
...@@ -204,9 +163,6 @@ static void appldata_get_os_data(void *data) ...@@ -204,9 +163,6 @@ static void appldata_get_os_data(void *data)
} }
os_data->timestamp = get_clock(); os_data->timestamp = get_clock();
os_data->sync_count_2++; os_data->sync_count_2++;
#ifdef APPLDATA_DEBUG
appldata_print_debug(os_data);
#endif
} }
...@@ -227,12 +183,9 @@ static int __init appldata_os_init(void) ...@@ -227,12 +183,9 @@ static int __init appldata_os_init(void)
rc = -ENOMEM; rc = -ENOMEM;
goto out; goto out;
} }
P_DEBUG("max. sizeof(os) = %i, sizeof(os_cpu) = %lu\n", max_size,
sizeof(struct appldata_os_per_cpu));
appldata_os_data = kzalloc(max_size, GFP_DMA); appldata_os_data = kzalloc(max_size, GFP_DMA);
if (appldata_os_data == NULL) { if (appldata_os_data == NULL) {
P_ERROR("No memory for %s!\n", ops.name);
rc = -ENOMEM; rc = -ENOMEM;
goto out; goto out;
} }
...@@ -240,17 +193,12 @@ static int __init appldata_os_init(void) ...@@ -240,17 +193,12 @@ static int __init appldata_os_init(void)
appldata_os_data->per_cpu_size = sizeof(struct appldata_os_per_cpu); appldata_os_data->per_cpu_size = sizeof(struct appldata_os_per_cpu);
appldata_os_data->cpu_offset = offsetof(struct appldata_os_data, appldata_os_data->cpu_offset = offsetof(struct appldata_os_data,
os_cpu); os_cpu);
P_DEBUG("cpu offset = %u\n", appldata_os_data->cpu_offset);
ops.data = appldata_os_data; ops.data = appldata_os_data;
ops.callback = &appldata_get_os_data; ops.callback = &appldata_get_os_data;
rc = appldata_register_ops(&ops); rc = appldata_register_ops(&ops);
if (rc != 0) { if (rc != 0)
P_ERROR("Error registering ops, rc = %i\n", rc);
kfree(appldata_os_data); kfree(appldata_os_data);
} else {
P_DEBUG("%s-ops registered!\n", ops.name);
}
out: out:
return rc; return rc;
} }
...@@ -264,7 +212,6 @@ static void __exit appldata_os_exit(void) ...@@ -264,7 +212,6 @@ static void __exit appldata_os_exit(void)
{ {
appldata_unregister_ops(&ops); appldata_unregister_ops(&ops);
kfree(appldata_os_data); kfree(appldata_os_data);
P_DEBUG("%s-ops unregistered!\n", ops.name);
} }
......
...@@ -185,11 +185,8 @@ static int __init prng_init(void) ...@@ -185,11 +185,8 @@ static int __init prng_init(void)
prng_seed(16); prng_seed(16);
ret = misc_register(&prng_dev); ret = misc_register(&prng_dev);
if (ret) { if (ret)
printk(KERN_WARNING
"Could not register misc device for PRNG.\n");
goto out_buf; goto out_buf;
}
return 0; return 0;
out_buf: out_buf:
......
...@@ -150,33 +150,24 @@ static ssize_t hypfs_aio_read(struct kiocb *iocb, const struct iovec *iov, ...@@ -150,33 +150,24 @@ static ssize_t hypfs_aio_read(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t offset) unsigned long nr_segs, loff_t offset)
{ {
char *data; char *data;
size_t len; ssize_t ret;
struct file *filp = iocb->ki_filp; struct file *filp = iocb->ki_filp;
/* XXX: temporary */ /* XXX: temporary */
char __user *buf = iov[0].iov_base; char __user *buf = iov[0].iov_base;
size_t count = iov[0].iov_len; size_t count = iov[0].iov_len;
if (nr_segs != 1) { if (nr_segs != 1)
count = -EINVAL; return -EINVAL;
goto out;
}
data = filp->private_data; data = filp->private_data;
len = strlen(data); ret = simple_read_from_buffer(buf, count, &offset, data, strlen(data));
if (offset > len) { if (ret <= 0)
count = 0; return ret;
goto out;
} iocb->ki_pos += ret;
if (count > len - offset)
count = len - offset;
if (copy_to_user(buf, data + offset, count)) {
count = -EFAULT;
goto out;
}
iocb->ki_pos += count;
file_accessed(filp); file_accessed(filp);
out:
return count; return ret;
} }
static ssize_t hypfs_aio_write(struct kiocb *iocb, const struct iovec *iov, static ssize_t hypfs_aio_write(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t offset) unsigned long nr_segs, loff_t offset)
......
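The hypfs_aio_read() rewrite above delegates the bounds checking and the user-space copy to simple_read_from_buffer(). A minimal sketch of the semantics that helper provides (assumed behaviour for illustration, not the kernel's actual implementation) looks like this:

/*
 * Sketch only: clamp the request to the data remaining past *ppos,
 * copy it out and advance the file position, returning the number of
 * bytes copied (0 at EOF, negative errno on failure).
 */
static ssize_t read_from_buffer_sketch(char __user *to, size_t count,
				       loff_t *ppos, const char *from,
				       size_t available)
{
	loff_t pos = *ppos;

	if (pos < 0)
		return -EINVAL;
	if (pos >= available)
		return 0;
	if (count > available - pos)
		count = available - pos;
	if (copy_to_user(to, from + pos, count))
		return -EFAULT;
	*ppos = pos + count;
	return count;
}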
...@@ -7,9 +7,14 @@ ...@@ -7,9 +7,14 @@
# #
CFLAGS_smp.o := -Wno-nonnull CFLAGS_smp.o := -Wno-nonnull
#
# Pass UTS_MACHINE for user_regset definition
#
CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
obj-y := bitmap.o traps.o time.o process.o base.o early.o \ obj-y := bitmap.o traps.o time.o process.o base.o early.o \
setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \
s390_ext.o debug.o irq.o ipl.o dis.o diag.o s390_ext.o debug.o irq.o ipl.o dis.o diag.o mem_detect.o
obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o)
obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
...@@ -23,7 +28,7 @@ obj-$(CONFIG_AUDIT) += audit.o ...@@ -23,7 +28,7 @@ obj-$(CONFIG_AUDIT) += audit.o
compat-obj-$(CONFIG_AUDIT) += compat_audit.o compat-obj-$(CONFIG_AUDIT) += compat_audit.o
obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \ obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \
compat_wrapper.o compat_exec_domain.o \ compat_wrapper.o compat_exec_domain.o \
binfmt_elf32.o $(compat-obj-y) $(compat-obj-y)
obj-$(CONFIG_VIRT_TIMER) += vtime.o obj-$(CONFIG_VIRT_TIMER) += vtime.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-$(CONFIG_STACKTRACE) += stacktrace.o
......
/*
* Support for 32-bit Linux for S390 ELF binaries.
*
* Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Gerhard Tonn (ton@de.ibm.com)
*
* Heavily inspired by the 32-bit Sparc compat code which is
* Copyright (C) 1995, 1996, 1997, 1998 David S. Miller (davem@redhat.com)
* Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz)
*/
#define __ASMS390_ELF_H
#include <linux/time.h>
/*
* These are used to set parameters in the core dumps.
*/
#define ELF_CLASS ELFCLASS32
#define ELF_DATA ELFDATA2MSB
#define ELF_ARCH EM_S390
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
#define elf_check_arch(x) \
(((x)->e_machine == EM_S390 || (x)->e_machine == EM_S390_OLD) \
&& (x)->e_ident[EI_CLASS] == ELF_CLASS)
/* ELF register definitions */
#define NUM_GPRS 16
#define NUM_FPRS 16
#define NUM_ACRS 16
/* For SVR4/S390 the function pointer to be registered with `atexit` is
passed in R14. */
#define ELF_PLAT_INIT(_r, load_addr) \
do { \
_r->gprs[14] = 0; \
} while(0)
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
use of this is to invoke "./ld.so someprog" to test out a new version of
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */
#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
/* Wow, the "main" arch needs arch dependent functions too.. :) */
/* regs is struct pt_regs, pr_reg is elf_gregset_t (which is
now struct_user_regs, they are different) */
#define ELF_CORE_COPY_REGS(pr_reg, regs) dump_regs32(regs, &pr_reg);
#define ELF_CORE_COPY_TASK_REGS(tsk, regs) dump_task_regs32(tsk, regs)
#define ELF_CORE_COPY_FPREGS(tsk, fpregs) dump_task_fpu(tsk, fpregs)
/* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. */
#define ELF_HWCAP (0)
/* This yields a string that ld.so will use to load implementation
specific libraries for optimization. This is more specific in
intent than poking at uname or /proc/cpuinfo.
For the moment, we have only optimizations for the Intel generations,
but that could change... */
#define ELF_PLATFORM (NULL)
#define SET_PERSONALITY(ex, ibcs2) \
do { \
if (ibcs2) \
set_personality(PER_SVR4); \
else if (current->personality != PER_LINUX32) \
set_personality(PER_LINUX); \
set_thread_flag(TIF_31BIT); \
} while (0)
#include "compat_linux.h"
typedef _s390_fp_regs32 elf_fpregset_t;
typedef struct
{
_psw_t32 psw;
__u32 gprs[__NUM_GPRS];
__u32 acrs[__NUM_ACRS];
__u32 orig_gpr2;
} s390_regs32;
typedef s390_regs32 elf_gregset_t;
static inline int dump_regs32(struct pt_regs *ptregs, elf_gregset_t *regs)
{
int i;
memcpy(&regs->psw.mask, &ptregs->psw.mask, 4);
memcpy(&regs->psw.addr, (char *)&ptregs->psw.addr + 4, 4);
for (i = 0; i < NUM_GPRS; i++)
regs->gprs[i] = ptregs->gprs[i];
save_access_regs(regs->acrs);
regs->orig_gpr2 = ptregs->orig_gpr2;
return 1;
}
static inline int dump_task_regs32(struct task_struct *tsk, elf_gregset_t *regs)
{
struct pt_regs *ptregs = task_pt_regs(tsk);
int i;
memcpy(&regs->psw.mask, &ptregs->psw.mask, 4);
memcpy(&regs->psw.addr, (char *)&ptregs->psw.addr + 4, 4);
for (i = 0; i < NUM_GPRS; i++)
regs->gprs[i] = ptregs->gprs[i];
memcpy(regs->acrs, tsk->thread.acrs, sizeof(regs->acrs));
regs->orig_gpr2 = ptregs->orig_gpr2;
return 1;
}
static inline int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
{
if (tsk == current)
save_fp_regs((s390_fp_regs *) fpregs);
else
memcpy(fpregs, &tsk->thread.fp_regs, sizeof(elf_fpregset_t));
return 1;
}
#include <asm/processor.h>
#include <asm/pgalloc.h>
#include <linux/module.h>
#include <linux/elfcore.h>
#include <linux/binfmts.h>
#include <linux/compat.h>
#define elf_prstatus elf_prstatus32
struct elf_prstatus32
{
struct elf_siginfo pr_info; /* Info associated with signal */
short pr_cursig; /* Current signal */
u32 pr_sigpend; /* Set of pending signals */
u32 pr_sighold; /* Set of held signals */
pid_t pr_pid;
pid_t pr_ppid;
pid_t pr_pgrp;
pid_t pr_sid;
struct compat_timeval pr_utime; /* User time */
struct compat_timeval pr_stime; /* System time */
struct compat_timeval pr_cutime; /* Cumulative user time */
struct compat_timeval pr_cstime; /* Cumulative system time */
elf_gregset_t pr_reg; /* GP registers */
int pr_fpvalid; /* True if math co-processor being used. */
};
#define elf_prpsinfo elf_prpsinfo32
struct elf_prpsinfo32
{
char pr_state; /* numeric process state */
char pr_sname; /* char for pr_state */
char pr_zomb; /* zombie */
char pr_nice; /* nice val */
u32 pr_flag; /* flags */
u16 pr_uid;
u16 pr_gid;
pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
/* Lots missing */
char pr_fname[16]; /* filename of executable */
char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
};
#include <linux/highuid.h>
/*
#define init_elf_binfmt init_elf32_binfmt
*/
#undef start_thread
#define start_thread start_thread31
static inline void start_thread31(struct pt_regs *regs, unsigned long new_psw,
unsigned long new_stackp)
{
set_fs(USER_DS);
regs->psw.mask = psw_user32_bits;
regs->psw.addr = new_psw;
regs->gprs[15] = new_stackp;
crst_table_downgrade(current->mm, 1UL << 31);
}
MODULE_DESCRIPTION("Binary format loader for compatibility with 32bit Linux for S390 binaries,"
" Copyright 2000 IBM Corporation");
MODULE_AUTHOR("Gerhard Tonn <ton@de.ibm.com>");
#undef MODULE_DESCRIPTION
#undef MODULE_AUTHOR
#undef cputime_to_timeval
#define cputime_to_timeval cputime_to_compat_timeval
static inline void
cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
{
value->tv_usec = cputime % 1000000;
value->tv_sec = cputime / 1000000;
}
#include "../../../fs/binfmt_elf.c"
#ifndef _PTRACE32_H #ifndef _PTRACE32_H
#define _PTRACE32_H #define _PTRACE32_H
#include "compat_linux.h" /* needed for _psw_t32 */ #include "compat_linux.h" /* needed for psw_compat_t */
typedef struct { typedef struct {
__u32 cr[3]; __u32 cr[3];
...@@ -38,7 +38,7 @@ typedef struct { ...@@ -38,7 +38,7 @@ typedef struct {
struct user_regs_struct32 struct user_regs_struct32
{ {
_psw_t32 psw; psw_compat_t psw;
u32 gprs[NUM_GPRS]; u32 gprs[NUM_GPRS];
u32 acrs[NUM_ACRS]; u32 acrs[NUM_ACRS];
u32 orig_gpr2; u32 orig_gpr2;
......
...@@ -1079,7 +1079,6 @@ __init debug_init(void) ...@@ -1079,7 +1079,6 @@ __init debug_init(void)
s390dbf_sysctl_header = register_sysctl_table(s390dbf_dir_table); s390dbf_sysctl_header = register_sysctl_table(s390dbf_dir_table);
mutex_lock(&debug_mutex); mutex_lock(&debug_mutex);
debug_debugfs_root_entry = debugfs_create_dir(DEBUG_DIR_ROOT,NULL); debug_debugfs_root_entry = debugfs_create_dir(DEBUG_DIR_ROOT,NULL);
printk(KERN_INFO "debug: Initialization complete\n");
initialized = 1; initialized = 1;
mutex_unlock(&debug_mutex); mutex_unlock(&debug_mutex);
...@@ -1193,7 +1192,6 @@ debug_get_uint(char *buf) ...@@ -1193,7 +1192,6 @@ debug_get_uint(char *buf)
for(; isspace(*buf); buf++); for(; isspace(*buf); buf++);
rc = simple_strtoul(buf, &buf, 10); rc = simple_strtoul(buf, &buf, 10);
if(*buf){ if(*buf){
printk("debug: no integer specified!\n");
rc = -EINVAL; rc = -EINVAL;
} }
return rc; return rc;
...@@ -1340,19 +1338,12 @@ static void debug_flush(debug_info_t* id, int area) ...@@ -1340,19 +1338,12 @@ static void debug_flush(debug_info_t* id, int area)
memset(id->areas[i][j], 0, PAGE_SIZE); memset(id->areas[i][j], 0, PAGE_SIZE);
} }
} }
printk(KERN_INFO "debug: %s: all areas flushed\n",id->name);
} else if(area >= 0 && area < id->nr_areas) { } else if(area >= 0 && area < id->nr_areas) {
id->active_entries[area] = 0; id->active_entries[area] = 0;
id->active_pages[area] = 0; id->active_pages[area] = 0;
for(i = 0; i < id->pages_per_area; i++) { for(i = 0; i < id->pages_per_area; i++) {
memset(id->areas[area][i],0,PAGE_SIZE); memset(id->areas[area][i],0,PAGE_SIZE);
} }
printk(KERN_INFO "debug: %s: area %i has been flushed\n",
id->name, area);
} else {
printk(KERN_INFO
"debug: %s: area %i cannot be flushed (range: %i - %i)\n",
id->name, area, 0, id->nr_areas-1);
} }
spin_unlock_irqrestore(&id->lock,flags); spin_unlock_irqrestore(&id->lock,flags);
} }
......
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
#include <linux/module.h> #include <linux/module.h>
#include <linux/pfn.h> #include <linux/pfn.h>
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <asm/ebcdic.h>
#include <asm/ipl.h> #include <asm/ipl.h>
#include <asm/lowcore.h> #include <asm/lowcore.h>
#include <asm/processor.h> #include <asm/processor.h>
...@@ -26,12 +27,40 @@ ...@@ -26,12 +27,40 @@
/* /*
* Create a Kernel NSS if the SAVESYS= parameter is defined * Create a Kernel NSS if the SAVESYS= parameter is defined
*/ */
#define DEFSYS_CMD_SIZE 96 #define DEFSYS_CMD_SIZE 128
#define SAVESYS_CMD_SIZE 32 #define SAVESYS_CMD_SIZE 32
char kernel_nss_name[NSS_NAME_SIZE + 1]; char kernel_nss_name[NSS_NAME_SIZE + 1];
static void __init setup_boot_command_line(void);
#ifdef CONFIG_SHARED_KERNEL #ifdef CONFIG_SHARED_KERNEL
int __init savesys_ipl_nss(char *cmd, const int cmdlen);
asm(
" .section .init.text,\"ax\",@progbits\n"
" .align 4\n"
" .type savesys_ipl_nss, @function\n"
"savesys_ipl_nss:\n"
#ifdef CONFIG_64BIT
" stmg 6,15,48(15)\n"
" lgr 14,3\n"
" sam31\n"
" diag 2,14,0x8\n"
" sam64\n"
" lgr 2,14\n"
" lmg 6,15,48(15)\n"
#else
" stm 6,15,24(15)\n"
" lr 14,3\n"
" diag 2,14,0x8\n"
" lr 2,14\n"
" lm 6,15,24(15)\n"
#endif
" br 14\n"
" .size savesys_ipl_nss, .-savesys_ipl_nss\n");
static noinline __init void create_kernel_nss(void) static noinline __init void create_kernel_nss(void)
{ {
unsigned int i, stext_pfn, eshared_pfn, end_pfn, min_size; unsigned int i, stext_pfn, eshared_pfn, end_pfn, min_size;
...@@ -39,6 +68,7 @@ static noinline __init void create_kernel_nss(void) ...@@ -39,6 +68,7 @@ static noinline __init void create_kernel_nss(void)
unsigned int sinitrd_pfn, einitrd_pfn; unsigned int sinitrd_pfn, einitrd_pfn;
#endif #endif
int response; int response;
size_t len;
char *savesys_ptr; char *savesys_ptr;
char upper_command_line[COMMAND_LINE_SIZE]; char upper_command_line[COMMAND_LINE_SIZE];
char defsys_cmd[DEFSYS_CMD_SIZE]; char defsys_cmd[DEFSYS_CMD_SIZE];
...@@ -49,8 +79,8 @@ static noinline __init void create_kernel_nss(void) ...@@ -49,8 +79,8 @@ static noinline __init void create_kernel_nss(void)
return; return;
/* Convert COMMAND_LINE to upper case */ /* Convert COMMAND_LINE to upper case */
for (i = 0; i < strlen(COMMAND_LINE); i++) for (i = 0; i < strlen(boot_command_line); i++)
upper_command_line[i] = toupper(COMMAND_LINE[i]); upper_command_line[i] = toupper(boot_command_line[i]);
savesys_ptr = strstr(upper_command_line, "SAVESYS="); savesys_ptr = strstr(upper_command_line, "SAVESYS=");
...@@ -83,7 +113,8 @@ static noinline __init void create_kernel_nss(void) ...@@ -83,7 +113,8 @@ static noinline __init void create_kernel_nss(void)
} }
#endif #endif
sprintf(defsys_cmd, "%s EW MINSIZE=%.7iK", defsys_cmd, min_size); sprintf(defsys_cmd, "%s EW MINSIZE=%.7iK PARMREGS=0-13",
defsys_cmd, min_size);
sprintf(savesys_cmd, "SAVESYS %s \n IPL %s", sprintf(savesys_cmd, "SAVESYS %s \n IPL %s",
kernel_nss_name, kernel_nss_name); kernel_nss_name, kernel_nss_name);
...@@ -94,13 +125,24 @@ static noinline __init void create_kernel_nss(void) ...@@ -94,13 +125,24 @@ static noinline __init void create_kernel_nss(void)
return; return;
} }
__cpcmd(savesys_cmd, NULL, 0, &response); len = strlen(savesys_cmd);
ASCEBC(savesys_cmd, len);
response = savesys_ipl_nss(savesys_cmd, len);
if (response != strlen(savesys_cmd)) { /* On success: response is equal to the command size,
* max SAVESYS_CMD_SIZE
* On error: response contains the numeric portion of cp error message.
* for SAVESYS it will be >= 263
*/
if (response > SAVESYS_CMD_SIZE) {
kernel_nss_name[0] = '\0'; kernel_nss_name[0] = '\0';
return; return;
} }
/* re-setup boot command line with new ipl vm parms */
ipl_update_parameters();
setup_boot_command_line();
ipl_flags = IPL_NSS_VALID; ipl_flags = IPL_NSS_VALID;
} }
...@@ -141,109 +183,11 @@ static noinline __init void detect_machine_type(void) ...@@ -141,109 +183,11 @@ static noinline __init void detect_machine_type(void)
if (cpuinfo->cpu_id.version == 0xff) if (cpuinfo->cpu_id.version == 0xff)
machine_flags |= MACHINE_FLAG_VM; machine_flags |= MACHINE_FLAG_VM;
/* Running on a P/390 ? */
if (cpuinfo->cpu_id.machine == 0x7490)
machine_flags |= MACHINE_FLAG_P390;
/* Running under KVM ? */ /* Running under KVM ? */
if (cpuinfo->cpu_id.version == 0xfe) if (cpuinfo->cpu_id.version == 0xfe)
machine_flags |= MACHINE_FLAG_KVM; machine_flags |= MACHINE_FLAG_KVM;
} }
#ifdef CONFIG_64BIT
static noinline __init int memory_fast_detect(void)
{
unsigned long val0 = 0;
unsigned long val1 = 0xc;
int ret = -ENOSYS;
if (ipl_flags & IPL_NSS_VALID)
return -ENOSYS;
asm volatile(
" diag %1,%2,0x260\n"
"0: lhi %0,0\n"
"1:\n"
EX_TABLE(0b,1b)
: "+d" (ret), "+d" (val0), "+d" (val1) : : "cc");
if (ret || val0 != val1)
return -ENOSYS;
memory_chunk[0].size = val0 + 1;
return 0;
}
#else
static inline int memory_fast_detect(void)
{
return -ENOSYS;
}
#endif
static inline __init unsigned long __tprot(unsigned long addr)
{
int cc = -1;
asm volatile(
" tprot 0(%1),0\n"
"0: ipm %0\n"
" srl %0,28\n"
"1:\n"
EX_TABLE(0b,1b)
: "+d" (cc) : "a" (addr) : "cc");
return (unsigned long)cc;
}
/* Checking memory in 128KB increments. */
#define CHUNK_INCR (1UL << 17)
#define ADDR2G (1UL << 31)
static noinline __init void find_memory_chunks(unsigned long memsize)
{
unsigned long addr = 0, old_addr = 0;
unsigned long old_cc = CHUNK_READ_WRITE;
unsigned long cc;
int chunk = 0;
while (chunk < MEMORY_CHUNKS) {
cc = __tprot(addr);
while (cc == old_cc) {
addr += CHUNK_INCR;
if (memsize && addr >= memsize)
break;
#ifndef CONFIG_64BIT
if (addr == ADDR2G)
break;
#endif
cc = __tprot(addr);
}
if (old_addr != addr &&
(old_cc == CHUNK_READ_WRITE || old_cc == CHUNK_READ_ONLY)) {
memory_chunk[chunk].addr = old_addr;
memory_chunk[chunk].size = addr - old_addr;
memory_chunk[chunk].type = old_cc;
chunk++;
}
old_addr = addr;
old_cc = cc;
#ifndef CONFIG_64BIT
if (addr == ADDR2G)
break;
#endif
/*
* Finish memory detection at the first hole
* if storage size is unknown.
*/
if (cc == -1UL && !memsize)
break;
if (memsize && addr >= memsize)
break;
}
}
static __init void early_pgm_check_handler(void) static __init void early_pgm_check_handler(void)
{ {
unsigned long addr; unsigned long addr;
...@@ -380,23 +324,61 @@ static __init void detect_machine_facilities(void) ...@@ -380,23 +324,61 @@ static __init void detect_machine_facilities(void)
#endif #endif
} }
static __init void rescue_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
/*
* Move the initrd right behind the bss section in case it starts
* within the bss section. So we don't overwrite it when the bss
* section gets cleared.
*/
if (!INITRD_START || !INITRD_SIZE)
return;
if (INITRD_START >= (unsigned long) __bss_stop)
return;
memmove(__bss_stop, (void *) INITRD_START, INITRD_SIZE);
INITRD_START = (unsigned long) __bss_stop;
#endif
}
/* Set up boot command line */
static void __init setup_boot_command_line(void)
{
char *parm = NULL;
/* copy arch command line */
strlcpy(boot_command_line, COMMAND_LINE, ARCH_COMMAND_LINE_SIZE);
boot_command_line[ARCH_COMMAND_LINE_SIZE - 1] = 0;
/* append IPL PARM data to the boot command line */
if (MACHINE_IS_VM) {
parm = boot_command_line + strlen(boot_command_line);
*parm++ = ' ';
get_ipl_vmparm(parm);
if (parm[0] == '=')
memmove(boot_command_line, parm + 1, strlen(parm));
}
}
/* /*
* Save ipl parameters, clear bss memory, initialize storage keys * Save ipl parameters, clear bss memory, initialize storage keys
* and create a kernel NSS at startup if the SAVESYS= parm is defined * and create a kernel NSS at startup if the SAVESYS= parm is defined
*/ */
void __init startup_init(void) void __init startup_init(void)
{ {
unsigned long long memsize;
ipl_save_parameters(); ipl_save_parameters();
rescue_initrd();
clear_bss_section(); clear_bss_section();
init_kernel_storage_key(); init_kernel_storage_key();
lockdep_init(); lockdep_init();
lockdep_off(); lockdep_off();
detect_machine_type();
create_kernel_nss();
sort_main_extable(); sort_main_extable();
setup_lowcore_early(); setup_lowcore_early();
detect_machine_type();
ipl_update_parameters();
setup_boot_command_line();
create_kernel_nss();
detect_mvpg(); detect_mvpg();
detect_ieee(); detect_ieee();
detect_csp(); detect_csp();
...@@ -404,18 +386,7 @@ void __init startup_init(void) ...@@ -404,18 +386,7 @@ void __init startup_init(void)
detect_diag44(); detect_diag44();
detect_machine_facilities(); detect_machine_facilities();
setup_hpage(); setup_hpage();
sclp_read_info_early();
sclp_facilities_detect(); sclp_facilities_detect();
memsize = sclp_memory_detect(); detect_memory_layout(memory_chunk);
#ifndef CONFIG_64BIT
/*
* Can't deal with more than 2G in 31 bit addressing mode, so
* limit the value in order to avoid strange side effects.
*/
if (memsize > ADDR2G)
memsize = ADDR2G;
#endif
if (memory_fast_detect() < 0)
find_memory_chunks((unsigned long) memsize);
lockdep_on(); lockdep_on();
} }
...@@ -41,10 +41,8 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) ...@@ -41,10 +41,8 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
if (is_prohibited_opcode((kprobe_opcode_t *) p->addr)) if (is_prohibited_opcode((kprobe_opcode_t *) p->addr))
return -EINVAL; return -EINVAL;
if ((unsigned long)p->addr & 0x01) { if ((unsigned long)p->addr & 0x01)
printk("Attempt to register kprobe at an unaligned address\n");
return -EINVAL; return -EINVAL;
}
/* Use the get_insn_slot() facility for correctness */ /* Use the get_insn_slot() facility for correctness */
if (!(p->ainsn.insn = get_insn_slot())) if (!(p->ainsn.insn = get_insn_slot()))
......
...@@ -52,7 +52,6 @@ void machine_kexec_cleanup(struct kimage *image) ...@@ -52,7 +52,6 @@ void machine_kexec_cleanup(struct kimage *image)
void machine_shutdown(void) void machine_shutdown(void)
{ {
printk(KERN_INFO "kexec: machine_shutdown called\n");
} }
void machine_kexec(struct kimage *image) void machine_kexec(struct kimage *image)
......
/*
* Copyright IBM Corp. 2008
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/ipl.h>
#include <asm/sclp.h>
#include <asm/setup.h>
static int memory_fast_detect(struct mem_chunk *chunk)
{
unsigned long val0 = 0;
unsigned long val1 = 0xc;
int rc = -EOPNOTSUPP;
if (ipl_flags & IPL_NSS_VALID)
return -EOPNOTSUPP;
asm volatile(
" diag %1,%2,0x260\n"
"0: lhi %0,0\n"
"1:\n"
EX_TABLE(0b,1b)
: "+d" (rc), "+d" (val0), "+d" (val1) : : "cc");
if (rc || val0 != val1)
return -EOPNOTSUPP;
chunk->size = val0 + 1;
return 0;
}
static inline int tprot(unsigned long addr)
{
int rc = -EFAULT;
asm volatile(
" tprot 0(%1),0\n"
"0: ipm %0\n"
" srl %0,28\n"
"1:\n"
EX_TABLE(0b,1b)
: "+d" (rc) : "a" (addr) : "cc");
return rc;
}
#define ADDR2G (1ULL << 31)
static void find_memory_chunks(struct mem_chunk chunk[])
{
unsigned long long memsize, rnmax, rzm;
unsigned long addr = 0, size;
int i = 0, type;
rzm = sclp_get_rzm();
rnmax = sclp_get_rnmax();
memsize = rzm * rnmax;
if (!rzm)
rzm = 1ULL << 17;
if (sizeof(long) == 4) {
rzm = min(ADDR2G, rzm);
memsize = memsize ? min(ADDR2G, memsize) : ADDR2G;
}
do {
size = 0;
type = tprot(addr);
do {
size += rzm;
if (memsize && addr + size >= memsize)
break;
} while (type == tprot(addr + size));
if (type == CHUNK_READ_WRITE || type == CHUNK_READ_ONLY) {
chunk[i].addr = addr;
chunk[i].size = size;
chunk[i].type = type;
i++;
}
addr += size;
} while (addr < memsize && i < MEMORY_CHUNKS);
}
void detect_memory_layout(struct mem_chunk chunk[])
{
unsigned long flags, cr0;
memset(chunk, 0, MEMORY_CHUNKS * sizeof(struct mem_chunk));
if (memory_fast_detect(&chunk[0]) == 0)
return;
/* Disable IRQs, DAT and low address protection so tprot does the
* right thing and we don't get scheduled away with low address
* protection disabled.
*/
flags = __raw_local_irq_stnsm(0xf8);
__ctl_store(cr0, 0, 0);
__ctl_clear_bit(0, 28);
find_memory_chunks(chunk);
__ctl_load(cr0, 0, 0);
__raw_local_irq_ssm(flags);
}
EXPORT_SYMBOL(detect_memory_layout);
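detect_memory_layout() zero-fills the caller-supplied array and then records up to MEMORY_CHUNKS contiguous regions in it, so unused trailing slots keep a size of 0. A minimal sketch of how a caller might walk the result (the printk wording is illustrative, not part of this commit):

static void __init print_memory_layout(struct mem_chunk chunk[])
{
	int i;

	for (i = 0; i < MEMORY_CHUNKS; i++) {
		if (!chunk[i].size)
			break;	/* remaining slots are zero-filled */
		printk(KERN_INFO "chunk %d: %lx-%lx (%s)\n", i,
		       chunk[i].addr, chunk[i].addr + chunk[i].size - 1,
		       chunk[i].type == CHUNK_READ_WRITE ? "rw" : "ro");
	}
}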
...@@ -75,46 +75,19 @@ unsigned long thread_saved_pc(struct task_struct *tsk) ...@@ -75,46 +75,19 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
return sf->gprs[8]; return sf->gprs[8];
} }
/*
* Need to know about CPUs going idle?
*/
static ATOMIC_NOTIFIER_HEAD(idle_chain);
DEFINE_PER_CPU(struct s390_idle_data, s390_idle); DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
int register_idle_notifier(struct notifier_block *nb)
{
return atomic_notifier_chain_register(&idle_chain, nb);
}
EXPORT_SYMBOL(register_idle_notifier);
int unregister_idle_notifier(struct notifier_block *nb)
{
return atomic_notifier_chain_unregister(&idle_chain, nb);
}
EXPORT_SYMBOL(unregister_idle_notifier);
static int s390_idle_enter(void) static int s390_idle_enter(void)
{ {
struct s390_idle_data *idle; struct s390_idle_data *idle;
int nr_calls = 0;
void *hcpu;
int rc;
hcpu = (void *)(long)smp_processor_id();
rc = __atomic_notifier_call_chain(&idle_chain, S390_CPU_IDLE, hcpu, -1,
&nr_calls);
if (rc == NOTIFY_BAD) {
nr_calls--;
__atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE,
hcpu, nr_calls, NULL);
return rc;
}
idle = &__get_cpu_var(s390_idle); idle = &__get_cpu_var(s390_idle);
spin_lock(&idle->lock); spin_lock(&idle->lock);
idle->idle_count++; idle->idle_count++;
idle->in_idle = 1; idle->in_idle = 1;
idle->idle_enter = get_clock(); idle->idle_enter = get_clock();
spin_unlock(&idle->lock); spin_unlock(&idle->lock);
vtime_stop_cpu_timer();
return NOTIFY_OK; return NOTIFY_OK;
} }
...@@ -122,13 +95,12 @@ void s390_idle_leave(void) ...@@ -122,13 +95,12 @@ void s390_idle_leave(void)
{ {
struct s390_idle_data *idle; struct s390_idle_data *idle;
vtime_start_cpu_timer();
idle = &__get_cpu_var(s390_idle); idle = &__get_cpu_var(s390_idle);
spin_lock(&idle->lock); spin_lock(&idle->lock);
idle->idle_time += get_clock() - idle->idle_enter; idle->idle_time += get_clock() - idle->idle_enter;
idle->in_idle = 0; idle->in_idle = 0;
spin_unlock(&idle->lock); spin_unlock(&idle->lock);
atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE,
(void *)(long) smp_processor_id());
} }
extern void s390_handle_mcck(void); extern void s390_handle_mcck(void);
......
...@@ -77,7 +77,7 @@ unsigned long machine_flags; ...@@ -77,7 +77,7 @@ unsigned long machine_flags;
unsigned long elf_hwcap = 0; unsigned long elf_hwcap = 0;
char elf_platform[ELF_PLATFORM_SIZE]; char elf_platform[ELF_PLATFORM_SIZE];
struct mem_chunk __meminitdata memory_chunk[MEMORY_CHUNKS]; struct mem_chunk __initdata memory_chunk[MEMORY_CHUNKS];
volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */ volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */
static unsigned long __initdata memory_end; static unsigned long __initdata memory_end;
...@@ -205,12 +205,6 @@ static void __init conmode_default(void) ...@@ -205,12 +205,6 @@ static void __init conmode_default(void)
SET_CONSOLE_SCLP; SET_CONSOLE_SCLP;
#endif #endif
} }
} else if (MACHINE_IS_P390) {
#if defined(CONFIG_TN3215_CONSOLE)
SET_CONSOLE_3215;
#elif defined(CONFIG_TN3270_CONSOLE)
SET_CONSOLE_3270;
#endif
} else { } else {
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE) #if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
SET_CONSOLE_SCLP; SET_CONSOLE_SCLP;
...@@ -221,18 +215,17 @@ static void __init conmode_default(void) ...@@ -221,18 +215,17 @@ static void __init conmode_default(void)
#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE) #if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)
static void __init setup_zfcpdump(unsigned int console_devno) static void __init setup_zfcpdump(unsigned int console_devno)
{ {
static char str[64]; static char str[41];
if (ipl_info.type != IPL_TYPE_FCP_DUMP) if (ipl_info.type != IPL_TYPE_FCP_DUMP)
return; return;
if (console_devno != -1) if (console_devno != -1)
sprintf(str, "cio_ignore=all,!0.0.%04x,!0.0.%04x", sprintf(str, " cio_ignore=all,!0.0.%04x,!0.0.%04x",
ipl_info.data.fcp.dev_id.devno, console_devno); ipl_info.data.fcp.dev_id.devno, console_devno);
else else
sprintf(str, "cio_ignore=all,!0.0.%04x", sprintf(str, " cio_ignore=all,!0.0.%04x",
ipl_info.data.fcp.dev_id.devno); ipl_info.data.fcp.dev_id.devno);
strcat(COMMAND_LINE, " "); strcat(boot_command_line, str);
strcat(COMMAND_LINE, str);
console_loglevel = 2; console_loglevel = 2;
} }
#else #else
...@@ -289,32 +282,6 @@ static int __init early_parse_mem(char *p) ...@@ -289,32 +282,6 @@ static int __init early_parse_mem(char *p)
} }
early_param("mem", early_parse_mem); early_param("mem", early_parse_mem);
/*
* "ipldelay=XXX[sm]" sets ipl delay in seconds or minutes
*/
static int __init early_parse_ipldelay(char *p)
{
unsigned long delay = 0;
delay = simple_strtoul(p, &p, 0);
switch (*p) {
case 's':
case 'S':
delay *= 1000000;
break;
case 'm':
case 'M':
delay *= 60 * 1000000;
}
/* now wait for the requested amount of time */
udelay(delay);
return 0;
}
early_param("ipldelay", early_parse_ipldelay);
#ifdef CONFIG_S390_SWITCH_AMODE #ifdef CONFIG_S390_SWITCH_AMODE
#ifdef CONFIG_PGSTE #ifdef CONFIG_PGSTE
unsigned int switch_amode = 1; unsigned int switch_amode = 1;
...@@ -804,11 +771,9 @@ setup_arch(char **cmdline_p) ...@@ -804,11 +771,9 @@ setup_arch(char **cmdline_p)
printk("We are running native (64 bit mode)\n"); printk("We are running native (64 bit mode)\n");
#endif /* CONFIG_64BIT */ #endif /* CONFIG_64BIT */
/* Save unparsed command line copy for /proc/cmdline */ /* Have one command line that is parsed and saved in /proc/cmdline */
strlcpy(boot_command_line, COMMAND_LINE, COMMAND_LINE_SIZE); /* boot_command_line has been already set up in early.c */
*cmdline_p = boot_command_line;
*cmdline_p = COMMAND_LINE;
*(*cmdline_p + COMMAND_LINE_SIZE - 1) = '\0';
ROOT_DEV = Root_RAM0; ROOT_DEV = Root_RAM0;
......
...@@ -313,8 +313,6 @@ void __init s390_init_cpu_topology(void) ...@@ -313,8 +313,6 @@ void __init s390_init_cpu_topology(void)
machine_has_topology_irq = 1; machine_has_topology_irq = 1;
tl_info = alloc_bootmem_pages(PAGE_SIZE); tl_info = alloc_bootmem_pages(PAGE_SIZE);
if (!tl_info)
goto error;
info = tl_info; info = tl_info;
stsi(info, 15, 1, 2); stsi(info, 15, 1, 2);
......
...@@ -136,7 +136,7 @@ static inline void set_vtimer(__u64 expires) ...@@ -136,7 +136,7 @@ static inline void set_vtimer(__u64 expires)
} }
#endif #endif
static void start_cpu_timer(void) void vtime_start_cpu_timer(void)
{ {
struct vtimer_queue *vt_list; struct vtimer_queue *vt_list;
...@@ -150,7 +150,7 @@ static void start_cpu_timer(void) ...@@ -150,7 +150,7 @@ static void start_cpu_timer(void)
set_vtimer(vt_list->idle); set_vtimer(vt_list->idle);
} }
static void stop_cpu_timer(void) void vtime_stop_cpu_timer(void)
{ {
struct vtimer_queue *vt_list; struct vtimer_queue *vt_list;
...@@ -318,8 +318,7 @@ static void internal_add_vtimer(struct vtimer_list *timer) ...@@ -318,8 +318,7 @@ static void internal_add_vtimer(struct vtimer_list *timer)
vt_list = &per_cpu(virt_cpu_timer, timer->cpu); vt_list = &per_cpu(virt_cpu_timer, timer->cpu);
spin_lock_irqsave(&vt_list->lock, flags); spin_lock_irqsave(&vt_list->lock, flags);
if (timer->cpu != smp_processor_id()) BUG_ON(timer->cpu != smp_processor_id());
printk("internal_add_vtimer: BUG, running on wrong CPU");
/* if list is empty we only have to set the timer */ /* if list is empty we only have to set the timer */
if (list_empty(&vt_list->list)) { if (list_empty(&vt_list->list)) {
...@@ -353,25 +352,12 @@ static void internal_add_vtimer(struct vtimer_list *timer) ...@@ -353,25 +352,12 @@ static void internal_add_vtimer(struct vtimer_list *timer)
put_cpu(); put_cpu();
} }
static inline int prepare_vtimer(struct vtimer_list *timer) static inline void prepare_vtimer(struct vtimer_list *timer)
{ {
if (!timer->function) { BUG_ON(!timer->function);
printk("add_virt_timer: uninitialized timer\n"); BUG_ON(!timer->expires || timer->expires > VTIMER_MAX_SLICE);
return -EINVAL; BUG_ON(vtimer_pending(timer));
}
if (!timer->expires || timer->expires > VTIMER_MAX_SLICE) {
printk("add_virt_timer: invalid timer expire value!\n");
return -EINVAL;
}
if (vtimer_pending(timer)) {
printk("add_virt_timer: timer pending\n");
return -EBUSY;
}
timer->cpu = get_cpu(); timer->cpu = get_cpu();
return 0;
} }
/* /*
...@@ -382,10 +368,7 @@ void add_virt_timer(void *new) ...@@ -382,10 +368,7 @@ void add_virt_timer(void *new)
struct vtimer_list *timer; struct vtimer_list *timer;
timer = (struct vtimer_list *)new; timer = (struct vtimer_list *)new;
prepare_vtimer(timer);
if (prepare_vtimer(timer) < 0)
return;
timer->interval = 0; timer->interval = 0;
internal_add_vtimer(timer); internal_add_vtimer(timer);
} }
...@@ -399,10 +382,7 @@ void add_virt_timer_periodic(void *new) ...@@ -399,10 +382,7 @@ void add_virt_timer_periodic(void *new)
struct vtimer_list *timer; struct vtimer_list *timer;
timer = (struct vtimer_list *)new; timer = (struct vtimer_list *)new;
prepare_vtimer(timer);
if (prepare_vtimer(timer) < 0)
return;
timer->interval = timer->expires; timer->interval = timer->expires;
internal_add_vtimer(timer); internal_add_vtimer(timer);
} }
...@@ -423,15 +403,8 @@ int mod_virt_timer(struct vtimer_list *timer, __u64 expires) ...@@ -423,15 +403,8 @@ int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
unsigned long flags; unsigned long flags;
int cpu; int cpu;
if (!timer->function) { BUG_ON(!timer->function);
printk("mod_virt_timer: uninitialized timer\n"); BUG_ON(!expires || expires > VTIMER_MAX_SLICE);
return -EINVAL;
}
if (!expires || expires > VTIMER_MAX_SLICE) {
printk("mod_virt_timer: invalid expire range\n");
return -EINVAL;
}
/* /*
* This is a common optimization triggered by the * This is a common optimization triggered by the
...@@ -444,6 +417,9 @@ int mod_virt_timer(struct vtimer_list *timer, __u64 expires) ...@@ -444,6 +417,9 @@ int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
cpu = get_cpu(); cpu = get_cpu();
vt_list = &per_cpu(virt_cpu_timer, cpu); vt_list = &per_cpu(virt_cpu_timer, cpu);
/* check if we run on the right CPU */
BUG_ON(timer->cpu != cpu);
/* disable interrupts before test if timer is pending */ /* disable interrupts before test if timer is pending */
spin_lock_irqsave(&vt_list->lock, flags); spin_lock_irqsave(&vt_list->lock, flags);
...@@ -458,14 +434,6 @@ int mod_virt_timer(struct vtimer_list *timer, __u64 expires) ...@@ -458,14 +434,6 @@ int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
return 0; return 0;
} }
/* check if we run on the right CPU */
if (timer->cpu != cpu) {
printk("mod_virt_timer: running on wrong CPU, check your code\n");
spin_unlock_irqrestore(&vt_list->lock, flags);
put_cpu();
return -EINVAL;
}
list_del_init(&timer->entry); list_del_init(&timer->entry);
timer->expires = expires; timer->expires = expires;
...@@ -536,24 +504,6 @@ void init_cpu_vtimer(void) ...@@ -536,24 +504,6 @@ void init_cpu_vtimer(void)
} }
static int vtimer_idle_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
switch (action) {
case S390_CPU_IDLE:
stop_cpu_timer();
break;
case S390_CPU_NOT_IDLE:
start_cpu_timer();
break;
}
return NOTIFY_OK;
}
static struct notifier_block vtimer_idle_nb = {
.notifier_call = vtimer_idle_notify,
};
void __init vtime_init(void) void __init vtime_init(void)
{ {
/* request the cpu timer external interrupt */ /* request the cpu timer external interrupt */
...@@ -561,9 +511,6 @@ void __init vtime_init(void) ...@@ -561,9 +511,6 @@ void __init vtime_init(void)
&ext_int_info_timer) != 0) &ext_int_info_timer) != 0)
panic("Couldn't request external interrupt 0x1005"); panic("Couldn't request external interrupt 0x1005");
if (register_idle_notifier(&vtimer_idle_nb))
panic("Couldn't register idle notifier");
/* Enable cpu timer interrupts on the boot cpu. */ /* Enable cpu timer interrupts on the boot cpu. */
init_cpu_vtimer(); init_cpu_vtimer();
} }
......
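The vtime.c hunks above convert the old printk-and-return argument checks into BUG_ON() assertions: an uninitialized timer, an out-of-range expiry, or a timer that is already pending is now treated as a caller bug rather than a recoverable error. A minimal standalone sketch of that pattern, with assert() standing in for BUG_ON() and an assumed VTIMER_MAX_SLICE value:

/* Illustrative sketch: argument checks as hard assertions instead of
 * error returns, mirroring the prepare_vtimer() change above.
 * Standalone C; assert() stands in for the kernel's BUG_ON(). */
#include <assert.h>
#include <stdio.h>

#define VTIMER_MAX_SLICE 0x7fffffffffffffffULL	/* assumed bound */

struct vtimer_list {
	void (*function)(unsigned long);
	unsigned long long expires;
	int pending;
};

static void prepare_vtimer(struct vtimer_list *timer)
{
	/* misuse is a programming error, so assert rather than return -EINVAL */
	assert(timer->function);
	assert(timer->expires && timer->expires <= VTIMER_MAX_SLICE);
	assert(!timer->pending);
}

static void tick(unsigned long data)
{
	printf("tick %lu\n", data);
}

int main(void)
{
	struct vtimer_list t = { .function = tick, .expires = 1000, .pending = 0 };

	prepare_vtimer(&t);	/* passes; an invalid timer would abort here */
	t.function(1);
	return 0;
}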
...@@ -202,3 +202,22 @@ void free_initrd_mem(unsigned long start, unsigned long end) ...@@ -202,3 +202,22 @@ void free_initrd_mem(unsigned long start, unsigned long end)
} }
} }
#endif #endif
#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
struct pglist_data *pgdat;
struct zone *zone;
int rc;
pgdat = NODE_DATA(nid);
zone = pgdat->node_zones + ZONE_NORMAL;
rc = vmem_add_mapping(start, size);
if (rc)
return rc;
rc = __add_pages(zone, PFN_DOWN(start), PFN_DOWN(size));
if (rc)
vmem_remove_mapping(start, size);
return rc;
}
#endif /* CONFIG_MEMORY_HOTPLUG */
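arch_add_memory() above is a straightforward map-then-register sequence: vmem_add_mapping() creates the kernel mapping for the hotplugged range, __add_pages() registers the page frames, and the mapping is removed again if the second step fails. A tiny sketch of that unwind-on-failure shape; the step names below are placeholders, only the ordering mirrors the function:

/* Sketch of the two-step "do A, do B, undo A if B fails" pattern used by
 * arch_add_memory() above.  step_a/step_b/undo_a are placeholders. */
#include <stdio.h>

static int step_a(void)  { puts("map range");   return 0; }
static int step_b(void)  { puts("add pages");   return -1; }	/* simulate failure */
static void undo_a(void) { puts("unmap range"); }

static int add_range(void)
{
	int rc;

	rc = step_a();
	if (rc)
		return rc;
	rc = step_b();
	if (rc)
		undo_a();	/* roll back the first step on failure */
	return rc;
}

int main(void)
{
	printf("add_range() = %d\n", add_range());
	return 0;
}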
...@@ -995,14 +995,14 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, ...@@ -995,14 +995,14 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
now = get_clock(); now = get_clock();
DBF_EVENT(DBF_ERR, "Interrupt: bus_id %s CS/DS %04x ip %08x", DBF_EVENT(DBF_ERR, "Interrupt: bus_id %s CS/DS %04x ip %08x",
cdev->dev.bus_id, ((irb->scsw.cstat<<8)|irb->scsw.dstat), cdev->dev.bus_id, ((irb->scsw.cmd.cstat << 8) |
(unsigned int) intparm); irb->scsw.cmd.dstat), (unsigned int) intparm);
/* check for unsolicited interrupts */ /* check for unsolicited interrupts */
cqr = (struct dasd_ccw_req *) intparm; cqr = (struct dasd_ccw_req *) intparm;
if (!cqr || ((irb->scsw.cc == 1) && if (!cqr || ((irb->scsw.cmd.cc == 1) &&
(irb->scsw.fctl & SCSW_FCTL_START_FUNC) && (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) &&
(irb->scsw.stctl & SCSW_STCTL_STATUS_PEND)) ) { (irb->scsw.cmd.stctl & SCSW_STCTL_STATUS_PEND))) {
if (cqr && cqr->status == DASD_CQR_IN_IO) if (cqr && cqr->status == DASD_CQR_IN_IO)
cqr->status = DASD_CQR_QUEUED; cqr->status = DASD_CQR_QUEUED;
device = dasd_device_from_cdev_locked(cdev); device = dasd_device_from_cdev_locked(cdev);
...@@ -1025,7 +1025,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, ...@@ -1025,7 +1025,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
/* Check for clear pending */ /* Check for clear pending */
if (cqr->status == DASD_CQR_CLEAR_PENDING && if (cqr->status == DASD_CQR_CLEAR_PENDING &&
irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) { irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
cqr->status = DASD_CQR_CLEARED; cqr->status = DASD_CQR_CLEARED;
dasd_device_clear_timer(device); dasd_device_clear_timer(device);
wake_up(&dasd_flush_wq); wake_up(&dasd_flush_wq);
...@@ -1041,11 +1041,11 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, ...@@ -1041,11 +1041,11 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
return; return;
} }
DBF_DEV_EVENT(DBF_DEBUG, device, "Int: CS/DS 0x%04x for cqr %p", DBF_DEV_EVENT(DBF_DEBUG, device, "Int: CS/DS 0x%04x for cqr %p",
((irb->scsw.cstat << 8) | irb->scsw.dstat), cqr); ((irb->scsw.cmd.cstat << 8) | irb->scsw.cmd.dstat), cqr);
next = NULL; next = NULL;
expires = 0; expires = 0;
if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) && if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
irb->scsw.cstat == 0 && !irb->esw.esw0.erw.cons) { irb->scsw.cmd.cstat == 0 && !irb->esw.esw0.erw.cons) {
/* request was completed successfully */ /* request was completed successfully */
cqr->status = DASD_CQR_SUCCESS; cqr->status = DASD_CQR_SUCCESS;
cqr->stopclk = now; cqr->stopclk = now;
......
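The dasd hunk above, like most of the driver hunks that follow, mechanically rewrites irb->scsw.<field> as irb->scsw.cmd.<field>: the subchannel status word is now accessed through a mode-specific member, and command-mode fields live under scsw.cmd. A toy model of that layout; the field widths and the transport-mode variant shown here are assumptions, only the extra .cmd level is the point:

/* Toy model of wrapping mode-specific status fields in a union so that
 * command-mode accesses read scsw.cmd.<field>.  Field names follow the
 * diff; widths and the tm (transport-mode) variant are placeholders. */
#include <stdio.h>

struct cmd_scsw {
	unsigned int cstat:8;	/* subchannel status */
	unsigned int dstat:8;	/* device status */
	unsigned int count:16;	/* residual count */
};

struct tm_scsw {		/* assumed second union member */
	unsigned int fcxs:8;
	unsigned int schxs:8;
	unsigned int count:16;
};

union scsw {
	struct cmd_scsw cmd;
	struct tm_scsw tm;
};

int main(void)
{
	union scsw scsw = { .cmd = { .cstat = 0, .dstat = 0x0c, .count = 0 } };

	/* old code: scsw.dstat -- new code: scsw.cmd.dstat */
	printf("device status 0x%02x\n", scsw.cmd.dstat);
	return 0;
}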
...@@ -1572,7 +1572,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense) ...@@ -1572,7 +1572,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
/* determine the address of the CCW to be restarted */ /* determine the address of the CCW to be restarted */
/* Imprecise ending is not set -> addr from IRB-SCSW */ /* Imprecise ending is not set -> addr from IRB-SCSW */
cpa = default_erp->refers->irb.scsw.cpa; cpa = default_erp->refers->irb.scsw.cmd.cpa;
if (cpa == 0) { if (cpa == 0) {
...@@ -1725,7 +1725,7 @@ dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense) ...@@ -1725,7 +1725,7 @@ dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense)
/* determine the address of the CCW to be restarted */ /* determine the address of the CCW to be restarted */
/* Imprecise ending is not set -> addr from IRB-SCSW */ /* Imprecise ending is not set -> addr from IRB-SCSW */
cpa = previous_erp->irb.scsw.cpa; cpa = previous_erp->irb.scsw.cmd.cpa;
if (cpa == 0) { if (cpa == 0) {
...@@ -2171,7 +2171,7 @@ dasd_3990_erp_control_check(struct dasd_ccw_req *erp) ...@@ -2171,7 +2171,7 @@ dasd_3990_erp_control_check(struct dasd_ccw_req *erp)
{ {
struct dasd_device *device = erp->startdev; struct dasd_device *device = erp->startdev;
if (erp->refers->irb.scsw.cstat & (SCHN_STAT_INTF_CTRL_CHK if (erp->refers->irb.scsw.cmd.cstat & (SCHN_STAT_INTF_CTRL_CHK
| SCHN_STAT_CHN_CTRL_CHK)) { | SCHN_STAT_CHN_CTRL_CHK)) {
DEV_MESSAGE(KERN_DEBUG, device, "%s", DEV_MESSAGE(KERN_DEBUG, device, "%s",
"channel or interface control check"); "channel or interface control check");
...@@ -2352,9 +2352,9 @@ dasd_3990_erp_error_match(struct dasd_ccw_req *cqr1, struct dasd_ccw_req *cqr2) ...@@ -2352,9 +2352,9 @@ dasd_3990_erp_error_match(struct dasd_ccw_req *cqr1, struct dasd_ccw_req *cqr2)
if ((cqr1->irb.esw.esw0.erw.cons == 0) && if ((cqr1->irb.esw.esw0.erw.cons == 0) &&
(cqr2->irb.esw.esw0.erw.cons == 0)) { (cqr2->irb.esw.esw0.erw.cons == 0)) {
if ((cqr1->irb.scsw.cstat & (SCHN_STAT_INTF_CTRL_CHK | if ((cqr1->irb.scsw.cmd.cstat & (SCHN_STAT_INTF_CTRL_CHK |
SCHN_STAT_CHN_CTRL_CHK)) == SCHN_STAT_CHN_CTRL_CHK)) ==
(cqr2->irb.scsw.cstat & (SCHN_STAT_INTF_CTRL_CHK | (cqr2->irb.scsw.cmd.cstat & (SCHN_STAT_INTF_CTRL_CHK |
SCHN_STAT_CHN_CTRL_CHK))) SCHN_STAT_CHN_CTRL_CHK)))
return 1; /* match with ifcc*/ return 1; /* match with ifcc*/
} }
...@@ -2622,8 +2622,9 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr) ...@@ -2622,8 +2622,9 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
} }
/* double-check if current erp/cqr was successful */ /* double-check if current erp/cqr was successful */
if ((cqr->irb.scsw.cstat == 0x00) && if ((cqr->irb.scsw.cmd.cstat == 0x00) &&
(cqr->irb.scsw.dstat == (DEV_STAT_CHN_END|DEV_STAT_DEV_END))) { (cqr->irb.scsw.cmd.dstat ==
(DEV_STAT_CHN_END | DEV_STAT_DEV_END))) {
DEV_MESSAGE(KERN_DEBUG, device, DEV_MESSAGE(KERN_DEBUG, device,
"ERP called for successful request %p" "ERP called for successful request %p"
......
...@@ -1404,13 +1404,14 @@ static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device, ...@@ -1404,13 +1404,14 @@ static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device,
/* first of all check for state change pending interrupt */ /* first of all check for state change pending interrupt */
mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP; mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
if ((irb->scsw.dstat & mask) == mask) { if ((irb->scsw.cmd.dstat & mask) == mask) {
dasd_generic_handle_state_change(device); dasd_generic_handle_state_change(device);
return; return;
} }
/* summary unit check */ /* summary unit check */
if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) && irb->ecw[7] == 0x0D) { if ((irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
(irb->ecw[7] == 0x0D)) {
dasd_alias_handle_summary_unit_check(device, irb); dasd_alias_handle_summary_unit_check(device, irb);
return; return;
} }
...@@ -2068,11 +2069,11 @@ static void dasd_eckd_dump_sense(struct dasd_device *device, ...@@ -2068,11 +2069,11 @@ static void dasd_eckd_dump_sense(struct dasd_device *device,
device->cdev->dev.bus_id); device->cdev->dev.bus_id);
len += sprintf(page + len, KERN_ERR PRINTK_HEADER len += sprintf(page + len, KERN_ERR PRINTK_HEADER
" in req: %p CS: 0x%02X DS: 0x%02X\n", req, " in req: %p CS: 0x%02X DS: 0x%02X\n", req,
irb->scsw.cstat, irb->scsw.dstat); irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
len += sprintf(page + len, KERN_ERR PRINTK_HEADER len += sprintf(page + len, KERN_ERR PRINTK_HEADER
" device %s: Failing CCW: %p\n", " device %s: Failing CCW: %p\n",
device->cdev->dev.bus_id, device->cdev->dev.bus_id,
(void *) (addr_t) irb->scsw.cpa); (void *) (addr_t) irb->scsw.cmd.cpa);
if (irb->esw.esw0.erw.cons) { if (irb->esw.esw0.erw.cons) {
for (sl = 0; sl < 4; sl++) { for (sl = 0; sl < 4; sl++) {
len += sprintf(page + len, KERN_ERR PRINTK_HEADER len += sprintf(page + len, KERN_ERR PRINTK_HEADER
...@@ -2122,7 +2123,8 @@ static void dasd_eckd_dump_sense(struct dasd_device *device, ...@@ -2122,7 +2123,8 @@ static void dasd_eckd_dump_sense(struct dasd_device *device,
/* scsw->cda is either valid or zero */ /* scsw->cda is either valid or zero */
len = 0; len = 0;
from = ++to; from = ++to;
fail = (struct ccw1 *)(addr_t) irb->scsw.cpa; /* failing CCW */ fail = (struct ccw1 *)(addr_t)
irb->scsw.cmd.cpa; /* failing CCW */
if (from < fail - 2) { if (from < fail - 2) {
from = fail - 2; /* there is a gap - print header */ from = fail - 2; /* there is a gap - print header */
len += sprintf(page, KERN_ERR PRINTK_HEADER "......\n"); len += sprintf(page, KERN_ERR PRINTK_HEADER "......\n");
......
...@@ -222,7 +222,7 @@ static void dasd_fba_handle_unsolicited_interrupt(struct dasd_device *device, ...@@ -222,7 +222,7 @@ static void dasd_fba_handle_unsolicited_interrupt(struct dasd_device *device,
/* first of all check for state change pending interrupt */ /* first of all check for state change pending interrupt */
mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP; mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
if ((irb->scsw.dstat & mask) == mask) { if ((irb->scsw.cmd.dstat & mask) == mask) {
dasd_generic_handle_state_change(device); dasd_generic_handle_state_change(device);
return; return;
} }
...@@ -449,11 +449,11 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req, ...@@ -449,11 +449,11 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
device->cdev->dev.bus_id); device->cdev->dev.bus_id);
len += sprintf(page + len, KERN_ERR PRINTK_HEADER len += sprintf(page + len, KERN_ERR PRINTK_HEADER
" in req: %p CS: 0x%02X DS: 0x%02X\n", req, " in req: %p CS: 0x%02X DS: 0x%02X\n", req,
irb->scsw.cstat, irb->scsw.dstat); irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
len += sprintf(page + len, KERN_ERR PRINTK_HEADER len += sprintf(page + len, KERN_ERR PRINTK_HEADER
" device %s: Failing CCW: %p\n", " device %s: Failing CCW: %p\n",
device->cdev->dev.bus_id, device->cdev->dev.bus_id,
(void *) (addr_t) irb->scsw.cpa); (void *) (addr_t) irb->scsw.cmd.cpa);
if (irb->esw.esw0.erw.cons) { if (irb->esw.esw0.erw.cons) {
for (sl = 0; sl < 4; sl++) { for (sl = 0; sl < 4; sl++) {
len += sprintf(page + len, KERN_ERR PRINTK_HEADER len += sprintf(page + len, KERN_ERR PRINTK_HEADER
...@@ -498,11 +498,11 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req, ...@@ -498,11 +498,11 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
/* print failing CCW area */ /* print failing CCW area */
len = 0; len = 0;
if (act < ((struct ccw1 *)(addr_t) irb->scsw.cpa) - 2) { if (act < ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2) {
act = ((struct ccw1 *)(addr_t) irb->scsw.cpa) - 2; act = ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2;
len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n"); len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
} }
end = min((struct ccw1 *)(addr_t) irb->scsw.cpa + 2, last); end = min((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa + 2, last);
while (act <= end) { while (act <= end) {
len += sprintf(page + len, KERN_ERR PRINTK_HEADER len += sprintf(page + len, KERN_ERR PRINTK_HEADER
" CCW %p: %08X %08X DAT:", " CCW %p: %08X %08X DAT:",
......
...@@ -167,10 +167,8 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch ...@@ -167,10 +167,8 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch
struct dcssblk_dev_info *dev_info; struct dcssblk_dev_info *dev_info;
int rc; int rc;
if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0')) { if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0'))
PRINT_WARN("Invalid value, must be 0 or 1\n");
return -EINVAL; return -EINVAL;
}
down_write(&dcssblk_devices_sem); down_write(&dcssblk_devices_sem);
dev_info = container_of(dev, struct dcssblk_dev_info, dev); dev_info = container_of(dev, struct dcssblk_dev_info, dev);
if (atomic_read(&dev_info->use_count)) { if (atomic_read(&dev_info->use_count)) {
...@@ -215,7 +213,6 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch ...@@ -215,7 +213,6 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch
set_disk_ro(dev_info->gd, 0); set_disk_ro(dev_info->gd, 0);
} }
} else { } else {
PRINT_WARN("Invalid value, must be 0 or 1\n");
rc = -EINVAL; rc = -EINVAL;
goto out; goto out;
} }
...@@ -258,10 +255,8 @@ dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char ...@@ -258,10 +255,8 @@ dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char
{ {
struct dcssblk_dev_info *dev_info; struct dcssblk_dev_info *dev_info;
if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0')) { if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0'))
PRINT_WARN("Invalid value, must be 0 or 1\n");
return -EINVAL; return -EINVAL;
}
dev_info = container_of(dev, struct dcssblk_dev_info, dev); dev_info = container_of(dev, struct dcssblk_dev_info, dev);
down_write(&dcssblk_devices_sem); down_write(&dcssblk_devices_sem);
...@@ -289,7 +284,6 @@ dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char ...@@ -289,7 +284,6 @@ dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char
} }
} else { } else {
up_write(&dcssblk_devices_sem); up_write(&dcssblk_devices_sem);
PRINT_WARN("Invalid value, must be 0 or 1\n");
return -EINVAL; return -EINVAL;
} }
up_write(&dcssblk_devices_sem); up_write(&dcssblk_devices_sem);
...@@ -441,7 +435,6 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char ...@@ -441,7 +435,6 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
goto out; goto out;
unregister_dev: unregister_dev:
PRINT_ERR("device_create_file() failed!\n");
list_del(&dev_info->lh); list_del(&dev_info->lh);
blk_cleanup_queue(dev_info->dcssblk_queue); blk_cleanup_queue(dev_info->dcssblk_queue);
dev_info->gd->queue = NULL; dev_info->gd->queue = NULL;
...@@ -702,10 +695,8 @@ dcssblk_check_params(void) ...@@ -702,10 +695,8 @@ dcssblk_check_params(void)
static void __exit static void __exit
dcssblk_exit(void) dcssblk_exit(void)
{ {
PRINT_DEBUG("DCSSBLOCK EXIT...\n");
s390_root_dev_unregister(dcssblk_root_dev); s390_root_dev_unregister(dcssblk_root_dev);
unregister_blkdev(dcssblk_major, DCSSBLK_NAME); unregister_blkdev(dcssblk_major, DCSSBLK_NAME);
PRINT_DEBUG("...finished!\n");
} }
static int __init static int __init
...@@ -713,27 +704,21 @@ dcssblk_init(void) ...@@ -713,27 +704,21 @@ dcssblk_init(void)
{ {
int rc; int rc;
PRINT_DEBUG("DCSSBLOCK INIT...\n");
dcssblk_root_dev = s390_root_dev_register("dcssblk"); dcssblk_root_dev = s390_root_dev_register("dcssblk");
if (IS_ERR(dcssblk_root_dev)) { if (IS_ERR(dcssblk_root_dev))
PRINT_ERR("device_register() failed!\n");
return PTR_ERR(dcssblk_root_dev); return PTR_ERR(dcssblk_root_dev);
}
rc = device_create_file(dcssblk_root_dev, &dev_attr_add); rc = device_create_file(dcssblk_root_dev, &dev_attr_add);
if (rc) { if (rc) {
PRINT_ERR("device_create_file(add) failed!\n");
s390_root_dev_unregister(dcssblk_root_dev); s390_root_dev_unregister(dcssblk_root_dev);
return rc; return rc;
} }
rc = device_create_file(dcssblk_root_dev, &dev_attr_remove); rc = device_create_file(dcssblk_root_dev, &dev_attr_remove);
if (rc) { if (rc) {
PRINT_ERR("device_create_file(remove) failed!\n");
s390_root_dev_unregister(dcssblk_root_dev); s390_root_dev_unregister(dcssblk_root_dev);
return rc; return rc;
} }
rc = register_blkdev(0, DCSSBLK_NAME); rc = register_blkdev(0, DCSSBLK_NAME);
if (rc < 0) { if (rc < 0) {
PRINT_ERR("Can't get dynamic major!\n");
s390_root_dev_unregister(dcssblk_root_dev); s390_root_dev_unregister(dcssblk_root_dev);
return rc; return rc;
} }
...@@ -742,7 +727,6 @@ dcssblk_init(void) ...@@ -742,7 +727,6 @@ dcssblk_init(void)
dcssblk_check_params(); dcssblk_check_params();
PRINT_DEBUG("...finished!\n");
return 0; return 0;
} }
......
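The dcssblk store handlers keep their "0"/"1" input check but drop the PRINT_WARN chatter, so a bad value is reported only through -EINVAL. A hedged sketch of that parse step as a standalone helper (parse_bool_attr and the check on inbuf[0] are illustrative; the inbuf[1] test mirrors the hunk):

/* Sketch of the boolean-attribute parse used by dcssblk_shared_store():
 * accept "0" or "1", optionally followed by '\n', and reject everything
 * else with -EINVAL and no log message.  parse_bool_attr is invented. */
#include <errno.h>
#include <stddef.h>
#include <stdio.h>

static int parse_bool_attr(const char *inbuf, size_t count, int *val)
{
	if (count < 1 || (inbuf[0] != '0' && inbuf[0] != '1'))
		return -EINVAL;
	if (count > 1 && inbuf[1] != '\n' && inbuf[1] != '\0')
		return -EINVAL;
	*val = inbuf[0] - '0';
	return 0;
}

int main(void)
{
	int v, rc;

	rc = parse_bool_attr("1\n", 2, &v);
	printf("rc=%d v=%d\n", rc, v);	/* accepted: rc=0 v=1 */
	rc = parse_bool_attr("yes", 3, &v);
	printf("rc=%d\n", rc);		/* rejected: -EINVAL */
	return 0;
}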
...@@ -100,15 +100,10 @@ static int xpram_page_in (unsigned long page_addr, unsigned int xpage_index) ...@@ -100,15 +100,10 @@ static int xpram_page_in (unsigned long page_addr, unsigned int xpage_index)
: "+d" (cc) : "a" (__pa(page_addr)), "d" (xpage_index) : "cc"); : "+d" (cc) : "a" (__pa(page_addr)), "d" (xpage_index) : "cc");
if (cc == 3) if (cc == 3)
return -ENXIO; return -ENXIO;
if (cc == 2) { if (cc == 2)
PRINT_ERR("expanded storage lost!\n");
return -ENXIO; return -ENXIO;
} if (cc == 1)
if (cc == 1) {
PRINT_ERR("page in failed for page index %u.\n",
xpage_index);
return -EIO; return -EIO;
}
return 0; return 0;
} }
...@@ -135,15 +130,10 @@ static long xpram_page_out (unsigned long page_addr, unsigned int xpage_index) ...@@ -135,15 +130,10 @@ static long xpram_page_out (unsigned long page_addr, unsigned int xpage_index)
: "+d" (cc) : "a" (__pa(page_addr)), "d" (xpage_index) : "cc"); : "+d" (cc) : "a" (__pa(page_addr)), "d" (xpage_index) : "cc");
if (cc == 3) if (cc == 3)
return -ENXIO; return -ENXIO;
if (cc == 2) { if (cc == 2)
PRINT_ERR("expanded storage lost!\n");
return -ENXIO; return -ENXIO;
} if (cc == 1)
if (cc == 1) {
PRINT_ERR("page out failed for page index %u.\n",
xpage_index);
return -EIO; return -EIO;
}
return 0; return 0;
} }
......
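xpram_page_in() and xpram_page_out() now translate the condition code from the inline assembly directly into an errno with no log message: cc 2 and cc 3 become -ENXIO, cc 1 becomes -EIO, cc 0 is success. The same mapping as a small helper; xpram_cc_to_errno is an invented name:

/* Sketch: map an expanded-storage condition code to an errno, exactly as
 * the xpram hunks above do after dropping the PRINT_ERR calls. */
#include <errno.h>
#include <stdio.h>

static int xpram_cc_to_errno(int cc)
{
	switch (cc) {
	case 0:
		return 0;	/* success */
	case 1:
		return -EIO;	/* page in/out failed */
	case 2:			/* expanded storage lost */
	case 3:			/* not configured */
		return -ENXIO;
	default:
		return -EIO;
	}
}

int main(void)
{
	for (int cc = 0; cc <= 3; cc++)
		printf("cc=%d -> %d\n", cc, xpram_cc_to_errno(cc));
	return 0;
}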
...@@ -93,9 +93,6 @@ struct raw3215_info { ...@@ -93,9 +93,6 @@ struct raw3215_info {
struct raw3215_req *queued_write;/* pointer to queued write requests */ struct raw3215_req *queued_write;/* pointer to queued write requests */
wait_queue_head_t empty_wait; /* wait queue for flushing */ wait_queue_head_t empty_wait; /* wait queue for flushing */
struct timer_list timer; /* timer for delayed output */ struct timer_list timer; /* timer for delayed output */
char *message; /* pending message from raw3215_irq */
int msg_dstat; /* dstat for pending message */
int msg_cstat; /* cstat for pending message */
int line_pos; /* position on the line (for tabs) */ int line_pos; /* position on the line (for tabs) */
char ubuffer[80]; /* copy_from_user buffer */ char ubuffer[80]; /* copy_from_user buffer */
}; };
...@@ -359,11 +356,6 @@ raw3215_tasklet(void *data) ...@@ -359,11 +356,6 @@ raw3215_tasklet(void *data)
raw3215_mk_write_req(raw); raw3215_mk_write_req(raw);
raw3215_try_io(raw); raw3215_try_io(raw);
spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags); spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
/* Check for pending message from raw3215_irq */
if (raw->message != NULL) {
printk(raw->message, raw->msg_dstat, raw->msg_cstat);
raw->message = NULL;
}
tty = raw->tty; tty = raw->tty;
if (tty != NULL && if (tty != NULL &&
RAW3215_BUFFER_SIZE - raw->count >= RAW3215_MIN_SPACE) { RAW3215_BUFFER_SIZE - raw->count >= RAW3215_MIN_SPACE) {
...@@ -381,20 +373,14 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) ...@@ -381,20 +373,14 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
struct raw3215_req *req; struct raw3215_req *req;
struct tty_struct *tty; struct tty_struct *tty;
int cstat, dstat; int cstat, dstat;
int count, slen; int count;
raw = cdev->dev.driver_data; raw = cdev->dev.driver_data;
req = (struct raw3215_req *) intparm; req = (struct raw3215_req *) intparm;
cstat = irb->scsw.cstat; cstat = irb->scsw.cmd.cstat;
dstat = irb->scsw.dstat; dstat = irb->scsw.cmd.dstat;
if (cstat != 0) { if (cstat != 0)
raw->message = KERN_WARNING
"Got nonzero channel status in raw3215_irq "
"(dev sts 0x%2x, sch sts 0x%2x)";
raw->msg_dstat = dstat;
raw->msg_cstat = cstat;
tasklet_schedule(&raw->tasklet); tasklet_schedule(&raw->tasklet);
}
if (dstat & 0x01) { /* we got a unit exception */ if (dstat & 0x01) { /* we got a unit exception */
dstat &= ~0x01; /* we can ignore it */ dstat &= ~0x01; /* we can ignore it */
} }
...@@ -404,8 +390,6 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) ...@@ -404,8 +390,6 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
break; break;
/* Attention interrupt, someone hit the enter key */ /* Attention interrupt, someone hit the enter key */
raw3215_mk_read_req(raw); raw3215_mk_read_req(raw);
if (MACHINE_IS_P390)
memset(raw->inbuf, 0, RAW3215_INBUF_SIZE);
tasklet_schedule(&raw->tasklet); tasklet_schedule(&raw->tasklet);
break; break;
case 0x08: case 0x08:
...@@ -415,7 +399,7 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) ...@@ -415,7 +399,7 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
return; /* That shouldn't happen ... */ return; /* That shouldn't happen ... */
if (req->type == RAW3215_READ) { if (req->type == RAW3215_READ) {
/* store residual count, then wait for device end */ /* store residual count, then wait for device end */
req->residual = irb->scsw.count; req->residual = irb->scsw.cmd.count;
} }
if (dstat == 0x08) if (dstat == 0x08)
break; break;
...@@ -428,11 +412,6 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) ...@@ -428,11 +412,6 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
tty = raw->tty; tty = raw->tty;
count = 160 - req->residual; count = 160 - req->residual;
if (MACHINE_IS_P390) {
slen = strnlen(raw->inbuf, RAW3215_INBUF_SIZE);
if (count > slen)
count = slen;
} else
EBCASC(raw->inbuf, count); EBCASC(raw->inbuf, count);
cchar = ctrlchar_handle(raw->inbuf, count, tty); cchar = ctrlchar_handle(raw->inbuf, count, tty);
switch (cchar & CTRLCHAR_MASK) { switch (cchar & CTRLCHAR_MASK) {
...@@ -481,11 +460,6 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) ...@@ -481,11 +460,6 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
raw->flags &= ~RAW3215_WORKING; raw->flags &= ~RAW3215_WORKING;
raw3215_free_req(req); raw3215_free_req(req);
} }
raw->message = KERN_WARNING
"Spurious interrupt in in raw3215_irq "
"(dev sts 0x%2x, sch sts 0x%2x)";
raw->msg_dstat = dstat;
raw->msg_cstat = cstat;
tasklet_schedule(&raw->tasklet); tasklet_schedule(&raw->tasklet);
} }
return; return;
...@@ -883,7 +857,6 @@ con3215_init(void) ...@@ -883,7 +857,6 @@ con3215_init(void)
free_bootmem((unsigned long) raw->buffer, RAW3215_BUFFER_SIZE); free_bootmem((unsigned long) raw->buffer, RAW3215_BUFFER_SIZE);
free_bootmem((unsigned long) raw, sizeof(struct raw3215_info)); free_bootmem((unsigned long) raw, sizeof(struct raw3215_info));
raw3215[0] = NULL; raw3215[0] = NULL;
printk("Couldn't find a 3215 console device\n");
return -ENODEV; return -ENODEV;
} }
register_console(&con3215); register_console(&con3215);
...@@ -1157,7 +1130,6 @@ tty3215_init(void) ...@@ -1157,7 +1130,6 @@ tty3215_init(void)
tty_set_operations(driver, &tty3215_ops); tty_set_operations(driver, &tty3215_ops);
ret = tty_register_driver(driver); ret = tty_register_driver(driver);
if (ret) { if (ret) {
printk("Couldn't register tty3215 driver\n");
put_tty_driver(driver); put_tty_driver(driver);
return ret; return ret;
} }
......
...@@ -411,15 +411,15 @@ static int ...@@ -411,15 +411,15 @@ static int
con3270_irq(struct con3270 *cp, struct raw3270_request *rq, struct irb *irb) con3270_irq(struct con3270 *cp, struct raw3270_request *rq, struct irb *irb)
{ {
/* Handle ATTN. Schedule tasklet to read aid. */ /* Handle ATTN. Schedule tasklet to read aid. */
if (irb->scsw.dstat & DEV_STAT_ATTENTION) if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION)
con3270_issue_read(cp); con3270_issue_read(cp);
if (rq) { if (rq) {
if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
rq->rc = -EIO; rq->rc = -EIO;
else else
/* Normal end. Copy residual count. */ /* Normal end. Copy residual count. */
rq->rescnt = irb->scsw.count; rq->rescnt = irb->scsw.cmd.count;
} }
return RAW3270_IO_DONE; return RAW3270_IO_DONE;
} }
......
...@@ -216,17 +216,17 @@ static int ...@@ -216,17 +216,17 @@ static int
fs3270_irq(struct fs3270 *fp, struct raw3270_request *rq, struct irb *irb) fs3270_irq(struct fs3270 *fp, struct raw3270_request *rq, struct irb *irb)
{ {
/* Handle ATTN. Set indication and wake waiters for attention. */ /* Handle ATTN. Set indication and wake waiters for attention. */
if (irb->scsw.dstat & DEV_STAT_ATTENTION) { if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
fp->attention = 1; fp->attention = 1;
wake_up(&fp->wait); wake_up(&fp->wait);
} }
if (rq) { if (rq) {
if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
rq->rc = -EIO; rq->rc = -EIO;
else else
/* Normal end. Copy residual count. */ /* Normal end. Copy residual count. */
rq->rescnt = irb->scsw.count; rq->rescnt = irb->scsw.cmd.count;
} }
return RAW3270_IO_DONE; return RAW3270_IO_DONE;
} }
...@@ -512,11 +512,8 @@ fs3270_init(void) ...@@ -512,11 +512,8 @@ fs3270_init(void)
int rc; int rc;
rc = register_chrdev(IBM_FS3270_MAJOR, "fs3270", &fs3270_fops); rc = register_chrdev(IBM_FS3270_MAJOR, "fs3270", &fs3270_fops);
if (rc) { if (rc)
printk(KERN_ERR "fs3270 can't get major number %d: errno %d\n",
IBM_FS3270_MAJOR, rc);
return rc; return rc;
}
return 0; return 0;
} }
......
...@@ -3,9 +3,8 @@ ...@@ -3,9 +3,8 @@
* *
* Character device driver for reading z/VM *MONITOR service records. * Character device driver for reading z/VM *MONITOR service records.
* *
* Copyright 2004 IBM Corporation, IBM Deutschland Entwicklung GmbH. * Copyright IBM Corp. 2004, 2008
* * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
* Author: Gerald Schaefer <geraldsc@de.ibm.com>
*/ */
#include <linux/module.h> #include <linux/module.h>
...@@ -18,12 +17,11 @@ ...@@ -18,12 +17,11 @@
#include <linux/ctype.h> #include <linux/ctype.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/poll.h>
#include <net/iucv/iucv.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/ebcdic.h> #include <asm/ebcdic.h>
#include <asm/extmem.h> #include <asm/extmem.h>
#include <linux/poll.h>
#include <net/iucv/iucv.h>
//#define MON_DEBUG /* Debug messages on/off */ //#define MON_DEBUG /* Debug messages on/off */
...@@ -152,10 +150,7 @@ static int mon_check_mca(struct mon_msg *monmsg) ...@@ -152,10 +150,7 @@ static int mon_check_mca(struct mon_msg *monmsg)
(mon_mca_end(monmsg) > mon_dcss_end) || (mon_mca_end(monmsg) > mon_dcss_end) ||
(mon_mca_start(monmsg) < mon_dcss_start) || (mon_mca_start(monmsg) < mon_dcss_start) ||
((mon_mca_type(monmsg, 1) == 0) && (mon_mca_type(monmsg, 2) == 0))) ((mon_mca_type(monmsg, 1) == 0) && (mon_mca_type(monmsg, 2) == 0)))
{
P_DEBUG("READ, IGNORED INVALID MCA\n\n");
return -EINVAL; return -EINVAL;
}
return 0; return 0;
} }
...@@ -164,10 +159,6 @@ static int mon_send_reply(struct mon_msg *monmsg, ...@@ -164,10 +159,6 @@ static int mon_send_reply(struct mon_msg *monmsg,
{ {
int rc; int rc;
P_DEBUG("read, REPLY: pathid = 0x%04X, msgid = 0x%08X, trgcls = "
"0x%08X\n\n",
monpriv->path->pathid, monmsg->msg.id, monmsg->msg.class);
rc = iucv_message_reply(monpriv->path, &monmsg->msg, rc = iucv_message_reply(monpriv->path, &monmsg->msg,
IUCV_IPRMDATA, NULL, 0); IUCV_IPRMDATA, NULL, 0);
atomic_dec(&monpriv->msglim_count); atomic_dec(&monpriv->msglim_count);
...@@ -202,15 +193,12 @@ static struct mon_private *mon_alloc_mem(void) ...@@ -202,15 +193,12 @@ static struct mon_private *mon_alloc_mem(void)
struct mon_private *monpriv; struct mon_private *monpriv;
monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL); monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL);
if (!monpriv) { if (!monpriv)
P_ERROR("no memory for monpriv\n");
return NULL; return NULL;
}
for (i = 0; i < MON_MSGLIM; i++) { for (i = 0; i < MON_MSGLIM; i++) {
monpriv->msg_array[i] = kzalloc(sizeof(struct mon_msg), monpriv->msg_array[i] = kzalloc(sizeof(struct mon_msg),
GFP_KERNEL); GFP_KERNEL);
if (!monpriv->msg_array[i]) { if (!monpriv->msg_array[i]) {
P_ERROR("open, no memory for msg_array\n");
mon_free_mem(monpriv); mon_free_mem(monpriv);
return NULL; return NULL;
} }
...@@ -218,41 +206,10 @@ static struct mon_private *mon_alloc_mem(void) ...@@ -218,41 +206,10 @@ static struct mon_private *mon_alloc_mem(void)
return monpriv; return monpriv;
} }
static inline void mon_read_debug(struct mon_msg *monmsg,
struct mon_private *monpriv)
{
#ifdef MON_DEBUG
u8 msg_type[2], mca_type;
unsigned long records_len;
records_len = mon_rec_end(monmsg) - mon_rec_start(monmsg) + 1;
memcpy(msg_type, &monmsg->msg.class, 2);
EBCASC(msg_type, 2);
mca_type = mon_mca_type(monmsg, 0);
EBCASC(&mca_type, 1);
P_DEBUG("read, mon_read_index = %i, mon_write_index = %i\n",
monpriv->read_index, monpriv->write_index);
P_DEBUG("read, pathid = 0x%04X, msgid = 0x%08X, trgcls = 0x%08X\n",
monpriv->path->pathid, monmsg->msg.id, monmsg->msg.class);
P_DEBUG("read, msg_type = '%c%c', mca_type = '%c' / 0x%X / 0x%X\n",
msg_type[0], msg_type[1], mca_type ? mca_type : 'X',
mon_mca_type(monmsg, 1), mon_mca_type(monmsg, 2));
P_DEBUG("read, MCA: start = 0x%lX, end = 0x%lX\n",
mon_mca_start(monmsg), mon_mca_end(monmsg));
P_DEBUG("read, REC: start = 0x%X, end = 0x%X, len = %lu\n\n",
mon_rec_start(monmsg), mon_rec_end(monmsg), records_len);
if (mon_mca_size(monmsg) > 12)
P_DEBUG("READ, MORE THAN ONE MCA\n\n");
#endif
}
static inline void mon_next_mca(struct mon_msg *monmsg) static inline void mon_next_mca(struct mon_msg *monmsg)
{ {
if (likely((mon_mca_size(monmsg) - monmsg->mca_offset) == 12)) if (likely((mon_mca_size(monmsg) - monmsg->mca_offset) == 12))
return; return;
P_DEBUG("READ, NEXT MCA\n\n");
monmsg->mca_offset += 12; monmsg->mca_offset += 12;
monmsg->pos = 0; monmsg->pos = 0;
} }
...@@ -269,7 +226,6 @@ static struct mon_msg *mon_next_message(struct mon_private *monpriv) ...@@ -269,7 +226,6 @@ static struct mon_msg *mon_next_message(struct mon_private *monpriv)
monmsg->msglim_reached = 0; monmsg->msglim_reached = 0;
monmsg->pos = 0; monmsg->pos = 0;
monmsg->mca_offset = 0; monmsg->mca_offset = 0;
P_WARNING("read, message limit reached\n");
monpriv->read_index = (monpriv->read_index + 1) % monpriv->read_index = (monpriv->read_index + 1) %
MON_MSGLIM; MON_MSGLIM;
atomic_dec(&monpriv->read_ready); atomic_dec(&monpriv->read_ready);
...@@ -286,10 +242,6 @@ static void mon_iucv_path_complete(struct iucv_path *path, u8 ipuser[16]) ...@@ -286,10 +242,6 @@ static void mon_iucv_path_complete(struct iucv_path *path, u8 ipuser[16])
{ {
struct mon_private *monpriv = path->private; struct mon_private *monpriv = path->private;
P_DEBUG("IUCV connection completed\n");
P_DEBUG("IUCV ACCEPT (from *MONITOR): Version = 0x%02X, Event = "
"0x%02X, Sample = 0x%02X\n",
ipuser[0], ipuser[1], ipuser[2]);
atomic_set(&monpriv->iucv_connected, 1); atomic_set(&monpriv->iucv_connected, 1);
wake_up(&mon_conn_wait_queue); wake_up(&mon_conn_wait_queue);
} }
...@@ -310,7 +262,6 @@ static void mon_iucv_message_pending(struct iucv_path *path, ...@@ -310,7 +262,6 @@ static void mon_iucv_message_pending(struct iucv_path *path,
{ {
struct mon_private *monpriv = path->private; struct mon_private *monpriv = path->private;
P_DEBUG("IUCV message pending\n");
memcpy(&monpriv->msg_array[monpriv->write_index]->msg, memcpy(&monpriv->msg_array[monpriv->write_index]->msg,
msg, sizeof(*msg)); msg, sizeof(*msg));
if (atomic_inc_return(&monpriv->msglim_count) == MON_MSGLIM) { if (atomic_inc_return(&monpriv->msglim_count) == MON_MSGLIM) {
...@@ -375,7 +326,6 @@ static int mon_open(struct inode *inode, struct file *filp) ...@@ -375,7 +326,6 @@ static int mon_open(struct inode *inode, struct file *filp)
rc = -EIO; rc = -EIO;
goto out_path; goto out_path;
} }
P_INFO("open, established connection to *MONITOR service\n\n");
filp->private_data = monpriv; filp->private_data = monpriv;
return nonseekable_open(inode, filp); return nonseekable_open(inode, filp);
...@@ -400,8 +350,6 @@ static int mon_close(struct inode *inode, struct file *filp) ...@@ -400,8 +350,6 @@ static int mon_close(struct inode *inode, struct file *filp)
rc = iucv_path_sever(monpriv->path, user_data_sever); rc = iucv_path_sever(monpriv->path, user_data_sever);
if (rc) if (rc)
P_ERROR("close, iucv_sever failed with rc = %i\n", rc); P_ERROR("close, iucv_sever failed with rc = %i\n", rc);
else
P_INFO("close, terminated connection to *MONITOR service\n");
atomic_set(&monpriv->iucv_severed, 0); atomic_set(&monpriv->iucv_severed, 0);
atomic_set(&monpriv->iucv_connected, 0); atomic_set(&monpriv->iucv_connected, 0);
...@@ -442,10 +390,8 @@ static ssize_t mon_read(struct file *filp, char __user *data, ...@@ -442,10 +390,8 @@ static ssize_t mon_read(struct file *filp, char __user *data,
monmsg = monpriv->msg_array[monpriv->read_index]; monmsg = monpriv->msg_array[monpriv->read_index];
} }
if (!monmsg->pos) { if (!monmsg->pos)
monmsg->pos = mon_mca_start(monmsg) + monmsg->mca_offset; monmsg->pos = mon_mca_start(monmsg) + monmsg->mca_offset;
mon_read_debug(monmsg, monpriv);
}
if (mon_check_mca(monmsg)) if (mon_check_mca(monmsg))
goto reply; goto reply;
...@@ -531,7 +477,6 @@ static int __init mon_init(void) ...@@ -531,7 +477,6 @@ static int __init mon_init(void)
P_ERROR("failed to register with iucv driver\n"); P_ERROR("failed to register with iucv driver\n");
return rc; return rc;
} }
P_INFO("open, registered with IUCV\n");
rc = segment_type(mon_dcss_name); rc = segment_type(mon_dcss_name);
if (rc < 0) { if (rc < 0) {
...@@ -555,13 +500,8 @@ static int __init mon_init(void) ...@@ -555,13 +500,8 @@ static int __init mon_init(void)
dcss_mkname(mon_dcss_name, &user_data_connect[8]); dcss_mkname(mon_dcss_name, &user_data_connect[8]);
rc = misc_register(&mon_dev); rc = misc_register(&mon_dev);
if (rc < 0 ) { if (rc < 0 )
P_ERROR("misc_register failed, rc = %i\n", rc);
goto out; goto out;
}
P_INFO("Loaded segment %s from %p to %p, size = %lu Byte\n",
mon_dcss_name, (void *) mon_dcss_start, (void *) mon_dcss_end,
mon_dcss_end - mon_dcss_start + 1);
return 0; return 0;
out: out:
......
...@@ -153,19 +153,10 @@ struct raw3270_request __init *raw3270_request_alloc_bootmem(size_t size) ...@@ -153,19 +153,10 @@ struct raw3270_request __init *raw3270_request_alloc_bootmem(size_t size)
struct raw3270_request *rq; struct raw3270_request *rq;
rq = alloc_bootmem_low(sizeof(struct raw3270)); rq = alloc_bootmem_low(sizeof(struct raw3270));
if (!rq)
return ERR_PTR(-ENOMEM);
memset(rq, 0, sizeof(struct raw3270_request));
/* alloc output buffer. */ /* alloc output buffer. */
if (size > 0) { if (size > 0)
rq->buffer = alloc_bootmem_low(size); rq->buffer = alloc_bootmem_low(size);
if (!rq->buffer) {
free_bootmem((unsigned long) rq,
sizeof(struct raw3270));
return ERR_PTR(-ENOMEM);
}
}
rq->size = size; rq->size = size;
INIT_LIST_HEAD(&rq->list); INIT_LIST_HEAD(&rq->list);
...@@ -372,17 +363,17 @@ raw3270_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb) ...@@ -372,17 +363,17 @@ raw3270_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
if (IS_ERR(irb)) if (IS_ERR(irb))
rc = RAW3270_IO_RETRY; rc = RAW3270_IO_RETRY;
else if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC) { else if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
rq->rc = -EIO; rq->rc = -EIO;
rc = RAW3270_IO_DONE; rc = RAW3270_IO_DONE;
} else if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END | } else if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END |
DEV_STAT_UNIT_EXCEP)) { DEV_STAT_UNIT_EXCEP)) {
/* Handle CE-DE-UE and subsequent UDE */ /* Handle CE-DE-UE and subsequent UDE */
set_bit(RAW3270_FLAGS_BUSY, &rp->flags); set_bit(RAW3270_FLAGS_BUSY, &rp->flags);
rc = RAW3270_IO_BUSY; rc = RAW3270_IO_BUSY;
} else if (test_bit(RAW3270_FLAGS_BUSY, &rp->flags)) { } else if (test_bit(RAW3270_FLAGS_BUSY, &rp->flags)) {
/* Wait for UDE if busy flag is set. */ /* Wait for UDE if busy flag is set. */
if (irb->scsw.dstat & DEV_STAT_DEV_END) { if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) {
clear_bit(RAW3270_FLAGS_BUSY, &rp->flags); clear_bit(RAW3270_FLAGS_BUSY, &rp->flags);
/* Got it, now retry. */ /* Got it, now retry. */
rc = RAW3270_IO_RETRY; rc = RAW3270_IO_RETRY;
...@@ -497,7 +488,7 @@ raw3270_init_irq(struct raw3270_view *view, struct raw3270_request *rq, ...@@ -497,7 +488,7 @@ raw3270_init_irq(struct raw3270_view *view, struct raw3270_request *rq,
* Unit-Check Processing: * Unit-Check Processing:
* Expect Command Reject or Intervention Required. * Expect Command Reject or Intervention Required.
*/ */
if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) { if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
/* Request finished abnormally. */ /* Request finished abnormally. */
if (irb->ecw[0] & SNS0_INTERVENTION_REQ) { if (irb->ecw[0] & SNS0_INTERVENTION_REQ) {
set_bit(RAW3270_FLAGS_BUSY, &view->dev->flags); set_bit(RAW3270_FLAGS_BUSY, &view->dev->flags);
...@@ -505,16 +496,16 @@ raw3270_init_irq(struct raw3270_view *view, struct raw3270_request *rq, ...@@ -505,16 +496,16 @@ raw3270_init_irq(struct raw3270_view *view, struct raw3270_request *rq,
} }
} }
if (rq) { if (rq) {
if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) { if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
if (irb->ecw[0] & SNS0_CMD_REJECT) if (irb->ecw[0] & SNS0_CMD_REJECT)
rq->rc = -EOPNOTSUPP; rq->rc = -EOPNOTSUPP;
else else
rq->rc = -EIO; rq->rc = -EIO;
} else } else
/* Request finished normally. Copy residual count. */ /* Request finished normally. Copy residual count. */
rq->rescnt = irb->scsw.count; rq->rescnt = irb->scsw.cmd.count;
} }
if (irb->scsw.dstat & DEV_STAT_ATTENTION) { if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
set_bit(RAW3270_FLAGS_ATTN, &view->dev->flags); set_bit(RAW3270_FLAGS_ATTN, &view->dev->flags);
wake_up(&raw3270_wait_queue); wake_up(&raw3270_wait_queue);
} }
...@@ -619,7 +610,6 @@ __raw3270_size_device_vm(struct raw3270 *rp) ...@@ -619,7 +610,6 @@ __raw3270_size_device_vm(struct raw3270 *rp)
rp->cols = 132; rp->cols = 132;
break; break;
default: default:
printk(KERN_WARNING "vrdccrmd is 0x%.8x\n", model);
rc = -EOPNOTSUPP; rc = -EOPNOTSUPP;
break; break;
} }
......
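raw3270_irq() and raw3270_init_irq() above branch on individual device-status bits (attention, unit check, device end) and on the CE+DE+UE combination to decide between retrying, marking the device busy, and completing the request. A small decode sketch; the numeric bit values follow the usual channel device-status layout and should be read as assumptions here:

/* Sketch: decoding the device-status byte the way the raw3270 interrupt
 * handlers above do.  Bit values are assumed, not taken from the diff. */
#include <stdio.h>

#define DEV_STAT_ATTENTION	0x80
#define DEV_STAT_CHN_END	0x08
#define DEV_STAT_DEV_END	0x04
#define DEV_STAT_UNIT_CHECK	0x02
#define DEV_STAT_UNIT_EXCEP	0x01

static const char *classify(unsigned char dstat)
{
	if (dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP))
		return "CE-DE-UE: mark device busy, wait for UDE";
	if (dstat & DEV_STAT_UNIT_CHECK)
		return "unit check: fail the request";
	if (dstat & DEV_STAT_ATTENTION)
		return "attention: wake waiters";
	return "normal end: copy residual count";
}

int main(void)
{
	printf("%s\n", classify(DEV_STAT_CHN_END | DEV_STAT_DEV_END));
	printf("%s\n", classify(DEV_STAT_UNIT_CHECK));
	return 0;
}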
...@@ -506,6 +506,8 @@ sclp_state_change_cb(struct evbuf_header *evbuf) ...@@ -506,6 +506,8 @@ sclp_state_change_cb(struct evbuf_header *evbuf)
if (scbuf->validity_sclp_send_mask) if (scbuf->validity_sclp_send_mask)
sclp_send_mask = scbuf->sclp_send_mask; sclp_send_mask = scbuf->sclp_send_mask;
spin_unlock_irqrestore(&sclp_lock, flags); spin_unlock_irqrestore(&sclp_lock, flags);
if (scbuf->validity_sclp_active_facility_mask)
sclp_facilities = scbuf->sclp_active_facility_mask;
sclp_dispatch_state_change(); sclp_dispatch_state_change();
} }
...@@ -782,11 +784,9 @@ sclp_check_handler(__u16 code) ...@@ -782,11 +784,9 @@ sclp_check_handler(__u16 code)
/* Is this the interrupt we are waiting for? */ /* Is this the interrupt we are waiting for? */
if (finished_sccb == 0) if (finished_sccb == 0)
return; return;
if (finished_sccb != (u32) (addr_t) sclp_init_sccb) { if (finished_sccb != (u32) (addr_t) sclp_init_sccb)
printk(KERN_WARNING SCLP_HEADER "unsolicited interrupt " panic("sclp: unsolicited interrupt for buffer at 0x%x\n",
"for buffer at 0x%x\n", finished_sccb); finished_sccb);
return;
}
spin_lock(&sclp_lock); spin_lock(&sclp_lock);
if (sclp_running_state == sclp_running_state_running) { if (sclp_running_state == sclp_running_state_running) {
sclp_init_req.status = SCLP_REQ_DONE; sclp_init_req.status = SCLP_REQ_DONE;
...@@ -883,8 +883,6 @@ sclp_init(void) ...@@ -883,8 +883,6 @@ sclp_init(void)
unsigned long flags; unsigned long flags;
int rc; int rc;
if (!MACHINE_HAS_SCLP)
return -ENODEV;
spin_lock_irqsave(&sclp_lock, flags); spin_lock_irqsave(&sclp_lock, flags);
/* Check for previous or running initialization */ /* Check for previous or running initialization */
if (sclp_init_state != sclp_init_state_uninitialized) { if (sclp_init_state != sclp_init_state_uninitialized) {
......
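The sclp_state_change_cb() hunk copies the active facility mask into sclp_facilities only when its validity flag is set; the standby-memory probe added later in this series then requires a whole group of facility bits to be present before it does anything. A tiny sketch of that validity-gated copy plus all-bits-required test, with an illustrative buffer layout:

/* Sketch: copy a field only when its validity flag says so, then require
 * every bit in a needed-facilities mask.  The buffer layout is invented;
 * the mask value matches the test in the standby-memory code. */
#include <stdint.h>
#include <stdio.h>

#define NEEDED_FACILITIES 0xe00000000000ULL

struct state_change_buf {
	unsigned int validity_active_facility_mask:1;
	uint64_t active_facility_mask;
};

static uint64_t sclp_facilities;

static void state_change(const struct state_change_buf *scbuf)
{
	if (scbuf->validity_active_facility_mask)
		sclp_facilities = scbuf->active_facility_mask;
}

int main(void)
{
	struct state_change_buf buf = {
		.validity_active_facility_mask = 1,
		.active_facility_mask = 0xe00000000000ULL,
	};

	state_change(&buf);
	printf("standby memory supported: %s\n",
	       (sclp_facilities & NEEDED_FACILITIES) == NEEDED_FACILITIES ?
	       "yes" : "no");
	return 0;
}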
...@@ -11,6 +11,9 @@ ...@@ -11,6 +11,9 @@
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/string.h> #include <linux/string.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memory.h>
#include <asm/chpid.h> #include <asm/chpid.h>
#include <asm/sclp.h> #include <asm/sclp.h>
#include "sclp.h" #include "sclp.h"
...@@ -43,6 +46,8 @@ static int __initdata early_read_info_sccb_valid; ...@@ -43,6 +46,8 @@ static int __initdata early_read_info_sccb_valid;
u64 sclp_facilities; u64 sclp_facilities;
static u8 sclp_fac84; static u8 sclp_fac84;
static unsigned long long rzm;
static unsigned long long rnmax;
static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb) static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb)
{ {
...@@ -62,7 +67,7 @@ static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb) ...@@ -62,7 +67,7 @@ static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb)
return rc; return rc;
} }
void __init sclp_read_info_early(void) static void __init sclp_read_info_early(void)
{ {
int rc; int rc;
int i; int i;
...@@ -92,34 +97,33 @@ void __init sclp_read_info_early(void) ...@@ -92,34 +97,33 @@ void __init sclp_read_info_early(void)
void __init sclp_facilities_detect(void) void __init sclp_facilities_detect(void)
{ {
struct read_info_sccb *sccb;
sclp_read_info_early();
if (!early_read_info_sccb_valid) if (!early_read_info_sccb_valid)
return; return;
sclp_facilities = early_read_info_sccb.facilities;
sclp_fac84 = early_read_info_sccb.fac84; sccb = &early_read_info_sccb;
sclp_facilities = sccb->facilities;
sclp_fac84 = sccb->fac84;
rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
rzm <<= 20;
} }
unsigned long long __init sclp_memory_detect(void) unsigned long long sclp_get_rnmax(void)
{ {
unsigned long long memsize; return rnmax;
struct read_info_sccb *sccb; }
if (!early_read_info_sccb_valid) unsigned long long sclp_get_rzm(void)
return 0; {
sccb = &early_read_info_sccb; return rzm;
if (sccb->rnsize)
memsize = sccb->rnsize << 20;
else
memsize = sccb->rnsize2 << 20;
if (sccb->rnmax)
memsize *= sccb->rnmax;
else
memsize *= sccb->rnmax2;
return memsize;
} }
/* /*
* This function will be called after sclp_memory_detect(), which gets called * This function will be called after sclp_facilities_detect(), which gets
* early from early.c code. Therefore the sccb should have valid contents. * called from early.c code. Therefore the sccb should have valid contents.
*/ */
void __init sclp_get_ipl_info(struct sclp_ipl_info *info) void __init sclp_get_ipl_info(struct sclp_ipl_info *info)
{ {
...@@ -278,6 +282,305 @@ int sclp_cpu_deconfigure(u8 cpu) ...@@ -278,6 +282,305 @@ int sclp_cpu_deconfigure(u8 cpu)
return do_cpu_configure(SCLP_CMDW_DECONFIGURE_CPU | cpu << 8); return do_cpu_configure(SCLP_CMDW_DECONFIGURE_CPU | cpu << 8);
} }
#ifdef CONFIG_MEMORY_HOTPLUG
static DEFINE_MUTEX(sclp_mem_mutex);
static LIST_HEAD(sclp_mem_list);
static u8 sclp_max_storage_id;
static unsigned long sclp_storage_ids[256 / BITS_PER_LONG];
struct memory_increment {
struct list_head list;
u16 rn;
int standby;
int usecount;
};
struct assign_storage_sccb {
struct sccb_header header;
u16 rn;
} __packed;
static unsigned long long rn2addr(u16 rn)
{
return (unsigned long long) (rn - 1) * rzm;
}
static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
{
struct assign_storage_sccb *sccb;
int rc;
sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
return -ENOMEM;
sccb->header.length = PAGE_SIZE;
sccb->rn = rn;
rc = do_sync_request(cmd, sccb);
if (rc)
goto out;
switch (sccb->header.response_code) {
case 0x0020:
case 0x0120:
break;
default:
rc = -EIO;
break;
}
out:
free_page((unsigned long) sccb);
return rc;
}
static int sclp_assign_storage(u16 rn)
{
return do_assign_storage(0x000d0001, rn);
}
static int sclp_unassign_storage(u16 rn)
{
return do_assign_storage(0x000c0001, rn);
}
struct attach_storage_sccb {
struct sccb_header header;
u16 :16;
u16 assigned;
u32 :32;
u32 entries[0];
} __packed;
static int sclp_attach_storage(u8 id)
{
struct attach_storage_sccb *sccb;
int rc;
int i;
sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
return -ENOMEM;
sccb->header.length = PAGE_SIZE;
rc = do_sync_request(0x00080001 | id << 8, sccb);
if (rc)
goto out;
switch (sccb->header.response_code) {
case 0x0020:
set_bit(id, sclp_storage_ids);
for (i = 0; i < sccb->assigned; i++)
sclp_unassign_storage(sccb->entries[i] >> 16);
break;
default:
rc = -EIO;
break;
}
out:
free_page((unsigned long) sccb);
return rc;
}
static int sclp_mem_change_state(unsigned long start, unsigned long size,
int online)
{
struct memory_increment *incr;
unsigned long long istart;
int rc = 0;
list_for_each_entry(incr, &sclp_mem_list, list) {
istart = rn2addr(incr->rn);
if (start + size - 1 < istart)
break;
if (start > istart + rzm - 1)
continue;
if (online) {
if (incr->usecount++)
continue;
/*
* Don't break the loop if one assign fails. Loop may
* be walked again on CANCEL and we can't save
* information if state changed before or not.
* So continue and increase usecount for all increments.
*/
rc |= sclp_assign_storage(incr->rn);
} else {
if (--incr->usecount)
continue;
sclp_unassign_storage(incr->rn);
}
}
return rc ? -EIO : 0;
}
static int sclp_mem_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
unsigned long start, size;
struct memory_notify *arg;
unsigned char id;
int rc = 0;
arg = data;
start = arg->start_pfn << PAGE_SHIFT;
size = arg->nr_pages << PAGE_SHIFT;
mutex_lock(&sclp_mem_mutex);
for (id = 0; id <= sclp_max_storage_id; id++)
if (!test_bit(id, sclp_storage_ids))
sclp_attach_storage(id);
switch (action) {
case MEM_ONLINE:
break;
case MEM_GOING_ONLINE:
rc = sclp_mem_change_state(start, size, 1);
break;
case MEM_CANCEL_ONLINE:
sclp_mem_change_state(start, size, 0);
break;
default:
rc = -EINVAL;
break;
}
mutex_unlock(&sclp_mem_mutex);
return rc ? NOTIFY_BAD : NOTIFY_OK;
}
static struct notifier_block sclp_mem_nb = {
.notifier_call = sclp_mem_notifier,
};
static void __init add_memory_merged(u16 rn)
{
static u16 first_rn, num;
unsigned long long start, size;
if (rn && first_rn && (first_rn + num == rn)) {
num++;
return;
}
if (!first_rn)
goto skip_add;
start = rn2addr(first_rn);
size = (unsigned long long ) num * rzm;
if (start >= VMEM_MAX_PHYS)
goto skip_add;
if (start + size > VMEM_MAX_PHYS)
size = VMEM_MAX_PHYS - start;
add_memory(0, start, size);
skip_add:
first_rn = rn;
num = 1;
}
static void __init sclp_add_standby_memory(void)
{
struct memory_increment *incr;
list_for_each_entry(incr, &sclp_mem_list, list)
if (incr->standby)
add_memory_merged(incr->rn);
add_memory_merged(0);
}
static void __init insert_increment(u16 rn, int standby, int assigned)
{
struct memory_increment *incr, *new_incr;
struct list_head *prev;
u16 last_rn;
new_incr = kzalloc(sizeof(*new_incr), GFP_KERNEL);
if (!new_incr)
return;
new_incr->rn = rn;
new_incr->standby = standby;
last_rn = 0;
prev = &sclp_mem_list;
list_for_each_entry(incr, &sclp_mem_list, list) {
if (assigned && incr->rn > rn)
break;
if (!assigned && incr->rn - last_rn > 1)
break;
last_rn = incr->rn;
prev = &incr->list;
}
if (!assigned)
new_incr->rn = last_rn + 1;
if (new_incr->rn > rnmax) {
kfree(new_incr);
return;
}
list_add(&new_incr->list, prev);
}
struct read_storage_sccb {
struct sccb_header header;
u16 max_id;
u16 assigned;
u16 standby;
u16 :16;
u32 entries[0];
} __packed;
static int __init sclp_detect_standby_memory(void)
{
struct read_storage_sccb *sccb;
int i, id, assigned, rc;
if (!early_read_info_sccb_valid)
return 0;
if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
return 0;
rc = -ENOMEM;
sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
goto out;
assigned = 0;
for (id = 0; id <= sclp_max_storage_id; id++) {
memset(sccb, 0, PAGE_SIZE);
sccb->header.length = PAGE_SIZE;
rc = do_sync_request(0x00040001 | id << 8, sccb);
if (rc)
goto out;
switch (sccb->header.response_code) {
case 0x0010:
set_bit(id, sclp_storage_ids);
for (i = 0; i < sccb->assigned; i++) {
if (!sccb->entries[i])
continue;
assigned++;
insert_increment(sccb->entries[i] >> 16, 0, 1);
}
break;
case 0x0310:
break;
case 0x0410:
for (i = 0; i < sccb->assigned; i++) {
if (!sccb->entries[i])
continue;
assigned++;
insert_increment(sccb->entries[i] >> 16, 1, 1);
}
break;
default:
rc = -EIO;
break;
}
if (!rc)
sclp_max_storage_id = sccb->max_id;
}
if (rc || list_empty(&sclp_mem_list))
goto out;
for (i = 1; i <= rnmax - assigned; i++)
insert_increment(0, 1, 0);
rc = register_memory_notifier(&sclp_mem_nb);
if (rc)
goto out;
sclp_add_standby_memory();
out:
free_page((unsigned long) sccb);
return rc;
}
__initcall(sclp_detect_standby_memory);
#endif /* CONFIG_MEMORY_HOTPLUG */
/* /*
* Channel path configuration related functions. * Channel path configuration related functions.
*/ */
......
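The new standby-memory support works in storage increments: rn2addr() turns a 1-based increment number into an address ((rn - 1) * rzm, where rzm is the increment size), and add_memory_merged() coalesces runs of consecutive increment numbers into a single add_memory() call. A standalone sketch of that coalescing; rzm and the sample increment numbers are made up for the demo:

/* Sketch of the increment-merging logic in add_memory_merged() above:
 * consecutive increment numbers are collected and flushed as one range. */
#include <stdio.h>

static unsigned long long rzm = 256ULL << 20;	/* assumed 256 MiB increment */
static unsigned short first_rn, num;

static unsigned long long rn2addr(unsigned short rn)
{
	return (unsigned long long)(rn - 1) * rzm;	/* rn is 1-based */
}

static void add_memory_merged(unsigned short rn)
{
	if (rn && first_rn && first_rn + num == rn) {
		num++;			/* extends the current run */
		return;
	}
	if (first_rn)			/* flush the previous run, if any */
		printf("add_memory(start=%#llx, size=%#llx)\n",
		       rn2addr(first_rn), (unsigned long long)num * rzm);
	first_rn = rn;			/* rn == 0 just flushes */
	num = 1;
}

int main(void)
{
	unsigned short standby[] = { 5, 6, 7, 10, 11 };

	for (unsigned i = 0; i < sizeof(standby) / sizeof(standby[0]); i++)
		add_memory_merged(standby[i]);
	add_memory_merged(0);		/* final flush, as in the driver */
	return 0;
}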
...@@ -14,14 +14,13 @@ ...@@ -14,14 +14,13 @@
#include <linux/timer.h> #include <linux/timer.h>
#include <linux/jiffies.h> #include <linux/jiffies.h>
#include <linux/bootmem.h> #include <linux/bootmem.h>
#include <linux/termios.h>
#include <linux/err.h> #include <linux/err.h>
#include "sclp.h" #include "sclp.h"
#include "sclp_rw.h" #include "sclp_rw.h"
#include "sclp_tty.h" #include "sclp_tty.h"
#define SCLP_CON_PRINT_HEADER "sclp console driver: "
#define sclp_console_major 4 /* TTYAUX_MAJOR */ #define sclp_console_major 4 /* TTYAUX_MAJOR */
#define sclp_console_minor 64 #define sclp_console_minor 64
#define sclp_console_name "ttyS" #define sclp_console_name "ttyS"
...@@ -222,8 +221,6 @@ sclp_console_init(void) ...@@ -222,8 +221,6 @@ sclp_console_init(void)
INIT_LIST_HEAD(&sclp_con_pages); INIT_LIST_HEAD(&sclp_con_pages);
for (i = 0; i < MAX_CONSOLE_PAGES; i++) { for (i = 0; i < MAX_CONSOLE_PAGES; i++) {
page = alloc_bootmem_low_pages(PAGE_SIZE); page = alloc_bootmem_low_pages(PAGE_SIZE);
if (page == NULL)
return -ENOMEM;
list_add_tail((struct list_head *) page, &sclp_con_pages); list_add_tail((struct list_head *) page, &sclp_con_pages);
} }
INIT_LIST_HEAD(&sclp_con_outqueue); INIT_LIST_HEAD(&sclp_con_outqueue);
......
...@@ -8,6 +8,7 @@ ...@@ -8,6 +8,7 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/cpu.h> #include <linux/cpu.h>
#include <linux/kthread.h>
#include <linux/sysdev.h> #include <linux/sysdev.h>
#include <linux/workqueue.h> #include <linux/workqueue.h>
#include <asm/smp.h> #include <asm/smp.h>
...@@ -40,9 +41,19 @@ static void sclp_cpu_capability_notify(struct work_struct *work) ...@@ -40,9 +41,19 @@ static void sclp_cpu_capability_notify(struct work_struct *work)
put_online_cpus(); put_online_cpus();
} }
static void __ref sclp_cpu_change_notify(struct work_struct *work) static int sclp_cpu_kthread(void *data)
{ {
smp_rescan_cpus(); smp_rescan_cpus();
return 0;
}
static void __ref sclp_cpu_change_notify(struct work_struct *work)
{
/* Can't call smp_rescan_cpus() from workqueue context since it may
* deadlock in case of cpu hotplug. So we have to create a kernel
* thread in order to call it.
*/
kthread_run(sclp_cpu_kthread, NULL, "cpu_rescan");
} }
static void sclp_conf_receiver_fn(struct evbuf_header *evbuf) static void sclp_conf_receiver_fn(struct evbuf_header *evbuf)
...@@ -74,10 +85,8 @@ static int __init sclp_conf_init(void) ...@@ -74,10 +85,8 @@ static int __init sclp_conf_init(void)
INIT_WORK(&sclp_cpu_change_work, sclp_cpu_change_notify); INIT_WORK(&sclp_cpu_change_work, sclp_cpu_change_notify);
rc = sclp_register(&sclp_conf_register); rc = sclp_register(&sclp_conf_register);
if (rc) { if (rc)
printk(KERN_ERR TAG "failed to register (%d).\n", rc);
return rc; return rc;
}
if (!(sclp_conf_register.sclp_send_mask & EVTYP_CONFMGMDATA_MASK)) { if (!(sclp_conf_register.sclp_send_mask & EVTYP_CONFMGMDATA_MASK)) {
printk(KERN_WARNING TAG "no configuration management.\n"); printk(KERN_WARNING TAG "no configuration management.\n");
......
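The sclp_config hunk stops calling smp_rescan_cpus() directly from the workqueue handler and spawns a short-lived kernel thread instead, because (per the new comment) rescanning from workqueue context can deadlock during cpu hotplug. A userspace analogue of that shape, offloading the blocking call to a detached thread; all names are invented and it builds with -lpthread:

/* Userspace analogue of the sclp_cpu_change_notify() change above: instead
 * of doing the potentially deadlocking call inside the callback, hand it
 * to a short-lived, detached worker thread. */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static void *rescan_thread(void *unused)
{
	(void)unused;
	puts("rescanning cpus (blocking work runs here)");
	return NULL;
}

static void cpu_change_notify(void)
{
	pthread_t tid;

	/* don't block or deadlock the callback context: spawn and detach */
	if (pthread_create(&tid, NULL, rescan_thread, NULL) == 0)
		pthread_detach(tid);
}

int main(void)
{
	cpu_change_notify();
	sleep(1);		/* give the detached worker time to run */
	return 0;
}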
...@@ -27,6 +27,8 @@ ...@@ -27,6 +27,8 @@
#define CPI_LENGTH_NAME 8 #define CPI_LENGTH_NAME 8
#define CPI_LENGTH_LEVEL 16 #define CPI_LENGTH_LEVEL 16
static DEFINE_MUTEX(sclp_cpi_mutex);
struct cpi_evbuf { struct cpi_evbuf {
struct evbuf_header header; struct evbuf_header header;
u8 id_format; u8 id_format;
...@@ -124,21 +126,15 @@ static int cpi_req(void) ...@@ -124,21 +126,15 @@ static int cpi_req(void)
int response; int response;
rc = sclp_register(&sclp_cpi_event); rc = sclp_register(&sclp_cpi_event);
if (rc) { if (rc)
printk(KERN_WARNING "cpi: could not register "
"to hardware console.\n");
goto out; goto out;
}
if (!(sclp_cpi_event.sclp_receive_mask & EVTYP_CTLPROGIDENT_MASK)) { if (!(sclp_cpi_event.sclp_receive_mask & EVTYP_CTLPROGIDENT_MASK)) {
printk(KERN_WARNING "cpi: no control program "
"identification support\n");
rc = -EOPNOTSUPP; rc = -EOPNOTSUPP;
goto out_unregister; goto out_unregister;
} }
req = cpi_prepare_req(); req = cpi_prepare_req();
if (IS_ERR(req)) { if (IS_ERR(req)) {
printk(KERN_WARNING "cpi: could not allocate request\n");
rc = PTR_ERR(req); rc = PTR_ERR(req);
goto out_unregister; goto out_unregister;
} }
...@@ -148,10 +144,8 @@ static int cpi_req(void) ...@@ -148,10 +144,8 @@ static int cpi_req(void)
/* Add request to sclp queue */ /* Add request to sclp queue */
rc = sclp_add_request(req); rc = sclp_add_request(req);
if (rc) { if (rc)
printk(KERN_WARNING "cpi: could not start request\n");
goto out_free_req; goto out_free_req;
}
wait_for_completion(&completion); wait_for_completion(&completion);
@@ -223,7 +217,12 @@ static void set_string(char *attr, const char *value)
 static ssize_t system_name_show(struct kobject *kobj,
 				struct kobj_attribute *attr, char *page)
 {
-	return snprintf(page, PAGE_SIZE, "%s\n", system_name);
+	int rc;
+	mutex_lock(&sclp_cpi_mutex);
+	rc = snprintf(page, PAGE_SIZE, "%s\n", system_name);
+	mutex_unlock(&sclp_cpi_mutex);
+	return rc;
 }
 static ssize_t system_name_store(struct kobject *kobj,
@@ -237,7 +236,9 @@ static ssize_t system_name_store(struct kobject *kobj,
 	if (rc)
 		return rc;
+	mutex_lock(&sclp_cpi_mutex);
 	set_string(system_name, buf);
+	mutex_unlock(&sclp_cpi_mutex);
 	return len;
 }
@@ -248,7 +249,12 @@ static struct kobj_attribute system_name_attr =
 static ssize_t sysplex_name_show(struct kobject *kobj,
 				 struct kobj_attribute *attr, char *page)
 {
-	return snprintf(page, PAGE_SIZE, "%s\n", sysplex_name);
+	int rc;
+	mutex_lock(&sclp_cpi_mutex);
+	rc = snprintf(page, PAGE_SIZE, "%s\n", sysplex_name);
+	mutex_unlock(&sclp_cpi_mutex);
+	return rc;
 }
 static ssize_t sysplex_name_store(struct kobject *kobj,
@@ -262,7 +268,9 @@ static ssize_t sysplex_name_store(struct kobject *kobj,
 	if (rc)
 		return rc;
+	mutex_lock(&sclp_cpi_mutex);
 	set_string(sysplex_name, buf);
+	mutex_unlock(&sclp_cpi_mutex);
 	return len;
 }
@@ -273,7 +281,12 @@ static struct kobj_attribute sysplex_name_attr =
 static ssize_t system_type_show(struct kobject *kobj,
 				struct kobj_attribute *attr, char *page)
 {
-	return snprintf(page, PAGE_SIZE, "%s\n", system_type);
+	int rc;
+	mutex_lock(&sclp_cpi_mutex);
+	rc = snprintf(page, PAGE_SIZE, "%s\n", system_type);
+	mutex_unlock(&sclp_cpi_mutex);
+	return rc;
 }
 static ssize_t system_type_store(struct kobject *kobj,
@@ -287,7 +300,9 @@ static ssize_t system_type_store(struct kobject *kobj,
 	if (rc)
 		return rc;
+	mutex_lock(&sclp_cpi_mutex);
 	set_string(system_type, buf);
+	mutex_unlock(&sclp_cpi_mutex);
 	return len;
 }
@@ -298,8 +313,11 @@ static struct kobj_attribute system_type_attr =
 static ssize_t system_level_show(struct kobject *kobj,
 				 struct kobj_attribute *attr, char *page)
 {
-	unsigned long long level = system_level;
+	unsigned long long level;
+	mutex_lock(&sclp_cpi_mutex);
+	level = system_level;
+	mutex_unlock(&sclp_cpi_mutex);
 	return snprintf(page, PAGE_SIZE, "%#018llx\n", level);
 }
@@ -320,8 +338,9 @@ static ssize_t system_level_store(struct kobject *kobj,
 	if (*endp)
 		return -EINVAL;
+	mutex_lock(&sclp_cpi_mutex);
 	system_level = level;
+	mutex_unlock(&sclp_cpi_mutex);
 	return len;
 }
@@ -334,7 +353,9 @@ static ssize_t set_store(struct kobject *kobj,
 {
 	int rc;
+	mutex_lock(&sclp_cpi_mutex);
 	rc = cpi_req();
+	mutex_unlock(&sclp_cpi_mutex);
 	if (rc)
 		return rc;
@@ -373,12 +394,16 @@ int sclp_cpi_set_data(const char *system, const char *sysplex, const char *type,
 	if (rc)
 		return rc;
+	mutex_lock(&sclp_cpi_mutex);
 	set_string(system_name, system);
 	set_string(sysplex_name, sysplex);
 	set_string(system_type, type);
 	system_level = level;
-	return cpi_req();
+	rc = cpi_req();
+	mutex_unlock(&sclp_cpi_mutex);
+	return rc;
 }
 EXPORT_SYMBOL(sclp_cpi_set_data);
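The hunks above all apply one pattern: every sysfs show/store handler and the exported setter take the new sclp_cpi_mutex around reads and writes of the shared identification data, so a concurrent store can never be observed half-written by a show. A minimal sketch of that pattern follows, using a hypothetical attribute; everything with an example_ prefix is illustrative and not part of the driver, while DEFINE_MUTEX, set_string(), CPI_LENGTH_NAME and the sysfs callback signatures are taken from the code shown in the diff.

/* Illustrative only: a hypothetical attribute protected the same way
 * as system_name/sysplex_name/system_type above. */
static DEFINE_MUTEX(example_mutex);
static char example_data[CPI_LENGTH_NAME + 1];

static ssize_t example_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *page)
{
	int rc;

	/* Serialize against example_store() so the string cannot change
	 * (or be seen partially updated) while it is being formatted. */
	mutex_lock(&example_mutex);
	rc = snprintf(page, PAGE_SIZE, "%s\n", example_data);
	mutex_unlock(&example_mutex);
	return rc;
}

static ssize_t example_store(struct kobject *kobj,
			     struct kobj_attribute *attr,
			     const char *buf, size_t len)
{
	mutex_lock(&example_mutex);
	set_string(example_data, buf);
	mutex_unlock(&example_mutex);
	return len;
}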
...
@@ -51,13 +51,7 @@ static struct sclp_register sclp_quiesce_event = {
 static int __init
 sclp_quiesce_init(void)
 {
-	int rc;
-	rc = sclp_register(&sclp_quiesce_event);
-	if (rc)
-		printk(KERN_WARNING "sclp: could not register quiesce handler "
-		       "(rc=%d)\n", rc);
-	return rc;
+	return sclp_register(&sclp_quiesce_event);
 }
 module_init(sclp_quiesce_init);
@@ -19,8 +19,6 @@
 #include "sclp.h"
 #include "sclp_rw.h"
-#define SCLP_RW_PRINT_HEADER "sclp low level driver: "
 /*
  * The room for the SCCB (only for writing) is not equal to a pages size
  * (as it is specified as the maximum size in the SCLP documentation)
...
@@ -239,10 +239,8 @@ int __init sclp_sdias_init(void)
 	debug_register_view(sdias_dbf, &debug_sprintf_view);
 	debug_set_level(sdias_dbf, 6);
 	rc = sclp_register(&sclp_sdias_register);
-	if (rc) {
-		ERROR_MSG("sclp register failed\n");
+	if (rc)
 		return rc;
-	}
 	init_waitqueue_head(&sdias_wq);
 	TRACE("init done\n");
 	return 0;
...
[1 file diff collapsed, not shown]
@@ -11,61 +11,8 @@
 #ifndef __SCLP_TTY_H__
 #define __SCLP_TTY_H__
-#include <linux/ioctl.h>
-#include <linux/termios.h>
 #include <linux/tty_driver.h>
-/* This is the type of data structures storing sclp ioctl setting. */
-struct sclp_ioctls {
-	unsigned short htab;
-	unsigned char echo;
-	unsigned short columns;
-	unsigned char final_nl;
-	unsigned short max_sccb;
-	unsigned short kmem_sccb;	/* can't be modified at run time */
-	unsigned char tolower;
-	unsigned char delim;
-};
-/* must be unique, FIXME: must be added in Documentation/ioctl_number.txt */
-#define SCLP_IOCTL_LETTER 'B'
-/* set width of horizontal tabulator */
-#define TIOCSCLPSHTAB _IOW(SCLP_IOCTL_LETTER, 0, unsigned short)
-/* enable/disable echo of input (independent from line discipline) */
-#define TIOCSCLPSECHO _IOW(SCLP_IOCTL_LETTER, 1, unsigned char)
-/* set number of colums for output */
-#define TIOCSCLPSCOLS _IOW(SCLP_IOCTL_LETTER, 2, unsigned short)
-/* enable/disable writing without final new line character */
-#define TIOCSCLPSNL _IOW(SCLP_IOCTL_LETTER, 4, signed char)
-/* set the maximum buffers size for output, rounded up to next 4kB boundary */
-#define TIOCSCLPSOBUF _IOW(SCLP_IOCTL_LETTER, 5, unsigned short)
-/* set initial (default) sclp ioctls */
-#define TIOCSCLPSINIT _IO(SCLP_IOCTL_LETTER, 6)
-/* enable/disable conversion from upper to lower case of input */
-#define TIOCSCLPSCASE _IOW(SCLP_IOCTL_LETTER, 7, unsigned char)
-/* set special character used for separating upper and lower case, */
-/* 0x00 disables this feature */
-#define TIOCSCLPSDELIM _IOW(SCLP_IOCTL_LETTER, 9, unsigned char)
-/* get width of horizontal tabulator */
-#define TIOCSCLPGHTAB _IOR(SCLP_IOCTL_LETTER, 10, unsigned short)
-/* Is echo of input enabled ? (independent from line discipline) */
-#define TIOCSCLPGECHO _IOR(SCLP_IOCTL_LETTER, 11, unsigned char)
-/* get number of colums for output */
-#define TIOCSCLPGCOLS _IOR(SCLP_IOCTL_LETTER, 12, unsigned short)
-/* Is writing without final new line character enabled ? */
-#define TIOCSCLPGNL _IOR(SCLP_IOCTL_LETTER, 14, signed char)
-/* get the maximum buffers size for output */
-#define TIOCSCLPGOBUF _IOR(SCLP_IOCTL_LETTER, 15, unsigned short)
-/* Is conversion from upper to lower case of input enabled ? */
-#define TIOCSCLPGCASE _IOR(SCLP_IOCTL_LETTER, 17, unsigned char)
-/* get special character used for separating upper and lower case, */
-/* 0x00 disables this feature */
-#define TIOCSCLPGDELIM _IOR(SCLP_IOCTL_LETTER, 19, unsigned char)
-/* get the number of buffers/pages got from kernel at startup */
-#define TIOCSCLPGKBUF _IOR(SCLP_IOCTL_LETTER, 20, unsigned short)
 extern struct tty_driver *sclp_tty_driver;
 #endif /* __SCLP_TTY_H__ */
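For context on the macros being deleted above: _IO, _IOW and _IOR are the standard Linux ioctl-number encoders, combining a type letter, a command number and (for _IOW/_IOR) the size of the argument type. Before this interface was removed, a userspace program could in principle have driven it roughly as sketched below; the device path is hypothetical and the call is shown only to illustrate the encoding, not as a supported API.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

/* Same encoding scheme as the removed header: type letter 'B',
 * command number 10, argument is an unsigned short returned
 * by the driver. */
#define SCLP_IOCTL_LETTER 'B'
#define TIOCSCLPGHTAB _IOR(SCLP_IOCTL_LETTER, 10, unsigned short)

int main(void)
{
	unsigned short htab;
	int fd = open("/dev/ttyS0", O_RDONLY);	/* hypothetical console node */

	if (fd < 0)
		return 1;
	if (ioctl(fd, TIOCSCLPGHTAB, &htab) == 0)
		printf("horizontal tab width: %u\n", htab);
	close(fd);
	return 0;
}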
[7 file diffs collapsed, not shown]
@@ -277,7 +277,8 @@ static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
 	struct urdev *urd;
 	TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n",
-	      intparm, irb->scsw.cstat, irb->scsw.dstat, irb->scsw.count);
+	      intparm, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
+	      irb->scsw.cmd.count);
 	if (!intparm) {
 		TRACE("ur_int_handler: unsolicited interrupt\n");
@@ -288,7 +289,7 @@ static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
 	/* On special conditions irb is an error pointer */
 	if (IS_ERR(irb))
 		urd->io_request_rc = PTR_ERR(irb);
-	else if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
+	else if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
 		urd->io_request_rc = 0;
 	else
 		urd->io_request_rc = -EIO;
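The irb->scsw.cstat to irb->scsw.cmd.cstat changes in this hunk (and in several of the collapsed common-I/O diffs) reflect the subchannel status word becoming a union of a command-mode and a transport-mode layout, with callers selecting the variant that matches the request they issued. A minimal, self-contained sketch of that access pattern follows; the struct and field names are illustrative stand-ins, not the kernel's exact definitions.

#include <stdio.h>

/* Illustrative layouts only: two views of a status word that share
 * storage, selected by which I/O mode produced them. */
struct example_cmd_status {
	unsigned int cstat;	/* subchannel status */
	unsigned int dstat;	/* device status */
	unsigned int count;	/* residual byte count */
};

struct example_tm_status {
	unsigned int fcxs;	/* transport-mode specific fields */
	unsigned int sesq;
};

union example_status_word {
	struct example_cmd_status cmd;	/* command-mode view */
	struct example_tm_status tm;	/* transport-mode view */
};

int main(void)
{
	union example_status_word scsw = {
		.cmd = { .cstat = 0, .dstat = 0x0c, .count = 0 }	/* sample values */
	};

	/* Callers that know they issued a command-mode request pick the
	 * .cmd view explicitly, as ur_int_handler() does after the change. */
	printf("cstat=%02x dstat=%02x res=%u\n",
	       scsw.cmd.cstat, scsw.cmd.dstat, scsw.cmd.count);
	return 0;
}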
...
[2 file diffs collapsed, not shown]
@@ -2,9 +2,11 @@
 # Makefile for the S/390 common i/o drivers
 #
-obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o
+obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o scsw.o \
+	fcx.o itcw.o
 ccw_device-objs += device.o device_fsm.o device_ops.o
 ccw_device-objs += device_id.o device_pgid.o device_status.o
 obj-y += ccw_device.o cmf.o
+obj-$(CONFIG_CHSC_SCH) += chsc_sch.o
 obj-$(CONFIG_CCWGROUP) += ccwgroup.o
 obj-$(CONFIG_QDIO) += qdio.o
[74 file diffs collapsed, not shown]