Commit bd6d323f authored by Andrew Morton, committed by Linus Torvalds

[PATCH] s390: DCSS block device driver.

From: Martin Schwidefsky <schwidefsky@de.ibm.com>

Add z/VM discontiguous saved segments (DCSS) block device driver.
parent 9d989b85
@@ -134,6 +134,7 @@ CONFIG_BLK_DEV_INITRD=y
# S/390 block device drivers
#
CONFIG_BLK_DEV_XPRAM=m
# CONFIG_DCSSBLK is not set
CONFIG_DASD=y
# CONFIG_DASD_PROFILE is not set
CONFIG_DASD_ECKD=y
@@ -2,6 +2,6 @@
# Makefile for the linux s390-specific parts of the memory manager.
#
obj-y := init.o fault.o ioremap.o
obj-y := init.o fault.o ioremap.o extmem.o
obj-$(CONFIG_CMM) += cmm.o
/*
 * File...........: arch/s390/mm/extmem.c
* Author(s)......: Steven Shultz <shultzss@us.ibm.com>
* Carsten Otte <cotte@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
* thanks to Rob M van der Heij
* - he wrote the diag64 function
* (C) IBM Corporation 2002
*/
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bootmem.h>
#include <asm/page.h>
#include <asm/ebcdic.h>
#include <asm/errno.h>
#include <asm/extmem.h>
#include <asm/cpcmd.h>
#include <linux/ctype.h>
#define DCSS_DEBUG /* Debug messages on/off */
#define DCSS_NAME "extmem"
#ifdef DCSS_DEBUG
#define PRINT_DEBUG(x...) printk(KERN_DEBUG DCSS_NAME " debug:" x)
#else
#define PRINT_DEBUG(x...) do {} while (0)
#endif
#define PRINT_INFO(x...) printk(KERN_INFO DCSS_NAME " info:" x)
#define PRINT_WARN(x...) printk(KERN_WARNING DCSS_NAME " warning:" x)
#define PRINT_ERR(x...) printk(KERN_ERR DCSS_NAME " error:" x)
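/*
 * Subcodes for diagnose 0x64; DCSS_QACTV is not a diag subcode but the
 * opcode placed in the DCSS_SEGEXT query parameter block below.
 */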
#define DCSS_LOADSHR 0x00
#define DCSS_LOADNSR 0x04
#define DCSS_PURGESEG 0x08
#define DCSS_FINDSEG 0x0c
#define DCSS_LOADNOLY 0x10
#define DCSS_SEGEXT 0x18
#define DCSS_QACTV 0x0c
struct dcss_segment {
struct list_head list;
char dcss_name[8];
unsigned long start_addr;
unsigned long end;
atomic_t ref_count;
int dcss_attr;
int shared_attr;
};
static spinlock_t dcss_lock = SPIN_LOCK_UNLOCKED;
static struct list_head dcss_list = LIST_HEAD_INIT(dcss_list);
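/* physical memory layout as set up at boot; used below to check
 * whether a segment collides with main memory */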
extern struct {unsigned long addr, size, type;} memory_chunk[16];
/*
 * Create the 8-byte, blank-padded EBCDIC VM segment name from
 * an ASCII name.
 */
static inline void dcss_mkname(char *name, char *dcss_name)
{
	int i;
	for (i = 0; i < 8; i++) {
		if (name[i] == '\0')
			break;
		dcss_name[i] = toupper(name[i]);
	}
	for (; i < 8; i++)
		dcss_name[i] = ' ';
	ASCEBC(dcss_name, 8);
}
/*
* Perform a function on a dcss segment.
*/
static inline int
dcss_diag (__u8 func, void *parameter,
unsigned long *ret1, unsigned long *ret2)
{
unsigned long rx, ry;
int rc;
rx = (unsigned long) parameter;
ry = (unsigned long) func;
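	/*
	 * On 64 bit the diagnose is issued in 31-bit addressing mode
	 * (sam31/sam64 below), so callers pass parameter areas that are
	 * allocated below 2 GB (GFP_DMA).
	 */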
__asm__ __volatile__(
#ifdef CONFIG_ARCH_S390X
" sam31\n" // switch to 31 bit
" diag %0,%1,0x64\n"
" sam64\n" // switch back to 64 bit
#else
" diag %0,%1,0x64\n"
#endif
" ipm %2\n"
" srl %2,28\n"
: "+d" (rx), "+d" (ry), "=d" (rc) : : "cc" );
*ret1 = rx;
*ret2 = ry;
return rc;
}
/* used to issue the "extended" dcss query */
static inline int
dcss_diag_query(char *name, int *rwattr, int *shattr,
		unsigned long *segstart, unsigned long *segend)
{
int i,j,rc;
unsigned long rx, ry;
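	/*
	 * Layout of the input and output areas for the extended
	 * (DCSS_SEGEXT) segment query.
	 */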
typedef struct segentry {
char thisseg[8];
} segentry;
struct qout64 {
int segstart;
int segend;
int segcnt;
int segrcnt;
segentry segout[6];
};
struct qin64 {
char qopcode;
char rsrv1[3];
char qrcode;
char rsrv2[3];
char qname[8];
unsigned int qoutptr;
short int qoutlen;
};
struct qin64 *qinarea;
struct qout64 *qoutarea;
qinarea = (struct qin64*) get_zeroed_page (GFP_DMA);
if (!qinarea) {
		rc = -ENOMEM;
goto out;
}
qoutarea = (struct qout64*) get_zeroed_page (GFP_DMA);
if (!qoutarea) {
rc = -ENOMEM;
free_page ((unsigned long) qinarea);
goto out;
}
memset (qinarea,0,PAGE_SIZE);
memset (qoutarea,0,PAGE_SIZE);
	qinarea->qopcode = DCSS_QACTV; /* do a query for active segments */
qinarea->qoutptr = (unsigned long) qoutarea;
qinarea->qoutlen = sizeof(struct qout64);
	/*
	 * Move the segment name into the doubleword-aligned field and
	 * pad it with blanks to a length of 8.
	 */
for (i = j = 0 ; i < 8; i++) {
qinarea->qname[i] = (name[j] == '\0') ? ' ' : name[j++];
}
/* name already in EBCDIC */
/* ASCEBC ((void *)&qinarea.qname, 8); */
/* set the assembler variables */
rx = (unsigned long) qinarea;
ry = DCSS_SEGEXT; /* this is extended function */
/* issue diagnose x'64' */
__asm__ __volatile__(
#ifdef CONFIG_ARCH_S390X
" sam31\n" // switch to 31 bit
" diag %0,%1,0x64\n"
" sam64\n" // switch back to 64 bit
#else
" diag %0,%1,0x64\n"
#endif
" ipm %2\n"
" srl %2,28\n"
: "+d" (rx), "+d" (ry), "=d" (rc) : : "cc" );
/* parse the query output area */
*segstart=qoutarea->segstart;
*segend=qoutarea->segend;
	if (rc > 1) {
		*rwattr = 2;
		*shattr = 2;
		rc = 0;
		goto free;
	}
	if (qoutarea->segcnt > 6) {
		*rwattr = 3;
		*shattr = 3;
		rc = 0;
		goto free;
	}
*rwattr = 1;
*shattr = 1;
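	/*
	 * Byte 3 of each returned segment entry holds the segment type
	 * code; types 2, 3 and 6 are treated as read-only, types 1, 3
	 * and 5 as non-shareable (exclusive).
	 */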
for (i=0; i < qoutarea->segrcnt; i++) {
if (qoutarea->segout[i].thisseg[3] == 2 ||
qoutarea->segout[i].thisseg[3] == 3 ||
qoutarea->segout[i].thisseg[3] == 6 )
*rwattr = 0;
if (qoutarea->segout[i].thisseg[3] == 1 ||
qoutarea->segout[i].thisseg[3] == 3 ||
qoutarea->segout[i].thisseg[3] == 5 )
*shattr = 0;
} /* end of for statement */
rc = 0;
free:
free_page ((unsigned long) qoutarea);
free_page ((unsigned long) qinarea);
out:
return rc;
}
/*
* Load a DCSS segment via the diag 0x64.
*/
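/*
 * Returns the segment type actually loaded (one of the SEGMENT_* values,
 * possibly a different mode than the one requested) or a negative errno.
 */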
int segment_load(char *name, int segtype, unsigned long *addr,
unsigned long *end)
{
char dcss_name[8];
struct list_head *l;
struct dcss_segment *seg, *tmp;
unsigned long dummy;
unsigned long segstart, segend;
int rc = 0,i;
int rwattr, shattr;
if (!MACHINE_IS_VM)
return -ENOSYS;
dcss_mkname(name, dcss_name);
/* search for the dcss in list of currently loaded segments */
spin_lock(&dcss_lock);
seg = NULL;
list_for_each(l, &dcss_list) {
tmp = list_entry(l, struct dcss_segment, list);
if (memcmp(tmp->dcss_name, dcss_name, 8) == 0) {
seg = tmp;
break;
}
}
if (seg == NULL) {
/* find out the attributes of this
shared segment */
dcss_diag_query(dcss_name, &rwattr, &shattr, &segstart, &segend);
/* does segment collide with main memory? */
for (i=0; i<16; i++) {
if (memory_chunk[i].type != 0)
continue;
if (memory_chunk[i].addr > segend)
continue;
if (memory_chunk[i].addr + memory_chunk[i].size <= segstart)
continue;
spin_unlock(&dcss_lock);
return -ENOENT;
}
/* or does it collide with other (loaded) segments? */
list_for_each(l, &dcss_list) {
tmp = list_entry(l, struct dcss_segment, list);
if ((segstart <= tmp->end && segstart >= tmp->start_addr) ||
(segend <= tmp->end && segend >= tmp->start_addr) ||
(segstart <= tmp->start_addr && segend >= tmp->end)) {
PRINT_ERR("Segment Overlap!\n");
spin_unlock(&dcss_lock);
return -ENOENT;
}
}
/* do case statement on segtype */
/* if asking for shared ro,
shared rw works */
/* if asking for exclusive ro,
exclusive rw works */
switch(segtype) {
case SEGMENT_SHARED_RO:
if (shattr > 1 || rwattr > 1) {
spin_unlock(&dcss_lock);
return -ENOENT;
} else {
if (shattr == 0 && rwattr == 0)
rc = SEGMENT_EXCLUSIVE_RO;
if (shattr == 0 && rwattr == 1)
rc = SEGMENT_EXCLUSIVE_RW;
if (shattr == 1 && rwattr == 0)
rc = SEGMENT_SHARED_RO;
if (shattr == 1 && rwattr == 1)
rc = SEGMENT_SHARED_RW;
}
break;
case SEGMENT_SHARED_RW:
if (shattr > 1 || rwattr != 1) {
spin_unlock(&dcss_lock);
return -ENOENT;
} else {
if (shattr == 0)
rc = SEGMENT_EXCLUSIVE_RW;
if (shattr == 1)
rc = SEGMENT_SHARED_RW;
}
break;
case SEGMENT_EXCLUSIVE_RO:
if (shattr > 0 || rwattr > 1) {
spin_unlock(&dcss_lock);
return -ENOENT;
} else {
if (rwattr == 0)
rc = SEGMENT_EXCLUSIVE_RO;
if (rwattr == 1)
rc = SEGMENT_EXCLUSIVE_RW;
}
break;
case SEGMENT_EXCLUSIVE_RW:
/* if (shattr != 0 || rwattr != 1) {
spin_unlock(&dcss_lock);
return -ENOENT;
} else {
*/
rc = SEGMENT_EXCLUSIVE_RW;
// }
break;
default:
spin_unlock(&dcss_lock);
return -ENOENT;
} /* end switch */
seg = kmalloc(sizeof(struct dcss_segment), GFP_DMA);
if (seg != NULL) {
memcpy(seg->dcss_name, dcss_name, 8);
if (rc == SEGMENT_EXCLUSIVE_RW) {
if (dcss_diag(DCSS_LOADNSR, seg->dcss_name,
&seg->start_addr, &seg->end) == 0) {
if (seg->end < max_low_pfn*PAGE_SIZE ) {
atomic_set(&seg->ref_count, 1);
list_add(&seg->list, &dcss_list);
*addr = seg->start_addr;
*end = seg->end;
seg->dcss_attr = rc;
if (shattr == 1 && rwattr == 1)
seg->shared_attr = SEGMENT_SHARED_RW;
else if (shattr == 1 && rwattr == 0)
seg->shared_attr = SEGMENT_SHARED_RO;
else
seg->shared_attr = SEGMENT_EXCLUSIVE_RW;
} else {
dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy);
kfree (seg);
rc = -ENOENT;
}
} else {
kfree(seg);
rc = -ENOENT;
}
goto out;
}
if (dcss_diag(DCSS_LOADNOLY, seg->dcss_name,
&seg->start_addr, &seg->end) == 0) {
if (seg->end < max_low_pfn*PAGE_SIZE ) {
atomic_set(&seg->ref_count, 1);
list_add(&seg->list, &dcss_list);
*addr = seg->start_addr;
*end = seg->end;
seg->dcss_attr = rc;
seg->shared_attr = rc;
} else {
dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy);
kfree (seg);
rc = -ENOENT;
}
} else {
kfree(seg);
rc = -ENOENT;
}
} else rc = -ENOMEM;
} else {
/* found */
if ((segtype == SEGMENT_EXCLUSIVE_RW) && (seg->dcss_attr != SEGMENT_EXCLUSIVE_RW)) {
PRINT_ERR("Segment already loaded in other mode than EXCLUSIVE_RW!\n");
rc = -EPERM;
goto out;
/* reload segment in exclusive mode */
/* dcss_diag(DCSS_LOADNSR, seg->dcss_name,
&seg->start_addr, &seg->end);
seg->dcss_attr = SEGMENT_EXCLUSIVE_RW;*/
}
if ((segtype != SEGMENT_EXCLUSIVE_RW) && (seg->dcss_attr == SEGMENT_EXCLUSIVE_RW)) {
PRINT_ERR("Segment already loaded in EXCLUSIVE_RW mode!\n");
rc = -EPERM;
goto out;
}
atomic_inc(&seg->ref_count);
*addr = seg->start_addr;
*end = seg->end;
rc = seg->dcss_attr;
}
out:
spin_unlock(&dcss_lock);
return rc;
}
/*
* Decrease the use count of a DCSS segment and remove
* it from the address space if nobody is using it
* any longer.
*/
void segment_unload(char *name)
{
char dcss_name[8];
unsigned long dummy;
struct list_head *l,*l_tmp;
struct dcss_segment *seg;
if (!MACHINE_IS_VM)
return;
dcss_mkname(name, dcss_name);
spin_lock(&dcss_lock);
list_for_each_safe(l, l_tmp, &dcss_list) {
seg = list_entry(l, struct dcss_segment, list);
if (memcmp(seg->dcss_name, dcss_name, 8) == 0) {
if (atomic_dec_return(&seg->ref_count) == 0) {
/* Last user of the segment is
gone. */
list_del(&seg->list);
dcss_diag(DCSS_PURGESEG, seg->dcss_name,
&dummy, &dummy);
kfree(seg);
}
break;
}
}
spin_unlock(&dcss_lock);
}
/*
* Replace an existing DCSS segment, so that machines
* that load it anew will see the new version.
*/
void segment_replace(char *name)
{
char dcss_name[8];
struct list_head *l;
struct dcss_segment *seg;
int mybeg = 0;
int myend = 0;
char mybuff1[80];
char mybuff2[80];
if (!MACHINE_IS_VM)
return;
dcss_mkname(name, dcss_name);
memset (mybuff1, 0, sizeof(mybuff1));
memset (mybuff2, 0, sizeof(mybuff2));
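	/*
	 * Redefine the segment with the CP DEFSEG command, using its
	 * current page range and load mode, then write the current
	 * contents back with SAVESEG.
	 */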
spin_lock(&dcss_lock);
list_for_each(l, &dcss_list) {
seg = list_entry(l, struct dcss_segment, list);
if (memcmp(seg->dcss_name, dcss_name, 8) == 0) {
mybeg = seg->start_addr >> 12;
myend = (seg->end) >> 12;
if (seg->shared_attr == SEGMENT_EXCLUSIVE_RW)
sprintf(mybuff1, "DEFSEG %s %X-%X EW",
name, mybeg, myend);
if (seg->shared_attr == SEGMENT_EXCLUSIVE_RO)
sprintf(mybuff1, "DEFSEG %s %X-%X RO",
name, mybeg, myend);
if (seg->shared_attr == SEGMENT_SHARED_RW)
sprintf(mybuff1, "DEFSEG %s %X-%X SW",
name, mybeg, myend);
if (seg->shared_attr == SEGMENT_SHARED_RO)
sprintf(mybuff1, "DEFSEG %s %X-%X SR",
name, mybeg, myend);
spin_unlock(&dcss_lock);
sprintf(mybuff2, "SAVESEG %s", name);
cpcmd(mybuff1, NULL, 80);
cpcmd(mybuff2, NULL, 80);
break;
}
}
if (myend == 0) spin_unlock(&dcss_lock);
}
EXPORT_SYMBOL(segment_load);
EXPORT_SYMBOL(segment_unload);
EXPORT_SYMBOL(segment_replace);
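/*
 * Usage sketch for the exported interface (illustrative only; "MYDCSS"
 * is a made-up segment name):
 *
 *	unsigned long start, end;
 *	int type;
 *
 *	type = segment_load("MYDCSS", SEGMENT_SHARED_RO, &start, &end);
 *	if (type < 0)
 *		return type;		// -ENOENT, -ENOMEM, -EPERM, ...
 *	// type is the mode actually granted, e.g. SEGMENT_SHARED_RW;
 *	// the segment contents are now addressable at start..end
 *	segment_replace("MYDCSS");	// save the current contents
 *	segment_unload("MYDCSS");
 */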
@@ -11,6 +11,11 @@ config BLK_DEV_XPRAM
This option is also available as a module which will be called
xpram. If unsure, say "N".
config DCSSBLK
	tristate "DCSSBLK support"
	help
	  Support for accessing z/VM discontiguous saved segments (DCSS)
	  as block devices. If unsure, say "N".
config DASD
tristate "Support for DASD devices"
depends on CCW
@@ -14,3 +14,4 @@ obj-$(CONFIG_DASD_ECKD) += dasd_eckd_mod.o
obj-$(CONFIG_DASD_FBA) += dasd_fba_mod.o
obj-$(CONFIG_DASD_CMB) += dasd_cmb.o
obj-$(CONFIG_BLK_DEV_XPRAM) += xpram.o
obj-$(CONFIG_DCSSBLK) += dcssblk.o
/*
* dcssblk.c -- the S/390 block driver for dcss memory
*
* Authors: Carsten Otte, Stefan Weinhuber, Gerald Schaefer
*/
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <asm/extmem.h>
#include <asm/io.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <asm/ccwdev.h> // for s390_root_dev_(un)register()
//#define DCSSBLK_DEBUG /* Debug messages on/off */
#define DCSSBLK_NAME "dcssblk"
#define DCSSBLK_MINORS_PER_DISK 1
#ifdef DCSSBLK_DEBUG
#define PRINT_DEBUG(x...) printk(KERN_DEBUG DCSSBLK_NAME " debug: " x)
#else
#define PRINT_DEBUG(x...) do {} while (0)
#endif
#define PRINT_INFO(x...) printk(KERN_INFO DCSSBLK_NAME " info: " x)
#define PRINT_WARN(x...) printk(KERN_WARNING DCSSBLK_NAME " warning: " x)
#define PRINT_ERR(x...) printk(KERN_ERR DCSSBLK_NAME " error: " x)
static int dcssblk_open(struct inode *inode, struct file *filp);
static int dcssblk_release(struct inode *inode, struct file *filp);
static int dcssblk_make_request(struct request_queue *q, struct bio *bio);
static int dcssblk_major;
static struct block_device_operations dcssblk_devops = {
.owner = THIS_MODULE,
.open = dcssblk_open,
.release = dcssblk_release,
};
static ssize_t dcssblk_add_store(struct device *dev, const char *buf,
		size_t count);
static ssize_t dcssblk_remove_store(struct device *dev, const char *buf,
		size_t count);
static ssize_t dcssblk_save_store(struct device *dev, const char *buf,
		size_t count);
static ssize_t dcssblk_save_show(struct device *dev, char *buf);
static ssize_t dcssblk_shared_store(struct device *dev, const char *buf,
		size_t count);
static ssize_t dcssblk_shared_show(struct device *dev, char *buf);
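/*
 * sysfs interface: 'add' and 'remove' are attributes of the dcssblk root
 * device, 'shared' and 'save' are created per segment device.
 */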
static DEVICE_ATTR(add, S_IWUSR, NULL, dcssblk_add_store);
static DEVICE_ATTR(remove, S_IWUSR, NULL, dcssblk_remove_store);
static DEVICE_ATTR(save, S_IWUSR | S_IRUGO, dcssblk_save_show,
dcssblk_save_store);
static DEVICE_ATTR(shared, S_IWUSR | S_IRUGO, dcssblk_shared_show,
dcssblk_shared_store);
static struct device *dcssblk_root_dev;
struct dcssblk_dev_info {
struct list_head lh;
struct device dev;
char segment_name[BUS_ID_SIZE];
atomic_t use_count;
struct gendisk *gd;
unsigned long start;
unsigned long end;
int segment_type;
unsigned char save_pending;
unsigned char is_shared;
struct request_queue *dcssblk_queue;
};
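/* list of all dcssblk devices and the lock protecting it */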
static struct list_head dcssblk_devices = LIST_HEAD_INIT(dcssblk_devices);
static rwlock_t dcssblk_devices_lock = RW_LOCK_UNLOCKED;
/*
* release function for segment device.
*/
static void
dcssblk_release_segment(struct device *dev)
{
PRINT_DEBUG("segment release fn called for %s\n", dev->bus_id);
kfree(container_of(dev, struct dcssblk_dev_info, dev));
module_put(THIS_MODULE);
}
/*
* get a minor number. needs to be called with
* write_lock(&dcssblk_devices_lock) and the
* device needs to be enqueued before the lock is
* freed.
*/
static inline int
dcssblk_assign_free_minor(struct dcssblk_dev_info *dev_info)
{
int minor, found;
struct dcssblk_dev_info *entry;
if (dev_info == NULL)
return -EINVAL;
for (minor = 0; minor < (1<<MINORBITS); minor++) {
found = 0;
// test if minor available
list_for_each_entry(entry, &dcssblk_devices, lh)
if (minor == entry->gd->first_minor)
found++;
if (!found) break; // got unused minor
}
if (found)
return -EBUSY;
dev_info->gd->first_minor = minor;
return 0;
}
/*
* get the struct dcssblk_dev_info from dcssblk_devices
* for the given name.
* read_lock(&dcssblk_devices_lock) must be held.
*/
static struct dcssblk_dev_info *
dcssblk_get_device_by_name(char *name)
{
struct dcssblk_dev_info *entry;
list_for_each_entry(entry, &dcssblk_devices, lh) {
if (!strcmp(name, entry->segment_name)) {
return entry;
}
}
return NULL;
}
/*
* register the device that represents a segment in sysfs,
* also add the attributes for the device
*/
static inline int
dcssblk_register_segment_device(struct device *dev)
{
int rc;
rc = device_register(dev);
if (rc)
return rc;
rc = device_create_file(dev, &dev_attr_shared);
if (rc)
goto unregister_dev;
rc = device_create_file(dev, &dev_attr_save);
if (rc)
goto unregister_dev;
return rc;
unregister_dev:
device_unregister(dev);
return rc;
}
/*
* device attribute for switching shared/nonshared (exclusive)
* operation (show + store)
*/
static ssize_t
dcssblk_shared_show(struct device *dev, char *buf)
{
struct dcssblk_dev_info *dev_info;
dev_info = container_of(dev, struct dcssblk_dev_info, dev);
return sprintf(buf, dev_info->is_shared ? "1\n" : "0\n");
}
static ssize_t
dcssblk_shared_store(struct device *dev, const char *inbuf, size_t count)
{
struct dcssblk_dev_info *dev_info;
int rc;
if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0')) {
PRINT_WARN("Invalid value, must be 0 or 1\n");
return -EINVAL;
}
write_lock(&dcssblk_devices_lock);
dev_info = container_of(dev, struct dcssblk_dev_info, dev);
if (atomic_read(&dev_info->use_count)) {
PRINT_ERR("share: segment %s is busy!\n",
dev_info->segment_name);
write_unlock(&dcssblk_devices_lock);
return -EBUSY;
}
if ((inbuf[0] == '1') && (dev_info->is_shared == 1)) {
PRINT_WARN("Segment %s already loaded in shared mode!\n",
dev_info->segment_name);
write_unlock(&dcssblk_devices_lock);
return count;
}
if ((inbuf[0] == '0') && (dev_info->is_shared == 0)) {
PRINT_WARN("Segment %s already loaded in exclusive mode!\n",
dev_info->segment_name);
write_unlock(&dcssblk_devices_lock);
return count;
}
if (inbuf[0] == '1') {
// reload segment in shared mode
segment_unload(dev_info->segment_name);
rc = segment_load(dev_info->segment_name, SEGMENT_SHARED_RO,
&dev_info->start, &dev_info->end);
if (rc < 0) {
PRINT_ERR("Segment %s not reloaded, rc=%d\n",
dev_info->segment_name, rc);
goto removeseg;
}
dev_info->is_shared = 1;
PRINT_INFO("Segment %s reloaded, shared mode.\n",
dev_info->segment_name);
} else if (inbuf[0] == '0') {
// reload segment in exclusive mode
segment_unload(dev_info->segment_name);
rc = segment_load(dev_info->segment_name, SEGMENT_EXCLUSIVE_RW,
&dev_info->start, &dev_info->end);
if (rc < 0) {
PRINT_ERR("Segment %s not reloaded, rc=%d\n",
dev_info->segment_name, rc);
goto removeseg;
}
dev_info->is_shared = 0;
PRINT_INFO("Segment %s reloaded, exclusive (read-write) mode.\n",
dev_info->segment_name);
} else {
write_unlock(&dcssblk_devices_lock);
PRINT_WARN("Invalid value, must be 0 or 1\n");
return -EINVAL;
}
dev_info->segment_type = rc;
rc = count;
switch (dev_info->segment_type) {
case SEGMENT_SHARED_RO:
case SEGMENT_EXCLUSIVE_RO:
set_disk_ro(dev_info->gd, 1);
break;
case SEGMENT_SHARED_RW:
case SEGMENT_EXCLUSIVE_RW:
set_disk_ro(dev_info->gd, 0);
break;
}
if ((inbuf[0] == '1') &&
((dev_info->segment_type == SEGMENT_EXCLUSIVE_RO) ||
(dev_info->segment_type == SEGMENT_EXCLUSIVE_RW))) {
PRINT_WARN("Could not get shared copy of segment %s\n",
dev_info->segment_name);
rc = -EPERM;
}
if ((inbuf[0] == '0') &&
((dev_info->segment_type == SEGMENT_SHARED_RO) ||
(dev_info->segment_type == SEGMENT_SHARED_RW))) {
PRINT_WARN("Could not get exclusive copy of segment %s\n",
dev_info->segment_name);
rc = -EPERM;
}
write_unlock(&dcssblk_devices_lock);
goto out;
removeseg:
PRINT_ERR("Could not reload segment %s, removing it now!\n",
dev_info->segment_name);
list_del(&dev_info->lh);
write_unlock(&dcssblk_devices_lock);
del_gendisk(dev_info->gd);
blk_put_queue(dev_info->dcssblk_queue);
dev_info->gd->queue = NULL;
put_disk(dev_info->gd);
device_unregister(dev);
put_device(dev);
out:
return rc;
}
/*
* device attribute for save operation on current copy
* of the segment. If the segment is busy, saving will
* become pending until it gets released, which can be
* undone by storing a non-true value to this entry.
* (show + store)
*/
static ssize_t
dcssblk_save_show(struct device *dev, char *buf)
{
struct dcssblk_dev_info *dev_info;
dev_info = container_of(dev, struct dcssblk_dev_info, dev);
return sprintf(buf, dev_info->save_pending ? "1\n" : "0\n");
}
static ssize_t
dcssblk_save_store(struct device *dev, const char *inbuf, size_t count)
{
struct dcssblk_dev_info *dev_info;
if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0')) {
PRINT_WARN("Invalid value, must be 0 or 1\n");
return -EINVAL;
}
dev_info = container_of(dev, struct dcssblk_dev_info, dev);
write_lock(&dcssblk_devices_lock);
if (inbuf[0] == '1') {
if (atomic_read(&dev_info->use_count) == 0) {
// device is idle => we save immediately
PRINT_INFO("Saving segment %s\n",
dev_info->segment_name);
segment_replace(dev_info->segment_name);
} else {
// device is busy => we save it when it becomes
// idle in dcssblk_release
PRINT_INFO("Segment %s is currently busy, it will "
"be saved when it becomes idle...\n",
dev_info->segment_name);
dev_info->save_pending = 1;
}
} else if (inbuf[0] == '0') {
if (dev_info->save_pending) {
// device is busy & the user wants to undo his save
// request
dev_info->save_pending = 0;
PRINT_INFO("Pending save for segment %s deactivated\n",
dev_info->segment_name);
}
} else {
write_unlock(&dcssblk_devices_lock);
PRINT_WARN("Invalid value, must be 0 or 1\n");
return -EINVAL;
}
write_unlock(&dcssblk_devices_lock);
return count;
}
/*
* device attribute for adding devices
*/
static ssize_t
dcssblk_add_store(struct device *dev, const char *buf, size_t count)
{
int rc, i;
struct dcssblk_dev_info *dev_info;
char *local_buf;
unsigned long seg_byte_size;
dev_info = NULL;
if (dev != dcssblk_root_dev) {
rc = -EINVAL;
goto out_nobuf;
}
local_buf = kmalloc(count + 1, GFP_KERNEL);
if (local_buf == NULL) {
rc = -ENOMEM;
goto out_nobuf;
}
/*
* parse input
*/
	for (i = 0; (i < count) && (buf[i] != '\0') && (buf[i] != '\n'); i++) {
local_buf[i] = toupper(buf[i]);
}
local_buf[i] = '\0';
if ((i == 0) || (i > 8)) {
rc = -ENAMETOOLONG;
goto out;
}
/*
* already loaded?
*/
read_lock(&dcssblk_devices_lock);
dev_info = dcssblk_get_device_by_name(local_buf);
read_unlock(&dcssblk_devices_lock);
if (dev_info != NULL) {
PRINT_WARN("Segment %s already loaded!\n", local_buf);
rc = -EEXIST;
goto out;
}
/*
* get a struct dcssblk_dev_info
*/
dev_info = kmalloc(sizeof(struct dcssblk_dev_info), GFP_KERNEL);
if (dev_info == NULL) {
rc = -ENOMEM;
goto out;
}
memset(dev_info, 0, sizeof(struct dcssblk_dev_info));
strcpy(dev_info->segment_name, local_buf);
strlcpy(dev_info->dev.bus_id, local_buf, BUS_ID_SIZE);
dev_info->dev.release = dcssblk_release_segment;
INIT_LIST_HEAD(&dev_info->lh);
dev_info->gd = alloc_disk(DCSSBLK_MINORS_PER_DISK);
if (dev_info->gd == NULL) {
rc = -ENOMEM;
goto free_dev_info;
}
	dev_info->gd->major = dcssblk_major;
	dev_info->gd->fops = &dcssblk_devops;
	dev_info->dcssblk_queue = blk_alloc_queue(GFP_KERNEL);
	if (dev_info->dcssblk_queue == NULL) {
		/* no queue, give the gendisk back and bail out */
		rc = -ENOMEM;
		put_disk(dev_info->gd);
		goto free_dev_info;
	}
	dev_info->gd->queue = dev_info->dcssblk_queue;
	dev_info->gd->private_data = dev_info;
	dev_info->gd->driverfs_dev = &dev_info->dev;
/*
* load the segment
*/
rc = segment_load(local_buf, SEGMENT_SHARED_RO,
&dev_info->start, &dev_info->end);
if (rc < 0) {
PRINT_ERR("Segment %s not loaded, rc=%d\n", local_buf, rc);
goto dealloc_gendisk;
}
seg_byte_size = (dev_info->end - dev_info->start + 1);
set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors
PRINT_INFO("Loaded segment %s from %p to %p, size = %lu Byte, "
"capacity = %lu sectors (512 Byte)\n", local_buf,
(void *) dev_info->start, (void *) dev_info->end,
seg_byte_size, seg_byte_size >> 9);
dev_info->segment_type = rc;
dev_info->save_pending = 0;
dev_info->is_shared = 1;
dev_info->dev.parent = dcssblk_root_dev;
/*
* get minor, add to list
*/
write_lock(&dcssblk_devices_lock);
rc = dcssblk_assign_free_minor(dev_info);
if (rc) {
write_unlock(&dcssblk_devices_lock);
PRINT_ERR("No free minor number available! "
"Unloading segment...\n");
goto unload_seg;
}
sprintf(dev_info->gd->disk_name, "dcssblk%d",
dev_info->gd->first_minor);
list_add_tail(&dev_info->lh, &dcssblk_devices);
/*
* register the device
*/
rc = dcssblk_register_segment_device(&dev_info->dev);
if (rc) {
PRINT_ERR("Segment %s could not be registered RC=%d\n",
local_buf, rc);
goto list_del;
}
if (!try_module_get(THIS_MODULE)) {
rc = -ENODEV;
goto list_del;
}
get_device(&dev_info->dev);
add_disk(dev_info->gd);
blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request);
blk_queue_hardsect_size(dev_info->dcssblk_queue, 4096);
switch (dev_info->segment_type) {
case SEGMENT_SHARED_RO:
case SEGMENT_EXCLUSIVE_RO:
set_disk_ro(dev_info->gd,1);
break;
case SEGMENT_SHARED_RW:
case SEGMENT_EXCLUSIVE_RW:
set_disk_ro(dev_info->gd,0);
break;
}
PRINT_DEBUG("Segment %s loaded successfully\n", local_buf);
write_unlock(&dcssblk_devices_lock);
rc = count;
goto out;
list_del:
list_del(&dev_info->lh);
write_unlock(&dcssblk_devices_lock);
unload_seg:
segment_unload(local_buf);
dealloc_gendisk:
blk_put_queue(dev_info->dcssblk_queue);
dev_info->gd->queue = NULL;
put_disk(dev_info->gd);
free_dev_info:
kfree(dev_info);
out:
kfree(local_buf);
out_nobuf:
return rc;
}
/*
* device attribute for removing devices
*/
static ssize_t
dcssblk_remove_store(struct device *dev, const char *buf, size_t count)
{
struct dcssblk_dev_info *dev_info;
int rc, i;
char *local_buf;
if (dev != dcssblk_root_dev) {
return -EINVAL;
}
local_buf = kmalloc(count + 1, GFP_KERNEL);
if (local_buf == NULL) {
return -ENOMEM;
}
/*
* parse input
*/
	for (i = 0; (i < count) && (buf[i] != '\0') && (buf[i] != '\n'); i++) {
local_buf[i] = toupper(buf[i]);
}
local_buf[i] = '\0';
if ((i == 0) || (i > 8)) {
rc = -ENAMETOOLONG;
goto out_buf;
}
write_lock(&dcssblk_devices_lock);
dev_info = dcssblk_get_device_by_name(local_buf);
if (dev_info == NULL) {
write_unlock(&dcssblk_devices_lock);
PRINT_WARN("Segment %s is not loaded!\n", local_buf);
rc = -ENODEV;
goto out_buf;
}
if (atomic_read(&dev_info->use_count) != 0) {
write_unlock(&dcssblk_devices_lock);
PRINT_WARN("Segment %s is in use!\n", local_buf);
rc = -EBUSY;
goto out_buf;
}
list_del(&dev_info->lh);
write_unlock(&dcssblk_devices_lock);
del_gendisk(dev_info->gd);
blk_put_queue(dev_info->dcssblk_queue);
dev_info->gd->queue = NULL;
put_disk(dev_info->gd);
device_unregister(&dev_info->dev);
put_device(&dev_info->dev);
segment_unload(dev_info->segment_name);
PRINT_DEBUG("Segment %s unloaded successfully\n",
dev_info->segment_name);
rc = count;
out_buf:
kfree(local_buf);
return rc;
}
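/*
 * open/release only maintain the use count; a non-zero count blocks
 * switching between shared and exclusive mode, and a pending save is
 * carried out once the count drops to zero in dcssblk_release().
 */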
static int
dcssblk_open(struct inode *inode, struct file *filp)
{
struct dcssblk_dev_info *dev_info;
int rc;
dev_info = inode->i_bdev->bd_disk->private_data;
if (NULL == dev_info) {
rc = -ENODEV;
goto out;
}
atomic_inc(&dev_info->use_count);
inode->i_bdev->bd_block_size = 4096;
rc = 0;
out:
return rc;
}
static int
dcssblk_release(struct inode *inode, struct file *filp)
{
struct dcssblk_dev_info *dev_info;
int rc;
dev_info = inode->i_bdev->bd_disk->private_data;
if (NULL == dev_info) {
rc = -ENODEV;
goto out;
}
write_lock(&dcssblk_devices_lock);
if (atomic_dec_and_test(&dev_info->use_count)
&& (dev_info->save_pending)) {
PRINT_INFO("Segment %s became idle and is being saved now\n",
dev_info->segment_name);
segment_replace(dev_info->segment_name);
dev_info->save_pending = 0;
}
write_unlock(&dcssblk_devices_lock);
rc = 0;
out:
return rc;
}
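/*
 * The loaded segment is directly addressable, so I/O is a plain memcpy
 * between the bio pages and the segment memory; requests bypass the
 * normal request queue via this make_request function.
 */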
static int
dcssblk_make_request(request_queue_t *q, struct bio *bio)
{
struct dcssblk_dev_info *dev_info;
struct bio_vec *bvec;
unsigned long index;
unsigned long page_addr;
unsigned long source_addr;
unsigned long bytes_done;
int i;
bytes_done = 0;
dev_info = bio->bi_bdev->bd_disk->private_data;
if (dev_info == NULL)
goto fail;
	if ((bio->bi_sector & 7) != 0 || (bio->bi_size & 4095) != 0)
		/* Request is not aligned on a 4K page (8 sectors). */
goto fail;
if (((bio->bi_size >> 9) + bio->bi_sector)
> get_capacity(bio->bi_bdev->bd_disk)) {
/* Request beyond end of DCSS segment. */
goto fail;
}
index = (bio->bi_sector >> 3);
bio_for_each_segment(bvec, bio, i) {
page_addr = (unsigned long)
page_address(bvec->bv_page) + bvec->bv_offset;
source_addr = dev_info->start + (index<<12) + bytes_done;
		if (unlikely((page_addr & 4095) != 0 || (bvec->bv_len & 4095) != 0))
// More paranoia.
goto fail;
if (bio_data_dir(bio) == READ) {
memcpy((void*)page_addr, (void*)source_addr,
bvec->bv_len);
} else {
memcpy((void*)source_addr, (void*)page_addr,
bvec->bv_len);
}
bytes_done += bvec->bv_len;
}
bio_endio(bio, bytes_done, 0);
return 0;
fail:
bio_io_error(bio, bytes_done);
return 0;
}
/*
* The init/exit functions.
*/
static void __exit
dcssblk_exit(void)
{
int rc;
PRINT_DEBUG("DCSSBLOCK EXIT...\n");
s390_root_dev_unregister(dcssblk_root_dev);
rc = unregister_blkdev(dcssblk_major, DCSSBLK_NAME);
if (rc) {
PRINT_ERR("unregister_blkdev() failed!\n");
}
PRINT_DEBUG("...finished!\n");
}
static int __init
dcssblk_init(void)
{
int rc;
PRINT_DEBUG("DCSSBLOCK INIT...\n");
dcssblk_root_dev = s390_root_dev_register("dcssblk");
if (IS_ERR(dcssblk_root_dev)) {
PRINT_ERR("device_register() failed!\n");
return PTR_ERR(dcssblk_root_dev);
}
rc = device_create_file(dcssblk_root_dev, &dev_attr_add);
if (rc) {
PRINT_ERR("device_create_file(add) failed!\n");
s390_root_dev_unregister(dcssblk_root_dev);
return rc;
}
rc = device_create_file(dcssblk_root_dev, &dev_attr_remove);
if (rc) {
PRINT_ERR("device_create_file(remove) failed!\n");
s390_root_dev_unregister(dcssblk_root_dev);
return rc;
}
rc = register_blkdev(0, DCSSBLK_NAME);
if (rc < 0) {
PRINT_ERR("Can't get dynamic major!\n");
s390_root_dev_unregister(dcssblk_root_dev);
return rc;
}
dcssblk_major = rc;
PRINT_DEBUG("...finished!\n");
return 0;
}
module_init(dcssblk_init);
module_exit(dcssblk_exit);
MODULE_LICENSE("GPL");
/*
* include/asm-s390x/extmem.h
*
* definitions for external memory segment support
* Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
*/
#ifndef _ASM_S390X_DCSS_H
#define _ASM_S390X_DCSS_H
#ifndef __ASSEMBLY__
#define SEGMENT_SHARED_RW 0
#define SEGMENT_SHARED_RO 1
#define SEGMENT_EXCLUSIVE_RW 2
#define SEGMENT_EXCLUSIVE_RO 3
extern int segment_load (char *name,int segtype,unsigned long *addr,unsigned long *length);
extern void segment_unload(char *name);
extern void segment_replace(char *name);
#endif
#endif