Commit c9346151 authored by Stefan Haberland, committed by Martin Schwidefsky

s390/dasd: extend dasd path handling

Store flags and path_data per channel path.
Implement get/set functions for various path masks.
The patch introduces no functional changes.
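
For orientation, the sketch below is a minimal, standalone model (not kernel code) of the bit layout the new helpers rely on: channel path 0 corresponds to the most significant bit of the 8-bit path mask, so per-path flag words are folded into a mask with 0x80 >> chp, mirroring the dasd_path_get_*() helpers introduced in the dasd_int.h hunk further down. The struct and function names here are illustrative stand-ins only.

/*
 * Toy model (illustrative names, not the kernel API): fold per-path flag
 * words into an 8-bit path mask, the way dasd_path_get_opm() does.
 */
#include <stdio.h>

#define TOY_PATH_OPERATIONAL 0x01UL	/* stands in for the DASD_PATH_OPERATIONAL bit */

struct toy_device {
	unsigned long path_flags[8];	/* one flag word per channel path */
};

static unsigned char toy_get_opm(const struct toy_device *dev)
{
	unsigned char opm = 0x00;
	int chp;

	for (chp = 0; chp < 8; chp++)
		if (dev->path_flags[chp] & TOY_PATH_OPERATIONAL)
			opm |= 0x80 >> chp;	/* chp 0 is the most significant bit */
	return opm;
}

int main(void)
{
	struct toy_device dev = { .path_flags = {
		[0] = TOY_PATH_OPERATIONAL, [3] = TOY_PATH_OPERATIONAL } };

	printf("opm = 0x%02x\n", toy_get_opm(&dev));	/* prints 0x90 */
	return 0;
}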
Signed-off-by: Stefan Haberland <sth@linux.vnet.ibm.com>
Reviewed-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Reviewed-by: Jan Hoeppner <hoeppner@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 7df11604
@@ -1448,9 +1448,9 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
 	cqr->starttime = jiffies;
 	cqr->retries--;
 	if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
-		cqr->lpm &= device->path_data.opm;
+		cqr->lpm &= dasd_path_get_opm(device);
 		if (!cqr->lpm)
-			cqr->lpm = device->path_data.opm;
+			cqr->lpm = dasd_path_get_opm(device);
 	}
 	if (cqr->cpmode == 1) {
 		rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
@@ -1483,8 +1483,8 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
 		DBF_DEV_EVENT(DBF_WARNING, device,
 			      "start_IO: selected paths gone (%x)",
 			      cqr->lpm);
-	} else if (cqr->lpm != device->path_data.opm) {
-		cqr->lpm = device->path_data.opm;
+	} else if (cqr->lpm != dasd_path_get_opm(device)) {
+		cqr->lpm = dasd_path_get_opm(device);
 		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
 			      "start_IO: selected paths gone,"
 			      " retry on all paths");
@@ -1493,11 +1493,10 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
 			      "start_IO: all paths in opm gone,"
 			      " do path verification");
 		dasd_generic_last_path_gone(device);
-		device->path_data.opm = 0;
-		device->path_data.ppm = 0;
-		device->path_data.npm = 0;
-		device->path_data.tbvpm =
-			ccw_device_get_path_mask(device->cdev);
+		dasd_path_no_path(device);
+		dasd_path_set_tbvpm(device,
+				    ccw_device_get_path_mask(
					    device->cdev));
 		break;
 	case -ENODEV:
@@ -1642,7 +1641,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 	switch (PTR_ERR(irb)) {
 	case -EIO:
 		if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) {
-			device = (struct dasd_device *) cqr->startdev;
+			device = cqr->startdev;
 			cqr->status = DASD_CQR_CLEARED;
 			dasd_device_clear_timer(device);
 			wake_up(&dasd_flush_wq);
@@ -1755,13 +1754,13 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 	 */
 	if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
 	    cqr->retries > 0) {
-		if (cqr->lpm == device->path_data.opm)
+		if (cqr->lpm == dasd_path_get_opm(device))
 			DBF_DEV_EVENT(DBF_DEBUG, device,
 				      "default ERP in fastpath "
 				      "(%i retries left)",
 				      cqr->retries);
 		if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
-			cqr->lpm = device->path_data.opm;
+			cqr->lpm = dasd_path_get_opm(device);
 		cqr->status = DASD_CQR_QUEUED;
 		next = cqr;
 	} else
@@ -2002,17 +2001,18 @@ static void __dasd_device_check_path_events(struct dasd_device *device)
 {
 	int rc;
-	if (device->path_data.tbvpm) {
-		if (device->stopped & ~(DASD_STOPPED_DC_WAIT |
-					DASD_UNRESUMED_PM))
-			return;
-		rc = device->discipline->verify_path(
-			device, device->path_data.tbvpm);
-		if (rc)
-			dasd_device_set_timer(device, 50);
-		else
-			device->path_data.tbvpm = 0;
-	}
+	if (!dasd_path_get_tbvpm(device))
+		return;
+
+	if (device->stopped &
+	    ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM))
+		return;
+	rc = device->discipline->verify_path(device,
+					     dasd_path_get_tbvpm(device));
+	if (rc)
+		dasd_device_set_timer(device, 50);
+	else
+		dasd_path_clear_all_verify(device);
 };
 /*
@@ -3684,14 +3684,12 @@ int dasd_generic_notify(struct ccw_device *cdev, int event)
 	case CIO_GONE:
 	case CIO_BOXED:
 	case CIO_NO_PATH:
-		device->path_data.opm = 0;
-		device->path_data.ppm = 0;
-		device->path_data.npm = 0;
+		dasd_path_no_path(device);
 		ret = dasd_generic_last_path_gone(device);
 		break;
 	case CIO_OPER:
 		ret = 1;
-		if (device->path_data.opm)
+		if (dasd_path_get_opm(device))
 			ret = dasd_generic_path_operational(device);
 		break;
 	}
@@ -3702,48 +3700,32 @@ EXPORT_SYMBOL_GPL(dasd_generic_notify);
 void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
 {
-	int chp;
-	__u8 oldopm, eventlpm;
 	struct dasd_device *device;
+	int chp, oldopm;
 	device = dasd_device_from_cdev_locked(cdev);
 	if (IS_ERR(device))
 		return;
+	oldopm = dasd_path_get_opm(device);
 	for (chp = 0; chp < 8; chp++) {
-		eventlpm = 0x80 >> chp;
 		if (path_event[chp] & PE_PATH_GONE) {
-			oldopm = device->path_data.opm;
-			device->path_data.opm &= ~eventlpm;
-			device->path_data.ppm &= ~eventlpm;
-			device->path_data.npm &= ~eventlpm;
-			if (oldopm && !device->path_data.opm) {
-				dev_warn(&device->cdev->dev,
-					 "No verified channel paths remain "
-					 "for the device\n");
-				DBF_DEV_EVENT(DBF_WARNING, device,
-					      "%s", "last verified path gone");
-				dasd_eer_write(device, NULL, DASD_EER_NOPATH);
-				dasd_device_set_stop_bits(device,
-							  DASD_STOPPED_DC_WAIT);
-			}
+			dasd_path_notoper(device, chp);
 		}
 		if (path_event[chp] & PE_PATH_AVAILABLE) {
-			device->path_data.opm &= ~eventlpm;
-			device->path_data.ppm &= ~eventlpm;
-			device->path_data.npm &= ~eventlpm;
-			device->path_data.tbvpm |= eventlpm;
+			dasd_path_available(device, chp);
 			dasd_schedule_device_bh(device);
 		}
 		if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) {
-			if (!(device->path_data.opm & eventlpm) &&
-			    !(device->path_data.tbvpm & eventlpm)) {
+			if (!dasd_path_is_operational(device, chp) &&
+			    !dasd_path_need_verify(device, chp)) {
 				/*
 				 * we can not establish a pathgroup on an
 				 * unavailable path, so trigger a path
 				 * verification first
 				 */
-				device->path_data.tbvpm |= eventlpm;
+				dasd_path_available(device, chp);
 				dasd_schedule_device_bh(device);
 			}
 			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
 				      "Pathgroup re-established\n");
@@ -3751,17 +3733,26 @@ void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
 			device->discipline->kick_validate(device);
 		}
 	}
+	if (oldopm && !dasd_path_get_opm(device)) {
+		dev_warn(&device->cdev->dev,
+			 "No verified channel paths remain for the device\n");
+		DBF_DEV_EVENT(DBF_WARNING, device,
+			      "%s", "last verified path gone");
+		dasd_eer_write(device, NULL, DASD_EER_NOPATH);
+		dasd_device_set_stop_bits(device,
+					  DASD_STOPPED_DC_WAIT);
+	}
 	dasd_put_device(device);
 }
 EXPORT_SYMBOL_GPL(dasd_generic_path_event);
 int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm)
 {
-	if (!device->path_data.opm && lpm) {
-		device->path_data.opm = lpm;
+	if (!dasd_path_get_opm(device) && lpm) {
+		dasd_path_set_opm(device, lpm);
 		dasd_generic_path_operational(device);
 	} else
-		device->path_data.opm |= lpm;
+		dasd_path_add_opm(device, lpm);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(dasd_generic_verify_path);
...
@@ -152,7 +152,7 @@ dasd_3990_erp_alternate_path(struct dasd_ccw_req * erp)
 	opm = ccw_device_get_path_mask(device->cdev);
 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 	if (erp->lpm == 0)
-		erp->lpm = device->path_data.opm &
+		erp->lpm = dasd_path_get_opm(device) &
 			~(erp->irb.esw.esw0.sublog.lpum);
 	else
 		erp->lpm &= ~(erp->irb.esw.esw0.sublog.lpum);
@@ -273,7 +273,7 @@ static struct dasd_ccw_req *dasd_3990_erp_action_1(struct dasd_ccw_req *erp)
 	    !test_bit(DASD_CQR_VERIFY_PATH, &erp->flags)) {
 		erp->status = DASD_CQR_FILLED;
 		erp->retries = 10;
-		erp->lpm = erp->startdev->path_data.opm;
+		erp->lpm = dasd_path_get_opm(erp->startdev);
 		erp->function = dasd_3990_erp_action_1_sec;
 	}
 	return erp;
@@ -1926,7 +1926,7 @@ dasd_3990_erp_compound_path(struct dasd_ccw_req * erp, char *sense)
 	    !test_bit(DASD_CQR_VERIFY_PATH, &erp->flags)) {
 		/* reset the lpm and the status to be able to
 		 * try further actions. */
-		erp->lpm = erp->startdev->path_data.opm;
+		erp->lpm = dasd_path_get_opm(erp->startdev);
 		erp->status = DASD_CQR_NEED_ERP;
 	}
 }
...
@@ -1438,11 +1438,11 @@ static ssize_t dasd_pm_show(struct device *dev,
 	if (IS_ERR(device))
 		return sprintf(buf, "0\n");
-	opm = device->path_data.opm;
-	nppm = device->path_data.npm;
-	cablepm = device->path_data.cablepm;
-	cuirpm = device->path_data.cuirpm;
-	hpfpm = device->path_data.hpfpm;
+	opm = dasd_path_get_opm(device);
+	nppm = dasd_path_get_nppm(device);
+	cablepm = dasd_path_get_cablepm(device);
+	cuirpm = dasd_path_get_cuirpm(device);
+	hpfpm = dasd_path_get_hpfpm(device);
 	dasd_put_device(device);
 	return sprintf(buf, "%02x %02x %02x %02x %02x\n", opm, nppm,
...
@@ -535,8 +535,7 @@ struct dasd_eckd_private {
 	struct dasd_eckd_characteristics rdc_data;
 	u8 *conf_data;
 	int conf_len;
-	/* per path configuration data */
-	struct dasd_conf_data *path_conf_data[8];
 	/* pointers to specific parts in the conf_data */
 	struct dasd_ned *ned;
 	struct dasd_sneq *sneq;
...
@@ -96,7 +96,7 @@ dasd_default_erp_action(struct dasd_ccw_req *cqr)
 			      "default ERP called (%i retries left)",
 			      cqr->retries);
 		if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
-			cqr->lpm = device->path_data.opm;
+			cqr->lpm = dasd_path_get_opm(device);
 		cqr->status = DASD_CQR_FILLED;
 	} else {
 		pr_err("%s: default ERP has run out of retries and failed\n",
...
@@ -168,7 +168,7 @@ dasd_fba_check_characteristics(struct dasd_device *device)
 	device->default_expires = DASD_EXPIRES;
 	device->default_retries = FBA_DEFAULT_RETRIES;
-	device->path_data.opm = LPM_ANYPATH;
+	dasd_path_set_opm(device, LPM_ANYPATH);
 	readonly = dasd_device_is_ro(device);
 	if (readonly)
...
@@ -55,6 +55,7 @@
 #include <asm/debug.h>
 #include <asm/dasd.h>
 #include <asm/idals.h>
+#include <linux/bitops.h>
 /* DASD discipline magic */
 #define DASD_ECKD_MAGIC 0xC5C3D2C4
@@ -397,17 +398,23 @@ extern struct dasd_discipline *dasd_diag_discipline_pointer;
 #define DASD_EER_STATECHANGE 3
 #define DASD_EER_PPRCSUSPEND 4
+/* DASD path handling */
+#define DASD_PATH_OPERATIONAL 1
+#define DASD_PATH_TBV 2
+#define DASD_PATH_PP 3
+#define DASD_PATH_NPP 4
+#define DASD_PATH_MISCABLED 5
+#define DASD_PATH_NOHPF 6
+#define DASD_PATH_CUIR 7
 struct dasd_path {
-	__u8 opm;
-	__u8 tbvpm;
-	__u8 ppm;
-	__u8 npm;
-	/* paths that are not used because of a special condition */
-	__u8 cablepm; /* miss-cabled */
-	__u8 hpfpm;   /* the HPF requirements of the other paths are not met */
-	__u8 cuirpm;  /* CUIR varied offline */
+	unsigned long flags;
+	struct dasd_conf_data *conf_data;
 };
 struct dasd_profile_info {
 	/* legacy part of profile data, as in dasd_profile_info_t */
 	unsigned int dasd_io_reqs; /* number of requests processed */
@@ -458,7 +465,8 @@ struct dasd_device {
 	struct dasd_discipline *discipline;
 	struct dasd_discipline *base_discipline;
 	void *private;
-	struct dasd_path path_data;
+	struct dasd_path path[8];
+	__u8 opm;
 	/* Device state and target state. */
 	int state, target;
@@ -835,4 +843,359 @@ static inline int dasd_eer_enabled(struct dasd_device *device)
 #define dasd_eer_enabled(d) (0)
 #endif	/* CONFIG_DASD_ERR */
/* DASD path handling functions */
/*
* helper functions to modify bit masks for a given channel path for a device
*/
static inline int dasd_path_is_operational(struct dasd_device *device, int chp)
{
return test_bit(DASD_PATH_OPERATIONAL, &device->path[chp].flags);
}
static inline int dasd_path_need_verify(struct dasd_device *device, int chp)
{
return test_bit(DASD_PATH_TBV, &device->path[chp].flags);
}
static inline void dasd_path_verify(struct dasd_device *device, int chp)
{
__set_bit(DASD_PATH_TBV, &device->path[chp].flags);
}
static inline void dasd_path_clear_verify(struct dasd_device *device, int chp)
{
__clear_bit(DASD_PATH_TBV, &device->path[chp].flags);
}
static inline void dasd_path_clear_all_verify(struct dasd_device *device)
{
int chp;
for (chp = 0; chp < 8; chp++)
dasd_path_clear_verify(device, chp);
}
static inline void dasd_path_operational(struct dasd_device *device, int chp)
{
__set_bit(DASD_PATH_OPERATIONAL, &device->path[chp].flags);
device->opm |= (0x80 >> chp);
}
static inline void dasd_path_nonpreferred(struct dasd_device *device, int chp)
{
__set_bit(DASD_PATH_NPP, &device->path[chp].flags);
}
static inline int dasd_path_is_nonpreferred(struct dasd_device *device, int chp)
{
return test_bit(DASD_PATH_NPP, &device->path[chp].flags);
}
static inline void dasd_path_clear_nonpreferred(struct dasd_device *device,
int chp)
{
__clear_bit(DASD_PATH_NPP, &device->path[chp].flags);
}
static inline void dasd_path_preferred(struct dasd_device *device, int chp)
{
__set_bit(DASD_PATH_PP, &device->path[chp].flags);
}
static inline int dasd_path_is_preferred(struct dasd_device *device, int chp)
{
return test_bit(DASD_PATH_PP, &device->path[chp].flags);
}
static inline void dasd_path_clear_preferred(struct dasd_device *device,
int chp)
{
__clear_bit(DASD_PATH_PP, &device->path[chp].flags);
}
static inline void dasd_path_clear_oper(struct dasd_device *device, int chp)
{
__clear_bit(DASD_PATH_OPERATIONAL, &device->path[chp].flags);
device->opm &= ~(0x80 >> chp);
}
static inline void dasd_path_clear_cable(struct dasd_device *device, int chp)
{
__clear_bit(DASD_PATH_MISCABLED, &device->path[chp].flags);
}
static inline void dasd_path_cuir(struct dasd_device *device, int chp)
{
__set_bit(DASD_PATH_CUIR, &device->path[chp].flags);
}
static inline int dasd_path_is_cuir(struct dasd_device *device, int chp)
{
return test_bit(DASD_PATH_CUIR, &device->path[chp].flags);
}
static inline void dasd_path_clear_cuir(struct dasd_device *device, int chp)
{
__clear_bit(DASD_PATH_CUIR, &device->path[chp].flags);
}
static inline void dasd_path_clear_nohpf(struct dasd_device *device, int chp)
{
__clear_bit(DASD_PATH_NOHPF, &device->path[chp].flags);
}
static inline void dasd_path_miscabled(struct dasd_device *device, int chp)
{
__set_bit(DASD_PATH_MISCABLED, &device->path[chp].flags);
}
static inline int dasd_path_is_miscabled(struct dasd_device *device, int chp)
{
return test_bit(DASD_PATH_MISCABLED, &device->path[chp].flags);
}
static inline void dasd_path_nohpf(struct dasd_device *device, int chp)
{
__set_bit(DASD_PATH_NOHPF, &device->path[chp].flags);
}
static inline int dasd_path_is_nohpf(struct dasd_device *device, int chp)
{
return test_bit(DASD_PATH_NOHPF, &device->path[chp].flags);
}
/*
* get functions for path masks
* will return a path mask for the given device
*/
static inline __u8 dasd_path_get_opm(struct dasd_device *device)
{
return device->opm;
}
static inline __u8 dasd_path_get_tbvpm(struct dasd_device *device)
{
int chp;
__u8 tbvpm = 0x00;
for (chp = 0; chp < 8; chp++)
if (dasd_path_need_verify(device, chp))
tbvpm |= 0x80 >> chp;
return tbvpm;
}
static inline __u8 dasd_path_get_nppm(struct dasd_device *device)
{
int chp;
__u8 npm = 0x00;
for (chp = 0; chp < 8; chp++) {
if (dasd_path_is_nonpreferred(device, chp))
npm |= 0x80 >> chp;
}
return npm;
}
static inline __u8 dasd_path_get_ppm(struct dasd_device *device)
{
int chp;
__u8 ppm = 0x00;
for (chp = 0; chp < 8; chp++)
if (dasd_path_is_preferred(device, chp))
ppm |= 0x80 >> chp;
return ppm;
}
static inline __u8 dasd_path_get_cablepm(struct dasd_device *device)
{
int chp;
__u8 cablepm = 0x00;
for (chp = 0; chp < 8; chp++)
if (dasd_path_is_miscabled(device, chp))
cablepm |= 0x80 >> chp;
return cablepm;
}
static inline __u8 dasd_path_get_cuirpm(struct dasd_device *device)
{
int chp;
__u8 cuirpm = 0x00;
for (chp = 0; chp < 8; chp++)
if (dasd_path_is_cuir(device, chp))
cuirpm |= 0x80 >> chp;
return cuirpm;
}
static inline __u8 dasd_path_get_hpfpm(struct dasd_device *device)
{
int chp;
__u8 hpfpm = 0x00;
for (chp = 0; chp < 8; chp++)
if (dasd_path_is_nohpf(device, chp))
hpfpm |= 0x80 >> chp;
return hpfpm;
}
/*
* add functions for path masks
* the existing path mask will be extended by the given path mask
*/
static inline void dasd_path_add_tbvpm(struct dasd_device *device, __u8 pm)
{
int chp;
for (chp = 0; chp < 8; chp++)
if (pm & (0x80 >> chp))
dasd_path_verify(device, chp);
}
static inline void dasd_path_add_opm(struct dasd_device *device, __u8 pm)
{
int chp;
for (chp = 0; chp < 8; chp++)
if (pm & (0x80 >> chp)) {
dasd_path_operational(device, chp);
/*
* if the path is used
* it should not be in one of the negative lists
*/
dasd_path_clear_nohpf(device, chp);
dasd_path_clear_cuir(device, chp);
dasd_path_clear_cable(device, chp);
}
}
static inline void dasd_path_add_cablepm(struct dasd_device *device, __u8 pm)
{
int chp;
for (chp = 0; chp < 8; chp++)
if (pm & (0x80 >> chp))
dasd_path_miscabled(device, chp);
}
static inline void dasd_path_add_cuirpm(struct dasd_device *device, __u8 pm)
{
int chp;
for (chp = 0; chp < 8; chp++)
if (pm & (0x80 >> chp))
dasd_path_cuir(device, chp);
}
static inline void dasd_path_add_nppm(struct dasd_device *device, __u8 pm)
{
int chp;
for (chp = 0; chp < 8; chp++)
if (pm & (0x80 >> chp))
dasd_path_nonpreferred(device, chp);
}
static inline void dasd_path_add_nohpfpm(struct dasd_device *device, __u8 pm)
{
int chp;
for (chp = 0; chp < 8; chp++)
if (pm & (0x80 >> chp))
dasd_path_nohpf(device, chp);
}
static inline void dasd_path_add_ppm(struct dasd_device *device, __u8 pm)
{
int chp;
for (chp = 0; chp < 8; chp++)
if (pm & (0x80 >> chp))
dasd_path_preferred(device, chp);
}
/*
* set functions for path masks
* the existing path mask will be replaced by the given path mask
*/
static inline void dasd_path_set_tbvpm(struct dasd_device *device, __u8 pm)
{
int chp;
for (chp = 0; chp < 8; chp++)
if (pm & (0x80 >> chp))
dasd_path_verify(device, chp);
else
dasd_path_clear_verify(device, chp);
}
static inline void dasd_path_set_opm(struct dasd_device *device, __u8 pm)
{
int chp;
for (chp = 0; chp < 8; chp++) {
dasd_path_clear_oper(device, chp);
if (pm & (0x80 >> chp)) {
dasd_path_operational(device, chp);
/*
* if the path is used
* it should not be in one of the negative lists
*/
dasd_path_clear_nohpf(device, chp);
dasd_path_clear_cuir(device, chp);
dasd_path_clear_cable(device, chp);
}
}
}
/*
* remove functions for path masks
* the existing path mask will be cleared with the given path mask
*/
static inline void dasd_path_remove_opm(struct dasd_device *device, __u8 pm)
{
int chp;
for (chp = 0; chp < 8; chp++) {
if (pm & (0x80 >> chp))
dasd_path_clear_oper(device, chp);
}
}
/*
* add the newly available path to the to be verified pm and remove it from
* normal operation until it is verified
*/
static inline void dasd_path_available(struct dasd_device *device, int chp)
{
dasd_path_clear_oper(device, chp);
dasd_path_verify(device, chp);
}
static inline void dasd_path_notoper(struct dasd_device *device, int chp)
{
dasd_path_clear_oper(device, chp);
dasd_path_clear_preferred(device, chp);
dasd_path_clear_nonpreferred(device, chp);
}
/*
* remove all paths from normal operation
*/
static inline void dasd_path_no_path(struct dasd_device *device)
{
int chp;
for (chp = 0; chp < 8; chp++)
dasd_path_notoper(device, chp);
dasd_path_clear_all_verify(device);
}
/* end - path handling */
#endif				/* DASD_H */
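
As a closing usage note, the three helper families added above differ only in how they combine the supplied mask with the existing one: dasd_path_set_*() replaces the whole mask, dasd_path_add_*() ORs new bits in, and dasd_path_remove_opm() clears the given bits. A small standalone sketch of that distinction (illustrative names, not the kernel API):

#include <stdio.h>

/* set replaces the mask, add extends it, remove clears the given bits */
static unsigned char mask_set(unsigned char old, unsigned char pm)
{
	(void)old;		/* previous contents are discarded */
	return pm;
}

static unsigned char mask_add(unsigned char old, unsigned char pm)
{
	return old | pm;	/* extend the existing mask */
}

static unsigned char mask_remove(unsigned char old, unsigned char pm)
{
	return old & ~pm;	/* clear the given bits */
}

int main(void)
{
	unsigned char opm = 0xc0;	/* channel paths 0 and 1 operational */

	printf("set    0x20: 0x%02x\n", mask_set(opm, 0x20));		/* 0x20 */
	printf("add    0x20: 0x%02x\n", mask_add(opm, 0x20));		/* 0xe0 */
	printf("remove 0x40: 0x%02x\n", mask_remove(opm, 0x40));	/* 0x80 */
	return 0;
}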