Commit 7d6322b4, authored Oct 03, 2005 by Linus Torvalds

    Merge master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-for-linus-2.6

Parents: d6b9acc0, 51c928c3
Showing 28 changed files with 4859 additions and 310 deletions:
drivers/scsi/3w-9xxx.c                    +47   -8
drivers/scsi/3w-9xxx.h                    +10   -7
drivers/scsi/Makefile                     +1    -0
drivers/scsi/aacraid/aachba.c             +245  -38
drivers/scsi/aacraid/aacraid.h            +13   -4
drivers/scsi/aacraid/comminit.c           +12   -5
drivers/scsi/aacraid/commsup.c            +433  -148
drivers/scsi/aacraid/linit.c              +8    -4
drivers/scsi/aic7xxx/aic7770_osm.c        +3    -0
drivers/scsi/aic7xxx/aic79xx_osm.c        +3    -5
drivers/scsi/aic7xxx/aic79xx_osm_pci.c    +3    -0
drivers/scsi/aic7xxx/aic7xxx_osm.c        +3    -5
drivers/scsi/aic7xxx/aic7xxx_osm_pci.c    +3    -0
drivers/scsi/hosts.c                      +1    -1
drivers/scsi/lpfc/lpfc_attr.c             +4    -4
drivers/scsi/lpfc/lpfc_hbadisc.c          +2    -2
drivers/scsi/lpfc/lpfc_hw.h               +2    -2
drivers/scsi/lpfc/lpfc_init.c             +3    -3
drivers/scsi/megaraid.c                   +50   -20
drivers/scsi/megaraid/Kconfig.megaraid    +9    -0
drivers/scsi/megaraid/Makefile            +1    -0
drivers/scsi/megaraid/megaraid_sas.c      +2805 -0
drivers/scsi/megaraid/megaraid_sas.h      +1142 -0
drivers/scsi/qla2xxx/qla_rscn.c           +2    -0
drivers/scsi/scsi_scan.c                  +47   -49
drivers/scsi/scsi_transport_sas.c         +4    -5
include/linux/pci_ids.h                   +2    -0
include/scsi/scsi_device.h                +1    -0
drivers/scsi/3w-9xxx.c

@@ -60,6 +60,7 @@
    Remove un-needed eh_abort handler.
    Add support for embedded firmware error strings.
    2.26.02.003 - Correctly handle single sgl's with use_sg=1.
+   2.26.02.004 - Add support for 9550SX controllers.
 */
 #include <linux/module.h>

@@ -82,7 +83,7 @@
 #include "3w-9xxx.h"

 /* Globals */
-#define TW_DRIVER_VERSION "2.26.02.003"
+#define TW_DRIVER_VERSION "2.26.02.004"
 static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
 static unsigned int twa_device_extension_count;
 static int twa_major = -1;

@@ -892,11 +893,6 @@ static int twa_decode_bits(TW_Device_Extension *tw_dev, u32 status_reg_value)
        writel(TW_CONTROL_CLEAR_QUEUE_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
    }
-   if (status_reg_value & TW_STATUS_SBUF_WRITE_ERROR) {
-       TW_PRINTK(tw_dev->host, TW_DRIVER, 0xf, "SBUF Write Error: clearing");
-       writel(TW_CONTROL_CLEAR_SBUF_WRITE_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
-   }
    if (status_reg_value & TW_STATUS_MICROCONTROLLER_ERROR) {
        if (tw_dev->reset_print == 0) {
            TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Microcontroller Error: clearing");

@@ -930,6 +926,36 @@ static int twa_empty_response_queue(TW_Device_Extension *tw_dev)
    return retval;
 } /* End twa_empty_response_queue() */

+/* This function will clear the pchip/response queue on 9550SX */
+static int twa_empty_response_queue_large(TW_Device_Extension *tw_dev)
+{
+   u32 status_reg_value, response_que_value;
+   int count = 0, retval = 1;
+
+   if (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9550SX) {
+       status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
+
+       while (((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) && (count < TW_MAX_RESPONSE_DRAIN)) {
+           response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR_LARGE(tw_dev));
+           if ((response_que_value & TW_9550SX_DRAIN_COMPLETED) == TW_9550SX_DRAIN_COMPLETED) {
+               /* P-chip settle time */
+               msleep(500);
+               retval = 0;
+               goto out;
+           }
+           status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
+           count++;
+       }
+       if (count == TW_MAX_RESPONSE_DRAIN)
+           goto out;
+
+       retval = 0;
+   } else
+       retval = 0;
+out:
+   return retval;
+} /* End twa_empty_response_queue_large() */
+
 /* This function passes sense keys from firmware to scsi layer */
 static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host)
 {

@@ -1613,8 +1639,16 @@ static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset)
    int tries = 0, retval = 1, flashed = 0, do_soft_reset = soft_reset;

    while (tries < TW_MAX_RESET_TRIES) {
-       if (do_soft_reset)
+       if (do_soft_reset) {
            TW_SOFT_RESET(tw_dev);
+           /* Clear pchip/response queue on 9550SX */
+           if (twa_empty_response_queue_large(tw_dev)) {
+               TW_PRINTK(tw_dev->host, TW_DRIVER, 0x36, "Response queue (large) empty failed during reset sequence");
+               do_soft_reset = 1;
+               tries++;
+               continue;
+           }
+       }

        /* Make sure controller is in a good state */
        if (twa_poll_status(tw_dev, TW_STATUS_MICROCONTROLLER_READY | (do_soft_reset == 1 ? TW_STATUS_ATTENTION_INTERRUPT : 0), 60)) {

@@ -2034,7 +2068,10 @@ static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id
        goto out_free_device_extension;
    }

-   mem_addr = pci_resource_start(pdev, 1);
+   if (pdev->device == PCI_DEVICE_ID_3WARE_9000)
+       mem_addr = pci_resource_start(pdev, 1);
+   else
+       mem_addr = pci_resource_start(pdev, 2);

    /* Save base address */
    tw_dev->base_addr = ioremap(mem_addr, PAGE_SIZE);

@@ -2148,6 +2185,8 @@ static void twa_remove(struct pci_dev *pdev)
 static struct pci_device_id twa_pci_tbl[] __devinitdata = {
    { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9000,
      PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+   { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9550SX,
+     PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
    { }
 };
 MODULE_DEVICE_TABLE(pci, twa_pci_tbl);
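For readers skimming the diff above, the new twa_empty_response_queue_large() helper is a bounded poll-and-drain loop: keep popping the large response queue until the firmware signals drain completion or a retry cap is hit. The following is a minimal standalone sketch of that pattern, not driver code; fake_read_status(), fake_read_response() and the constant values (other than DRAIN_COMPLETED, which matches TW_9550SX_DRAIN_COMPLETED) are invented stand-ins so it compiles and runs anywhere.

/* Sketch of the bounded drain loop, with register reads stubbed out. */
#include <stdio.h>

#define QUEUE_EMPTY        0x1     /* stand-in for TW_STATUS_RESPONSE_QUEUE_EMPTY */
#define DRAIN_COMPLETED    0xFFFF  /* same value as TW_9550SX_DRAIN_COMPLETED */
#define MAX_RESPONSE_DRAIN 256     /* stand-in for TW_MAX_RESPONSE_DRAIN */

static unsigned int fake_read_status(void)   { static int n; return (++n > 5) ? QUEUE_EMPTY : 0; }
static unsigned int fake_read_response(void) { static int n; return (++n > 3) ? DRAIN_COMPLETED : 0; }

/* Returns 0 on success (queue drained), 1 on failure, like the driver helper. */
static int drain_response_queue(void)
{
    unsigned int status, response;
    int count = 0;

    status = fake_read_status();
    while (((status & QUEUE_EMPTY) == 0) && (count < MAX_RESPONSE_DRAIN)) {
        response = fake_read_response();
        if ((response & DRAIN_COMPLETED) == DRAIN_COMPLETED)
            return 0;                /* firmware reported the drain finished */
        status = fake_read_status(); /* re-sample the status and keep popping */
        count++;
    }
    return (count == MAX_RESPONSE_DRAIN) ? 1 : 0;
}

int main(void)
{
    printf("drain result: %d\n", drain_response_queue());
    return 0;
}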
drivers/scsi/3w-9xxx.h

@@ -267,7 +267,6 @@ static twa_message_type twa_error_table[] = {
 #define TW_CONTROL_CLEAR_PARITY_ERROR          0x00800000
 #define TW_CONTROL_CLEAR_QUEUE_ERROR           0x00400000
 #define TW_CONTROL_CLEAR_PCI_ABORT             0x00100000
-#define TW_CONTROL_CLEAR_SBUF_WRITE_ERROR      0x00000008

 /* Status register bit definitions */
 #define TW_STATUS_MAJOR_VERSION_MASK           0xF0000000

@@ -285,9 +284,8 @@ static twa_message_type twa_error_table[] = {
 #define TW_STATUS_MICROCONTROLLER_READY        0x00002000
 #define TW_STATUS_COMMAND_QUEUE_EMPTY          0x00001000
 #define TW_STATUS_EXPECTED_BITS                0x00002000
-#define TW_STATUS_UNEXPECTED_BITS              0x00F00008
-#define TW_STATUS_SBUF_WRITE_ERROR             0x00000008
-#define TW_STATUS_VALID_INTERRUPT              0x00DF0008
+#define TW_STATUS_UNEXPECTED_BITS              0x00F00000
+#define TW_STATUS_VALID_INTERRUPT              0x00DF0000

 /* RESPONSE QUEUE BIT DEFINITIONS */
 #define TW_RESPONSE_ID_MASK                    0x00000FF0

@@ -324,9 +322,9 @@ static twa_message_type twa_error_table[] = {
 /* Compatibility defines */
 #define TW_9000_ARCH_ID 0x5
-#define TW_CURRENT_DRIVER_SRL 28
-#define TW_CURRENT_DRIVER_BUILD 9
-#define TW_CURRENT_DRIVER_BRANCH 4
+#define TW_CURRENT_DRIVER_SRL 30
+#define TW_CURRENT_DRIVER_BUILD 80
+#define TW_CURRENT_DRIVER_BRANCH 0

 /* Phase defines */
 #define TW_PHASE_INITIAL 0

@@ -334,6 +332,7 @@ static twa_message_type twa_error_table[] = {
 #define TW_PHASE_SGLIST 2

 /* Misc defines */
+#define TW_9550SX_DRAIN_COMPLETED             0xFFFF
 #define TW_SECTOR_SIZE                        512
 #define TW_ALIGNMENT_9000                     4  /* 4 bytes */
 #define TW_ALIGNMENT_9000_SGL                 0x3

@@ -417,6 +416,9 @@ static twa_message_type twa_error_table[] = {
 #ifndef PCI_DEVICE_ID_3WARE_9000
 #define PCI_DEVICE_ID_3WARE_9000 0x1002
 #endif
+#ifndef PCI_DEVICE_ID_3WARE_9550SX
+#define PCI_DEVICE_ID_3WARE_9550SX 0x1003
+#endif

 /* Bitmask macros to eliminate bitfields */

@@ -443,6 +445,7 @@ static twa_message_type twa_error_table[] = {
 #define TW_STATUS_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + 0x4)
 #define TW_COMMAND_QUEUE_REG_ADDR(x) (sizeof(dma_addr_t) > 4 ? ((unsigned char __iomem *)x->base_addr + 0x20) : ((unsigned char __iomem *)x->base_addr + 0x8))
 #define TW_RESPONSE_QUEUE_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + 0xC)
+#define TW_RESPONSE_QUEUE_REG_ADDR_LARGE(x) ((unsigned char __iomem *)x->base_addr + 0x30)
 #define TW_CLEAR_ALL_INTERRUPTS(x) (writel(TW_STATUS_VALID_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
 #define TW_CLEAR_ATTENTION_INTERRUPT(x) (writel(TW_CONTROL_CLEAR_ATTENTION_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
 #define TW_CLEAR_HOST_INTERRUPT(x) (writel(TW_CONTROL_CLEAR_HOST_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
drivers/scsi/Makefile

@@ -99,6 +99,7 @@ obj-$(CONFIG_SCSI_DC395x) += dc395x.o
 obj-$(CONFIG_SCSI_DC390T)      += tmscsim.o
 obj-$(CONFIG_MEGARAID_LEGACY)  += megaraid.o
 obj-$(CONFIG_MEGARAID_NEWGEN)  += megaraid/
+obj-$(CONFIG_MEGARAID_SAS)     += megaraid/
 obj-$(CONFIG_SCSI_ACARD)       += atp870u.o
 obj-$(CONFIG_SCSI_SUNESP)      += esp.o
 obj-$(CONFIG_SCSI_GDTH)        += gdth.o
drivers/scsi/aacraid/aachba.c

@@ -313,18 +313,37 @@ int aac_get_containers(struct aac_dev *dev)
        }
        dresp = (struct aac_mount *) fib_data(fibptr);

+       if ((le32_to_cpu(dresp->status) == ST_OK) &&
+           (le32_to_cpu(dresp->mnt[0].vol) == CT_NONE)) {
+           dinfo->command = cpu_to_le32(VM_NameServe64);
+           dinfo->count = cpu_to_le32(index);
+           dinfo->type = cpu_to_le32(FT_FILESYS);
+
+           if (fib_send(ContainerCommand,
+               fibptr,
+               sizeof(struct aac_query_mount),
+               FsaNormal,
+               1, 1,
+               NULL, NULL) < 0)
+               continue;
+       } else
+           dresp->mnt[0].capacityhigh = 0;
+
        dprintk((KERN_DEBUG
-         "VM_NameServe cid=%d status=%d vol=%d state=%d cap=%u\n",
+         "VM_NameServe cid=%d status=%d vol=%d state=%d cap=%llu\n",
          (int)index, (int)le32_to_cpu(dresp->status),
          (int)le32_to_cpu(dresp->mnt[0].vol),
          (int)le32_to_cpu(dresp->mnt[0].state),
-         (unsigned)le32_to_cpu(dresp->mnt[0].capacity)));
+         ((u64)le32_to_cpu(dresp->mnt[0].capacity)) +
+           (((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32)));
        if ((le32_to_cpu(dresp->status) == ST_OK) &&
            (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
            (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
            fsa_dev_ptr[index].valid = 1;
            fsa_dev_ptr[index].type = le32_to_cpu(dresp->mnt[0].vol);
-           fsa_dev_ptr[index].size = le32_to_cpu(dresp->mnt[0].capacity);
+           fsa_dev_ptr[index].size
+             = ((u64)le32_to_cpu(dresp->mnt[0].capacity)) +
+               (((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32);
            if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY)
                fsa_dev_ptr[index].ro = 1;
        }

@@ -460,7 +479,7 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd, int cid)
  * is updated in the struct fsa_dev_info structure rather than returned.
  */
-static int probe_container(struct aac_dev *dev, int cid)
+int probe_container(struct aac_dev *dev, int cid)
 {
    struct fsa_dev_info *fsa_dev_ptr;
    int status;

@@ -496,12 +515,30 @@ static int probe_container(struct aac_dev *dev, int cid)
    dresp = (struct aac_mount *) fib_data(fibptr);

+   if ((le32_to_cpu(dresp->status) == ST_OK) &&
+       (le32_to_cpu(dresp->mnt[0].vol) == CT_NONE)) {
+       dinfo->command = cpu_to_le32(VM_NameServe64);
+       dinfo->count = cpu_to_le32(cid);
+       dinfo->type = cpu_to_le32(FT_FILESYS);
+
+       if (fib_send(ContainerCommand,
+           fibptr,
+           sizeof(struct aac_query_mount),
+           FsaNormal,
+           1, 1,
+           NULL, NULL) < 0)
+           goto error;
+   } else
+       dresp->mnt[0].capacityhigh = 0;
+
    if ((le32_to_cpu(dresp->status) == ST_OK) &&
        (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
        (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
        fsa_dev_ptr[cid].valid = 1;
        fsa_dev_ptr[cid].type = le32_to_cpu(dresp->mnt[0].vol);
-       fsa_dev_ptr[cid].size = le32_to_cpu(dresp->mnt[0].capacity);
+       fsa_dev_ptr[cid].size
+         = ((u64)le32_to_cpu(dresp->mnt[0].capacity)) +
+           (((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32);
        if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY)
            fsa_dev_ptr[cid].ro = 1;
    }

@@ -655,7 +692,7 @@ int aac_get_adapter_info(struct aac_dev* dev)
            fibptr,
            sizeof(*info),
            FsaNormal,
-           1, 1,
+           -1, 1, /* First `interrupt' command uses special wait */
            NULL,
            NULL);

@@ -806,8 +843,8 @@ int aac_get_adapter_info(struct aac_dev* dev)
    if (!(dev->raw_io_interface)) {
        dev->scsi_host_ptr->sg_tablesize = (dev->max_fib_size -
            sizeof(struct aac_fibhdr) -
-           sizeof(struct aac_write) + sizeof(struct sgmap)) /
-               sizeof(struct sgmap);
+           sizeof(struct aac_write) + sizeof(struct sgentry)) /
+               sizeof(struct sgentry);
        if (dev->dac_support) {
            /*
             * 38 scatter gather elements

@@ -816,8 +853,8 @@ int aac_get_adapter_info(struct aac_dev* dev)
            (dev->max_fib_size -
            sizeof(struct aac_fibhdr) -
-           sizeof(struct aac_write64) + sizeof(struct sgmap64)) /
-               sizeof(struct sgmap64);
+           sizeof(struct aac_write64) + sizeof(struct sgentry64)) /
+               sizeof(struct sgentry64);
        }
        dev->scsi_host_ptr->max_sectors = AAC_MAX_32BIT_SGBCOUNT;
        if (!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) {

@@ -854,7 +891,40 @@ static void io_callback(void *context, struct fib * fibptr)
    dev = (struct aac_dev *)scsicmd->device->host->hostdata;
    cid = ID_LUN_TO_CONTAINER(scsicmd->device->id, scsicmd->device->lun);

-   dprintk((KERN_DEBUG "io_callback[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3], jiffies));
+   if (nblank(dprintk(x))) {
+       u64 lba;
+       switch (scsicmd->cmnd[0]) {
+       case WRITE_6:
+       case READ_6:
+           lba = ((scsicmd->cmnd[1] & 0x1F) << 16) |
+               (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
+           break;
+       case WRITE_16:
+       case READ_16:
+           lba = ((u64)scsicmd->cmnd[2] << 56) |
+                 ((u64)scsicmd->cmnd[3] << 48) |
+                 ((u64)scsicmd->cmnd[4] << 40) |
+                 ((u64)scsicmd->cmnd[5] << 32) |
+                 ((u64)scsicmd->cmnd[6] << 24) |
+                 (scsicmd->cmnd[7] << 16) |
+                 (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
+           break;
+       case WRITE_12:
+       case READ_12:
+           lba = ((u64)scsicmd->cmnd[2] << 24) |
+                 (scsicmd->cmnd[3] << 16) |
+                 (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
+           break;
+       default:
+           lba = ((u64)scsicmd->cmnd[2] << 24) |
+                 (scsicmd->cmnd[3] << 16) |
+                 (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
+           break;
+       }
+       printk(KERN_DEBUG "io_callback[cpu %d]: lba = %llu, t = %ld.\n",
+         smp_processor_id(), (unsigned long long)lba, jiffies);
+   }

    if (fibptr == NULL)
        BUG();

@@ -895,7 +965,7 @@ static void io_callback(void *context, struct fib * fibptr)
 static int aac_read(struct scsi_cmnd * scsicmd, int cid)
 {
-   u32 lba;
+   u64 lba;
    u32 count;
    int status;

@@ -907,23 +977,69 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
    /*
     *  Get block address and transfer length
     */
-   if (scsicmd->cmnd[0] == READ_6) /* 6 byte command */
-   {
+   switch (scsicmd->cmnd[0]) {
+   case READ_6:
        dprintk((KERN_DEBUG "aachba: received a read(6) command on id %d.\n", cid));

-       lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
+       lba = ((scsicmd->cmnd[1] & 0x1F) << 16) |
+           (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
        count = scsicmd->cmnd[4];

        if (count == 0)
            count = 256;
-   } else {
+       break;
+   case READ_16:
+       dprintk((KERN_DEBUG "aachba: received a read(16) command on id %d.\n", cid));
+
+       lba =   ((u64)scsicmd->cmnd[2] << 56) |
+           ((u64)scsicmd->cmnd[3] << 48) |
+           ((u64)scsicmd->cmnd[4] << 40) |
+           ((u64)scsicmd->cmnd[5] << 32) |
+           ((u64)scsicmd->cmnd[6] << 24) |
+           (scsicmd->cmnd[7] << 16) |
+           (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
+       count = (scsicmd->cmnd[10] << 24) |
+           (scsicmd->cmnd[11] << 16) |
+           (scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13];
+       break;
+   case READ_12:
+       dprintk((KERN_DEBUG "aachba: received a read(12) command on id %d.\n", cid));
+
+       lba = ((u64)scsicmd->cmnd[2] << 24) |
+           (scsicmd->cmnd[3] << 16) |
+           (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
+       count = (scsicmd->cmnd[6] << 24) |
+           (scsicmd->cmnd[7] << 16) |
+           (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
+       break;
+   default:
        dprintk((KERN_DEBUG "aachba: received a read(10) command on id %d.\n", cid));

-       lba = (scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
+       lba = ((u64)scsicmd->cmnd[2] << 24) |
+           (scsicmd->cmnd[3] << 16) |
+           (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
        count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
+       break;
    }
-   dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %u, t = %ld.\n",
+   dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %llu, t = %ld.\n",
      smp_processor_id(), (unsigned long long)lba, jiffies));
+   if ((!(dev->raw_io_interface) || !(dev->raw_io_64)) &&
+       (lba & 0xffffffff00000000LL)) {
+       dprintk((KERN_DEBUG "aac_read: Illegal lba\n"));
+       scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+           SAM_STAT_CHECK_CONDITION;
+       set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
+               HARDWARE_ERROR,
+               SENCODE_INTERNAL_TARGET_FAILURE,
+               ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
+               0, 0);
+       memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
+         (sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer))
+           ? sizeof(scsicmd->sense_buffer)
+           : sizeof(dev->fsa_dev[cid].sense_data));
+       scsicmd->scsi_done(scsicmd);
+       return 0;
+   }
    /*
     *  Alocate and initialize a Fib
     */

@@ -936,8 +1052,8 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
    if (dev->raw_io_interface) {
        struct aac_raw_io *readcmd;
        readcmd = (struct aac_raw_io *) fib_data(cmd_fibcontext);
-       readcmd->block[0] = cpu_to_le32(lba);
-       readcmd->block[1] = 0;
+       readcmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
+       readcmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
        readcmd->count = cpu_to_le32(count<<9);
        readcmd->cid = cpu_to_le16(cid);
        readcmd->flags = cpu_to_le16(1);

@@ -964,7 +1080,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
        readcmd->command = cpu_to_le32(VM_CtHostRead64);
        readcmd->cid = cpu_to_le16(cid);
        readcmd->sector_count = cpu_to_le16(count);
-       readcmd->block = cpu_to_le32(lba);
+       readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
        readcmd->pad = 0;
        readcmd->flags = 0;

@@ -989,7 +1105,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
        readcmd = (struct aac_read *) fib_data(cmd_fibcontext);
        readcmd->command = cpu_to_le32(VM_CtBlockRead);
        readcmd->cid = cpu_to_le32(cid);
-       readcmd->block = cpu_to_le32(lba);
+       readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
        readcmd->count = cpu_to_le32(count * 512);

        aac_build_sg(scsicmd, &readcmd->sg);

@@ -1031,7 +1147,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
 static int aac_write(struct scsi_cmnd * scsicmd, int cid)
 {
-   u32 lba;
+   u64 lba;
    u32 count;
    int status;
    u16 fibsize;

@@ -1048,13 +1164,48 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
        count = scsicmd->cmnd[4];
        if (count == 0)
            count = 256;
+   } else if (scsicmd->cmnd[0] == WRITE_16) { /* 16 byte command */
+       dprintk((KERN_DEBUG "aachba: received a write(16) command on id %d.\n", cid));
+
+       lba =   ((u64)scsicmd->cmnd[2] << 56) |
+           ((u64)scsicmd->cmnd[3] << 48) |
+           ((u64)scsicmd->cmnd[4] << 40) |
+           ((u64)scsicmd->cmnd[5] << 32) |
+           ((u64)scsicmd->cmnd[6] << 24) |
+           (scsicmd->cmnd[7] << 16) |
+           (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
+       count = (scsicmd->cmnd[10] << 24) | (scsicmd->cmnd[11] << 16) |
+           (scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13];
+   } else if (scsicmd->cmnd[0] == WRITE_12) { /* 12 byte command */
+       dprintk((KERN_DEBUG "aachba: received a write(12) command on id %d.\n", cid));
+
+       lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16)
+           | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
+       count = (scsicmd->cmnd[6] << 24) | (scsicmd->cmnd[7] << 16) |
+           (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
    } else {
        dprintk((KERN_DEBUG "aachba: received a write(10) command on id %d.\n", cid));
-       lba = (scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
+       lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16)
+           | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
        count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
    }
-   dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %u, t = %ld.\n",
+   dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n",
      smp_processor_id(), (unsigned long long)lba, jiffies));
+   if ((!(dev->raw_io_interface) || !(dev->raw_io_64)) &&
+       (lba & 0xffffffff00000000LL)) {
+       dprintk((KERN_DEBUG "aac_write: Illegal lba\n"));
+       scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+           SAM_STAT_CHECK_CONDITION;
+       set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
+               HARDWARE_ERROR,
+               SENCODE_INTERNAL_TARGET_FAILURE,
+               ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
+               0, 0);
+       memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
+         (sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer))
+           ? sizeof(scsicmd->sense_buffer)
+           : sizeof(dev->fsa_dev[cid].sense_data));
+       scsicmd->scsi_done(scsicmd);
+       return 0;
+   }
    /*
     *  Allocate and initialize a Fib then setup a BlockWrite command
    */

@@ -1068,8 +1219,8 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
    if (dev->raw_io_interface) {
        struct aac_raw_io *writecmd;
        writecmd = (struct aac_raw_io *) fib_data(cmd_fibcontext);
-       writecmd->block[0] = cpu_to_le32(lba);
-       writecmd->block[1] = 0;
+       writecmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
+       writecmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
        writecmd->count = cpu_to_le32(count<<9);
        writecmd->cid = cpu_to_le16(cid);
        writecmd->flags = 0;

@@ -1096,7 +1247,7 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
        writecmd->command = cpu_to_le32(VM_CtHostWrite64);
        writecmd->cid = cpu_to_le16(cid);
        writecmd->sector_count = cpu_to_le16(count);
-       writecmd->block = cpu_to_le32(lba);
+       writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
        writecmd->pad = 0;
        writecmd->flags = 0;

@@ -1121,7 +1272,7 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
        writecmd = (struct aac_write *) fib_data(cmd_fibcontext);
        writecmd->command = cpu_to_le32(VM_CtBlockWrite);
        writecmd->cid = cpu_to_le32(cid);
-       writecmd->block = cpu_to_le32(lba);
+       writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
        writecmd->count = cpu_to_le32(count * 512);
        writecmd->sg.count = cpu_to_le32(1);
        /* ->stable is not used - it did mean which type of write */

@@ -1310,11 +1461,18 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
     */
    if ((fsa_dev_ptr[cid].valid & 1) == 0) {
        switch (scsicmd->cmnd[0]) {
+       case SERVICE_ACTION_IN:
+           if (!(dev->raw_io_interface) ||
+               !(dev->raw_io_64) ||
+               ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
+               break;
        case INQUIRY:
        case READ_CAPACITY:
        case TEST_UNIT_READY:
            spin_unlock_irq(host->host_lock);
            probe_container(dev, cid);
+           if ((fsa_dev_ptr[cid].valid & 1) == 0)
+               fsa_dev_ptr[cid].valid = 0;
            spin_lock_irq(host->host_lock);
            if (fsa_dev_ptr[cid].valid == 0) {
                scsicmd->result = DID_NO_CONNECT << 16;

@@ -1375,7 +1533,6 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
        memset(&inq_data, 0, sizeof (struct inquiry_data));

        inq_data.inqd_ver = 2;  /* claim compliance to SCSI-2 */
-       inq_data.inqd_dtq = 0x80;   /* set RMB bit to one indicating that the medium is removable */
        inq_data.inqd_rdf = 2;  /* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */
        inq_data.inqd_len = 31;
        /*Format for "pad2" is  RelAdr | WBus32 | WBus16 |  Sync  | Linked |Reserved| CmdQue | SftRe */

@@ -1397,13 +1554,55 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
            aac_internal_transfer(scsicmd, &inq_data, 0, sizeof(inq_data));
            return aac_get_container_name(scsicmd, cid);
        }
+   case SERVICE_ACTION_IN:
+       if (!(dev->raw_io_interface) ||
+           !(dev->raw_io_64) ||
+           ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
+           break;
+   {
+       u64 capacity;
+       char cp[12];
+       unsigned int offset = 0;
+
+       dprintk((KERN_DEBUG "READ CAPACITY_16 command.\n"));
+       capacity = fsa_dev_ptr[cid].size - 1;
+       if (scsicmd->cmnd[13] > 12) {
+           offset = scsicmd->cmnd[13] - 12;
+           if (offset > sizeof(cp))
+               break;
+           memset(cp, 0, offset);
+           aac_internal_transfer(scsicmd, cp, 0, offset);
+       }
+       cp[0] = (capacity >> 56) & 0xff;
+       cp[1] = (capacity >> 48) & 0xff;
+       cp[2] = (capacity >> 40) & 0xff;
+       cp[3] = (capacity >> 32) & 0xff;
+       cp[4] = (capacity >> 24) & 0xff;
+       cp[5] = (capacity >> 16) & 0xff;
+       cp[6] = (capacity >> 8) & 0xff;
+       cp[7] = (capacity >> 0) & 0xff;
+       cp[8] = 0;
+       cp[9] = 0;
+       cp[10] = 2;
+       cp[11] = 0;
+       aac_internal_transfer(scsicmd, cp, offset, sizeof(cp));
+
+       /* Do not cache partition table for arrays */
+       scsicmd->device->removable = 1;
+
+       scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+         SAM_STAT_GOOD;
+       scsicmd->scsi_done(scsicmd);
+
+       return 0;
+   }
+
    case READ_CAPACITY:
    {
        u32 capacity;
        char cp[8];

        dprintk((KERN_DEBUG "READ CAPACITY command.\n"));
-       if (fsa_dev_ptr[cid].size <= 0x100000000LL)
+       if (fsa_dev_ptr[cid].size <= 0x100000000ULL)
            capacity = fsa_dev_ptr[cid].size - 1;
        else
            capacity = (u32)-1;

@@ -1417,6 +1616,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
        cp[6] = 2;
        cp[7] = 0;
        aac_internal_transfer(scsicmd, cp, 0, sizeof(cp));
+       /* Do not cache partition table for arrays */
+       scsicmd->device->removable = 1;

        scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
        scsicmd->scsi_done(scsicmd);

@@ -1497,6 +1698,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
    {
        case READ_6:
        case READ_10:
+       case READ_12:
+       case READ_16:
            /*
             *  Hack to keep track of ordinal number of the device that
             *  corresponds to a container. Needed to convert

@@ -1504,17 +1707,19 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
             */
            spin_unlock_irq(host->host_lock);
-           if (scsicmd->request->rq_disk)
-               memcpy(fsa_dev_ptr[cid].devname,
-                   scsicmd->request->rq_disk->disk_name,
-                   8);
-
+           if (scsicmd->request->rq_disk)
+               strlcpy(fsa_dev_ptr[cid].devname,
+               scsicmd->request->rq_disk->disk_name,
+               min(sizeof(fsa_dev_ptr[cid].devname),
+               sizeof(scsicmd->request->rq_disk->disk_name) + 1));
            ret = aac_read(scsicmd, cid);
            spin_lock_irq(host->host_lock);
            return ret;

        case WRITE_6:
        case WRITE_10:
+       case WRITE_12:
+       case WRITE_16:
            spin_unlock_irq(host->host_lock);
            ret = aac_write(scsicmd, cid);
            spin_lock_irq(host->host_lock);

@@ -1745,6 +1950,8 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
    case WRITE_10:
    case READ_12:
    case WRITE_12:
+   case READ_16:
+   case WRITE_16:
        if (le32_to_cpu(srbreply->data_xfer_length) < scsicmd->underflow) {
            printk(KERN_WARNING"aacraid: SCSI CMD underflow\n");
        } else {

@@ -1850,8 +2057,8 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
        sizeof(scsicmd->sense_buffer) :
        le32_to_cpu(srbreply->sense_data_size);
 #ifdef AAC_DETAILED_STATUS_INFO
-       dprintk((KERN_WARNING "aac_srb_callback: check condition, status = %d len=%d\n",
-                   le32_to_cpu(srbreply->status), len));
+       printk(KERN_WARNING "aac_srb_callback: check condition, status = %d len=%d\n",
+                   le32_to_cpu(srbreply->status), len);
 #endif
        memcpy(scsicmd->sense_buffer, srbreply->sense_data, len);
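The aachba.c hunks above assemble a 64-bit LBA from the big-endian CDB bytes of READ(16)/WRITE(16) and then split it into the two 32-bit block words of the raw I/O request. The following is a standalone worked example of that arithmetic, not driver code; the sample CDB byte values are made up for illustration.

/* Sketch: 64-bit LBA from a READ(16) CDB, then split into low/high words. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* READ(16): opcode 0x88, bytes 2..9 carry the LBA (big-endian),
     * bytes 10..13 the transfer length. Values here are arbitrary. */
    uint8_t cmnd[16] = { 0x88, 0, 0x00, 0x00, 0x00, 0x01, 0x23, 0x45,
                         0x67, 0x89, 0x00, 0x00, 0x00, 0x08, 0, 0 };

    uint64_t lba = ((uint64_t)cmnd[2] << 56) | ((uint64_t)cmnd[3] << 48) |
                   ((uint64_t)cmnd[4] << 40) | ((uint64_t)cmnd[5] << 32) |
                   ((uint64_t)cmnd[6] << 24) | ((uint64_t)cmnd[7] << 16) |
                   ((uint64_t)cmnd[8] << 8)  |  (uint64_t)cmnd[9];
    uint32_t count = ((uint32_t)cmnd[10] << 24) | ((uint32_t)cmnd[11] << 16) |
                     ((uint32_t)cmnd[12] << 8)  |  (uint32_t)cmnd[13];

    /* The raw I/O request carries the LBA as two 32-bit words. */
    uint32_t block0 = (uint32_t)(lba & 0xffffffff);
    uint32_t block1 = (uint32_t)((lba & 0xffffffff00000000ULL) >> 32);

    printf("lba=%llu count=%u block[0]=0x%08x block[1]=0x%08x\n",
           (unsigned long long)lba, count, block0, block1);
    return 0;
}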
drivers/scsi/aacraid/aacraid.h

 #if (!defined(dprintk))
 # define dprintk(x)
 #endif
+/* eg: if (nblank(dprintk(x))) */
+#define _nblank(x) #x
+#define nblank(x) _nblank(x)[0]

 /*------------------------------------------------------------------------------
  *              D E F I N E S
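A note on the nblank() trick added above: _nblank(x) stringizes its argument and nblank(x) takes the first character of that string, so the result is non-zero exactly when the argument expands to non-empty text. With dprintk(x) defined away to nothing, nblank(dprintk(x)) is 0 and the debug-only block is skipped; with a real dprintk it is non-zero. The snippet below is a minimal standalone illustration (not kernel code) of the same three macros.

/* Sketch: nblank() evaluates to 0 when the macro argument expands to nothing. */
#include <stdio.h>

#define dprintk(x)            /* debugging disabled: expands to nothing */
#define _nblank(x) #x
#define nblank(x) _nblank(x)[0]

int main(void)
{
    if (nblank(dprintk(x)))
        printf("debug block would run\n");
    else
        printf("debug block compiled out (nblank == 0)\n");
    return 0;
}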
@@ -302,7 +306,6 @@ enum aac_queue_types {
 */

 #define FsaNormal  1
-#define FsaHigh    2

 /*
  * Define the FIB. The FIB is the where all the requested data and

@@ -546,8 +549,6 @@ struct aac_queue {
        /* This is only valid for adapter to host command queues. */
    spinlock_t      *lock;      /* Spinlock for this queue must take this lock before accessing the lock */
    spinlock_t      lockdata;   /* Actual lock (used only on one side of the lock) */
-   unsigned long       SavedIrql;  /* Previous IRQL when the spin lock is taken */
-   u32         padding;    /* Padding - FIXME - can remove I believe */
    struct list_head    cmdq;       /* A queue of FIBs which need to be prcessed by the FS thread. This is */
                        /* only valid for command queues which receive entries from the adapter. */
    struct list_head    pendingq;   /* A queue of outstanding fib's to the adapter. */

@@ -776,7 +777,9 @@ struct fsa_dev_info {
    u64     last;
    u64     size;
    u32     type;
+   u32     config_waiting_on;
    u16     queue_depth;
+   u8      config_needed;
    u8      valid;
    u8      ro;
    u8      locked;

@@ -1012,6 +1015,7 @@ struct aac_dev
 /* macro side-effects BEWARE */
 # define            raw_io_interface \
    init->InitStructRevision==cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4)
+   u8          raw_io_64;
    u8          printf_enabled;
 };

@@ -1362,8 +1366,10 @@ struct aac_srb_reply
 #define        VM_CtBlockVerify64  18
 #define        VM_CtHostRead64     19
 #define        VM_CtHostWrite64    20
+#define        VM_DrvErrTblLog     21
+#define        VM_NameServe64      22

-#define        MAX_VMCOMMAND_NUM   21  /* used for sizing stats array - leave last */
+#define        MAX_VMCOMMAND_NUM   23  /* used for sizing stats array - leave last */

 /*
  * Descriptive information (eg, vital stats)

@@ -1472,6 +1478,7 @@ struct aac_mntent {
                        manager (eg, filesystem) */
    __le32      altoid;     /* != oid <==> snapshot or
                       broken mirror exists */
+   __le32      capacityhigh;
 };

 #define FSCS_NOTCLEAN  0x0001  /* fsck is neccessary before mounting */

@@ -1707,6 +1714,7 @@ extern struct aac_common aac_config;
 #define        AifCmdJobProgress   2   /* Progress report */
 #define            AifJobCtrZero   101 /* Array Zero progress */
 #define            AifJobStsSuccess 1  /* Job completes */
+#define            AifJobStsRunning 102    /* Job running */
 #define        AifCmdAPIReport     3   /* Report from other user of API */
 #define        AifCmdDriverNotify  4   /* Notify host driver of event */
 #define            AifDenMorphComplete 200 /* A morph operation completed */

@@ -1777,6 +1785,7 @@ int fib_adapter_complete(struct fib * fibptr, unsigned short size);
 struct aac_driver_ident* aac_get_driver_ident(int devtype);
 int aac_get_adapter_info(struct aac_dev* dev);
 int aac_send_shutdown(struct aac_dev *dev);
+int probe_container(struct aac_dev *dev, int cid);
 extern int numacb;
 extern int acbsize;
 extern char aac_driver_version[];
drivers/scsi/aacraid/comminit.c

@@ -195,7 +195,7 @@ int aac_send_shutdown(struct aac_dev * dev)
            fibctx,
            sizeof(struct aac_close),
            FsaNormal,
-           1, 1,
+           -2 /* Timeout silently */, 1,
            NULL, NULL);

    if (status == 0)

@@ -313,8 +313,15 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
    dev->max_fib_size = sizeof(struct hw_fib);
    dev->sg_tablesize = host->sg_tablesize = (dev->max_fib_size
        - sizeof(struct aac_fibhdr)
-       - sizeof(struct aac_write) + sizeof(struct sgmap))
-           / sizeof(struct sgmap);
+       - sizeof(struct aac_write) + sizeof(struct sgentry))
+           / sizeof(struct sgentry);
+   dev->raw_io_64 = 0;
+   if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES,
+       0, 0, 0, 0, 0, 0, status+0, status+1, status+2, NULL, NULL)) &&
+           (status[0] == 0x00000001)) {
+       if (status[1] & AAC_OPT_NEW_COMM_64)
+           dev->raw_io_64 = 1;
+   }
    if ((!aac_adapter_sync_cmd(dev, GET_COMM_PREFERRED_SETTINGS,
      0, 0, 0, 0, 0, 0,
      status+0, status+1, status+2, status+3, status+4))

@@ -342,8 +349,8 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
        dev->max_fib_size = 512;
        dev->sg_tablesize = host->sg_tablesize
          = (512 - sizeof(struct aac_fibhdr)
-           - sizeof(struct aac_write) + sizeof(struct sgmap))
-            / sizeof(struct sgmap);
+           - sizeof(struct aac_write) + sizeof(struct sgentry))
+            / sizeof(struct sgentry);
        host->can_queue = AAC_NUM_IO_FIB;
    } else if (acbsize == 2048) {
        host->max_sectors = 512;
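The sg_tablesize formula used above counts how many scatter-gather entries fit in a FIB once the FIB header and the write command are subtracted, plus the one entry already embedded in the command. The arithmetic below is a standalone sketch with made-up structure sizes (the real sizes come from the aacraid headers); only the shape of the formula is taken from the diff.

/* Sketch of the sg_tablesize arithmetic with hypothetical sizes. */
#include <stdio.h>

int main(void)
{
    unsigned max_fib_size = 512;   /* hypothetical FIB size */
    unsigned fibhdr_size  = 32;    /* hypothetical sizeof(struct aac_fibhdr) */
    unsigned write_size   = 48;    /* hypothetical sizeof(struct aac_write), incl. 1 sgentry */
    unsigned sgentry_size = 8;     /* hypothetical sizeof(struct sgentry) */

    unsigned sg_tablesize = (max_fib_size - fibhdr_size - write_size + sgentry_size)
                            / sgentry_size;

    printf("sg_tablesize = %u\n", sg_tablesize);  /* (512-32-48+8)/8 = 55 */
    return 0;
}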
drivers/scsi/aacraid/commsup.c
View file @
7d6322b4
...
...
@@ -39,7 +39,9 @@
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <asm/semaphore.h>
#include <asm/delay.h>
#include "aacraid.h"
...
...
@@ -269,40 +271,22 @@ static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entr
/* Interrupt Moderation, only interrupt for first two entries */
if
(
idx
!=
le32_to_cpu
(
*
(
q
->
headers
.
consumer
)))
{
if
(
--
idx
==
0
)
{
if
(
qid
==
AdapHighCmdQueue
)
idx
=
ADAP_HIGH_CMD_ENTRIES
;
else
if
(
qid
==
AdapNormCmdQueue
)
if
(
qid
==
AdapNormCmdQueue
)
idx
=
ADAP_NORM_CMD_ENTRIES
;
else
if
(
qid
==
AdapHighRespQueue
)
idx
=
ADAP_HIGH_RESP_ENTRIES
;
else
if
(
qid
==
AdapNormRespQueue
)
else
idx
=
ADAP_NORM_RESP_ENTRIES
;
}
if
(
idx
!=
le32_to_cpu
(
*
(
q
->
headers
.
consumer
)))
*
nonotify
=
1
;
}
if
(
qid
==
AdapHighCmdQueue
)
{
if
(
*
index
>=
ADAP_HIGH_CMD_ENTRIES
)
*
index
=
0
;
}
else
if
(
qid
==
AdapNormCmdQueue
)
{
if
(
qid
==
AdapNormCmdQueue
)
{
if
(
*
index
>=
ADAP_NORM_CMD_ENTRIES
)
*
index
=
0
;
/* Wrap to front of the Producer Queue. */
}
else
if
(
qid
==
AdapHighRespQueue
)
{
if
(
*
index
>=
ADAP_HIGH_RESP_ENTRIES
)
*
index
=
0
;
}
else
if
(
qid
==
AdapNormRespQueue
)
{
}
else
{
if
(
*
index
>=
ADAP_NORM_RESP_ENTRIES
)
*
index
=
0
;
/* Wrap to front of the Producer Queue. */
}
else
{
printk
(
"aacraid: invalid qid
\n
"
);
BUG
();
}
if
((
*
index
+
1
)
==
le32_to_cpu
(
*
(
q
->
headers
.
consumer
)))
{
/* Queue is full */
printk
(
KERN_WARNING
"Queue %d full, %u outstanding.
\n
"
,
...
...
@@ -334,12 +318,8 @@ static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_f
{
struct
aac_entry
*
entry
=
NULL
;
int
map
=
0
;
struct
aac_queue
*
q
=
&
dev
->
queues
->
queue
[
qid
];
spin_lock_irqsave
(
q
->
lock
,
q
->
SavedIrql
);
if
(
qid
==
AdapHighCmdQueue
||
qid
==
AdapNormCmdQueue
)
{
if
(
qid
==
AdapNormCmdQueue
)
{
/* if no entries wait for some if caller wants to */
while
(
!
aac_get_entry
(
dev
,
qid
,
&
entry
,
index
,
nonotify
))
{
...
...
@@ -350,9 +330,7 @@ static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_f
*/
entry
->
size
=
cpu_to_le32
(
le16_to_cpu
(
hw_fib
->
header
.
Size
));
map
=
1
;
}
else
if
(
qid
==
AdapHighRespQueue
||
qid
==
AdapNormRespQueue
)
{
}
else
{
while
(
!
aac_get_entry
(
dev
,
qid
,
&
entry
,
index
,
nonotify
))
{
/* if no entries wait for some if caller wants to */
...
...
@@ -375,42 +353,6 @@ static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_f
return
0
;
}
/**
* aac_insert_entry - insert a queue entry
* @dev: Adapter
* @index: Index of entry to insert
* @qid: Queue number
* @nonotify: Suppress adapter notification
*
* Gets the next free QE off the requested priorty adapter command
* queue and associates the Fib with the QE. The QE represented by
* index is ready to insert on the queue when this routine returns
* success.
*/
static
int
aac_insert_entry
(
struct
aac_dev
*
dev
,
u32
index
,
u32
qid
,
unsigned
long
nonotify
)
{
struct
aac_queue
*
q
=
&
dev
->
queues
->
queue
[
qid
];
if
(
q
==
NULL
)
BUG
();
*
(
q
->
headers
.
producer
)
=
cpu_to_le32
(
index
+
1
);
spin_unlock_irqrestore
(
q
->
lock
,
q
->
SavedIrql
);
if
(
qid
==
AdapHighCmdQueue
||
qid
==
AdapNormCmdQueue
||
qid
==
AdapHighRespQueue
||
qid
==
AdapNormRespQueue
)
{
if
(
!
nonotify
)
aac_adapter_notify
(
dev
,
qid
);
}
else
printk
(
"Suprise insert!
\n
"
);
return
0
;
}
/*
* Define the highest level of host to adapter communication routines.
* These routines will support host to adapter FS commuication. These
...
...
@@ -439,12 +381,13 @@ static int aac_insert_entry(struct aac_dev * dev, u32 index, u32 qid, unsigned l
int
fib_send
(
u16
command
,
struct
fib
*
fibptr
,
unsigned
long
size
,
int
priority
,
int
wait
,
int
reply
,
fib_callback
callback
,
void
*
callback_data
)
{
u32
index
;
u32
qid
;
struct
aac_dev
*
dev
=
fibptr
->
dev
;
unsigned
long
nointr
=
0
;
struct
hw_fib
*
hw_fib
=
fibptr
->
hw_fib
;
struct
aac_queue
*
q
;
unsigned
long
flags
=
0
;
unsigned
long
qflags
;
if
(
!
(
hw_fib
->
header
.
XferState
&
cpu_to_le32
(
HostOwned
)))
return
-
EBUSY
;
/*
...
...
@@ -497,26 +440,8 @@ int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority
* Get a queue entry connect the FIB to it and send an notify
* the adapter a command is ready.
*/
if
(
priority
==
FsaHigh
)
{
hw_fib
->
header
.
XferState
|=
cpu_to_le32
(
HighPriority
);
qid
=
AdapHighCmdQueue
;
}
else
{
hw_fib
->
header
.
XferState
|=
cpu_to_le32
(
NormalPriority
);
qid
=
AdapNormCmdQueue
;
}
q
=
&
dev
->
queues
->
queue
[
qid
];
hw_fib
->
header
.
XferState
|=
cpu_to_le32
(
NormalPriority
);
if
(
wait
)
spin_lock_irqsave
(
&
fibptr
->
event_lock
,
flags
);
if
(
aac_queue_get
(
dev
,
&
index
,
qid
,
hw_fib
,
1
,
fibptr
,
&
nointr
)
<
0
)
return
-
EWOULDBLOCK
;
dprintk
((
KERN_DEBUG
"fib_send: inserting a queue entry at index %d.
\n
"
,
index
));
dprintk
((
KERN_DEBUG
"Fib contents:.
\n
"
));
dprintk
((
KERN_DEBUG
" Command = %d.
\n
"
,
hw_fib
->
header
.
Command
));
dprintk
((
KERN_DEBUG
" XferState = %x.
\n
"
,
hw_fib
->
header
.
XferState
));
dprintk
((
KERN_DEBUG
" hw_fib va being sent=%p
\n
"
,
fibptr
->
hw_fib
));
dprintk
((
KERN_DEBUG
" hw_fib pa being sent=%lx
\n
"
,(
ulong
)
fibptr
->
hw_fib_pa
));
dprintk
((
KERN_DEBUG
" fib being sent=%p
\n
"
,
fibptr
));
/*
* Fill in the Callback and CallbackContext if we are not
* going to wait.
...
...
@@ -525,22 +450,67 @@ int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority
fibptr
->
callback
=
callback
;
fibptr
->
callback_data
=
callback_data
;
}
FIB_COUNTER_INCREMENT
(
aac_config
.
FibsSent
);
list_add_tail
(
&
fibptr
->
queue
,
&
q
->
pendingq
);
q
->
numpending
++
;
fibptr
->
done
=
0
;
fibptr
->
flags
=
0
;
if
(
aac_insert_entry
(
dev
,
index
,
qid
,
(
nointr
&
aac_config
.
irq_mod
))
<
0
)
return
-
EWOULDBLOCK
;
FIB_COUNTER_INCREMENT
(
aac_config
.
FibsSent
);
dprintk
((
KERN_DEBUG
"fib_send: inserting a queue entry at index %d.
\n
"
,
index
));
dprintk
((
KERN_DEBUG
"Fib contents:.
\n
"
));
dprintk
((
KERN_DEBUG
" Command = %d.
\n
"
,
hw_fib
->
header
.
Command
));
dprintk
((
KERN_DEBUG
" XferState = %x.
\n
"
,
hw_fib
->
header
.
XferState
));
dprintk
((
KERN_DEBUG
" hw_fib va being sent=%p
\n
"
,
fibptr
->
hw_fib
));
dprintk
((
KERN_DEBUG
" hw_fib pa being sent=%lx
\n
"
,(
ulong
)
fibptr
->
hw_fib_pa
));
dprintk
((
KERN_DEBUG
" fib being sent=%p
\n
"
,
fibptr
));
q
=
&
dev
->
queues
->
queue
[
AdapNormCmdQueue
];
if
(
wait
)
spin_lock_irqsave
(
&
fibptr
->
event_lock
,
flags
);
spin_lock_irqsave
(
q
->
lock
,
qflags
);
aac_queue_get
(
dev
,
&
index
,
AdapNormCmdQueue
,
hw_fib
,
1
,
fibptr
,
&
nointr
);
list_add_tail
(
&
fibptr
->
queue
,
&
q
->
pendingq
);
q
->
numpending
++
;
*
(
q
->
headers
.
producer
)
=
cpu_to_le32
(
index
+
1
);
spin_unlock_irqrestore
(
q
->
lock
,
qflags
);
if
(
!
(
nointr
&
aac_config
.
irq_mod
))
aac_adapter_notify
(
dev
,
AdapNormCmdQueue
);
/*
* If the caller wanted us to wait for response wait now.
*/
if
(
wait
)
{
spin_unlock_irqrestore
(
&
fibptr
->
event_lock
,
flags
);
down
(
&
fibptr
->
event_wait
);
/* Only set for first known interruptable command */
if
(
wait
<
0
)
{
/*
* *VERY* Dangerous to time out a command, the
* assumption is made that we have no hope of
* functioning because an interrupt routing or other
* hardware failure has occurred.
*/
unsigned
long
count
=
36000000L
;
/* 3 minutes */
unsigned
long
qflags
;
while
(
down_trylock
(
&
fibptr
->
event_wait
))
{
if
(
--
count
==
0
)
{
spin_lock_irqsave
(
q
->
lock
,
qflags
);
q
->
numpending
--
;
list_del
(
&
fibptr
->
queue
);
spin_unlock_irqrestore
(
q
->
lock
,
qflags
);
if
(
wait
==
-
1
)
{
printk
(
KERN_ERR
"aacraid: fib_send: first asynchronous command timed out.
\n
"
"Usually a result of a PCI interrupt routing problem;
\n
"
"update mother board BIOS or consider utilizing one of
\n
"
"the SAFE mode kernel options (acpi, apic etc)
\n
"
);
}
return
-
ETIMEDOUT
;
}
udelay
(
5
);
}
}
else
down
(
&
fibptr
->
event_wait
);
if
(
fibptr
->
done
==
0
)
BUG
();
...
...
@@ -622,15 +592,9 @@ void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
case
HostNormCmdQueue
:
notify
=
HostNormCmdNotFull
;
break
;
case
HostHighCmdQueue
:
notify
=
HostHighCmdNotFull
;
break
;
case
HostNormRespQueue
:
notify
=
HostNormRespNotFull
;
break
;
case
HostHighRespQueue
:
notify
=
HostHighRespNotFull
;
break
;
default:
BUG
();
return
;
...
...
@@ -652,9 +616,13 @@ int fib_adapter_complete(struct fib * fibptr, unsigned short size)
{
struct
hw_fib
*
hw_fib
=
fibptr
->
hw_fib
;
struct
aac_dev
*
dev
=
fibptr
->
dev
;
struct
aac_queue
*
q
;
unsigned
long
nointr
=
0
;
if
(
hw_fib
->
header
.
XferState
==
0
)
unsigned
long
qflags
;
if
(
hw_fib
->
header
.
XferState
==
0
)
{
return
0
;
}
/*
* If we plan to do anything check the structure type first.
*/
...
...
@@ -669,37 +637,21 @@ int fib_adapter_complete(struct fib * fibptr, unsigned short size)
* send the completed cdb to the adapter.
*/
if
(
hw_fib
->
header
.
XferState
&
cpu_to_le32
(
SentFromAdapter
))
{
u32
index
;
hw_fib
->
header
.
XferState
|=
cpu_to_le32
(
HostProcessed
);
if
(
hw_fib
->
header
.
XferState
&
cpu_to_le32
(
HighPriority
))
{
u32
index
;
if
(
size
)
{
size
+=
sizeof
(
struct
aac_fibhdr
);
if
(
size
>
le16_to_cpu
(
hw_fib
->
header
.
SenderSize
))
return
-
EMSGSIZE
;
hw_fib
->
header
.
Size
=
cpu_to_le16
(
size
);
}
if
(
aac_queue_get
(
dev
,
&
index
,
AdapHighRespQueue
,
hw_fib
,
1
,
NULL
,
&
nointr
)
<
0
)
{
return
-
EWOULDBLOCK
;
}
if
(
aac_insert_entry
(
dev
,
index
,
AdapHighRespQueue
,
(
nointr
&
(
int
)
aac_config
.
irq_mod
))
!=
0
)
{
}
}
else
if
(
hw_fib
->
header
.
XferState
&
cpu_to_le32
(
NormalPriority
))
{
u32
index
;
if
(
size
)
{
size
+=
sizeof
(
struct
aac_fibhdr
);
if
(
size
>
le16_to_cpu
(
hw_fib
->
header
.
SenderSize
))
return
-
EMSGSIZE
;
hw_fib
->
header
.
Size
=
cpu_to_le16
(
size
);
}
if
(
aac_queue_get
(
dev
,
&
index
,
AdapNormRespQueue
,
hw_fib
,
1
,
NULL
,
&
nointr
)
<
0
)
return
-
EWOULDBLOCK
;
if
(
aac_insert_entry
(
dev
,
index
,
AdapNormRespQueue
,
(
nointr
&
(
int
)
aac_config
.
irq_mod
))
!=
0
)
{
}
if
(
size
)
{
size
+=
sizeof
(
struct
aac_fibhdr
);
if
(
size
>
le16_to_cpu
(
hw_fib
->
header
.
SenderSize
))
return
-
EMSGSIZE
;
hw_fib
->
header
.
Size
=
cpu_to_le16
(
size
);
}
q
=
&
dev
->
queues
->
queue
[
AdapNormRespQueue
];
spin_lock_irqsave
(
q
->
lock
,
qflags
);
aac_queue_get
(
dev
,
&
index
,
AdapNormRespQueue
,
hw_fib
,
1
,
NULL
,
&
nointr
);
*
(
q
->
headers
.
producer
)
=
cpu_to_le32
(
index
+
1
);
spin_unlock_irqrestore
(
q
->
lock
,
qflags
);
if
(
!
(
nointr
&
(
int
)
aac_config
.
irq_mod
))
aac_adapter_notify
(
dev
,
AdapNormRespQueue
);
}
else
{
...
...
@@ -791,6 +743,268 @@ void aac_printf(struct aac_dev *dev, u32 val)
memset
(
cp
,
0
,
256
);
}
/**
* aac_handle_aif - Handle a message from the firmware
* @dev: Which adapter this fib is from
* @fibptr: Pointer to fibptr from adapter
*
* This routine handles a driver notify fib from the adapter and
* dispatches it to the appropriate routine for handling.
*/
static
void
aac_handle_aif
(
struct
aac_dev
*
dev
,
struct
fib
*
fibptr
)
{
struct
hw_fib
*
hw_fib
=
fibptr
->
hw_fib
;
struct
aac_aifcmd
*
aifcmd
=
(
struct
aac_aifcmd
*
)
hw_fib
->
data
;
int
busy
;
u32
container
;
struct
scsi_device
*
device
;
enum
{
NOTHING
,
DELETE
,
ADD
,
CHANGE
}
device_config_needed
;
/* Sniff for container changes */
if
(
!
dev
)
return
;
container
=
(
u32
)
-
1
;
/*
* We have set this up to try and minimize the number of
* re-configures that take place. As a result of this when
* certain AIF's come in we will set a flag waiting for another
* type of AIF before setting the re-config flag.
*/
switch
(
le32_to_cpu
(
aifcmd
->
command
))
{
case
AifCmdDriverNotify
:
switch
(
le32_to_cpu
(((
u32
*
)
aifcmd
->
data
)[
0
]))
{
/*
* Morph or Expand complete
*/
case
AifDenMorphComplete
:
case
AifDenVolumeExtendComplete
:
container
=
le32_to_cpu
(((
u32
*
)
aifcmd
->
data
)[
1
]);
if
(
container
>=
dev
->
maximum_num_containers
)
break
;
/*
* Find the Scsi_Device associated with the SCSI
* address. Make sure we have the right array, and if
* so set the flag to initiate a new re-config once we
* see an AifEnConfigChange AIF come through.
*/
if
((
dev
!=
NULL
)
&&
(
dev
->
scsi_host_ptr
!=
NULL
))
{
device
=
scsi_device_lookup
(
dev
->
scsi_host_ptr
,
CONTAINER_TO_CHANNEL
(
container
),
CONTAINER_TO_ID
(
container
),
CONTAINER_TO_LUN
(
container
));
if
(
device
)
{
dev
->
fsa_dev
[
container
].
config_needed
=
CHANGE
;
dev
->
fsa_dev
[
container
].
config_waiting_on
=
AifEnConfigChange
;
scsi_device_put
(
device
);
}
}
}
/*
* If we are waiting on something and this happens to be
* that thing then set the re-configure flag.
*/
if
(
container
!=
(
u32
)
-
1
)
{
if
(
container
>=
dev
->
maximum_num_containers
)
break
;
if
(
dev
->
fsa_dev
[
container
].
config_waiting_on
==
le32_to_cpu
(
*
(
u32
*
)
aifcmd
->
data
))
dev
->
fsa_dev
[
container
].
config_waiting_on
=
0
;
}
else
for
(
container
=
0
;
container
<
dev
->
maximum_num_containers
;
++
container
)
{
if
(
dev
->
fsa_dev
[
container
].
config_waiting_on
==
le32_to_cpu
(
*
(
u32
*
)
aifcmd
->
data
))
dev
->
fsa_dev
[
container
].
config_waiting_on
=
0
;
}
break
;
case
AifCmdEventNotify
:
switch
(
le32_to_cpu
(((
u32
*
)
aifcmd
->
data
)[
0
]))
{
/*
* Add an Array.
*/
case
AifEnAddContainer
:
container
=
le32_to_cpu
(((
u32
*
)
aifcmd
->
data
)[
1
]);
if
(
container
>=
dev
->
maximum_num_containers
)
break
;
dev
->
fsa_dev
[
container
].
config_needed
=
ADD
;
dev
->
fsa_dev
[
container
].
config_waiting_on
=
AifEnConfigChange
;
break
;
/*
* Delete an Array.
*/
case
AifEnDeleteContainer
:
container
=
le32_to_cpu
(((
u32
*
)
aifcmd
->
data
)[
1
]);
if
(
container
>=
dev
->
maximum_num_containers
)
break
;
dev
->
fsa_dev
[
container
].
config_needed
=
DELETE
;
dev
->
fsa_dev
[
container
].
config_waiting_on
=
AifEnConfigChange
;
break
;
/*
* Container change detected. If we currently are not
* waiting on something else, setup to wait on a Config Change.
*/
case
AifEnContainerChange
:
container
=
le32_to_cpu
(((
u32
*
)
aifcmd
->
data
)[
1
]);
if
(
container
>=
dev
->
maximum_num_containers
)
break
;
if
(
dev
->
fsa_dev
[
container
].
config_waiting_on
)
break
;
dev
->
fsa_dev
[
container
].
config_needed
=
CHANGE
;
dev
->
fsa_dev
[
container
].
config_waiting_on
=
AifEnConfigChange
;
break
;
case
AifEnConfigChange
:
break
;
}
/*
* If we are waiting on something and this happens to be
* that thing then set the re-configure flag.
*/
if
(
container
!=
(
u32
)
-
1
)
{
if
(
container
>=
dev
->
maximum_num_containers
)
break
;
if
(
dev
->
fsa_dev
[
container
].
config_waiting_on
==
le32_to_cpu
(
*
(
u32
*
)
aifcmd
->
data
))
dev
->
fsa_dev
[
container
].
config_waiting_on
=
0
;
}
else
for
(
container
=
0
;
container
<
dev
->
maximum_num_containers
;
++
container
)
{
if
(
dev
->
fsa_dev
[
container
].
config_waiting_on
==
le32_to_cpu
(
*
(
u32
*
)
aifcmd
->
data
))
dev
->
fsa_dev
[
container
].
config_waiting_on
=
0
;
}
break
;
case
AifCmdJobProgress
:
/*
* These are job progress AIF's. When a Clear is being
* done on a container it is initially created then hidden from
* the OS. When the clear completes we don't get a config
* change so we monitor the job status complete on a clear then
* wait for a container change.
*/
if
((((
u32
*
)
aifcmd
->
data
)[
1
]
==
cpu_to_le32
(
AifJobCtrZero
))
&&
((((
u32
*
)
aifcmd
->
data
)[
6
]
==
((
u32
*
)
aifcmd
->
data
)[
5
])
||
(((
u32
*
)
aifcmd
->
data
)[
4
]
==
cpu_to_le32
(
AifJobStsSuccess
))))
{
for
(
container
=
0
;
container
<
dev
->
maximum_num_containers
;
++
container
)
{
/*
* Stomp on all config sequencing for all
* containers?
*/
dev
->
fsa_dev
[
container
].
config_waiting_on
=
AifEnContainerChange
;
dev
->
fsa_dev
[
container
].
config_needed
=
ADD
;
}
}
if
((((
u32
*
)
aifcmd
->
data
)[
1
]
==
cpu_to_le32
(
AifJobCtrZero
))
&&
(((
u32
*
)
aifcmd
->
data
)[
6
]
==
0
)
&&
(((
u32
*
)
aifcmd
->
data
)[
4
]
==
cpu_to_le32
(
AifJobStsRunning
)))
{
for
(
container
=
0
;
container
<
dev
->
maximum_num_containers
;
++
container
)
{
/*
* Stomp on all config sequencing for all
* containers?
*/
dev
->
fsa_dev
[
container
].
config_waiting_on
=
AifEnContainerChange
;
dev
->
fsa_dev
[
container
].
config_needed
=
DELETE
;
}
}
break
;
}
device_config_needed
=
NOTHING
;
for
(
container
=
0
;
container
<
dev
->
maximum_num_containers
;
++
container
)
{
if
((
dev
->
fsa_dev
[
container
].
config_waiting_on
==
0
)
&&
(
dev
->
fsa_dev
[
container
].
config_needed
!=
NOTHING
))
{
device_config_needed
=
dev
->
fsa_dev
[
container
].
config_needed
;
dev
->
fsa_dev
[
container
].
config_needed
=
NOTHING
;
break
;
}
}
if
(
device_config_needed
==
NOTHING
)
return
;
/*
* If we decided that a re-configuration needs to be done,
* schedule it here on the way out the door, please close the door
* behind you.
*/
busy
=
0
;
/*
* Find the Scsi_Device associated with the SCSI address,
* and mark it as changed, invalidating the cache. This deals
* with changes to existing device IDs.
*/
if
(
!
dev
||
!
dev
->
scsi_host_ptr
)
return
;
/*
* force reload of disk info via probe_container
*/
if
((
device_config_needed
==
CHANGE
)
&&
(
dev
->
fsa_dev
[
container
].
valid
==
1
))
dev
->
fsa_dev
[
container
].
valid
=
2
;
if
((
device_config_needed
==
CHANGE
)
||
(
device_config_needed
==
ADD
))
probe_container
(
dev
,
container
);
device
=
scsi_device_lookup
(
dev
->
scsi_host_ptr
,
CONTAINER_TO_CHANNEL
(
container
),
CONTAINER_TO_ID
(
container
),
CONTAINER_TO_LUN
(
container
));
if
(
device
)
{
switch
(
device_config_needed
)
{
case
DELETE
:
scsi_remove_device
(
device
);
break
;
case
CHANGE
:
if
(
!
dev
->
fsa_dev
[
container
].
valid
)
{
scsi_remove_device
(
device
);
break
;
}
scsi_rescan_device
(
&
device
->
sdev_gendev
);
default:
break
;
}
scsi_device_put
(
device
);
}
if
(
device_config_needed
==
ADD
)
{
scsi_add_device
(
dev
->
scsi_host_ptr
,
CONTAINER_TO_CHANNEL
(
container
),
CONTAINER_TO_ID
(
container
),
CONTAINER_TO_LUN
(
container
));
}
}
/**
* aac_command_thread - command processing thread
* @dev: Adapter to monitor
...
...
@@ -805,7 +1019,6 @@ int aac_command_thread(struct aac_dev * dev)
{
struct
hw_fib
*
hw_fib
,
*
hw_newfib
;
struct
fib
*
fib
,
*
newfib
;
struct
aac_queue_block
*
queues
=
dev
->
queues
;
struct
aac_fib_context
*
fibctx
;
unsigned
long
flags
;
DECLARE_WAITQUEUE
(
wait
,
current
);
...
...
@@ -825,21 +1038,22 @@ int aac_command_thread(struct aac_dev * dev)
* Let the DPC know it has a place to send the AIF's to.
*/
dev
->
aif_thread
=
1
;
add_wait_queue
(
&
queues
->
queue
[
HostNormCmdQueue
].
cmdready
,
&
wait
);
add_wait_queue
(
&
dev
->
queues
->
queue
[
HostNormCmdQueue
].
cmdready
,
&
wait
);
set_current_state
(
TASK_INTERRUPTIBLE
);
dprintk
((
KERN_INFO
"aac_command_thread start
\n
"
));
while
(
1
)
{
spin_lock_irqsave
(
queues
->
queue
[
HostNormCmdQueue
].
lock
,
flags
);
while
(
!
list_empty
(
&
(
queues
->
queue
[
HostNormCmdQueue
].
cmdq
)))
{
spin_lock_irqsave
(
dev
->
queues
->
queue
[
HostNormCmdQueue
].
lock
,
flags
);
while
(
!
list_empty
(
&
(
dev
->
queues
->
queue
[
HostNormCmdQueue
].
cmdq
)))
{
struct
list_head
*
entry
;
struct
aac_aifcmd
*
aifcmd
;
set_current_state
(
TASK_RUNNING
);
entry
=
queues
->
queue
[
HostNormCmdQueue
].
cmdq
.
next
;
entry
=
dev
->
queues
->
queue
[
HostNormCmdQueue
].
cmdq
.
next
;
list_del
(
entry
);
spin_unlock_irqrestore
(
queues
->
queue
[
drivers/scsi/aacraid/commsup.c (continued)

-				HostNormCmdQueue].lock, flags);
+			spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
 			fib = list_entry(entry, struct fib, fiblink);
 			/*
 			 *	We will process the FIB here or pass it to a
@@ -860,6 +1074,7 @@ int aac_command_thread(struct aac_dev * dev)
 			aifcmd = (struct aac_aifcmd *) hw_fib->data;
 			if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
 				/* Handle Driver Notify Events */
+				aac_handle_aif(dev, fib);
 				*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
 				fib_adapter_complete(fib, (u16)sizeof(u32));
 			} else {
@@ -869,9 +1084,62 @@ int aac_command_thread(struct aac_dev * dev)
 				u32 time_now, time_last;
 				unsigned long flagv;
+				unsigned num;
+				struct hw_fib ** hw_fib_pool, ** hw_fib_p;
+				struct fib ** fib_pool, ** fib_p;

 				/* Sniff events */
 				if ((aifcmd->command == cpu_to_le32(AifCmdEventNotify))
 				 || (aifcmd->command == cpu_to_le32(AifCmdJobProgress))) {
 					aac_handle_aif(dev, fib);
 				}
 				time_now = jiffies/HZ;

+				/*
+				 * Warning: no sleep allowed while
+				 * holding spinlock. We take the estimate
+				 * and pre-allocate a set of fibs outside the
+				 * lock.
+				 */
+				num = le32_to_cpu(dev->init->AdapterFibsSize)
+						/ sizeof(struct hw_fib); /* some extra */
+				spin_lock_irqsave(&dev->fib_lock, flagv);
+				entry = dev->fib_list.next;
+				while (entry != &dev->fib_list) {
+					entry = entry->next;
+					++num;
+				}
+				spin_unlock_irqrestore(&dev->fib_lock, flagv);
+				hw_fib_pool = NULL;
+				fib_pool = NULL;
+				if (num
+				 && ((hw_fib_pool = kmalloc(sizeof(struct hw_fib *) * num, GFP_KERNEL)))
+				 && ((fib_pool = kmalloc(sizeof(struct fib *) * num, GFP_KERNEL)))) {
+					hw_fib_p = hw_fib_pool;
+					fib_p = fib_pool;
+					while (hw_fib_p < &hw_fib_pool[num]) {
+						if (!(*(hw_fib_p++) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL))) {
+							--hw_fib_p;
+							break;
+						}
+						if (!(*(fib_p++) = kmalloc(sizeof(struct fib), GFP_KERNEL))) {
+							kfree(*(--hw_fib_p));
+							break;
+						}
+					}
+					if ((num = hw_fib_p - hw_fib_pool) == 0) {
+						kfree(fib_pool);
+						fib_pool = NULL;
+						kfree(hw_fib_pool);
+						hw_fib_pool = NULL;
+					}
+				} else if (hw_fib_pool) {
+					kfree(hw_fib_pool);
+					hw_fib_pool = NULL;
+				}
 				spin_lock_irqsave(&dev->fib_lock, flagv);
 				entry = dev->fib_list.next;
 				/*
@@ -880,6 +1148,8 @@ int aac_command_thread(struct aac_dev * dev)
 				 * fib, and then set the event to wake up the
 				 * thread that is waiting for it.
 				 */
+				hw_fib_p = hw_fib_pool;
+				fib_p = fib_pool;
 				while (entry != &dev->fib_list) {
 					/*
 					 * Extract the fibctx
@@ -912,9 +1182,11 @@ int aac_command_thread(struct aac_dev * dev)
 					 * Warning: no sleep allowed while
 					 * holding spinlock
 					 */
-					hw_newfib = kmalloc(sizeof(struct hw_fib), GFP_ATOMIC);
-					newfib = kmalloc(sizeof(struct fib), GFP_ATOMIC);
-					if (newfib && hw_newfib) {
+					if (hw_fib_p < &hw_fib_pool[num]) {
+						hw_newfib = *hw_fib_p;
+						*(hw_fib_p++) = NULL;
+						newfib = *fib_p;
+						*(fib_p++) = NULL;
 						/*
 						 * Make the copy of the FIB
 						 */
@@ -929,15 +1201,11 @@ int aac_command_thread(struct aac_dev * dev)
 						fibctx->count++;
 						/*
 						 * Set the event to wake up the
-						 * thread that will waiting.
+						 * thread that is waiting.
 						 */
 						up(&fibctx->wait_sem);
 					} else {
 						printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
-						if (newfib)
-							kfree(newfib);
-						if (hw_newfib)
-							kfree(hw_newfib);
 					}
 					entry = entry->next;
 				}
@@ -947,21 +1215,38 @@ int aac_command_thread(struct aac_dev * dev)
 				*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
 				fib_adapter_complete(fib, sizeof(u32));
 				spin_unlock_irqrestore(&dev->fib_lock, flagv);
+				/* Free up the remaining resources */
+				hw_fib_p = hw_fib_pool;
+				fib_p = fib_pool;
+				while (hw_fib_p < &hw_fib_pool[num]) {
+					if (*hw_fib_p)
+						kfree(*hw_fib_p);
+					if (*fib_p)
+						kfree(*fib_p);
+					++fib_p;
+					++hw_fib_p;
+				}
+				if (hw_fib_pool)
+					kfree(hw_fib_pool);
+				if (fib_pool)
+					kfree(fib_pool);
 			}
-			spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags);
 			kfree(fib);
+			spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
 		}
 		/*
 		 *	There are no more AIF's
 		 */
-		spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags);
+		spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
 		schedule();

 		if (signal_pending(current))
 			break;
 		set_current_state(TASK_INTERRUPTIBLE);
 	}
-	remove_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait);
+	if (dev->queues)
+		remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
 	dev->aif_thread = 0;
 	complete_and_exit(&dev->aif_completion, 0);
 	return 0;
 }
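The change above moves fib allocation out of the spinlocked section: the thread estimates how many fibs it may need, allocates the pool with sleeping GFP_KERNEL calls while no lock is held, and then only hands entries out of the pool once the lock is re-taken. A minimal stand-alone sketch of that pattern, with purely hypothetical names (only the pattern itself is taken from the code above):

```c
#include <linux/slab.h>

/* Hypothetical item type; only the allocation pattern mirrors the code above. */
struct item { int payload; };

/* Allocate up to *num items with no lock held; *num is trimmed to what we got. */
static struct item **prealloc_items(unsigned int *num)
{
	struct item **pool, **p;

	pool = kmalloc(sizeof(struct item *) * *num, GFP_KERNEL);
	if (!pool)
		return NULL;

	for (p = pool; p < &pool[*num]; p++)
		if (!(*p = kmalloc(sizeof(struct item), GFP_KERNEL)))
			break;

	*num = p - pool;	/* consumers later take entries under the lock */
	return pool;
}
```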
drivers/scsi/aacraid/linit.c

@@ -748,7 +748,8 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
 		unique_id++;
 	}

-	if (pci_enable_device(pdev))
+	error = pci_enable_device(pdev);
+	if (error)
 		goto out;

 	if (pci_set_dma_mask(pdev, 0xFFFFFFFFULL) ||
@@ -772,6 +773,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
 	shost->irq = pdev->irq;
 	shost->base = pci_resource_start(pdev, 0);
 	shost->unique_id = unique_id;
+	shost->max_cmd_len = 16;

 	aac = (struct aac_dev *)shost->hostdata;
 	aac->scsi_host_ptr = shost;
@@ -799,7 +801,9 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
 		goto out_free_fibs;

 	aac->maximum_num_channels = aac_drivers[index].channels;
-	aac_get_adapter_info(aac);
+	error = aac_get_adapter_info(aac);
+	if (error < 0)
+		goto out_deinit;

 	/*
 	 * Lets override negotiations and drop the maximum SG limit to 34
@@ -927,8 +931,8 @@ static int __init aac_init(void)
 	printk(KERN_INFO "Adaptec %s driver (%s)\n",
	       AAC_DRIVERNAME, aac_driver_version);

-	error = pci_module_init(&aac_pci_driver);
-	if (error)
+	error = pci_register_driver(&aac_pci_driver);
+	if (error < 0)
 		return error;

 	aac_cfg_major = register_chrdev(0, "aac", &aac_cfg_fops);
drivers/scsi/aic7xxx/aic7770_osm.c

@@ -112,6 +112,9 @@ aic7770_remove(struct device *dev)
 	struct ahc_softc *ahc = dev_get_drvdata(dev);
 	u_long s;

+	if (ahc->platform_data && ahc->platform_data->host)
+		scsi_remove_host(ahc->platform_data->host);
 	ahc_lock(ahc, &s);
 	ahc_intr_enable(ahc, FALSE);
 	ahc_unlock(ahc, &s);
drivers/scsi/aic7xxx/aic79xx_osm.c

@@ -1192,11 +1192,6 @@ ahd_platform_free(struct ahd_softc *ahd)
 	int i, j;

 	if (ahd->platform_data != NULL) {
-		if (ahd->platform_data->host != NULL) {
-			scsi_remove_host(ahd->platform_data->host);
-			scsi_host_put(ahd->platform_data->host);
-		}
-
 		/* destroy all of the device and target objects */
 		for (i = 0; i < AHD_NUM_TARGETS; i++) {
 			starget = ahd->platform_data->starget[i];
@@ -1226,6 +1221,9 @@ ahd_platform_free(struct ahd_softc *ahd)
 			release_mem_region(ahd->platform_data->mem_busaddr,
 					   0x1000);
 		}
+		if (ahd->platform_data->host)
+			scsi_host_put(ahd->platform_data->host);
 		free(ahd->platform_data, M_DEVBUF);
 	}
 }
drivers/scsi/aic7xxx/aic79xx_osm_pci.c

@@ -95,6 +95,9 @@ ahd_linux_pci_dev_remove(struct pci_dev *pdev)
 	struct ahd_softc *ahd = pci_get_drvdata(pdev);
 	u_long s;

+	if (ahd->platform_data && ahd->platform_data->host)
+		scsi_remove_host(ahd->platform_data->host);
 	ahd_lock(ahd, &s);
 	ahd_intr_enable(ahd, FALSE);
 	ahd_unlock(ahd, &s);
drivers/scsi/aic7xxx/aic7xxx_osm.c

@@ -1209,11 +1209,6 @@ ahc_platform_free(struct ahc_softc *ahc)
 	int i, j;

 	if (ahc->platform_data != NULL) {
-		if (ahc->platform_data->host != NULL) {
-			scsi_remove_host(ahc->platform_data->host);
-			scsi_host_put(ahc->platform_data->host);
-		}
-
 		/* destroy all of the device and target objects */
 		for (i = 0; i < AHC_NUM_TARGETS; i++) {
 			starget = ahc->platform_data->starget[i];
@@ -1242,6 +1237,9 @@ ahc_platform_free(struct ahc_softc *ahc)
 					   0x1000);
 		}
+		if (ahc->platform_data->host)
+			scsi_host_put(ahc->platform_data->host);
 		free(ahc->platform_data, M_DEVBUF);
 	}
 }
drivers/scsi/aic7xxx/aic7xxx_osm_pci.c

@@ -143,6 +143,9 @@ ahc_linux_pci_dev_remove(struct pci_dev *pdev)
 	struct ahc_softc *ahc = pci_get_drvdata(pdev);
 	u_long s;

+	if (ahc->platform_data && ahc->platform_data->host)
+		scsi_remove_host(ahc->platform_data->host);
 	ahc_lock(ahc, &s);
 	ahc_intr_enable(ahc, FALSE);
 	ahc_unlock(ahc, &s);
drivers/scsi/hosts.c

@@ -176,6 +176,7 @@ void scsi_remove_host(struct Scsi_Host *shost)
 	transport_unregister_device(&shost->shost_gendev);
 	class_device_unregister(&shost->shost_classdev);
 	device_del(&shost->shost_gendev);
+	scsi_proc_hostdir_rm(shost->hostt);
 }
 EXPORT_SYMBOL(scsi_remove_host);
@@ -262,7 +263,6 @@ static void scsi_host_dev_release(struct device *dev)
 	if (shost->work_q)
 		destroy_workqueue(shost->work_q);
-	scsi_proc_hostdir_rm(shost->hostt);
 	scsi_destroy_command_freelist(shost);
 	kfree(shost->shost_data);
drivers/scsi/lpfc/lpfc_attr.c

@@ -973,10 +973,10 @@ lpfc_get_host_fabric_name (struct Scsi_Host *shost)
 	if ((phba->fc_flag & FC_FABRIC) ||
 	    ((phba->fc_topology == TOPOLOGY_LOOP) &&
 	     (phba->fc_flag & FC_PUBLIC_LOOP)))
-		node_name = wwn_to_u64(phba->fc_fabparam.nodeName.wwn);
+		node_name = wwn_to_u64(phba->fc_fabparam.nodeName.u.wwn);
 	else
 		/* fabric is local port if there is no F/FL_Port */
-		node_name = wwn_to_u64(phba->fc_nodename.wwn);
+		node_name = wwn_to_u64(phba->fc_nodename.u.wwn);

 	spin_unlock_irq(shost->host_lock);
@@ -1110,7 +1110,7 @@ lpfc_get_starget_node_name(struct scsi_target *starget)
 	/* Search the mapped list for this target ID */
 	list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
 		if (starget->id == ndlp->nlp_sid) {
-			node_name = wwn_to_u64(ndlp->nlp_nodename.wwn);
+			node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
 			break;
 		}
 	}
@@ -1131,7 +1131,7 @@ lpfc_get_starget_port_name(struct scsi_target *starget)
 	/* Search the mapped list for this target ID */
 	list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
 		if (starget->id == ndlp->nlp_sid) {
-			port_name = wwn_to_u64(ndlp->nlp_portname.wwn);
+			port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
 			break;
 		}
 	}
drivers/scsi/lpfc/lpfc_hbadisc.c

@@ -1019,8 +1019,8 @@ lpfc_register_remote_port(struct lpfc_hba * phba,
 	struct fc_rport_identifiers rport_ids;

 	/* Remote port has reappeared. Re-register w/ FC transport */
-	rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.wwn);
-	rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.wwn);
+	rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
+	rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
 	rport_ids.port_id = ndlp->nlp_DID;
 	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
 	if (ndlp->nlp_type & NLP_FCP_TARGET)
drivers/scsi/lpfc/lpfc_hw.h

@@ -280,9 +280,9 @@ struct lpfc_name {
 #define NAME_CCITT_GR_TYPE	0xE
 	uint8_t IEEEextLsb;	/* FC Word 0, bit 16:23, IEEE extended Lsb */

 	uint8_t IEEE[6];	/* FC IEEE address */
-	};
+	} s;
 	uint8_t wwn[8];
-	};
+	} u;
 };

 struct csp {
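Naming the previously unnamed union and struct members (u and s) is what forces the .u.wwn and .u.s.IEEE spellings seen in the other lpfc files of this commit. A reduced sketch of the resulting access pattern; only the named-member shape is taken from the header above, the helper and its name are illustrative:

```c
#include <linux/types.h>

/* Reduced copy of the shape above: a named struct nested in a named union. */
struct example_name {
	union {
		struct {
			uint8_t IEEEextLsb;
			uint8_t IEEE[6];
		} s;
		uint8_t wwn[8];
	} u;
};

static uint8_t first_ieee_byte(const struct example_name *n)
{
	return n->u.s.IEEE[0];	/* was n->IEEE[0] with anonymous members */
}
```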
drivers/scsi/lpfc/lpfc_init.c

@@ -285,7 +285,7 @@ lpfc_config_port_post(struct lpfc_hba * phba)
 	if (phba->SerialNumber[0] == 0) {
 		uint8_t *outptr;

-		outptr = (uint8_t *) &phba->fc_nodename.IEEE[0];
+		outptr = &phba->fc_nodename.u.s.IEEE[0];
 		for (i = 0; i < 12; i++) {
 			status = *outptr++;
 			j = ((status & 0xf0) >> 4);
@@ -1523,8 +1523,8 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
 	 * Must done after lpfc_sli_hba_setup()
 	 */

-	fc_host_node_name(host) = wwn_to_u64(phba->fc_nodename.wwn);
-	fc_host_port_name(host) = wwn_to_u64(phba->fc_portname.wwn);
+	fc_host_node_name(host) = wwn_to_u64(phba->fc_nodename.u.wwn);
+	fc_host_port_name(host) = wwn_to_u64(phba->fc_portname.u.wwn);
 	fc_host_supported_classes(host) = FC_COS_CLASS3;
 	memset(fc_host_supported_fc4s(host), 0,
drivers/scsi/megaraid.c

@@ -621,8 +621,6 @@ mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy)
 	if (islogical) {
 		switch (cmd->cmnd[0]) {
 		case TEST_UNIT_READY:
-			memset(cmd->request_buffer, 0,
-					cmd->request_bufflen);
 #if MEGA_HAVE_CLUSTERING
 			/*
 			 * Do we support clustering and is the support enabled
@@ -652,11 +650,28 @@ mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy)
 			return NULL;
 #endif

-		case MODE_SENSE:
+		case MODE_SENSE: {
+			char *buf;
+
+			if (cmd->use_sg) {
+				struct scatterlist *sg;
+
+				sg = (struct scatterlist *)cmd->request_buffer;
+				buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+			} else
+				buf = cmd->request_buffer;
 			memset(cmd->request_buffer, 0, cmd->cmnd[4]);
+			if (cmd->use_sg) {
+				struct scatterlist *sg;
+
+				sg = (struct scatterlist *)cmd->request_buffer;
+				kunmap_atomic(buf - sg->offset, KM_IRQ0);
+			}
 			cmd->result = (DID_OK << 16);
 			cmd->scsi_done(cmd);
 			return NULL;
+		}

 		case READ_CAPACITY:
 		case INQUIRY:
@@ -1685,14 +1700,23 @@ mega_rundoneq (adapter_t *adapter)
 static void
 mega_free_scb(adapter_t *adapter, scb_t *scb)
 {
+	unsigned long length;
+
 	switch (scb->dma_type) {

 	case MEGA_DMA_TYPE_NONE:
 		break;

 	case MEGA_BULK_DATA:
+		if (scb->cmd->use_sg == 0)
+			length = scb->cmd->request_bufflen;
+		else {
+			struct scatterlist *sgl =
+				(struct scatterlist *)scb->cmd->request_buffer;
+			length = sgl->length;
+		}
 		pci_unmap_page(adapter->dev, scb->dma_h_bulkdata,
-			scb->cmd->request_bufflen, scb->dma_direction);
+			length, scb->dma_direction);
 		break;

 	case MEGA_SGLIST:
@@ -1741,6 +1765,7 @@ mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len)
 	struct scatterlist	*sgl;
 	struct page	*page;
 	unsigned long	offset;
+	unsigned int	length;
 	Scsi_Cmnd	*cmd;
 	int	sgcnt;
 	int	idx;
@@ -1748,14 +1773,23 @@ mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len)
 	cmd = scb->cmd;

 	/* Scatter-gather not used */
-	if( !cmd->use_sg ) {
-
-		page = virt_to_page(cmd->request_buffer);
-		offset = offset_in_page(cmd->request_buffer);
+	if( cmd->use_sg == 0 || (cmd->use_sg == 1 &&
+				 !adapter->has_64bit_addr)) {
+
+		if (cmd->use_sg == 0) {
+			page = virt_to_page(cmd->request_buffer);
+			offset = offset_in_page(cmd->request_buffer);
+			length = cmd->request_bufflen;
+		} else {
+			sgl = (struct scatterlist *)cmd->request_buffer;
+			page = sgl->page;
+			offset = sgl->offset;
+			length = sgl->length;
+		}

 		scb->dma_h_bulkdata = pci_map_page(adapter->dev,
-						  page, offset,
-						  cmd->request_bufflen,
+						  page, offset, length,
 						  scb->dma_direction);
 		scb->dma_type = MEGA_BULK_DATA;
@@ -1765,14 +1799,14 @@ mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len)
 		 */
 		if( adapter->has_64bit_addr ) {
 			scb->sgl64[0].address = scb->dma_h_bulkdata;
-			scb->sgl64[0].length = cmd->request_bufflen;
+			scb->sgl64[0].length = length;
 			*buf = (u32)scb->sgl_dma_addr;
-			*len = (u32)cmd->request_bufflen;
+			*len = (u32)length;
 			return 1;
 		}
 		else {
 			*buf = (u32)scb->dma_h_bulkdata;
-			*len = (u32)cmd->request_bufflen;
+			*len = (u32)length;
 		}
 		return 0;
 	}
@@ -1791,27 +1825,23 @@ mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len)
 	if( sgcnt > adapter->sglen ) BUG();

+	*len = 0;
+
 	for( idx = 0; idx < sgcnt; idx++, sgl++ ) {

 		if( adapter->has_64bit_addr ) {
 			scb->sgl64[idx].address = sg_dma_address(sgl);
-			scb->sgl64[idx].length = sg_dma_len(sgl);
+			*len += scb->sgl64[idx].length = sg_dma_len(sgl);
 		}
 		else {
 			scb->sgl[idx].address = sg_dma_address(sgl);
-			scb->sgl[idx].length = sg_dma_len(sgl);
+			*len += scb->sgl[idx].length = sg_dma_len(sgl);
 		}
 	}

 	/* Reset pointer and length fields */
 	*buf = scb->sgl_dma_addr;

-	/*
-	 * For passthru command, dataxferlen must be set, even for commands
-	 * with a sg list
-	 */
-	*len = (u32)cmd->request_bufflen;

 	/* Return count of SG requests */
 	return sgcnt;
 }
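In the last hunk above, the length handed back through *len is now accumulated from the DMA-mapped segment lengths instead of being copied from request_bufflen, and the chained assignment is just a compact form of two statements. A small equivalent sketch with hypothetical names:

```c
#include <linux/types.h>

/* Equivalent, spelled out, of:  *len += scb->sgl64[idx].length = sg_dma_len(sgl); */
static void add_segment(u32 *total_len, u32 *entry_len, u32 dma_len)
{
	*entry_len = dma_len;	/* store the segment length in the SGL entry */
	*total_len += dma_len;	/* and account for it in the overall length  */
}
```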
drivers/scsi/megaraid/Kconfig.megaraid

@@ -76,3 +76,12 @@ config MEGARAID_LEGACY
 	  To compile this driver as a module, choose M here: the
 	  module will be called megaraid
 endif
+
+config MEGARAID_SAS
+	tristate "LSI Logic MegaRAID SAS RAID Module"
+	depends on PCI && SCSI
+	help
+	  Module for LSI Logic's SAS based RAID controllers.
+	  To compile this driver as a module, choose 'm' here.
+	  Module will be called megaraid_sas
drivers/scsi/megaraid/Makefile

 obj-$(CONFIG_MEGARAID_MM)	+= megaraid_mm.o
 obj-$(CONFIG_MEGARAID_MAILBOX)	+= megaraid_mbox.o
+obj-$(CONFIG_MEGARAID_SAS)	+= megaraid_sas.o
drivers/scsi/megaraid/megaraid_sas.c (new file)

/*
 *
 *	Linux MegaRAID driver for SAS based RAID controllers
 *
 * Copyright (c) 2003-2005  LSI Logic Corporation.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 * FILE		: megaraid_sas.c
 * Version	: v00.00.02.00-rc4
 *
 * Authors:
 *	Sreenivas Bagalkote	<Sreenivas.Bagalkote@lsil.com>
 *	Sumant Patro		<Sumant.Patro@lsil.com>
 *
 * List of supported controllers
 *
 * OEM	Product Name	VID	DID	SSVID	SSID
 * ---	------------	---	---	----	----
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/list.h>
#include <linux/version.h>
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/uio.h>
#include <asm/uaccess.h>
#include <linux/compat.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include "megaraid_sas.h"

MODULE_LICENSE("GPL");
MODULE_VERSION(MEGASAS_VERSION);
MODULE_AUTHOR("sreenivas.bagalkote@lsil.com");
MODULE_DESCRIPTION("LSI Logic MegaRAID SAS Driver");

/*
 * PCI ID table for all supported controllers
 */
static struct pci_device_id megasas_pci_table[] = {

	{
	 PCI_VENDOR_ID_LSI_LOGIC,
	 PCI_DEVICE_ID_LSI_SAS1064R,
	 PCI_ANY_ID,
	 PCI_ANY_ID,
	 },
	{
	 PCI_VENDOR_ID_DELL,
	 PCI_DEVICE_ID_DELL_PERC5,
	 PCI_ANY_ID,
	 PCI_ANY_ID,
	 },
	{0}			/* Terminating entry */
};

MODULE_DEVICE_TABLE(pci, megasas_pci_table);

static int megasas_mgmt_majorno;
static struct megasas_mgmt_info megasas_mgmt_info;
static struct fasync_struct *megasas_async_queue;
static DECLARE_MUTEX(megasas_async_queue_mutex);
/**
 * megasas_get_cmd -	Get a command from the free pool
 * @instance:		Adapter soft state
 *
 * Returns a free command from the pool
 */
static inline struct megasas_cmd *megasas_get_cmd(struct megasas_instance
						  *instance)
{
	unsigned long flags;
	struct megasas_cmd *cmd = NULL;

	spin_lock_irqsave(&instance->cmd_pool_lock, flags);

	if (!list_empty(&instance->cmd_pool)) {
		cmd = list_entry((&instance->cmd_pool)->next,
				 struct megasas_cmd, list);
		list_del_init(&cmd->list);
	} else {
		printk(KERN_ERR "megasas: Command pool empty!\n");
	}

	spin_unlock_irqrestore(&instance->cmd_pool_lock, flags);
	return cmd;
}

/**
 * megasas_return_cmd -	Return a cmd to free command pool
 * @instance:		Adapter soft state
 * @cmd:		Command packet to be returned to free command pool
 */
static inline void
megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&instance->cmd_pool_lock, flags);

	cmd->scmd = NULL;
	list_add_tail(&cmd->list, &instance->cmd_pool);

	spin_unlock_irqrestore(&instance->cmd_pool_lock, flags);
}

/**
 * megasas_enable_intr -	Enables interrupts
 * @regs:			MFI register set
 */
static inline void
megasas_enable_intr(struct megasas_register_set __iomem * regs)
{
	writel(1, &(regs)->outbound_intr_mask);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_disable_intr -	Disables interrupts
 * @regs:			MFI register set
 */
static inline void
megasas_disable_intr(struct megasas_register_set __iomem * regs)
{
	u32 mask = readl(&regs->outbound_intr_mask) & (~0x00000001);

	writel(mask, &regs->outbound_intr_mask);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_issue_polled -	Issues a polling command
 * @instance:			Adapter soft state
 * @cmd:			Command packet to be issued
 *
 * For polling, MFI requires the cmd_status to be set to 0xFF before posting.
 */
static int
megasas_issue_polled(struct megasas_instance *instance,
		     struct megasas_cmd *cmd)
{
	int i;
	u32 msecs = MFI_POLL_TIMEOUT_SECS * 1000;

	struct megasas_header *frame_hdr = &cmd->frame->hdr;

	frame_hdr->cmd_status = 0xFF;
	frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	/*
	 * Issue the frame using inbound queue port
	 */
	writel(cmd->frame_phys_addr >> 3,
	       &instance->reg_set->inbound_queue_port);

	/*
	 * Wait for cmd_status to change
	 */
	for (i = 0; (i < msecs) && (frame_hdr->cmd_status == 0xff); i++) {
		rmb();
		msleep(1);
	}

	if (frame_hdr->cmd_status == 0xff)
		return -ETIME;

	return 0;
}

/**
 * megasas_issue_blocked_cmd -	Synchronous wrapper around regular FW cmds
 * @instance:			Adapter soft state
 * @cmd:			Command to be issued
 *
 * This function waits on an event for the command to be returned from ISR.
 * Used to issue ioctl commands.
 */
static int
megasas_issue_blocked_cmd(struct megasas_instance *instance,
			  struct megasas_cmd *cmd)
{
	cmd->cmd_status = ENODATA;

	writel(cmd->frame_phys_addr >> 3,
	       &instance->reg_set->inbound_queue_port);

	wait_event(instance->int_cmd_wait_q, (cmd->cmd_status != ENODATA));

	return 0;
}

/**
 * megasas_issue_blocked_abort_cmd -	Aborts previously issued cmd
 * @instance:				Adapter soft state
 * @cmd_to_abort:			Previously issued cmd to be aborted
 *
 * MFI firmware can abort previously issued AEN comamnd (automatic event
 * notification). The megasas_issue_blocked_abort_cmd() issues such abort
 * cmd and blocks till it is completed.
 */
static int
megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
				struct megasas_cmd *cmd_to_abort)
{
	struct megasas_cmd *cmd;
	struct megasas_abort_frame *abort_fr;

	cmd = megasas_get_cmd(instance);

	if (!cmd)
		return -1;

	abort_fr = &cmd->frame->abort;

	/*
	 * Prepare and issue the abort frame
	 */
	abort_fr->cmd = MFI_CMD_ABORT;
	abort_fr->cmd_status = 0xFF;
	abort_fr->flags = 0;
	abort_fr->abort_context = cmd_to_abort->index;
	abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
	abort_fr->abort_mfi_phys_addr_hi = 0;

	cmd->sync_cmd = 1;
	cmd->cmd_status = 0xFF;

	writel(cmd->frame_phys_addr >> 3,
	       &instance->reg_set->inbound_queue_port);

	/*
	 * Wait for this cmd to complete
	 */
	wait_event(instance->abort_cmd_wait_q, (cmd->cmd_status != 0xFF));

	megasas_return_cmd(instance, cmd);
	return 0;
}
/**
 * megasas_make_sgl32 -	Prepares 32-bit SGL
 * @instance:		Adapter soft state
 * @scp:		SCSI command from the mid-layer
 * @mfi_sgl:		SGL to be filled in
 *
 * If successful, this function returns the number of SG elements. Otherwise,
 * it returnes -1.
 */
static inline int
megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
		   union megasas_sgl *mfi_sgl)
{
	int i;
	int sge_count;
	struct scatterlist *os_sgl;

	/*
	 * Return 0 if there is no data transfer
	 */
	if (!scp->request_buffer || !scp->request_bufflen)
		return 0;

	if (!scp->use_sg) {
		mfi_sgl->sge32[0].phys_addr = pci_map_single(instance->pdev,
							     scp->request_buffer,
							     scp->request_bufflen,
							     scp->sc_data_direction);
		mfi_sgl->sge32[0].length = scp->request_bufflen;

		return 1;
	}

	os_sgl = (struct scatterlist *)scp->request_buffer;
	sge_count = pci_map_sg(instance->pdev, os_sgl, scp->use_sg,
			       scp->sc_data_direction);

	for (i = 0; i < sge_count; i++, os_sgl++) {
		mfi_sgl->sge32[i].length = sg_dma_len(os_sgl);
		mfi_sgl->sge32[i].phys_addr = sg_dma_address(os_sgl);
	}

	return sge_count;
}

/**
 * megasas_make_sgl64 -	Prepares 64-bit SGL
 * @instance:		Adapter soft state
 * @scp:		SCSI command from the mid-layer
 * @mfi_sgl:		SGL to be filled in
 *
 * If successful, this function returns the number of SG elements. Otherwise,
 * it returnes -1.
 */
static inline int
megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
		   union megasas_sgl *mfi_sgl)
{
	int i;
	int sge_count;
	struct scatterlist *os_sgl;

	/*
	 * Return 0 if there is no data transfer
	 */
	if (!scp->request_buffer || !scp->request_bufflen)
		return 0;

	if (!scp->use_sg) {
		mfi_sgl->sge64[0].phys_addr = pci_map_single(instance->pdev,
							     scp->request_buffer,
							     scp->request_bufflen,
							     scp->sc_data_direction);
		mfi_sgl->sge64[0].length = scp->request_bufflen;

		return 1;
	}

	os_sgl = (struct scatterlist *)scp->request_buffer;
	sge_count = pci_map_sg(instance->pdev, os_sgl, scp->use_sg,
			       scp->sc_data_direction);

	for (i = 0; i < sge_count; i++, os_sgl++) {
		mfi_sgl->sge64[i].length = sg_dma_len(os_sgl);
		mfi_sgl->sge64[i].phys_addr = sg_dma_address(os_sgl);
	}

	return sge_count;
}

/**
 * megasas_build_dcdb -	Prepares a direct cdb (DCDB) command
 * @instance:		Adapter soft state
 * @scp:		SCSI command
 * @cmd:		Command to be prepared in
 *
 * This function prepares CDB commands. These are typcially pass-through
 * commands to the devices.
 */
static inline int
megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
		   struct megasas_cmd *cmd)
{
	u32 sge_sz;
	int sge_bytes;
	u32 is_logical;
	u32 device_id;
	u16 flags = 0;
	struct megasas_pthru_frame *pthru;

	is_logical = MEGASAS_IS_LOGICAL(scp);
	device_id = MEGASAS_DEV_INDEX(instance, scp);
	pthru = (struct megasas_pthru_frame *)cmd->frame;

	if (scp->sc_data_direction == PCI_DMA_TODEVICE)
		flags = MFI_FRAME_DIR_WRITE;
	else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
		flags = MFI_FRAME_DIR_READ;
	else if (scp->sc_data_direction == PCI_DMA_NONE)
		flags = MFI_FRAME_DIR_NONE;

	/*
	 * Prepare the DCDB frame
	 */
	pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO;
	pthru->cmd_status = 0x0;
	pthru->scsi_status = 0x0;
	pthru->target_id = device_id;
	pthru->lun = scp->device->lun;
	pthru->cdb_len = scp->cmd_len;
	pthru->timeout = 0;
	pthru->flags = flags;
	pthru->data_xfer_len = scp->request_bufflen;

	memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);

	/*
	 * Construct SGL
	 */
	sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
	    sizeof(struct megasas_sge32);

	if (IS_DMA64) {
		pthru->flags |= MFI_FRAME_SGL64;
		pthru->sge_count = megasas_make_sgl64(instance, scp,
						      &pthru->sgl);
	} else
		pthru->sge_count = megasas_make_sgl32(instance, scp,
						      &pthru->sgl);

	/*
	 * Sense info specific
	 */
	pthru->sense_len = SCSI_SENSE_BUFFERSIZE;
	pthru->sense_buf_phys_addr_hi = 0;
	pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr;

	sge_bytes = sge_sz * pthru->sge_count;

	/*
	 * Compute the total number of frames this command consumes. FW uses
	 * this number to pull sufficient number of frames from host memory.
	 */
	cmd->frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
	    ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) + 1;

	if (cmd->frame_count > 7)
		cmd->frame_count = 8;

	return cmd->frame_count;
}
/**
 * megasas_build_ldio -	Prepares IOs to logical devices
 * @instance:		Adapter soft state
 * @scp:		SCSI command
 * @cmd:		Command to to be prepared
 *
 * Frames (and accompanying SGLs) for regular SCSI IOs use this function.
 */
static inline int
megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
		   struct megasas_cmd *cmd)
{
	u32 sge_sz;
	int sge_bytes;
	u32 device_id;
	u8 sc = scp->cmnd[0];
	u16 flags = 0;
	struct megasas_io_frame *ldio;

	device_id = MEGASAS_DEV_INDEX(instance, scp);
	ldio = (struct megasas_io_frame *)cmd->frame;

	if (scp->sc_data_direction == PCI_DMA_TODEVICE)
		flags = MFI_FRAME_DIR_WRITE;
	else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
		flags = MFI_FRAME_DIR_READ;

	/*
	 * Preare the Logical IO frame: 2nd bit is zero for all read cmds
	 */
	ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ;
	ldio->cmd_status = 0x0;
	ldio->scsi_status = 0x0;
	ldio->target_id = device_id;
	ldio->timeout = 0;
	ldio->reserved_0 = 0;
	ldio->pad_0 = 0;
	ldio->flags = flags;
	ldio->start_lba_hi = 0;
	ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0;

	/*
	 * 6-byte READ(0x08) or WRITE(0x0A) cdb
	 */
	if (scp->cmd_len == 6) {
		ldio->lba_count = (u32) scp->cmnd[4];
		ldio->start_lba_lo = ((u32) scp->cmnd[1] << 16) |
		    ((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3];

		ldio->start_lba_lo &= 0x1FFFFF;
	}

	/*
	 * 10-byte READ(0x28) or WRITE(0x2A) cdb
	 */
	else if (scp->cmd_len == 10) {
		ldio->lba_count = (u32) scp->cmnd[8] |
		    ((u32) scp->cmnd[7] << 8);
		ldio->start_lba_lo = ((u32) scp->cmnd[2] << 24) |
		    ((u32) scp->cmnd[3] << 16) |
		    ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
	}

	/*
	 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
	 */
	else if (scp->cmd_len == 12) {
		ldio->lba_count = ((u32) scp->cmnd[6] << 24) |
		    ((u32) scp->cmnd[7] << 16) |
		    ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];

		ldio->start_lba_lo = ((u32) scp->cmnd[2] << 24) |
		    ((u32) scp->cmnd[3] << 16) |
		    ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
	}

	/*
	 * 16-byte READ(0x88) or WRITE(0x8A) cdb
	 */
	else if (scp->cmd_len == 16) {
		ldio->lba_count = ((u32) scp->cmnd[10] << 24) |
		    ((u32) scp->cmnd[11] << 16) |
		    ((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13];

		ldio->start_lba_lo = ((u32) scp->cmnd[6] << 24) |
		    ((u32) scp->cmnd[7] << 16) |
		    ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];

		ldio->start_lba_hi = ((u32) scp->cmnd[2] << 24) |
		    ((u32) scp->cmnd[3] << 16) |
		    ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
	}

	/*
	 * Construct SGL
	 */
	sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
	    sizeof(struct megasas_sge32);

	if (IS_DMA64) {
		ldio->flags |= MFI_FRAME_SGL64;
		ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
	} else
		ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);

	/*
	 * Sense info specific
	 */
	ldio->sense_len = SCSI_SENSE_BUFFERSIZE;
	ldio->sense_buf_phys_addr_hi = 0;
	ldio->sense_buf_phys_addr_lo = cmd->sense_phys_addr;

	sge_bytes = sge_sz * ldio->sge_count;

	cmd->frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
	    ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) + 1;

	if (cmd->frame_count > 7)
		cmd->frame_count = 8;

	return cmd->frame_count;
}
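As a concrete illustration of the 10-byte CDB decoding above, the sketch below (a hypothetical helper, not part of the driver) extracts the LBA and block count the same way and shows the arithmetic on a sample CDB:

```c
#include <linux/types.h>

/* Hypothetical helper mirroring the 10-byte READ/WRITE decode above. */
static void decode_rw10(const u8 *cdb, u32 *lba, u32 *blocks)
{
	/* Bytes 2..5 hold a big-endian LBA, bytes 7..8 a big-endian count. */
	*lba = ((u32) cdb[2] << 24) | ((u32) cdb[3] << 16) |
	       ((u32) cdb[4] << 8) | (u32) cdb[5];
	*blocks = ((u32) cdb[7] << 8) | (u32) cdb[8];
}

/*
 * Example: cdb = { 0x28, 0, 0x00, 0x00, 0x10, 0x00, 0, 0x00, 0x08, 0 }
 * decodes to lba = 0x1000 (4096) and blocks = 8.
 */
```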
/**
 * megasas_build_cmd -	Prepares a command packet
 * @instance:		Adapter soft state
 * @scp:		SCSI command
 * @frame_count:	[OUT] Number of frames used to prepare this command
 */
static inline struct megasas_cmd *megasas_build_cmd(struct megasas_instance
						    *instance,
						    struct scsi_cmnd *scp,
						    int *frame_count)
{
	u32 logical_cmd;
	struct megasas_cmd *cmd;

	/*
	 * Find out if this is logical or physical drive command.
	 */
	logical_cmd = MEGASAS_IS_LOGICAL(scp);

	/*
	 * Logical drive command
	 */
	if (logical_cmd) {

		if (scp->device->id >= MEGASAS_MAX_LD) {
			scp->result = DID_BAD_TARGET << 16;
			return NULL;
		}

		switch (scp->cmnd[0]) {

		case READ_10:
		case WRITE_10:
		case READ_12:
		case WRITE_12:
		case READ_6:
		case WRITE_6:
		case READ_16:
		case WRITE_16:
			/*
			 * Fail for LUN > 0
			 */
			if (scp->device->lun) {
				scp->result = DID_BAD_TARGET << 16;
				return NULL;
			}

			cmd = megasas_get_cmd(instance);

			if (!cmd) {
				scp->result = DID_IMM_RETRY << 16;
				return NULL;
			}

			*frame_count = megasas_build_ldio(instance, scp, cmd);

			if (!(*frame_count)) {
				megasas_return_cmd(instance, cmd);
				return NULL;
			}

			return cmd;

		default:
			/*
			 * Fail for LUN > 0
			 */
			if (scp->device->lun) {
				scp->result = DID_BAD_TARGET << 16;
				return NULL;
			}

			cmd = megasas_get_cmd(instance);

			if (!cmd) {
				scp->result = DID_IMM_RETRY << 16;
				return NULL;
			}

			*frame_count = megasas_build_dcdb(instance, scp, cmd);

			if (!(*frame_count)) {
				megasas_return_cmd(instance, cmd);
				return NULL;
			}

			return cmd;
		}
	} else {
		cmd = megasas_get_cmd(instance);

		if (!cmd) {
			scp->result = DID_IMM_RETRY << 16;
			return NULL;
		}

		*frame_count = megasas_build_dcdb(instance, scp, cmd);

		if (!(*frame_count)) {
			megasas_return_cmd(instance, cmd);
			return NULL;
		}

		return cmd;
	}

	return NULL;
}

/**
 * megasas_queue_command -	Queue entry point
 * @scmd:			SCSI command to be queued
 * @done:			Callback entry point
 */
static int
megasas_queue_command(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd *))
{
	u32 frame_count;
	unsigned long flags;
	struct megasas_cmd *cmd;
	struct megasas_instance *instance;

	instance = (struct megasas_instance *)scmd->device->host->hostdata;
	scmd->scsi_done = done;
	scmd->result = 0;

	cmd = megasas_build_cmd(instance, scmd, &frame_count);

	if (!cmd) {
		done(scmd);
		return 0;
	}

	cmd->scmd = scmd;
	scmd->SCp.ptr = (char *)cmd;
	scmd->SCp.sent_command = jiffies;

	/*
	 * Issue the command to the FW
	 */
	spin_lock_irqsave(&instance->instance_lock, flags);
	instance->fw_outstanding++;
	spin_unlock_irqrestore(&instance->instance_lock, flags);

	writel(((cmd->frame_phys_addr >> 3) | (cmd->frame_count - 1)),
	       &instance->reg_set->inbound_queue_port);

	return 0;
}
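The writel() above packs two things into one doorbell value: because each MFI frame is 64-byte aligned, shifting the frame address right by three leaves its low bits free, and (frame_count - 1), which is at most 7, rides in those bits. A hedged sketch of that encoding with hypothetical names:

```c
#include <linux/types.h>

/*
 * Illustration of the inbound queue port word used above. With 64-byte
 * aligned frames, (addr >> 3) has its three low bits clear, so the
 * frame count (minus one) fits there.
 */
static u32 encode_inbound_word(dma_addr_t frame_phys_addr, u32 frame_count)
{
	return ((u32)(frame_phys_addr >> 3)) | (frame_count - 1);
}

/* Example: addr 0x1f400040, frame_count 3  ->  0x03e80008 | 0x2 = 0x03e8000a */
```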
/**
 * megasas_wait_for_outstanding -	Wait for all outstanding cmds
 * @instance:				Adapter soft state
 *
 * This function waits for upto MEGASAS_RESET_WAIT_TIME seconds for FW to
 * complete all its outstanding commands. Returns error if one or more IOs
 * are pending after this time period. It also marks the controller dead.
 */
static int megasas_wait_for_outstanding(struct megasas_instance *instance)
{
	int i;
	u32 wait_time = MEGASAS_RESET_WAIT_TIME;

	for (i = 0; i < wait_time; i++) {

		if (!instance->fw_outstanding)
			break;

		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
			printk(KERN_NOTICE "megasas: [%2d]waiting for %d "
			       "commands to complete\n", i,
			       instance->fw_outstanding);
		}

		msleep(1000);
	}

	if (instance->fw_outstanding) {
		instance->hw_crit_error = 1;
		return FAILED;
	}

	return SUCCESS;
}

/**
 * megasas_generic_reset -	Generic reset routine
 * @scmd:			Mid-layer SCSI command
 *
 * This routine implements a generic reset handler for device, bus and host
 * reset requests. Device, bus and host specific reset handlers can use this
 * function after they do their specific tasks.
 */
static int megasas_generic_reset(struct scsi_cmnd *scmd)
{
	int ret_val;
	struct megasas_instance *instance;

	instance = (struct megasas_instance *)scmd->device->host->hostdata;

	printk(KERN_NOTICE "megasas: RESET -%ld cmd=%x <c=%d t=%d l=%d>\n",
	       scmd->serial_number, scmd->cmnd[0], scmd->device->channel,
	       scmd->device->id, scmd->device->lun);

	if (instance->hw_crit_error) {
		printk(KERN_ERR "megasas: cannot recover from previous reset "
		       "failures\n");
		return FAILED;
	}

	spin_unlock(scmd->device->host->host_lock);

	ret_val = megasas_wait_for_outstanding(instance);

	if (ret_val == SUCCESS)
		printk(KERN_NOTICE "megasas: reset successful\n");
	else
		printk(KERN_ERR "megasas: failed to do reset\n");

	spin_lock(scmd->device->host->host_lock);

	return ret_val;
}

static enum scsi_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
{
	unsigned long seconds;

	if (scmd->SCp.ptr) {
		seconds = (jiffies - scmd->SCp.sent_command) / HZ;

		if (seconds < 90) {
			return EH_RESET_TIMER;
		} else {
			return EH_NOT_HANDLED;
		}
	}

	return EH_HANDLED;
}

/**
 * megasas_reset_device -	Device reset handler entry point
 */
static int megasas_reset_device(struct scsi_cmnd *scmd)
{
	int ret;

	/*
	 * First wait for all commands to complete
	 */
	ret = megasas_generic_reset(scmd);

	return ret;
}

/**
 * megasas_reset_bus_host -	Bus & host reset handler entry point
 */
static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
{
	int ret;

	/*
	 * Frist wait for all commands to complete
	 */
	ret = megasas_generic_reset(scmd);

	return ret;
}

/**
 * megasas_service_aen -	Processes an event notification
 * @instance:			Adapter soft state
 * @cmd:			AEN command completed by the ISR
 *
 * For AEN, driver sends a command down to FW that is held by the FW till an
 * event occurs. When an event of interest occurs, FW completes the command
 * that it was previously holding.
 *
 * This routines sends SIGIO signal to processes that have registered with the
 * driver for AEN.
 */
static void
megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
	/*
	 * Don't signal app if it is just an aborted previously registered aen
	 */
	if (!cmd->abort_aen)
		kill_fasync(&megasas_async_queue, SIGIO, POLL_IN);
	else
		cmd->abort_aen = 0;

	instance->aen_cmd = NULL;
	megasas_return_cmd(instance, cmd);
}

/*
 * Scsi host template for megaraid_sas driver
 */
static struct scsi_host_template megasas_template = {

	.module = THIS_MODULE,
	.name = "LSI Logic SAS based MegaRAID driver",
	.proc_name = "megaraid_sas",
	.queuecommand = megasas_queue_command,
	.eh_device_reset_handler = megasas_reset_device,
	.eh_bus_reset_handler = megasas_reset_bus_host,
	.eh_host_reset_handler = megasas_reset_bus_host,
	.eh_timed_out = megasas_reset_timer,
	.use_clustering = ENABLE_CLUSTERING,
};
/**
 * megasas_complete_int_cmd -	Completes an internal command
 * @instance:			Adapter soft state
 * @cmd:			Command to be completed
 *
 * The megasas_issue_blocked_cmd() function waits for a command to complete
 * after it issues a command. This function wakes up that waiting routine by
 * calling wake_up() on the wait queue.
 */
static void
megasas_complete_int_cmd(struct megasas_instance *instance,
			 struct megasas_cmd *cmd)
{
	cmd->cmd_status = cmd->frame->io.cmd_status;

	if (cmd->cmd_status == ENODATA) {
		cmd->cmd_status = 0;
	}
	wake_up(&instance->int_cmd_wait_q);
}

/**
 * megasas_complete_abort -	Completes aborting a command
 * @instance:			Adapter soft state
 * @cmd:			Cmd that was issued to abort another cmd
 *
 * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q
 * after it issues an abort on a previously issued command. This function
 * wakes up all functions waiting on the same wait queue.
 */
static void
megasas_complete_abort(struct megasas_instance *instance,
		       struct megasas_cmd *cmd)
{
	if (cmd->sync_cmd) {
		cmd->sync_cmd = 0;
		cmd->cmd_status = 0;
		wake_up(&instance->abort_cmd_wait_q);
	}

	return;
}

/**
 * megasas_unmap_sgbuf -	Unmap SG buffers
 * @instance:			Adapter soft state
 * @cmd:			Completed command
 */
static inline void
megasas_unmap_sgbuf(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
	dma_addr_t buf_h;
	u8 opcode;

	if (cmd->scmd->use_sg) {
		pci_unmap_sg(instance->pdev, cmd->scmd->request_buffer,
			     cmd->scmd->use_sg, cmd->scmd->sc_data_direction);
		return;
	}

	if (!cmd->scmd->request_bufflen)
		return;

	opcode = cmd->frame->hdr.cmd;

	if ((opcode == MFI_CMD_LD_READ) || (opcode == MFI_CMD_LD_WRITE)) {
		if (IS_DMA64)
			buf_h = cmd->frame->io.sgl.sge64[0].phys_addr;
		else
			buf_h = cmd->frame->io.sgl.sge32[0].phys_addr;
	} else {
		if (IS_DMA64)
			buf_h = cmd->frame->pthru.sgl.sge64[0].phys_addr;
		else
			buf_h = cmd->frame->pthru.sgl.sge32[0].phys_addr;
	}

	pci_unmap_single(instance->pdev, buf_h, cmd->scmd->request_bufflen,
			 cmd->scmd->sc_data_direction);
	return;
}

/**
 * megasas_complete_cmd -	Completes a command
 * @instance:			Adapter soft state
 * @cmd:			Command to be completed
 * @alt_status:			If non-zero, use this value as status to
 * 				SCSI mid-layer instead of the value returned
 * 				by the FW. This should be used if caller wants
 * 				an alternate status (as in the case of aborted
 * 				commands)
 */
static inline void
megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
		     u8 alt_status)
{
	int exception = 0;
	struct megasas_header *hdr = &cmd->frame->hdr;
	unsigned long flags;

	if (cmd->scmd) {
		cmd->scmd->SCp.ptr = (char *)0;
	}

	switch (hdr->cmd) {

	case MFI_CMD_PD_SCSI_IO:
	case MFI_CMD_LD_SCSI_IO:

		/*
		 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
		 * issued either through an IO path or an IOCTL path. If it
		 * was via IOCTL, we will send it to internal completion.
		 */
		if (cmd->sync_cmd) {
			cmd->sync_cmd = 0;
			megasas_complete_int_cmd(instance, cmd);
			break;
		}

		/*
		 * Don't export physical disk devices to mid-layer.
		 */
		if (!MEGASAS_IS_LOGICAL(cmd->scmd) &&
		    (hdr->cmd_status == MFI_STAT_OK) &&
		    (cmd->scmd->cmnd[0] == INQUIRY)) {

			if (((*(u8 *) cmd->scmd->request_buffer) & 0x1F) ==
			    TYPE_DISK) {
				cmd->scmd->result = DID_BAD_TARGET << 16;
				exception = 1;
			}
		}

	case MFI_CMD_LD_READ:
	case MFI_CMD_LD_WRITE:

		if (alt_status) {
			cmd->scmd->result = alt_status << 16;
			exception = 1;
		}

		if (exception) {

			spin_lock_irqsave(&instance->instance_lock, flags);
			instance->fw_outstanding--;
			spin_unlock_irqrestore(&instance->instance_lock, flags);

			megasas_unmap_sgbuf(instance, cmd);
			cmd->scmd->scsi_done(cmd->scmd);
			megasas_return_cmd(instance, cmd);

			break;
		}

		switch (hdr->cmd_status) {

		case MFI_STAT_OK:
			cmd->scmd->result = DID_OK << 16;
			break;

		case MFI_STAT_SCSI_IO_FAILED:
		case MFI_STAT_LD_INIT_IN_PROGRESS:
			cmd->scmd->result =
			    (DID_ERROR << 16) | hdr->scsi_status;
			break;

		case MFI_STAT_SCSI_DONE_WITH_ERROR:

			cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status;

			if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) {
				memset(cmd->scmd->sense_buffer, 0,
				       SCSI_SENSE_BUFFERSIZE);
				memcpy(cmd->scmd->sense_buffer, cmd->sense,
				       hdr->sense_len);

				cmd->scmd->result |= DRIVER_SENSE << 24;
			}

			break;

		case MFI_STAT_LD_OFFLINE:
		case MFI_STAT_DEVICE_NOT_FOUND:
			cmd->scmd->result = DID_BAD_TARGET << 16;
			break;

		default:
			printk(KERN_DEBUG "megasas: MFI FW status %#x\n",
			       hdr->cmd_status);
			cmd->scmd->result = DID_ERROR << 16;
			break;
		}

		spin_lock_irqsave(&instance->instance_lock, flags);
		instance->fw_outstanding--;
		spin_unlock_irqrestore(&instance->instance_lock, flags);

		megasas_unmap_sgbuf(instance, cmd);
		cmd->scmd->scsi_done(cmd->scmd);
		megasas_return_cmd(instance, cmd);

		break;

	case MFI_CMD_SMP:
	case MFI_CMD_STP:
	case MFI_CMD_DCMD:

		/*
		 * See if got an event notification
		 */
		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT)
			megasas_service_aen(instance, cmd);
		else
			megasas_complete_int_cmd(instance, cmd);

		break;

	default:
		printk("megasas: Unknown command completed! [0x%X]\n",
		       hdr->cmd);
		break;
	}
}
/**
 * megasas_deplete_reply_queue -	Processes all completed commands
 * @instance:				Adapter soft state
 * @alt_status:				Alternate status to be returned to
 * 					SCSI mid-layer instead of the status
 * 					returned by the FW
 */
static inline int
megasas_deplete_reply_queue(struct megasas_instance *instance, u8 alt_status)
{
	u32 status;
	u32 producer;
	u32 consumer;
	u32 context;
	struct megasas_cmd *cmd;

	/*
	 * Check if it is our interrupt
	 */
	status = readl(&instance->reg_set->outbound_intr_status);

	if (!(status & MFI_OB_INTR_STATUS_MASK)) {
		return IRQ_NONE;
	}

	/*
	 * Clear the interrupt by writing back the same value
	 */
	writel(status, &instance->reg_set->outbound_intr_status);

	producer = *instance->producer;
	consumer = *instance->consumer;

	while (consumer != producer) {
		context = instance->reply_queue[consumer];

		cmd = instance->cmd_list[context];

		megasas_complete_cmd(instance, cmd, alt_status);

		consumer++;
		if (consumer == (instance->max_fw_cmds + 1)) {
			consumer = 0;
		}
	}

	*instance->consumer = producer;

	return IRQ_HANDLED;
}
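The loop above is a plain circular-buffer consumer: the index wraps at max_fw_cmds + 1, which is exactly the number of reply slots the driver allocates later in megasas_init_mfi(). A minimal stand-alone sketch of the same wrap logic, with all names hypothetical:

```c
#include <linux/types.h>

/* Hypothetical ring consumer mirroring the wrap arithmetic above. */
static void drain_ring(u32 *ring, u32 ring_len, u32 *consumer, u32 producer)
{
	u32 c = *consumer;

	while (c != producer) {
		u32 context = ring[c];	/* index of the completed command */

		/* ... look up and complete cmd_list[context] here ... */
		(void)context;

		if (++c == ring_len)	/* ring_len == max_fw_cmds + 1 */
			c = 0;
	}
	*consumer = c;
}
```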
/**
 * megasas_isr - isr entry point
 */
static irqreturn_t megasas_isr(int irq, void *devp, struct pt_regs *regs)
{
	return megasas_deplete_reply_queue((struct megasas_instance *)devp,
					   DID_OK);
}

/**
 * megasas_transition_to_ready -	Move the FW to READY state
 * @reg_set:				MFI register set
 *
 * During the initialization, FW passes can potentially be in any one of
 * several possible states. If the FW in operational, waiting-for-handshake
 * states, driver must take steps to bring it to ready state. Otherwise, it
 * has to wait for the ready state.
 */
static int
megasas_transition_to_ready(struct megasas_register_set __iomem * reg_set)
{
	int i;
	u8 max_wait;
	u32 fw_state;
	u32 cur_state;

	fw_state = readl(&reg_set->outbound_msg_0) & MFI_STATE_MASK;

	while (fw_state != MFI_STATE_READY) {

		printk(KERN_INFO "megasas: Waiting for FW to come to ready"
		       " state\n");
		switch (fw_state) {

		case MFI_STATE_FAULT:

			printk(KERN_DEBUG "megasas: FW in FAULT state!!\n");
			return -ENODEV;

		case MFI_STATE_WAIT_HANDSHAKE:
			/*
			 * Set the CLR bit in inbound doorbell
			 */
			writel(MFI_INIT_CLEAR_HANDSHAKE,
			       &reg_set->inbound_doorbell);

			max_wait = 2;
			cur_state = MFI_STATE_WAIT_HANDSHAKE;
			break;

		case MFI_STATE_OPERATIONAL:
			/*
			 * Bring it to READY state; assuming max wait 2 secs
			 */
			megasas_disable_intr(reg_set);
			writel(MFI_INIT_READY, &reg_set->inbound_doorbell);

			max_wait = 10;
			cur_state = MFI_STATE_OPERATIONAL;
			break;

		case MFI_STATE_UNDEFINED:
			/*
			 * This state should not last for more than 2 seconds
			 */
			max_wait = 2;
			cur_state = MFI_STATE_UNDEFINED;
			break;

		case MFI_STATE_BB_INIT:
			max_wait = 2;
			cur_state = MFI_STATE_BB_INIT;
			break;

		case MFI_STATE_FW_INIT:
			max_wait = 20;
			cur_state = MFI_STATE_FW_INIT;
			break;

		case MFI_STATE_FW_INIT_2:
			max_wait = 20;
			cur_state = MFI_STATE_FW_INIT_2;
			break;

		case MFI_STATE_DEVICE_SCAN:
			max_wait = 20;
			cur_state = MFI_STATE_DEVICE_SCAN;
			break;

		case MFI_STATE_FLUSH_CACHE:
			max_wait = 20;
			cur_state = MFI_STATE_FLUSH_CACHE;
			break;

		default:
			printk(KERN_DEBUG "megasas: Unknown state 0x%x\n",
			       fw_state);
			return -ENODEV;
		}

		/*
		 * The cur_state should not last for more than max_wait secs
		 */
		for (i = 0; i < (max_wait * 1000); i++) {
			fw_state = MFI_STATE_MASK &
			    readl(&reg_set->outbound_msg_0);

			if (fw_state == cur_state) {
				msleep(1);
			} else
				break;
		}

		/*
		 * Return error if fw_state hasn't changed after max_wait
		 */
		if (fw_state == cur_state) {
			printk(KERN_DEBUG "FW state [%d] hasn't changed "
			       "in %d secs\n", fw_state, max_wait);
			return -ENODEV;
		}
	};

	return 0;
}
/**
 * megasas_teardown_frame_pool -	Destroy the cmd frame DMA pool
 * @instance:				Adapter soft state
 */
static void megasas_teardown_frame_pool(struct megasas_instance *instance)
{
	int i;
	u32 max_cmd = instance->max_fw_cmds;
	struct megasas_cmd *cmd;

	if (!instance->frame_dma_pool)
		return;

	/*
	 * Return all frames to pool
	 */
	for (i = 0; i < max_cmd; i++) {

		cmd = instance->cmd_list[i];

		if (cmd->frame)
			pci_pool_free(instance->frame_dma_pool, cmd->frame,
				      cmd->frame_phys_addr);

		if (cmd->sense)
			pci_pool_free(instance->sense_dma_pool, cmd->frame,
				      cmd->sense_phys_addr);
	}

	/*
	 * Now destroy the pool itself
	 */
	pci_pool_destroy(instance->frame_dma_pool);
	pci_pool_destroy(instance->sense_dma_pool);

	instance->frame_dma_pool = NULL;
	instance->sense_dma_pool = NULL;
}

/**
 * megasas_create_frame_pool -	Creates DMA pool for cmd frames
 * @instance:			Adapter soft state
 *
 * Each command packet has an embedded DMA memory buffer that is used for
 * filling MFI frame and the SG list that immediately follows the frame. This
 * function creates those DMA memory buffers for each command packet by using
 * PCI pool facility.
 */
static int megasas_create_frame_pool(struct megasas_instance *instance)
{
	int i;
	u32 max_cmd;
	u32 sge_sz;
	u32 sgl_sz;
	u32 total_sz;
	u32 frame_count;
	struct megasas_cmd *cmd;

	max_cmd = instance->max_fw_cmds;

	/*
	 * Size of our frame is 64 bytes for MFI frame, followed by max SG
	 * elements and finally SCSI_SENSE_BUFFERSIZE bytes for sense buffer
	 */
	sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
	    sizeof(struct megasas_sge32);

	/*
	 * Calculated the number of 64byte frames required for SGL
	 */
	sgl_sz = sge_sz * instance->max_num_sge;
	frame_count = (sgl_sz + MEGAMFI_FRAME_SIZE - 1) / MEGAMFI_FRAME_SIZE;

	/*
	 * We need one extra frame for the MFI command
	 */
	frame_count++;

	total_sz = MEGAMFI_FRAME_SIZE * frame_count;

	/*
	 * Use DMA pool facility provided by PCI layer
	 */
	instance->frame_dma_pool = pci_pool_create("megasas frame pool",
						   instance->pdev, total_sz, 64,
						   0);

	if (!instance->frame_dma_pool) {
		printk(KERN_DEBUG "megasas: failed to setup frame pool\n");
		return -ENOMEM;
	}

	instance->sense_dma_pool = pci_pool_create("megasas sense pool",
						   instance->pdev, 128, 4, 0);

	if (!instance->sense_dma_pool) {
		printk(KERN_DEBUG "megasas: failed to setup sense pool\n");

		pci_pool_destroy(instance->frame_dma_pool);
		instance->frame_dma_pool = NULL;

		return -ENOMEM;
	}

	/*
	 * Allocate and attach a frame to each of the commands in cmd_list.
	 * By making cmd->index as the context instead of the &cmd, we can
	 * always use 32bit context regardless of the architecture
	 */
	for (i = 0; i < max_cmd; i++) {

		cmd = instance->cmd_list[i];

		cmd->frame = pci_pool_alloc(instance->frame_dma_pool,
					    GFP_KERNEL, &cmd->frame_phys_addr);

		cmd->sense = pci_pool_alloc(instance->sense_dma_pool,
					    GFP_KERNEL, &cmd->sense_phys_addr);

		/*
		 * megasas_teardown_frame_pool() takes care of freeing
		 * whatever has been allocated
		 */
		if (!cmd->frame || !cmd->sense) {

			printk(KERN_DEBUG "megasas: pci_pool_alloc failed\n");
			megasas_teardown_frame_pool(instance);
			return -ENOMEM;
		}

		cmd->frame->io.context = cmd->index;
	}

	return 0;
}
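To make the sizing arithmetic above concrete, the block below plugs in illustrative numbers (the SG entry size and maximum SG count are assumptions; only the formula comes from the code above):

```c
/* Illustrative numbers only; the formula is the one used above. */
enum {
	EX_MAX_SGE	= 80,				/* assumed max_num_sge     */
	EX_SGE_SZ	= 16,				/* assumed 64-bit SGE size */
	EX_SGL_SZ	= EX_MAX_SGE * EX_SGE_SZ,		/* 1280 bytes   */
	EX_FRAMES	= (EX_SGL_SZ + 64 - 1) / 64 + 1,	/* 20 + 1 = 21  */
	EX_TOTAL_SZ	= 64 * EX_FRAMES,			/* 1344 bytes   */
};
```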
/**
 * megasas_free_cmds -	Free all the cmds in the free cmd pool
 * @instance:		Adapter soft state
 */
static void megasas_free_cmds(struct megasas_instance *instance)
{
	int i;
	/* First free the MFI frame pool */
	megasas_teardown_frame_pool(instance);

	/* Free all the commands in the cmd_list */
	for (i = 0; i < instance->max_fw_cmds; i++)
		kfree(instance->cmd_list[i]);

	/* Free the cmd_list buffer itself */
	kfree(instance->cmd_list);
	instance->cmd_list = NULL;

	INIT_LIST_HEAD(&instance->cmd_pool);
}

/**
 * megasas_alloc_cmds -	Allocates the command packets
 * @instance:		Adapter soft state
 *
 * Each command that is issued to the FW, whether IO commands from the OS or
 * internal commands like IOCTLs, are wrapped in local data structure called
 * megasas_cmd. The frame embedded in this megasas_cmd is actually issued to
 * the FW.
 *
 * Each frame has a 32-bit field called context (tag). This context is used
 * to get back the megasas_cmd from the frame when a frame gets completed in
 * the ISR. Typically the address of the megasas_cmd itself would be used as
 * the context. But we wanted to keep the differences between 32 and 64 bit
 * systems to the mininum. We always use 32 bit integers for the context. In
 * this driver, the 32 bit values are the indices into an array cmd_list.
 * This array is used only to look up the megasas_cmd given the context. The
 * free commands themselves are maintained in a linked list called cmd_pool.
 */
static int megasas_alloc_cmds(struct megasas_instance *instance)
{
	int i;
	int j;
	u32 max_cmd;
	struct megasas_cmd *cmd;

	max_cmd = instance->max_fw_cmds;

	/*
	 * instance->cmd_list is an array of struct megasas_cmd pointers.
	 * Allocate the dynamic array first and then allocate individual
	 * commands.
	 */
	instance->cmd_list = kmalloc(sizeof(struct megasas_cmd *) * max_cmd,
				     GFP_KERNEL);

	if (!instance->cmd_list) {
		printk(KERN_DEBUG "megasas: out of memory\n");
		return -ENOMEM;
	}

	memset(instance->cmd_list, 0, sizeof(struct megasas_cmd *) * max_cmd);

	for (i = 0; i < max_cmd; i++) {
		instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd),
						GFP_KERNEL);

		if (!instance->cmd_list[i]) {

			for (j = 0; j < i; j++)
				kfree(instance->cmd_list[j]);

			kfree(instance->cmd_list);
			instance->cmd_list = NULL;

			return -ENOMEM;
		}
	}

	/*
	 * Add all the commands to command pool (instance->cmd_pool)
	 */
	for (i = 0; i < max_cmd; i++) {
		cmd = instance->cmd_list[i];
		memset(cmd, 0, sizeof(struct megasas_cmd));
		cmd->index = i;
		cmd->instance = instance;

		list_add_tail(&cmd->list, &instance->cmd_pool);
	}

	/*
	 * Create a frame pool and assign one frame to each cmd
	 */
	if (megasas_create_frame_pool(instance)) {
		printk(KERN_DEBUG "megasas: Error creating frame DMA pool\n");
		megasas_free_cmds(instance);
	}

	return 0;
}
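The comment above is the key design note for command tracking: the frame's 32-bit context holds an array index rather than a pointer, so it stays 32 bits wide on 64-bit machines. A hedged sketch of the completion-side lookup, with hypothetical names:

```c
#include <linux/types.h>

/* Hypothetical illustration of index-as-context lookup. */
struct fake_cmd { u32 index; /* ... */ };

static struct fake_cmd *lookup_by_context(struct fake_cmd **cmd_list,
					  u32 context)
{
	/*
	 * The context written into the frame at init time was cmd->index,
	 * so completion simply indexes the array; no pointer is ever
	 * round-tripped through the firmware.
	 */
	return cmd_list[context];
}
```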
/**
 * megasas_get_controller_info -	Returns FW's controller structure
 * @instance:				Adapter soft state
 * @ctrl_info:				Controller information structure
 *
 * Issues an internal command (DCMD) to get the FW's controller structure.
 * This information is mainly used to find out the maximum IO transfer per
 * command supported by the FW.
 */
static int
megasas_get_ctrl_info(struct megasas_instance *instance,
		      struct megasas_ctrl_info *ctrl_info)
{
	int ret = 0;
	struct megasas_cmd *cmd;
	struct megasas_dcmd_frame *dcmd;
	struct megasas_ctrl_info *ci;
	dma_addr_t ci_h = 0;

	cmd = megasas_get_cmd(instance);

	if (!cmd) {
		printk(KERN_DEBUG "megasas: Failed to get a free cmd\n");
		return -ENOMEM;
	}

	dcmd = &cmd->frame->dcmd;

	ci = pci_alloc_consistent(instance->pdev,
				  sizeof(struct megasas_ctrl_info), &ci_h);

	if (!ci) {
		printk(KERN_DEBUG "Failed to alloc mem for ctrl info\n");
		megasas_return_cmd(instance, cmd);
		return -ENOMEM;
	}

	memset(ci, 0, sizeof(*ci));
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->data_xfer_len = sizeof(struct megasas_ctrl_info);
	dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
	dcmd->sgl.sge32[0].phys_addr = ci_h;
	dcmd->sgl.sge32[0].length = sizeof(struct megasas_ctrl_info);

	if (!megasas_issue_polled(instance, cmd)) {
		ret = 0;
		memcpy(ctrl_info, ci, sizeof(struct megasas_ctrl_info));
	} else {
		ret = -1;
	}

	pci_free_consistent(instance->pdev, sizeof(struct megasas_ctrl_info),
			    ci, ci_h);

	megasas_return_cmd(instance, cmd);
	return ret;
}
/**
 * megasas_init_mfi -	Initializes the FW
 * @instance:		Adapter soft state
 *
 * This is the main function for initializing MFI firmware.
 */
static int megasas_init_mfi(struct megasas_instance *instance)
{
	u32 context_sz;
	u32 reply_q_sz;
	u32 max_sectors_1;
	u32 max_sectors_2;
	struct megasas_register_set __iomem *reg_set;

	struct megasas_cmd *cmd;
	struct megasas_ctrl_info *ctrl_info;

	struct megasas_init_frame *init_frame;
	struct megasas_init_queue_info *initq_info;
	dma_addr_t init_frame_h;
	dma_addr_t initq_info_h;

	/*
	 * Map the message registers
	 */
	instance->base_addr = pci_resource_start(instance->pdev, 0);

	if (pci_request_regions(instance->pdev, "megasas: LSI Logic")) {
		printk(KERN_DEBUG "megasas: IO memory region busy!\n");
		return -EBUSY;
	}

	instance->reg_set = ioremap_nocache(instance->base_addr, 8192);

	if (!instance->reg_set) {
		printk(KERN_DEBUG "megasas: Failed to map IO mem\n");
		goto fail_ioremap;
	}

	reg_set = instance->reg_set;

	/*
	 * We expect the FW state to be READY
	 */
	if (megasas_transition_to_ready(instance->reg_set))
		goto fail_ready_state;

	/*
	 * Get various operational parameters from status register
	 */
	instance->max_fw_cmds = readl(&reg_set->outbound_msg_0) & 0x00FFFF;
	instance->max_num_sge = (readl(&reg_set->outbound_msg_0) & 0xFF0000) >>
	    0x10;

	/*
	 * Create a pool of commands
	 */
	if (megasas_alloc_cmds(instance))
		goto fail_alloc_cmds;

	/*
	 * Allocate memory for reply queue. Length of reply queue should
	 * be _one_ more than the maximum commands handled by the firmware.
	 *
	 * Note: When FW completes commands, it places corresponding contex
	 * values in this circular reply queue. This circular queue is a fairly
	 * typical producer-consumer queue. FW is the producer (of completed
	 * commands) and the driver is the consumer.
	 */
	context_sz = sizeof(u32);
	reply_q_sz = context_sz * (instance->max_fw_cmds + 1);

	instance->reply_queue = pci_alloc_consistent(instance->pdev,
						     reply_q_sz,
						     &instance->reply_queue_h);

	if (!instance->reply_queue) {
		printk(KERN_DEBUG "megasas: Out of DMA mem for reply queue\n");
		goto fail_reply_queue;
	}

	/*
	 * Prepare a init frame. Note the init frame points to queue info
	 * structure. Each frame has SGL allocated after first 64 bytes. For
	 * this frame - since we don't need any SGL - we use SGL's space as
	 * queue info structure
	 *
	 * We will not get a NULL command below. We just created the pool.
	 */
	cmd = megasas_get_cmd(instance);

	init_frame = (struct megasas_init_frame *)cmd->frame;
	initq_info = (struct megasas_init_queue_info *)
	    ((unsigned long)init_frame + 64);

	init_frame_h = cmd->frame_phys_addr;
	initq_info_h = init_frame_h + 64;

	memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
	memset(initq_info, 0, sizeof(struct megasas_init_queue_info));

	initq_info->reply_queue_entries = instance->max_fw_cmds + 1;
	initq_info->reply_queue_start_phys_addr_lo = instance->reply_queue_h;

	initq_info->producer_index_phys_addr_lo = instance->producer_h;
	initq_info->consumer_index_phys_addr_lo = instance->consumer_h;

	init_frame->cmd = MFI_CMD_INIT;
	init_frame->cmd_status = 0xFF;
	init_frame->queue_info_new_phys_addr_lo = initq_info_h;

	init_frame->data_xfer_len = sizeof(struct megasas_init_queue_info);

	/*
	 * Issue the init frame in polled mode
	 */
	if (megasas_issue_polled(instance, cmd)) {
		printk(KERN_DEBUG "megasas: Failed to init firmware\n");
		goto fail_fw_init;
	}

	megasas_return_cmd(instance, cmd);

	ctrl_info = kmalloc(sizeof(struct megasas_ctrl_info), GFP_KERNEL);

	/*
	 * Compute the max allowed sectors per IO: The controller info has two
	 * limits on max sectors. Driver should use the minimum of these two.
	 *
	 * 1 << stripe_sz_ops.min = max sectors per strip
	 *
	 * Note that older firmwares ( < FW ver 30) didn't report information
	 * to calculate max_sectors_1. So the number ended up as zero always.
	 */
	if (ctrl_info && !megasas_get_ctrl_info(instance, ctrl_info)) {

		max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
		    ctrl_info->max_strips_per_io;
		max_sectors_2 = ctrl_info->max_request_size;

		instance->max_sectors_per_req = (max_sectors_1 < max_sectors_2)
		    ? max_sectors_1 : max_sectors_2;
	} else
		instance->max_sectors_per_req = instance->max_num_sge *
		    PAGE_SIZE / 512;

	kfree(ctrl_info);

	return 0;

      fail_fw_init:
	megasas_return_cmd(instance, cmd);

	pci_free_consistent(instance->pdev, reply_q_sz,
			    instance->reply_queue, instance->reply_queue_h);
      fail_reply_queue:
	megasas_free_cmds(instance);

      fail_alloc_cmds:
      fail_ready_state:
	iounmap(instance->reg_set);

      fail_ioremap:
	pci_release_regions(instance->pdev);

	return -EINVAL;
}

/**
 * megasas_release_mfi -	Reverses the FW initialization
 * @intance:			Adapter soft state
 */
static void megasas_release_mfi(struct megasas_instance *instance)
{
	u32 reply_q_sz = sizeof(u32) * (instance->max_fw_cmds + 1);

	pci_free_consistent(instance->pdev, reply_q_sz,
			    instance->reply_queue, instance->reply_queue_h);

	megasas_free_cmds(instance);

	iounmap(instance->reg_set);

	pci_release_regions(instance->pdev);
}
/**
* megasas_get_seq_num - Gets latest event sequence numbers
* @instance: Adapter soft state
* @eli: FW event log sequence numbers information
*
* FW maintains a log of all events in a non-volatile area. Upper layers would
* usually find out the latest sequence number of the events, the seq number at
* the boot etc. They would "read" all the events below the latest seq number
* by issuing a direct fw cmd (DCMD). For the future events (beyond latest seq
 * number), they would subscribe to AEN (asynchronous event notification) and
* wait for the events to happen.
*/
static int
megasas_get_seq_num(struct megasas_instance *instance,
		    struct megasas_evt_log_info *eli)
{
	struct megasas_cmd *cmd;
	struct megasas_dcmd_frame *dcmd;
	struct megasas_evt_log_info *el_info;
	dma_addr_t el_info_h = 0;

	cmd = megasas_get_cmd(instance);

	if (!cmd) {
		return -ENOMEM;
	}

	dcmd = &cmd->frame->dcmd;

	el_info = pci_alloc_consistent(instance->pdev,
				       sizeof(struct megasas_evt_log_info),
				       &el_info_h);

	if (!el_info) {
		megasas_return_cmd(instance, cmd);
		return -ENOMEM;
	}

	memset(el_info, 0, sizeof(*el_info));
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->data_xfer_len = sizeof(struct megasas_evt_log_info);
	dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
	dcmd->sgl.sge32[0].phys_addr = el_info_h;
	dcmd->sgl.sge32[0].length = sizeof(struct megasas_evt_log_info);

	megasas_issue_blocked_cmd(instance, cmd);

	/*
	 * Copy the data back into caller's buffer
	 */
	memcpy(eli, el_info, sizeof(struct megasas_evt_log_info));

	pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
			    el_info, el_info_h);

	megasas_return_cmd(instance, cmd);

	return 0;
}
/**
* megasas_register_aen - Registers for asynchronous event notification
* @instance: Adapter soft state
* @seq_num: The starting sequence number
* @class_locale: Class of the event
*
* This function subscribes for AEN for events beyond the @seq_num. It requests
* to be notified if and only if the event is of type @class_locale
*/
static int
megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
		     u32 class_locale_word)
{
	int ret_val;
	struct megasas_cmd *cmd;
	struct megasas_dcmd_frame *dcmd;
	union megasas_evt_class_locale curr_aen;
	union megasas_evt_class_locale prev_aen;

	/*
	 * If there is an AEN pending already (aen_cmd), check if the
	 * class_locale of that pending AEN is inclusive of the new
	 * AEN request we currently have. If it is, then we don't have
	 * to do anything. In other words, whichever events the current
	 * AEN request is subscribing to, have already been subscribed
	 * to.
	 *
	 * If the old_cmd is _not_ inclusive, then we have to abort
	 * that command, form a class_locale that is a superset of both
	 * old and current and re-issue to the FW
	 */
	curr_aen.word = class_locale_word;

	if (instance->aen_cmd) {

		prev_aen.word = instance->aen_cmd->frame->dcmd.mbox.w[1];

		/*
		 * A class whose enum value is smaller is inclusive of all
		 * higher values. If a PROGRESS (= -1) was previously
		 * registered, then new registration requests for higher
		 * classes need not be sent to FW. They are automatically
		 * included.
		 *
		 * Locale numbers don't have such hierarchy. They are bitmap
		 * values
		 */
		if ((prev_aen.members.class <= curr_aen.members.class) &&
		    !((prev_aen.members.locale & curr_aen.members.locale) ^
		      curr_aen.members.locale)) {
			/*
			 * Previously issued event registration includes
			 * current request. Nothing to do.
			 */
			return 0;
		} else {
			curr_aen.members.locale |= prev_aen.members.locale;

			if (prev_aen.members.class < curr_aen.members.class)
				curr_aen.members.class = prev_aen.members.class;

			instance->aen_cmd->abort_aen = 1;
			ret_val = megasas_issue_blocked_abort_cmd(instance,
								  instance->
								  aen_cmd);

			if (ret_val) {
				printk(KERN_DEBUG "megasas: Failed to abort "
				       "previous AEN command\n");
				return ret_val;
			}
		}
	}

	cmd = megasas_get_cmd(instance);

	if (!cmd)
		return -ENOMEM;

	dcmd = &cmd->frame->dcmd;

	memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail));

	/*
	 * Prepare DCMD for aen registration
	 */
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->data_xfer_len = sizeof(struct megasas_evt_detail);
	dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
	dcmd->mbox.w[0] = seq_num;
	dcmd->mbox.w[1] = curr_aen.word;
	dcmd->sgl.sge32[0].phys_addr = (u32) instance->evt_detail_h;
	dcmd->sgl.sge32[0].length = sizeof(struct megasas_evt_detail);

	/*
	 * Store reference to the cmd used to register for AEN. When an
	 * application wants us to register for AEN, we have to abort this
	 * cmd and re-register with a new EVENT LOCALE supplied by that app
	 */
	instance->aen_cmd = cmd;

	/*
	 * Issue the aen registration frame
	 */
	writel(cmd->frame_phys_addr >> 3,
	       &instance->reg_set->inbound_queue_port);

	return 0;
}
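/*
 * Hypothetical helper showing just the merge rule applied above: locales are
 * a bitmap and are OR-ed together, while the numerically smaller class wins
 * because it already includes the higher ones. It is not part of the driver.
 */
static u32 megasas_merge_class_locale(union megasas_evt_class_locale prev,
				      union megasas_evt_class_locale curr)
{
	curr.members.locale |= prev.members.locale;

	if (prev.members.class < curr.members.class)
		curr.members.class = prev.members.class;

	return curr.word;
}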
/**
* megasas_start_aen - Subscribes to AEN during driver load time
* @instance: Adapter soft state
*/
static int megasas_start_aen(struct megasas_instance *instance)
{
	struct megasas_evt_log_info eli;
	union megasas_evt_class_locale class_locale;

	/*
	 * Get the latest sequence number from FW
	 */
	memset(&eli, 0, sizeof(eli));

	if (megasas_get_seq_num(instance, &eli))
		return -1;

	/*
	 * Register AEN with FW for latest sequence number plus 1
	 */
	class_locale.members.reserved = 0;
	class_locale.members.locale = MR_EVT_LOCALE_ALL;
	class_locale.members.class = MR_EVT_CLASS_DEBUG;

	return megasas_register_aen(instance, eli.newest_seq_num + 1,
				    class_locale.word);
}
/**
* megasas_io_attach - Attaches this driver to SCSI mid-layer
* @instance: Adapter soft state
*/
static int megasas_io_attach(struct megasas_instance *instance)
{
	struct Scsi_Host *host = instance->host;

	/*
	 * Export parameters required by SCSI mid-layer
	 */
	host->irq = instance->pdev->irq;
	host->unique_id = instance->unique_id;
	host->can_queue = instance->max_fw_cmds - MEGASAS_INT_CMDS;
	host->this_id = instance->init_id;
	host->sg_tablesize = instance->max_num_sge;
	host->max_sectors = instance->max_sectors_per_req;
	host->cmd_per_lun = 128;
	host->max_channel = MEGASAS_MAX_CHANNELS - 1;
	host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL;
	host->max_lun = MEGASAS_MAX_LUN;

	/*
	 * Notify the mid-layer about the new controller
	 */
	if (scsi_add_host(host, &instance->pdev->dev)) {
		printk(KERN_DEBUG "megasas: scsi_add_host failed\n");
		return -ENODEV;
	}

	/*
	 * Trigger SCSI to scan our drives
	 */
	scsi_scan_host(host);
	return 0;
}
/**
* megasas_probe_one - PCI hotplug entry point
* @pdev: PCI device structure
* @id: PCI ids of supported hotplugged adapter
*/
static int __devinit
megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rval;
	struct Scsi_Host *host;
	struct megasas_instance *instance;

	/*
	 * Announce PCI information
	 */
	printk(KERN_INFO "megasas: %#4.04x:%#4.04x:%#4.04x:%#4.04x: ",
	       pdev->vendor, pdev->device, pdev->subsystem_vendor,
	       pdev->subsystem_device);

	printk("bus %d:slot %d:func %d\n",
	       pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));

	/*
	 * PCI prepping: enable device, set bus mastering and dma mask
	 */
	rval = pci_enable_device(pdev);

	if (rval) {
		return rval;
	}

	pci_set_master(pdev);

	/*
	 * All our controllers are capable of performing 64-bit DMA
	 */
	if (IS_DMA64) {
		if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) != 0) {

			if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0)
				goto fail_set_dma_mask;
		}
	} else {
		if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0)
			goto fail_set_dma_mask;
	}

	host = scsi_host_alloc(&megasas_template,
			       sizeof(struct megasas_instance));

	if (!host) {
		printk(KERN_DEBUG "megasas: scsi_host_alloc failed\n");
		goto fail_alloc_instance;
	}

	instance = (struct megasas_instance *)host->hostdata;
	memset(instance, 0, sizeof(*instance));

	instance->producer = pci_alloc_consistent(pdev, sizeof(u32),
						  &instance->producer_h);
	instance->consumer = pci_alloc_consistent(pdev, sizeof(u32),
						  &instance->consumer_h);

	if (!instance->producer || !instance->consumer) {
		printk(KERN_DEBUG "megasas: Failed to allocate memory for "
		       "producer, consumer\n");
		goto fail_alloc_dma_buf;
	}

	*instance->producer = 0;
	*instance->consumer = 0;

	instance->evt_detail = pci_alloc_consistent(pdev,
						    sizeof(struct
							   megasas_evt_detail),
						    &instance->evt_detail_h);

	if (!instance->evt_detail) {
		printk(KERN_DEBUG "megasas: Failed to allocate memory for "
		       "event detail structure\n");
		goto fail_alloc_dma_buf;
	}

	/*
	 * Initialize locks and queues
	 */
	INIT_LIST_HEAD(&instance->cmd_pool);

	init_waitqueue_head(&instance->int_cmd_wait_q);
	init_waitqueue_head(&instance->abort_cmd_wait_q);

	spin_lock_init(&instance->cmd_pool_lock);
	spin_lock_init(&instance->instance_lock);

	sema_init(&instance->aen_mutex, 1);
	sema_init(&instance->ioctl_sem, MEGASAS_INT_CMDS);

	/*
	 * Initialize PCI related and misc parameters
	 */
	instance->pdev = pdev;
	instance->host = host;
	instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
	instance->init_id = MEGASAS_DEFAULT_INIT_ID;

	/*
	 * Initialize MFI Firmware
	 */
	if (megasas_init_mfi(instance))
		goto fail_init_mfi;

	/*
	 * Register IRQ
	 */
	if (request_irq(pdev->irq, megasas_isr, SA_SHIRQ, "megasas", instance)) {
		printk(KERN_DEBUG "megasas: Failed to register IRQ\n");
		goto fail_irq;
	}

	megasas_enable_intr(instance->reg_set);

	/*
	 * Store instance in PCI softstate
	 */
	pci_set_drvdata(pdev, instance);

	/*
	 * Add this controller to megasas_mgmt_info structure so that it
	 * can be exported to management applications
	 */
	megasas_mgmt_info.count++;
	megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance;
	megasas_mgmt_info.max_index++;

	/*
	 * Initiate AEN (Asynchronous Event Notification)
	 */
	if (megasas_start_aen(instance)) {
		printk(KERN_DEBUG "megasas: start aen failed\n");
		goto fail_start_aen;
	}

	/*
	 * Register with SCSI mid-layer
	 */
	if (megasas_io_attach(instance))
		goto fail_io_attach;

	return 0;

      fail_start_aen:
      fail_io_attach:
	megasas_mgmt_info.count--;
	megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
	megasas_mgmt_info.max_index--;

	pci_set_drvdata(pdev, NULL);
	megasas_disable_intr(instance->reg_set);
	free_irq(instance->pdev->irq, instance);

	megasas_release_mfi(instance);

      fail_irq:
      fail_init_mfi:
      fail_alloc_dma_buf:
	if (instance->evt_detail)
		pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
				    instance->evt_detail,
				    instance->evt_detail_h);

	if (instance->producer)
		pci_free_consistent(pdev, sizeof(u32), instance->producer,
				    instance->producer_h);
	if (instance->consumer)
		pci_free_consistent(pdev, sizeof(u32), instance->consumer,
				    instance->consumer_h);
	scsi_host_put(host);

      fail_alloc_instance:
      fail_set_dma_mask:
	pci_disable_device(pdev);

	return -ENODEV;
}
/**
* megasas_flush_cache - Requests FW to flush all its caches
* @instance: Adapter soft state
*/
static void megasas_flush_cache(struct megasas_instance *instance)
{
	struct megasas_cmd *cmd;
	struct megasas_dcmd_frame *dcmd;

	cmd = megasas_get_cmd(instance);

	if (!cmd)
		return;

	dcmd = &cmd->frame->dcmd;

	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 0;
	dcmd->flags = MFI_FRAME_DIR_NONE;
	dcmd->timeout = 0;
	dcmd->data_xfer_len = 0;
	dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
	dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;

	megasas_issue_blocked_cmd(instance, cmd);

	megasas_return_cmd(instance, cmd);

	return;
}
/**
* megasas_shutdown_controller - Instructs FW to shutdown the controller
* @instance: Adapter soft state
*/
static void megasas_shutdown_controller(struct megasas_instance *instance)
{
	struct megasas_cmd *cmd;
	struct megasas_dcmd_frame *dcmd;

	cmd = megasas_get_cmd(instance);

	if (!cmd)
		return;

	if (instance->aen_cmd)
		megasas_issue_blocked_abort_cmd(instance, instance->aen_cmd);

	dcmd = &cmd->frame->dcmd;

	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 0;
	dcmd->flags = MFI_FRAME_DIR_NONE;
	dcmd->timeout = 0;
	dcmd->data_xfer_len = 0;
	dcmd->opcode = MR_DCMD_CTRL_SHUTDOWN;

	megasas_issue_blocked_cmd(instance, cmd);

	megasas_return_cmd(instance, cmd);

	return;
}
/**
* megasas_detach_one - PCI hot"un"plug entry point
* @pdev: PCI device structure
*/
static void megasas_detach_one(struct pci_dev *pdev)
{
	int i;
	struct Scsi_Host *host;
	struct megasas_instance *instance;

	instance = pci_get_drvdata(pdev);
	host = instance->host;

	scsi_remove_host(instance->host);
	megasas_flush_cache(instance);
	megasas_shutdown_controller(instance);

	/*
	 * Take the instance off the instance array. Note that we will not
	 * decrement the max_index. We let this array be a sparse array
	 */
	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
		if (megasas_mgmt_info.instance[i] == instance) {
			megasas_mgmt_info.count--;
			megasas_mgmt_info.instance[i] = NULL;

			break;
		}
	}

	pci_set_drvdata(instance->pdev, NULL);

	megasas_disable_intr(instance->reg_set);

	free_irq(instance->pdev->irq, instance);

	megasas_release_mfi(instance);

	pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
			    instance->evt_detail, instance->evt_detail_h);

	pci_free_consistent(pdev, sizeof(u32), instance->producer,
			    instance->producer_h);

	pci_free_consistent(pdev, sizeof(u32), instance->consumer,
			    instance->consumer_h);

	scsi_host_put(host);

	pci_set_drvdata(pdev, NULL);

	pci_disable_device(pdev);

	return;
}
/**
* megasas_shutdown - Shutdown entry point
 * @pdev:		PCI device structure
 */
static void megasas_shutdown(struct pci_dev *pdev)
{
	struct megasas_instance *instance = pci_get_drvdata(pdev);
	megasas_flush_cache(instance);
}

/**
 * megasas_mgmt_open -	char node "open" entry point
 */
static int megasas_mgmt_open(struct inode *inode, struct file *filep)
{
	/*
	 * Allow only those users with admin rights
	 */
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	return 0;
}
/**
* megasas_mgmt_release - char node "release" entry point
*/
static int megasas_mgmt_release(struct inode *inode, struct file *filep)
{
	filep->private_data = NULL;
	fasync_helper(-1, filep, 0, &megasas_async_queue);

	return 0;
}

/**
 * megasas_mgmt_fasync -	Async notifier registration from applications
 *
 * This function adds the calling process to a driver global queue. When an
 * event occurs, SIGIO will be sent to all processes in this queue.
 */
static int megasas_mgmt_fasync(int fd, struct file *filep, int mode)
{
	int rc;

	down(&megasas_async_queue_mutex);

	rc = fasync_helper(fd, filep, mode, &megasas_async_queue);

	up(&megasas_async_queue_mutex);

	if (rc >= 0) {
		/* For sanity check when we get ioctl */
		filep->private_data = filep;
		return 0;
	}

	printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc);

	return rc;
}
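/*
 * Minimal user-space sketch of the registration this hook enables: ask for
 * SIGIO on the management node, then re-issue MEGASAS_IOC_GET_AEN whenever
 * the signal arrives. The device path is an assumption -- the major number
 * is allocated dynamically, so the node is created by the administrator.
 */
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static volatile int aen_arrived;

static void sigio_handler(int sig)
{
	aen_arrived = 1;	/* main loop re-arms with MEGASAS_IOC_GET_AEN */
}

int main(void)
{
	int fd = open("/dev/megaraid_sas_ioctl", O_RDONLY);

	if (fd < 0)
		return 1;

	signal(SIGIO, sigio_handler);
	fcntl(fd, F_SETOWN, getpid());			 /* route SIGIO here */
	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | FASYNC); /* calls ->fasync() */

	for (;;)
		pause();
}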
/**
* megasas_mgmt_fw_ioctl - Issues management ioctls to FW
* @instance: Adapter soft state
* @argp: User's ioctl packet
*/
static int
megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
		      struct megasas_iocpacket __user * user_ioc,
		      struct megasas_iocpacket *ioc)
{
	struct megasas_sge32 *kern_sge32;
	struct megasas_cmd *cmd;
	void *kbuff_arr[MAX_IOCTL_SGE];
	dma_addr_t buf_handle = 0;
	int error = 0, i;
	void *sense = NULL;
	dma_addr_t sense_handle;
	u32 *sense_ptr;

	memset(kbuff_arr, 0, sizeof(kbuff_arr));

	if (ioc->sge_count > MAX_IOCTL_SGE) {
		printk(KERN_DEBUG "megasas: SGE count [%d] > max limit [%d]\n",
		       ioc->sge_count, MAX_IOCTL_SGE);
		return -EINVAL;
	}

	cmd = megasas_get_cmd(instance);
	if (!cmd) {
		printk(KERN_DEBUG "megasas: Failed to get a cmd packet\n");
		return -ENOMEM;
	}

	/*
	 * User's IOCTL packet has 2 frames (maximum). Copy those two
	 * frames into our cmd's frames. cmd->frame's context will get
	 * overwritten when we copy from user's frames. So set that value
	 * alone separately
	 */
	memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
	cmd->frame->hdr.context = cmd->index;

	/*
	 * The management interface between applications and the fw uses
	 * MFI frames. E.g, RAID configuration changes, LD property changes
	 * etc are accomplished through different kinds of MFI frames. The
	 * driver needs to care only about substituting user buffers with
	 * kernel buffers in SGLs. The location of SGL is embedded in the
	 * struct iocpacket itself.
	 */
	kern_sge32 = (struct megasas_sge32 *)
	    ((unsigned long)cmd->frame + ioc->sgl_off);

	/*
	 * For each user buffer, create a mirror buffer and copy in
	 */
	for (i = 0; i < ioc->sge_count; i++) {
		kbuff_arr[i] = pci_alloc_consistent(instance->pdev,
						    ioc->sgl[i].iov_len,
						    &buf_handle);
		if (!kbuff_arr[i]) {
			printk(KERN_DEBUG "megasas: Failed to alloc "
			       "kernel SGL buffer for IOCTL\n");
			error = -ENOMEM;
			goto out;
		}

		/*
		 * We don't change the dma_coherent_mask, so
		 * pci_alloc_consistent only returns 32bit addresses
		 */
		kern_sge32[i].phys_addr = (u32) buf_handle;
		kern_sge32[i].length = ioc->sgl[i].iov_len;

		/*
		 * We created a kernel buffer corresponding to the
		 * user buffer. Now copy in from the user buffer
		 */
		if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base,
				   (u32) (ioc->sgl[i].iov_len))) {
			error = -EFAULT;
			goto out;
		}
	}

	if (ioc->sense_len) {
		sense = pci_alloc_consistent(instance->pdev, ioc->sense_len,
					     &sense_handle);
		if (!sense) {
			error = -ENOMEM;
			goto out;
		}

		sense_ptr =
		    (u32 *) ((unsigned long)cmd->frame + ioc->sense_off);
		*sense_ptr = sense_handle;
	}

	/*
	 * Set the sync_cmd flag so that the ISR knows not to complete this
	 * cmd to the SCSI mid-layer
	 */
	cmd->sync_cmd = 1;
	megasas_issue_blocked_cmd(instance, cmd);
	cmd->sync_cmd = 0;

	/*
	 * copy out the kernel buffers to user buffers
	 */
	for (i = 0; i < ioc->sge_count; i++) {
		if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i],
				 ioc->sgl[i].iov_len)) {
			error = -EFAULT;
			goto out;
		}
	}

	/*
	 * copy out the sense
	 */
	if (ioc->sense_len) {
		/*
		 * sense_ptr points to the location that has the user
		 * sense buffer address
		 */
		sense_ptr = (u32 *) ((unsigned long)ioc->frame.raw +
				     ioc->sense_off);

		if (copy_to_user((void __user *)((unsigned long)(*sense_ptr)),
				 sense, ioc->sense_len)) {
			error = -EFAULT;
			goto out;
		}
	}

	/*
	 * copy the status codes returned by the fw
	 */
	if (copy_to_user(&user_ioc->frame.hdr.cmd_status,
			 &cmd->frame->hdr.cmd_status, sizeof(u8))) {
		printk(KERN_DEBUG "megasas: Error copying out cmd_status\n");
		error = -EFAULT;
	}

      out:
	if (sense) {
		pci_free_consistent(instance->pdev, ioc->sense_len,
				    sense, sense_handle);
	}

	for (i = 0; i < ioc->sge_count && kbuff_arr[i]; i++) {
		pci_free_consistent(instance->pdev,
				    kern_sge32[i].length,
				    kbuff_arr[i], kern_sge32[i].phys_addr);
	}

	megasas_return_cmd(instance, cmd);

	return error;
}
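/*
 * User-space sketch of what megasas_mgmt_fw_ioctl() expects: a DCMD frame
 * built inside struct megasas_iocpacket with one SGL entry pointing at a
 * local buffer. It assumes the structures and constants from megaraid_sas.h
 * are visible to user space; the opcode is only an illustration and error
 * handling is omitted.
 */
#include <stddef.h>
#include <string.h>
#include <sys/ioctl.h>

static int example_get_ctrl_info(int fd, u16 host_no, void *buf, u32 len)
{
	struct megasas_iocpacket ioc;
	struct megasas_dcmd_frame *dcmd =
	    (struct megasas_dcmd_frame *)ioc.frame.raw;

	memset(&ioc, 0, sizeof(ioc));
	ioc.host_no = host_no;
	ioc.sgl_off = offsetof(struct megasas_dcmd_frame, sgl);
	ioc.sge_count = 1;
	ioc.sgl[0].iov_base = buf;
	ioc.sgl[0].iov_len = len;

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->data_xfer_len = len;
	dcmd->opcode = MR_DCMD_CTRL_GET_INFO;

	/* the driver substitutes kernel buffers for the ioc.sgl[] entries */
	return ioctl(fd, MEGASAS_IOC_FIRMWARE, &ioc);
}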
static struct megasas_instance *megasas_lookup_instance(u16 host_no)
{
	int i;

	for (i = 0; i < megasas_mgmt_info.max_index; i++) {

		if ((megasas_mgmt_info.instance[i]) &&
		    (megasas_mgmt_info.instance[i]->host->host_no == host_no))
			return megasas_mgmt_info.instance[i];
	}

	return NULL;
}

static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
{
	struct megasas_iocpacket __user *user_ioc =
	    (struct megasas_iocpacket __user *)arg;
	struct megasas_iocpacket *ioc;
	struct megasas_instance *instance;
	int error;

	ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
	if (!ioc)
		return -ENOMEM;

	if (copy_from_user(ioc, user_ioc, sizeof(*ioc))) {
		error = -EFAULT;
		goto out_kfree_ioc;
	}

	instance = megasas_lookup_instance(ioc->host_no);
	if (!instance) {
		error = -ENODEV;
		goto out_kfree_ioc;
	}

	/*
	 * We will allow only MEGASAS_INT_CMDS number of parallel ioctl cmds
	 */
	if (down_interruptible(&instance->ioctl_sem)) {
		error = -ERESTARTSYS;
		goto out_kfree_ioc;
	}
	error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
	up(&instance->ioctl_sem);

      out_kfree_ioc:
	kfree(ioc);
	return error;
}
static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
{
	struct megasas_instance *instance;
	struct megasas_aen aen;
	int error;

	if (file->private_data != file) {
		printk(KERN_DEBUG "megasas: fasync_helper was not "
		       "called first\n");
		return -EINVAL;
	}

	if (copy_from_user(&aen, (void __user *)arg, sizeof(aen)))
		return -EFAULT;

	instance = megasas_lookup_instance(aen.host_no);

	if (!instance)
		return -ENODEV;

	down(&instance->aen_mutex);
	error = megasas_register_aen(instance, aen.seq_num,
				     aen.class_locale_word);
	up(&instance->aen_mutex);

	return error;
}

/**
 * megasas_mgmt_ioctl -	char node ioctl entry point
 */
static long
megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case MEGASAS_IOC_FIRMWARE:
		return megasas_mgmt_ioctl_fw(file, arg);

	case MEGASAS_IOC_GET_AEN:
		return megasas_mgmt_ioctl_aen(file, arg);
	}

	return -ENOTTY;
}
#ifdef CONFIG_COMPAT
static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
{
	struct compat_megasas_iocpacket __user *cioc =
	    (struct compat_megasas_iocpacket __user *)arg;
	struct megasas_iocpacket __user *ioc =
	    compat_alloc_user_space(sizeof(struct megasas_iocpacket));
	int i;
	int error = 0;

	clear_user(ioc, sizeof(*ioc));

	if (copy_in_user(&ioc->host_no, &cioc->host_no, sizeof(u16)) ||
	    copy_in_user(&ioc->sgl_off, &cioc->sgl_off, sizeof(u32)) ||
	    copy_in_user(&ioc->sense_off, &cioc->sense_off, sizeof(u32)) ||
	    copy_in_user(&ioc->sense_len, &cioc->sense_len, sizeof(u32)) ||
	    copy_in_user(ioc->frame.raw, cioc->frame.raw, 128) ||
	    copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32)))
		return -EFAULT;

	for (i = 0; i < MAX_IOCTL_SGE; i++) {
		compat_uptr_t ptr;

		if (get_user(ptr, &cioc->sgl[i].iov_base) ||
		    put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) ||
		    copy_in_user(&ioc->sgl[i].iov_len,
				 &cioc->sgl[i].iov_len, sizeof(compat_size_t)))
			return -EFAULT;
	}

	error = megasas_mgmt_ioctl_fw(file, (unsigned long)ioc);

	if (copy_in_user(&cioc->frame.hdr.cmd_status,
			 &ioc->frame.hdr.cmd_status, sizeof(u8))) {
		printk(KERN_DEBUG "megasas: error copy_in_user cmd_status\n");
		return -EFAULT;
	}
	return error;
}

static long
megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	switch (cmd) {
	case MEGASAS_IOC_FIRMWARE:{
			return megasas_mgmt_compat_ioctl_fw(file, arg);
		}
	case MEGASAS_IOC_GET_AEN:
		return megasas_mgmt_ioctl_aen(file, arg);
	}

	return -ENOTTY;
}
#endif
/*
* File operations structure for management interface
*/
static struct file_operations megasas_mgmt_fops = {
	.owner = THIS_MODULE,
	.open = megasas_mgmt_open,
	.release = megasas_mgmt_release,
	.fasync = megasas_mgmt_fasync,
	.unlocked_ioctl = megasas_mgmt_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = megasas_mgmt_compat_ioctl,
#endif
};

/*
 * PCI hotplug support registration structure
 */
static struct pci_driver megasas_pci_driver = {

	.name = "megaraid_sas",
	.id_table = megasas_pci_table,
	.probe = megasas_probe_one,
	.remove = __devexit_p(megasas_detach_one),
	.shutdown = megasas_shutdown,
};
/*
* Sysfs driver attributes
*/
static ssize_t megasas_sysfs_show_version(struct device_driver *dd, char *buf)
{
	return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n",
			MEGASAS_VERSION);
}

static DRIVER_ATTR(version, S_IRUGO, megasas_sysfs_show_version, NULL);

static ssize_t
megasas_sysfs_show_release_date(struct device_driver *dd, char *buf)
{
	return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n",
			MEGASAS_RELDATE);
}

static DRIVER_ATTR(release_date, S_IRUGO, megasas_sysfs_show_release_date,
		   NULL);
/**
* megasas_init - Driver load entry point
*/
static int __init megasas_init(void)
{
	int rval;

	/*
	 * Announce driver version and other information
	 */
	printk(KERN_INFO "megasas: %s %s\n", MEGASAS_VERSION,
	       MEGASAS_EXT_VERSION);

	memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));

	/*
	 * Register character device node
	 */
	rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops);

	if (rval < 0) {
		printk(KERN_DEBUG "megasas: failed to open device node\n");
		return rval;
	}

	megasas_mgmt_majorno = rval;

	/*
	 * Register ourselves as PCI hotplug module
	 */
	rval = pci_module_init(&megasas_pci_driver);

	if (rval) {
		printk(KERN_DEBUG "megasas: PCI hotplug registration failed\n");
		unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
	}

	driver_create_file(&megasas_pci_driver.driver, &driver_attr_version);
	driver_create_file(&megasas_pci_driver.driver,
			   &driver_attr_release_date);

	return rval;
}

/**
 * megasas_exit - Driver unload entry point
 */
static void __exit megasas_exit(void)
{
	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_release_date);

	pci_unregister_driver(&megasas_pci_driver);
	unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
}

module_init(megasas_init);
module_exit(megasas_exit);
drivers/scsi/megaraid/megaraid_sas.h
0 → 100644
View file @ 7d6322b4
/*
*
* Linux MegaRAID driver for SAS based RAID controllers
*
* Copyright (c) 2003-2005 LSI Logic Corporation.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* FILE : megaraid_sas.h
*/
#ifndef LSI_MEGARAID_SAS_H
#define LSI_MEGARAID_SAS_H
/**
* MegaRAID SAS Driver meta data
*/
#define MEGASAS_VERSION "00.00.02.00-rc4"
#define MEGASAS_RELDATE "Sep 16, 2005"
#define MEGASAS_EXT_VERSION "Fri Sep 16 12:37:08 EDT 2005"
/*
* =====================================
* MegaRAID SAS MFI firmware definitions
* =====================================
*/
/*
* MFI stands for MegaRAID SAS FW Interface. This is just a moniker for
* protocol between the software and firmware. Commands are issued using
* "message frames"
*/
/**
* FW posts its state in upper 4 bits of outbound_msg_0 register
*/
#define MFI_STATE_MASK 0xF0000000
#define MFI_STATE_UNDEFINED 0x00000000
#define MFI_STATE_BB_INIT 0x10000000
#define MFI_STATE_FW_INIT 0x40000000
#define MFI_STATE_WAIT_HANDSHAKE 0x60000000
#define MFI_STATE_FW_INIT_2 0x70000000
#define MFI_STATE_DEVICE_SCAN 0x80000000
#define MFI_STATE_FLUSH_CACHE 0xA0000000
#define MFI_STATE_READY 0xB0000000
#define MFI_STATE_OPERATIONAL 0xC0000000
#define MFI_STATE_FAULT 0xF0000000
#define MEGAMFI_FRAME_SIZE 64
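/*
 * Sketch of how a driver waits for the firmware to reach READY by polling
 * the state nibble in outbound_msg_0. The driver's real ready-transition
 * helper (earlier in megaraid_sas.c, not part of this excerpt) also handles
 * the intermediate states; this only shows the masking idea.
 */
static int example_wait_fw_ready(struct megasas_register_set __iomem *reg_set)
{
	int i;
	u32 state;

	for (i = 0; i < 10; i++) {	/* cf. MFI_POLL_TIMEOUT_SECS below */
		state = readl(&reg_set->outbound_msg_0) & MFI_STATE_MASK;

		if (state == MFI_STATE_READY || state == MFI_STATE_OPERATIONAL)
			return 0;
		if (state == MFI_STATE_FAULT)
			return -ENODEV;

		ssleep(1);
	}

	return -ETIME;
}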
/**
* During FW init, clear pending cmds & reset state using inbound_msg_0
*
* ABORT : Abort all pending cmds
* READY : Move from OPERATIONAL to READY state; discard queue info
* MFIMODE : Discard (possible) low MFA posted in 64-bit mode (??)
* CLR_HANDSHAKE: FW is waiting for HANDSHAKE from BIOS or Driver
*/
#define MFI_INIT_ABORT 0x00000000
#define MFI_INIT_READY 0x00000002
#define MFI_INIT_MFIMODE 0x00000004
#define MFI_INIT_CLEAR_HANDSHAKE 0x00000008
#define MFI_RESET_FLAGS MFI_INIT_READY|MFI_INIT_MFIMODE
/**
* MFI frame flags
*/
#define MFI_FRAME_POST_IN_REPLY_QUEUE 0x0000
#define MFI_FRAME_DONT_POST_IN_REPLY_QUEUE 0x0001
#define MFI_FRAME_SGL32 0x0000
#define MFI_FRAME_SGL64 0x0002
#define MFI_FRAME_SENSE32 0x0000
#define MFI_FRAME_SENSE64 0x0004
#define MFI_FRAME_DIR_NONE 0x0000
#define MFI_FRAME_DIR_WRITE 0x0008
#define MFI_FRAME_DIR_READ 0x0010
#define MFI_FRAME_DIR_BOTH 0x0018
/**
* Definition for cmd_status
*/
#define MFI_CMD_STATUS_POLL_MODE 0xFF
/**
* MFI command opcodes
*/
#define MFI_CMD_INIT 0x00
#define MFI_CMD_LD_READ 0x01
#define MFI_CMD_LD_WRITE 0x02
#define MFI_CMD_LD_SCSI_IO 0x03
#define MFI_CMD_PD_SCSI_IO 0x04
#define MFI_CMD_DCMD 0x05
#define MFI_CMD_ABORT 0x06
#define MFI_CMD_SMP 0x07
#define MFI_CMD_STP 0x08
#define MR_DCMD_CTRL_GET_INFO 0x01010000
#define MR_DCMD_CTRL_CACHE_FLUSH 0x01101000
#define MR_FLUSH_CTRL_CACHE 0x01
#define MR_FLUSH_DISK_CACHE 0x02
#define MR_DCMD_CTRL_SHUTDOWN 0x01050000
#define MR_ENABLE_DRIVE_SPINDOWN 0x01
#define MR_DCMD_CTRL_EVENT_GET_INFO 0x01040100
#define MR_DCMD_CTRL_EVENT_GET 0x01040300
#define MR_DCMD_CTRL_EVENT_WAIT 0x01040500
#define MR_DCMD_LD_GET_PROPERTIES 0x03030000
#define MR_DCMD_CLUSTER 0x08000000
#define MR_DCMD_CLUSTER_RESET_ALL 0x08010100
#define MR_DCMD_CLUSTER_RESET_LD 0x08010200
/**
* MFI command completion codes
*/
enum MFI_STAT {
	MFI_STAT_OK = 0x00,
	MFI_STAT_INVALID_CMD = 0x01,
	MFI_STAT_INVALID_DCMD = 0x02,
	MFI_STAT_INVALID_PARAMETER = 0x03,
	MFI_STAT_INVALID_SEQUENCE_NUMBER = 0x04,
	MFI_STAT_ABORT_NOT_POSSIBLE = 0x05,
	MFI_STAT_APP_HOST_CODE_NOT_FOUND = 0x06,
	MFI_STAT_APP_IN_USE = 0x07,
	MFI_STAT_APP_NOT_INITIALIZED = 0x08,
	MFI_STAT_ARRAY_INDEX_INVALID = 0x09,
	MFI_STAT_ARRAY_ROW_NOT_EMPTY = 0x0a,
	MFI_STAT_CONFIG_RESOURCE_CONFLICT = 0x0b,
	MFI_STAT_DEVICE_NOT_FOUND = 0x0c,
	MFI_STAT_DRIVE_TOO_SMALL = 0x0d,
	MFI_STAT_FLASH_ALLOC_FAIL = 0x0e,
	MFI_STAT_FLASH_BUSY = 0x0f,
	MFI_STAT_FLASH_ERROR = 0x10,
	MFI_STAT_FLASH_IMAGE_BAD = 0x11,
	MFI_STAT_FLASH_IMAGE_INCOMPLETE = 0x12,
	MFI_STAT_FLASH_NOT_OPEN = 0x13,
	MFI_STAT_FLASH_NOT_STARTED = 0x14,
	MFI_STAT_FLUSH_FAILED = 0x15,
	MFI_STAT_HOST_CODE_NOT_FOUNT = 0x16,
	MFI_STAT_LD_CC_IN_PROGRESS = 0x17,
	MFI_STAT_LD_INIT_IN_PROGRESS = 0x18,
	MFI_STAT_LD_LBA_OUT_OF_RANGE = 0x19,
	MFI_STAT_LD_MAX_CONFIGURED = 0x1a,
	MFI_STAT_LD_NOT_OPTIMAL = 0x1b,
	MFI_STAT_LD_RBLD_IN_PROGRESS = 0x1c,
	MFI_STAT_LD_RECON_IN_PROGRESS = 0x1d,
	MFI_STAT_LD_WRONG_RAID_LEVEL = 0x1e,
	MFI_STAT_MAX_SPARES_EXCEEDED = 0x1f,
	MFI_STAT_MEMORY_NOT_AVAILABLE = 0x20,
	MFI_STAT_MFC_HW_ERROR = 0x21,
	MFI_STAT_NO_HW_PRESENT = 0x22,
	MFI_STAT_NOT_FOUND = 0x23,
	MFI_STAT_NOT_IN_ENCL = 0x24,
	MFI_STAT_PD_CLEAR_IN_PROGRESS = 0x25,
	MFI_STAT_PD_TYPE_WRONG = 0x26,
	MFI_STAT_PR_DISABLED = 0x27,
	MFI_STAT_ROW_INDEX_INVALID = 0x28,
	MFI_STAT_SAS_CONFIG_INVALID_ACTION = 0x29,
	MFI_STAT_SAS_CONFIG_INVALID_DATA = 0x2a,
	MFI_STAT_SAS_CONFIG_INVALID_PAGE = 0x2b,
	MFI_STAT_SAS_CONFIG_INVALID_TYPE = 0x2c,
	MFI_STAT_SCSI_DONE_WITH_ERROR = 0x2d,
	MFI_STAT_SCSI_IO_FAILED = 0x2e,
	MFI_STAT_SCSI_RESERVATION_CONFLICT = 0x2f,
	MFI_STAT_SHUTDOWN_FAILED = 0x30,
	MFI_STAT_TIME_NOT_SET = 0x31,
	MFI_STAT_WRONG_STATE = 0x32,
	MFI_STAT_LD_OFFLINE = 0x33,
	MFI_STAT_PEER_NOTIFICATION_REJECTED = 0x34,
	MFI_STAT_PEER_NOTIFICATION_FAILED = 0x35,
	MFI_STAT_RESERVATION_IN_PROGRESS = 0x36,
	MFI_STAT_I2C_ERRORS_DETECTED = 0x37,
	MFI_STAT_PCI_ERRORS_DETECTED = 0x38,

	MFI_STAT_INVALID_STATUS = 0xFF
};
/*
* Number of mailbox bytes in DCMD message frame
*/
#define MFI_MBOX_SIZE 12
enum MR_EVT_CLASS {

	MR_EVT_CLASS_DEBUG = -2,
	MR_EVT_CLASS_PROGRESS = -1,
	MR_EVT_CLASS_INFO = 0,
	MR_EVT_CLASS_WARNING = 1,
	MR_EVT_CLASS_CRITICAL = 2,
	MR_EVT_CLASS_FATAL = 3,
	MR_EVT_CLASS_DEAD = 4,

};

enum MR_EVT_LOCALE {

	MR_EVT_LOCALE_LD = 0x0001,
	MR_EVT_LOCALE_PD = 0x0002,
	MR_EVT_LOCALE_ENCL = 0x0004,
	MR_EVT_LOCALE_BBU = 0x0008,
	MR_EVT_LOCALE_SAS = 0x0010,
	MR_EVT_LOCALE_CTRL = 0x0020,
	MR_EVT_LOCALE_CONFIG = 0x0040,
	MR_EVT_LOCALE_CLUSTER = 0x0080,
	MR_EVT_LOCALE_ALL = 0xffff,

};

enum MR_EVT_ARGS {

	MR_EVT_ARGS_NONE,
	MR_EVT_ARGS_CDB_SENSE,
	MR_EVT_ARGS_LD,
	MR_EVT_ARGS_LD_COUNT,
	MR_EVT_ARGS_LD_LBA,
	MR_EVT_ARGS_LD_OWNER,
	MR_EVT_ARGS_LD_LBA_PD_LBA,
	MR_EVT_ARGS_LD_PROG,
	MR_EVT_ARGS_LD_STATE,
	MR_EVT_ARGS_LD_STRIP,
	MR_EVT_ARGS_PD,
	MR_EVT_ARGS_PD_ERR,
	MR_EVT_ARGS_PD_LBA,
	MR_EVT_ARGS_PD_LBA_LD,
	MR_EVT_ARGS_PD_PROG,
	MR_EVT_ARGS_PD_STATE,
	MR_EVT_ARGS_PCI,
	MR_EVT_ARGS_RATE,
	MR_EVT_ARGS_STR,
	MR_EVT_ARGS_TIME,
	MR_EVT_ARGS_ECC,

};
/*
* SAS controller properties
*/
struct megasas_ctrl_prop {

	u16 seq_num;
	u16 pred_fail_poll_interval;
	u16 intr_throttle_count;
	u16 intr_throttle_timeouts;

	u8 rebuild_rate;
	u8 patrol_read_rate;
	u8 bgi_rate;
	u8 cc_rate;
	u8 recon_rate;

	u8 cache_flush_interval;

	u8 spinup_drv_count;
	u8 spinup_delay;

	u8 cluster_enable;
	u8 coercion_mode;
	u8 alarm_enable;

	u8 disable_auto_rebuild;
	u8 disable_battery_warn;
	u8 ecc_bucket_size;
	u16 ecc_bucket_leak_rate;
	u8 restore_hotspare_on_insertion;
	u8 expose_encl_devices;
	u8 reserved[38];

} __attribute__ ((packed));

/*
 * SAS controller information
 */
struct megasas_ctrl_info {

	/*
	 * PCI device information
	 */
	struct {
		u16 vendor_id;
		u16 device_id;
		u16 sub_vendor_id;
		u16 sub_device_id;
		u8 reserved[24];
	} __attribute__ ((packed)) pci;

	/*
	 * Host interface information
	 */
	struct {
		u8 PCIX:1;
		u8 PCIE:1;
		u8 iSCSI:1;
		u8 SAS_3G:1;
		u8 reserved_0:4;
		u8 reserved_1[6];
		u8 port_count;
		u64 port_addr[8];
	} __attribute__ ((packed)) host_interface;

	/*
	 * Device (backend) interface information
	 */
	struct {
		u8 SPI:1;
		u8 SAS_3G:1;
		u8 SATA_1_5G:1;
		u8 SATA_3G:1;
		u8 reserved_0:4;
		u8 reserved_1[6];
		u8 port_count;
		u64 port_addr[8];
	} __attribute__ ((packed)) device_interface;

	/*
	 * List of components residing in flash. All strings are null terminated
	 */
	u32 image_check_word;
	u32 image_component_count;

	struct {
		char name[8];
		char version[32];
		char build_date[16];
		char built_time[16];
	} __attribute__ ((packed)) image_component[8];

	/*
	 * List of flash components that have been flashed on the card, but
	 * are not in use, pending reset of the adapter. This list will be
	 * empty if a flash operation has not occurred. All strings are null
	 * terminated
	 */
	u32 pending_image_component_count;

	struct {
		char name[8];
		char version[32];
		char build_date[16];
		char build_time[16];
	} __attribute__ ((packed)) pending_image_component[8];

	u8 max_arms;
	u8 max_spans;
	u8 max_arrays;
	u8 max_lds;

	char product_name[80];
	char serial_no[32];

	/*
	 * Other physical/controller/operation information. Indicates the
	 * presence of the hardware
	 */
	struct {
		u32 bbu:1;
		u32 alarm:1;
		u32 nvram:1;
		u32 uart:1;
		u32 reserved:28;
	} __attribute__ ((packed)) hw_present;

	u32 current_fw_time;

	/*
	 * Maximum data transfer sizes
	 */
	u16 max_concurrent_cmds;
	u16 max_sge_count;
	u32 max_request_size;

	/*
	 * Logical and physical device counts
	 */
	u16 ld_present_count;
	u16 ld_degraded_count;
	u16 ld_offline_count;

	u16 pd_present_count;
	u16 pd_disk_present_count;
	u16 pd_disk_pred_failure_count;
	u16 pd_disk_failed_count;

	/*
	 * Memory size information
	 */
	u16 nvram_size;
	u16 memory_size;
	u16 flash_size;

	/*
	 * Error counters
	 */
	u16 mem_correctable_error_count;
	u16 mem_uncorrectable_error_count;

	/*
	 * Cluster information
	 */
	u8 cluster_permitted;
	u8 cluster_active;

	/*
	 * Additional max data transfer sizes
	 */
	u16 max_strips_per_io;

	/*
	 * Controller capabilities structures
	 */
	struct {
		u32 raid_level_0:1;
		u32 raid_level_1:1;
		u32 raid_level_5:1;
		u32 raid_level_1E:1;
		u32 raid_level_6:1;
		u32 reserved:27;
	} __attribute__ ((packed)) raid_levels;

	struct {
		u32 rbld_rate:1;
		u32 cc_rate:1;
		u32 bgi_rate:1;
		u32 recon_rate:1;
		u32 patrol_rate:1;
		u32 alarm_control:1;
		u32 cluster_supported:1;
		u32 bbu:1;
		u32 spanning_allowed:1;
		u32 dedicated_hotspares:1;
		u32 revertible_hotspares:1;
		u32 foreign_config_import:1;
		u32 self_diagnostic:1;
		u32 mixed_redundancy_arr:1;
		u32 global_hot_spares:1;
		u32 reserved:17;
	} __attribute__ ((packed)) adapter_operations;

	struct {
		u32 read_policy:1;
		u32 write_policy:1;
		u32 io_policy:1;
		u32 access_policy:1;
		u32 disk_cache_policy:1;
		u32 reserved:27;
	} __attribute__ ((packed)) ld_operations;

	struct {
		u8 min;
		u8 max;
		u8 reserved[2];
	} __attribute__ ((packed)) stripe_sz_ops;

	struct {
		u32 force_online:1;
		u32 force_offline:1;
		u32 force_rebuild:1;
		u32 reserved:29;
	} __attribute__ ((packed)) pd_operations;

	struct {
		u32 ctrl_supports_sas:1;
		u32 ctrl_supports_sata:1;
		u32 allow_mix_in_encl:1;
		u32 allow_mix_in_ld:1;
		u32 allow_sata_in_cluster:1;
		u32 reserved:27;
	} __attribute__ ((packed)) pd_mix_support;

	/*
	 * Define ECC single-bit-error bucket information
	 */
	u8 ecc_bucket_count;
	u8 reserved_2[11];

	/*
	 * Include the controller properties (changeable items)
	 */
	struct megasas_ctrl_prop properties;

	/*
	 * Define FW pkg version (set in envt v'bles on OEM basis)
	 */
	char package_version[0x60];

	u8 pad[0x800 - 0x6a0];

} __attribute__ ((packed));
/*
* ===============================
* MegaRAID SAS driver definitions
* ===============================
*/
#define MEGASAS_MAX_PD_CHANNELS 2
#define MEGASAS_MAX_LD_CHANNELS 2
#define MEGASAS_MAX_CHANNELS (MEGASAS_MAX_PD_CHANNELS + \
MEGASAS_MAX_LD_CHANNELS)
#define MEGASAS_MAX_DEV_PER_CHANNEL 128
#define MEGASAS_DEFAULT_INIT_ID -1
#define MEGASAS_MAX_LUN 8
#define MEGASAS_MAX_LD 64
/*
* When SCSI mid-layer calls driver's reset routine, driver waits for
* MEGASAS_RESET_WAIT_TIME seconds for all outstanding IO to complete. Note
* that the driver cannot _actually_ abort or reset pending commands. While
* it is waiting for the commands to complete, it prints a diagnostic message
* every MEGASAS_RESET_NOTICE_INTERVAL seconds
*/
#define MEGASAS_RESET_WAIT_TIME 180
#define MEGASAS_RESET_NOTICE_INTERVAL 5
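/*
 * Sketch of the wait-and-warn loop the comment above describes; the actual
 * reset handler in megaraid_sas.c (not part of this excerpt) does the same
 * kind of polling on instance->fw_outstanding.
 */
static int example_wait_for_outstanding(struct megasas_instance *instance)
{
	int i;

	for (i = 0; i < MEGASAS_RESET_WAIT_TIME; i++) {
		if (!instance->fw_outstanding)
			return 0;

		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL))
			printk(KERN_NOTICE "megasas: waiting for %d commands "
			       "to complete\n", instance->fw_outstanding);

		msleep(1000);
	}

	return -1;
}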
#define MEGASAS_IOCTL_CMD 0
/*
* FW reports the maximum of number of commands that it can accept (maximum
* commands that can be outstanding) at any time. The driver must report a
* lower number to the mid layer because it can issue a few internal commands
* itself (E.g, AEN, abort cmd, IOCTLs etc). The number of commands it needs
* is shown below
*/
#define MEGASAS_INT_CMDS 32
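/*
 * Illustration of that reservation with a made-up firmware value: whatever
 * the FW reports, MEGASAS_INT_CMDS slots are held back before the rest is
 * advertised to the mid-layer (see megasas_io_attach() above).
 */
static int example_can_queue(u16 max_fw_cmds)
{
	return max_fw_cmds - MEGASAS_INT_CMDS;	/* e.g. 1008 - 32 = 976 */
}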
/*
* FW can accept both 32 and 64 bit SGLs. We want to allocate 32/64 bit
* SGLs based on the size of dma_addr_t
*/
#define IS_DMA64 (sizeof(dma_addr_t) == 8)
#define MFI_OB_INTR_STATUS_MASK 0x00000002
#define MFI_POLL_TIMEOUT_SECS 10
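/*
 * Sketch of how the 32/64-bit SGL choice falls out of IS_DMA64; the driver's
 * scatter-gather builder (not in this excerpt) applies the same split when
 * it fills a frame's flags and union megasas_sgl.
 */
static inline u16 example_sgl_frame_flags(void)
{
	return IS_DMA64 ? MFI_FRAME_SGL64 : MFI_FRAME_SGL32;
}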
struct megasas_register_set {

	u32 reserved_0[4];		/*0000h */

	u32 inbound_msg_0;		/*0010h */
	u32 inbound_msg_1;		/*0014h */
	u32 outbound_msg_0;		/*0018h */
	u32 outbound_msg_1;		/*001Ch */

	u32 inbound_doorbell;		/*0020h */
	u32 inbound_intr_status;	/*0024h */
	u32 inbound_intr_mask;		/*0028h */

	u32 outbound_doorbell;		/*002Ch */
	u32 outbound_intr_status;	/*0030h */
	u32 outbound_intr_mask;		/*0034h */

	u32 reserved_1[2];		/*0038h */

	u32 inbound_queue_port;		/*0040h */
	u32 outbound_queue_port;	/*0044h */

	u32 reserved_2;			/*004Ch */

	u32 index_registers[1004];	/*0050h */

} __attribute__ ((packed));

struct megasas_sge32 {

	u32 phys_addr;
	u32 length;

} __attribute__ ((packed));

struct megasas_sge64 {

	u64 phys_addr;
	u32 length;

} __attribute__ ((packed));

union megasas_sgl {

	struct megasas_sge32 sge32[1];
	struct megasas_sge64 sge64[1];

} __attribute__ ((packed));

struct megasas_header {

	u8 cmd;			/*00h */
	u8 sense_len;		/*01h */
	u8 cmd_status;		/*02h */
	u8 scsi_status;		/*03h */

	u8 target_id;		/*04h */
	u8 lun;			/*05h */
	u8 cdb_len;		/*06h */
	u8 sge_count;		/*07h */

	u32 context;		/*08h */
	u32 pad_0;		/*0Ch */

	u16 flags;		/*10h */
	u16 timeout;		/*12h */
	u32 data_xferlen;	/*14h */

} __attribute__ ((packed));

union megasas_sgl_frame {

	struct megasas_sge32 sge32[8];
	struct megasas_sge64 sge64[5];

} __attribute__ ((packed));

struct megasas_init_frame {

	u8 cmd;			/*00h */
	u8 reserved_0;		/*01h */
	u8 cmd_status;		/*02h */

	u8 reserved_1;		/*03h */
	u32 reserved_2;		/*04h */

	u32 context;		/*08h */
	u32 pad_0;		/*0Ch */

	u16 flags;		/*10h */
	u16 reserved_3;		/*12h */
	u32 data_xfer_len;	/*14h */

	u32 queue_info_new_phys_addr_lo;	/*18h */
	u32 queue_info_new_phys_addr_hi;	/*1Ch */
	u32 queue_info_old_phys_addr_lo;	/*20h */
	u32 queue_info_old_phys_addr_hi;	/*24h */

	u32 reserved_4[6];	/*28h */

} __attribute__ ((packed));

struct megasas_init_queue_info {

	u32 init_flags;				/*00h */
	u32 reply_queue_entries;		/*04h */

	u32 reply_queue_start_phys_addr_lo;	/*08h */
	u32 reply_queue_start_phys_addr_hi;	/*0Ch */
	u32 producer_index_phys_addr_lo;	/*10h */
	u32 producer_index_phys_addr_hi;	/*14h */
	u32 consumer_index_phys_addr_lo;	/*18h */
	u32 consumer_index_phys_addr_hi;	/*1Ch */

} __attribute__ ((packed));

struct megasas_io_frame {

	u8 cmd;			/*00h */
	u8 sense_len;		/*01h */
	u8 cmd_status;		/*02h */
	u8 scsi_status;		/*03h */

	u8 target_id;		/*04h */
	u8 access_byte;		/*05h */
	u8 reserved_0;		/*06h */
	u8 sge_count;		/*07h */

	u32 context;		/*08h */
	u32 pad_0;		/*0Ch */

	u16 flags;		/*10h */
	u16 timeout;		/*12h */
	u32 lba_count;		/*14h */

	u32 sense_buf_phys_addr_lo;	/*18h */
	u32 sense_buf_phys_addr_hi;	/*1Ch */

	u32 start_lba_lo;	/*20h */
	u32 start_lba_hi;	/*24h */

	union megasas_sgl sgl;	/*28h */

} __attribute__ ((packed));

struct megasas_pthru_frame {

	u8 cmd;			/*00h */
	u8 sense_len;		/*01h */
	u8 cmd_status;		/*02h */
	u8 scsi_status;		/*03h */

	u8 target_id;		/*04h */
	u8 lun;			/*05h */
	u8 cdb_len;		/*06h */
	u8 sge_count;		/*07h */

	u32 context;		/*08h */
	u32 pad_0;		/*0Ch */

	u16 flags;		/*10h */
	u16 timeout;		/*12h */
	u32 data_xfer_len;	/*14h */

	u32 sense_buf_phys_addr_lo;	/*18h */
	u32 sense_buf_phys_addr_hi;	/*1Ch */

	u8 cdb[16];		/*20h */
	union megasas_sgl sgl;	/*30h */

} __attribute__ ((packed));

struct megasas_dcmd_frame {

	u8 cmd;			/*00h */
	u8 reserved_0;		/*01h */
	u8 cmd_status;		/*02h */
	u8 reserved_1[4];	/*03h */
	u8 sge_count;		/*07h */

	u32 context;		/*08h */
	u32 pad_0;		/*0Ch */

	u16 flags;		/*10h */
	u16 timeout;		/*12h */

	u32 data_xfer_len;	/*14h */
	u32 opcode;		/*18h */

	union {			/*1Ch */
		u8 b[12];
		u16 s[6];
		u32 w[3];
	} mbox;

	union megasas_sgl sgl;	/*28h */

} __attribute__ ((packed));

struct megasas_abort_frame {

	u8 cmd;			/*00h */
	u8 reserved_0;		/*01h */
	u8 cmd_status;		/*02h */

	u8 reserved_1;		/*03h */
	u32 reserved_2;		/*04h */

	u32 context;		/*08h */
	u32 pad_0;		/*0Ch */

	u16 flags;		/*10h */
	u16 reserved_3;		/*12h */
	u32 reserved_4;		/*14h */

	u32 abort_context;	/*18h */
	u32 pad_1;		/*1Ch */

	u32 abort_mfi_phys_addr_lo;	/*20h */
	u32 abort_mfi_phys_addr_hi;	/*24h */

	u32 reserved_5[6];	/*28h */

} __attribute__ ((packed));

struct megasas_smp_frame {

	u8 cmd;			/*00h */
	u8 reserved_1;		/*01h */
	u8 cmd_status;		/*02h */
	u8 connection_status;	/*03h */

	u8 reserved_2[3];	/*04h */
	u8 sge_count;		/*07h */

	u32 context;		/*08h */
	u32 pad_0;		/*0Ch */

	u16 flags;		/*10h */
	u16 timeout;		/*12h */

	u32 data_xfer_len;	/*14h */
	u64 sas_addr;		/*18h */

	union {
		struct megasas_sge32 sge32[2];	/* [0]: resp [1]: req */
		struct megasas_sge64 sge64[2];	/* [0]: resp [1]: req */
	} sgl;

} __attribute__ ((packed));

struct megasas_stp_frame {

	u8 cmd;			/*00h */
	u8 reserved_1;		/*01h */
	u8 cmd_status;		/*02h */
	u8 reserved_2;		/*03h */

	u8 target_id;		/*04h */
	u8 reserved_3[2];	/*05h */
	u8 sge_count;		/*07h */

	u32 context;		/*08h */
	u32 pad_0;		/*0Ch */

	u16 flags;		/*10h */
	u16 timeout;		/*12h */

	u32 data_xfer_len;	/*14h */

	u16 fis[10];		/*18h */
	u32 stp_flags;

	union {
		struct megasas_sge32 sge32[2];	/* [0]: resp [1]: data */
		struct megasas_sge64 sge64[2];	/* [0]: resp [1]: data */
	} sgl;

} __attribute__ ((packed));

union megasas_frame {

	struct megasas_header hdr;
	struct megasas_init_frame init;
	struct megasas_io_frame io;
	struct megasas_pthru_frame pthru;
	struct megasas_dcmd_frame dcmd;
	struct megasas_abort_frame abort;
	struct megasas_smp_frame smp;
	struct megasas_stp_frame stp;

	u8 raw_bytes[64];
};

struct megasas_cmd;

union megasas_evt_class_locale {

	struct {
		u16 locale;
		u8 reserved;
		s8 class;
	} __attribute__ ((packed)) members;

	u32 word;

} __attribute__ ((packed));

struct megasas_evt_log_info {
	u32 newest_seq_num;
	u32 oldest_seq_num;
	u32 clear_seq_num;
	u32 shutdown_seq_num;
	u32 boot_seq_num;

} __attribute__ ((packed));

struct megasas_progress {

	u16 progress;
	u16 elapsed_seconds;

} __attribute__ ((packed));

struct megasas_evtarg_ld {

	u16 target_id;
	u8 ld_index;
	u8 reserved;

} __attribute__ ((packed));

struct megasas_evtarg_pd {
	u16 device_id;
	u8 encl_index;
	u8 slot_number;

} __attribute__ ((packed));

struct megasas_evt_detail {

	u32 seq_num;
	u32 time_stamp;
	u32 code;
	union megasas_evt_class_locale cl;
	u8 arg_type;
	u8 reserved1[15];

	union {
		struct {
			struct megasas_evtarg_pd pd;
			u8 cdb_length;
			u8 sense_length;
			u8 reserved[2];
			u8 cdb[16];
			u8 sense[64];
		} __attribute__ ((packed)) cdbSense;

		struct megasas_evtarg_ld ld;

		struct {
			struct megasas_evtarg_ld ld;
			u64 count;
		} __attribute__ ((packed)) ld_count;

		struct {
			u64 lba;
			struct megasas_evtarg_ld ld;
		} __attribute__ ((packed)) ld_lba;

		struct {
			struct megasas_evtarg_ld ld;
			u32 prevOwner;
			u32 newOwner;
		} __attribute__ ((packed)) ld_owner;

		struct {
			u64 ld_lba;
			u64 pd_lba;
			struct megasas_evtarg_ld ld;
			struct megasas_evtarg_pd pd;
		} __attribute__ ((packed)) ld_lba_pd_lba;

		struct {
			struct megasas_evtarg_ld ld;
			struct megasas_progress prog;
		} __attribute__ ((packed)) ld_prog;

		struct {
			struct megasas_evtarg_ld ld;
			u32 prev_state;
			u32 new_state;
		} __attribute__ ((packed)) ld_state;

		struct {
			u64 strip;
			struct megasas_evtarg_ld ld;
		} __attribute__ ((packed)) ld_strip;

		struct megasas_evtarg_pd pd;

		struct {
			struct megasas_evtarg_pd pd;
			u32 err;
		} __attribute__ ((packed)) pd_err;

		struct {
			u64 lba;
			struct megasas_evtarg_pd pd;
		} __attribute__ ((packed)) pd_lba;

		struct {
			u64 lba;
			struct megasas_evtarg_pd pd;
			struct megasas_evtarg_ld ld;
		} __attribute__ ((packed)) pd_lba_ld;

		struct {
			struct megasas_evtarg_pd pd;
			struct megasas_progress prog;
		} __attribute__ ((packed)) pd_prog;

		struct {
			struct megasas_evtarg_pd pd;
			u32 prevState;
			u32 newState;
		} __attribute__ ((packed)) pd_state;

		struct {
			u16 vendorId;
			u16 deviceId;
			u16 subVendorId;
			u16 subDeviceId;
		} __attribute__ ((packed)) pci;

		u32 rate;
		char str[96];

		struct {
			u32 rtc;
			u32 elapsedSeconds;
		} __attribute__ ((packed)) time;

		struct {
			u32 ecar;
			u32 elog;
			char str[64];
		} __attribute__ ((packed)) ecc;

		u8 b[96];
		u16 s[48];
		u32 w[24];
		u64 d[12];
	} args;

	char description[128];

} __attribute__ ((packed));

struct megasas_instance {

	u32 *producer;
	dma_addr_t producer_h;
	u32 *consumer;
	dma_addr_t consumer_h;

	u32 *reply_queue;
	dma_addr_t reply_queue_h;

	unsigned long base_addr;
	struct megasas_register_set __iomem *reg_set;

	s8 init_id;
	u8 reserved[3];

	u16 max_num_sge;
	u16 max_fw_cmds;
	u32 max_sectors_per_req;

	struct megasas_cmd **cmd_list;
	struct list_head cmd_pool;
	spinlock_t cmd_pool_lock;
	struct dma_pool *frame_dma_pool;
	struct dma_pool *sense_dma_pool;

	struct megasas_evt_detail *evt_detail;
	dma_addr_t evt_detail_h;
	struct megasas_cmd *aen_cmd;
	struct semaphore aen_mutex;
	struct semaphore ioctl_sem;

	struct Scsi_Host *host;

	wait_queue_head_t int_cmd_wait_q;
	wait_queue_head_t abort_cmd_wait_q;

	struct pci_dev *pdev;
	u32 unique_id;

	u32 fw_outstanding;
	u32 hw_crit_error;
	spinlock_t instance_lock;
};
#define MEGASAS_IS_LOGICAL(scp) \
(scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1
#define MEGASAS_DEV_INDEX(inst, scp) \
((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \
scp->device->id
struct megasas_cmd {

	union megasas_frame *frame;
	dma_addr_t frame_phys_addr;
	u8 *sense;
	dma_addr_t sense_phys_addr;

	u32 index;
	u8 sync_cmd;
	u8 cmd_status;
	u16 abort_aen;

	struct list_head list;
	struct scsi_cmnd *scmd;
	struct megasas_instance *instance;
	u32 frame_count;
};

#define MAX_MGMT_ADAPTERS		1024
#define MAX_IOCTL_SGE			16

struct megasas_iocpacket {

	u16 host_no;
	u16 __pad1;
	u32 sgl_off;
	u32 sge_count;
	u32 sense_off;
	u32 sense_len;
	union {
		u8 raw[128];
		struct megasas_header hdr;
	} frame;

	struct iovec sgl[MAX_IOCTL_SGE];

} __attribute__ ((packed));

struct megasas_aen {
	u16 host_no;
	u16 __pad1;
	u32 seq_num;
	u32 class_locale_word;
} __attribute__ ((packed));
#ifdef CONFIG_COMPAT
struct compat_megasas_iocpacket {
	u16 host_no;
	u16 __pad1;
	u32 sgl_off;
	u32 sge_count;
	u32 sense_off;
	u32 sense_len;
	union {
		u8 raw[128];
		struct megasas_header hdr;
	} frame;
	struct compat_iovec sgl[MAX_IOCTL_SGE];
} __attribute__ ((packed));
#define MEGASAS_IOC_FIRMWARE _IOWR('M', 1, struct compat_megasas_iocpacket)
#else
#define MEGASAS_IOC_FIRMWARE _IOWR('M', 1, struct megasas_iocpacket)
#endif
#define MEGASAS_IOC_GET_AEN _IOW('M', 3, struct megasas_aen)
struct megasas_mgmt_info {

	u16 count;
	struct megasas_instance *instance[MAX_MGMT_ADAPTERS];
	int max_index;
};

#endif				/*LSI_MEGARAID_SAS_H */
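/*
 * Sketch of how the two channel macros above classify a request: channels
 * 0-1 carry physical devices, channels 2-3 carry logical drives, and within
 * either pair the index is (channel % 2) * 128 + target id. 'scp' stands for
 * a struct scsi_cmnd as used on the queuecommand path, which is not part of
 * this excerpt.
 */
static int example_classify_command(struct megasas_instance *instance,
				    struct scsi_cmnd *scp, u32 *index)
{
	*index = MEGASAS_DEV_INDEX(instance, scp);

	return MEGASAS_IS_LOGICAL(scp);	/* 1 => logical drive, 0 => pass-through */
}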
drivers/scsi/qla2xxx/qla_rscn.c
View file @ 7d6322b4
...
...
@@ -330,6 +330,8 @@ qla2x00_update_login_fcport(scsi_qla_host_t *ha, struct mbx_entry *mbxstat,
 	fcport->flags &= ~FCF_FAILOVER_NEEDED;
 	fcport->iodesc_idx_sent = IODESC_INVALID_INDEX;
 	atomic_set(&fcport->state, FCS_ONLINE);
+	if (fcport->rport)
+		fc_remote_port_unblock(fcport->rport);
 }
...
...
drivers/scsi/scsi_scan.c
View file @ 7d6322b4
...
...
@@ -587,6 +587,7 @@ static int scsi_probe_lun(struct scsi_device *sdev, char *inq_result,
 	if (sdev->scsi_level >= 2 ||
 	    (sdev->scsi_level == 1 && (inq_result[3] & 0x0f) == 1))
 		sdev->scsi_level++;
+	sdev->sdev_target->scsi_level = sdev->scsi_level;
 
 	return 0;
 }
...
...
@@ -771,6 +772,15 @@ static int scsi_add_lun(struct scsi_device *sdev, char *inq_result, int *bflags)
 	return SCSI_SCAN_LUN_PRESENT;
 }
 
+static inline void scsi_destroy_sdev(struct scsi_device *sdev)
+{
+	if (sdev->host->hostt->slave_destroy)
+		sdev->host->hostt->slave_destroy(sdev);
+	transport_destroy_device(&sdev->sdev_gendev);
+	put_device(&sdev->sdev_gendev);
+}
+
 /**
  * scsi_probe_and_add_lun - probe a LUN, if a LUN is found add it
  * @starget:	pointer to target device structure
...
...
@@ -803,9 +813,9 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
 	 * The rescan flag is used as an optimization, the first scan of a
 	 * host adapter calls into here with rescan == 0.
 	 */
-	if (rescan) {
-		sdev = scsi_device_lookup_by_target(starget, lun);
-		if (sdev) {
+	sdev = scsi_device_lookup_by_target(starget, lun);
+	if (sdev) {
+		if (rescan || sdev->sdev_state != SDEV_CREATED) {
 			SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO
 				"scsi scan: device exists on %s\n",
 				sdev->sdev_gendev.bus_id));
...
...
@@ -820,9 +830,9 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
 				  sdev->model);
 			return SCSI_SCAN_LUN_PRESENT;
 		}
-	}
-
-	sdev = scsi_alloc_sdev(starget, lun, hostdata);
+		scsi_device_put(sdev);
+	} else
+		sdev = scsi_alloc_sdev(starget, lun, hostdata);
 	if (!sdev)
 		goto out;
...
...
@@ -877,12 +887,8 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
 				res = SCSI_SCAN_NO_RESPONSE;
 			}
 		}
-	} else {
-		if (sdev->host->hostt->slave_destroy)
-			sdev->host->hostt->slave_destroy(sdev);
-		transport_destroy_device(&sdev->sdev_gendev);
-		put_device(&sdev->sdev_gendev);
-	}
+	} else
+		scsi_destroy_sdev(sdev);
  out:
 	return res;
 }
...
...
@@ -1054,7 +1060,7 @@ EXPORT_SYMBOL(int_to_scsilun);
  *     0: scan completed (or no memory, so further scanning is futile)
  *     1: no report lun scan, or not configured
  **/
-static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
+static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
 				int rescan)
 {
 	char devname[64];
...
...
@@ -1067,7 +1073,8 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
 	struct scsi_lun *lunp, *lun_data;
 	u8 *data;
 	struct scsi_sense_hdr sshdr;
-	struct scsi_target *starget = scsi_target(sdev);
+	struct scsi_device *sdev;
+	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
 
 	/*
 	 * Only support SCSI-3 and up devices if BLIST_NOREPORTLUN is not set.
...
...
@@ -1075,15 +1082,23 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
 	 * support more than 8 LUNs.
 	 */
 	if ((bflags & BLIST_NOREPORTLUN) ||
-	    sdev->scsi_level < SCSI_2 ||
-	    (sdev->scsi_level < SCSI_3 &&
-	     (!(bflags & BLIST_REPORTLUN2) || sdev->host->max_lun <= 8)) )
+	    starget->scsi_level < SCSI_2 ||
+	    (starget->scsi_level < SCSI_3 &&
+	     (!(bflags & BLIST_REPORTLUN2) || shost->max_lun <= 8)) )
 		return 1;
 	if (bflags & BLIST_NOLUN)
 		return 0;
 
+	if (!(sdev = scsi_device_lookup_by_target(starget, 0))) {
+		sdev = scsi_alloc_sdev(starget, 0, NULL);
+		if (!sdev)
+			return 0;
+		if (scsi_device_get(sdev))
+			return 0;
+	}
+
 	sprintf(devname, "host %d channel %d id %d",
-		sdev->host->host_no, sdev->channel, sdev->id);
+		shost->host_no, sdev->channel, sdev->id);
...
...
@@ -1098,8 +1113,10 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
 	length = (max_scsi_report_luns + 1) * sizeof(struct scsi_lun);
 	lun_data = kmalloc(length, GFP_ATOMIC |
 			   (sdev->host->unchecked_isa_dma ? __GFP_DMA : 0));
-	if (!lun_data)
+	if (!lun_data) {
+		printk(ALLOC_FAILURE_MSG, __FUNCTION__);
 		goto out;
+	}
 
 	scsi_cmd[0] = REPORT_LUNS;
...
...
@@ -1201,10 +1218,6 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
 			for (i = 0; i < sizeof(struct scsi_lun); i++)
 				printk("%02x", data[i]);
 			printk(" has a LUN larger than currently supported.\n");
-		} else if (lun == 0) {
-			/*
-			 * LUN 0 has already been scanned.
-			 */
 		} else if (lun > sdev->host->max_lun) {
 			printk(KERN_WARNING "scsi: %s lun%d has a LUN larger"
 			       " than allowed by the host adapter\n",
...
...
@@ -1227,13 +1240,13 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
 	}
 
 	kfree(lun_data);
 	return 0;
 
  out:
-	/*
-	 * We are out of memory, don't try scanning any further.
-	 */
-	printk(ALLOC_FAILURE_MSG, __FUNCTION__);
+	scsi_device_put(sdev);
+	if (sdev->sdev_state == SDEV_CREATED)
+		/*
+		 * the sdev we used didn't appear in the report luns scan
+		 */
+		scsi_destroy_sdev(sdev);
 	return 0;
 }
...
...
@@ -1299,7 +1312,6 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel,
 	struct Scsi_Host *shost = dev_to_shost(parent);
 	int bflags = 0;
 	int res;
-	struct scsi_device *sdev = NULL;
 	struct scsi_target *starget;
 
 	if (shost->this_id == id)
...
...
@@ -1325,27 +1337,16 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel,
 	 * Scan LUN 0, if there is some response, scan further. Ideally, we
 	 * would not configure LUN 0 until all LUNs are scanned.
 	 */
-	res = scsi_probe_and_add_lun(starget, 0, &bflags, &sdev, rescan, NULL);
-	if (res == SCSI_SCAN_LUN_PRESENT) {
-		if (scsi_report_lun_scan(sdev, bflags, rescan) != 0)
+	res = scsi_probe_and_add_lun(starget, 0, &bflags, NULL, rescan, NULL);
+	if (res == SCSI_SCAN_LUN_PRESENT || res == SCSI_SCAN_TARGET_PRESENT) {
+		if (scsi_report_lun_scan(starget, bflags, rescan) != 0)
 			/*
 			 * The REPORT LUN did not scan the target,
 			 * do a sequential scan.
 			 */
 			scsi_sequential_lun_scan(starget, bflags,
-						 res, sdev->scsi_level, rescan);
-	} else if (res == SCSI_SCAN_TARGET_PRESENT) {
-		/*
-		 * There's a target here, but lun 0 is offline so we
-		 * can't use the report_lun scan.  Fall back to a
-		 * sequential lun scan with a bflags of SPARSELUN and
-		 * a default scsi level of SCSI_2
-		 */
-		scsi_sequential_lun_scan(starget, BLIST_SPARSELUN,
-					 SCSI_SCAN_TARGET_PRESENT, SCSI_2,
-					 rescan);
+						 res, starget->scsi_level, rescan);
 	}
-	if (sdev)
-		scsi_device_put(sdev);
 
 out_reap:
 	/* now determine if the target has any children at all
...
...
@@ -1542,10 +1543,7 @@ void scsi_free_host_dev(struct scsi_device *sdev)
 {
 	BUG_ON(sdev->id != sdev->host->this_id);
 
-	if (sdev->host->hostt->slave_destroy)
-		sdev->host->hostt->slave_destroy(sdev);
-	transport_destroy_device(&sdev->sdev_gendev);
-	put_device(&sdev->sdev_gendev);
+	scsi_destroy_sdev(sdev);
 }
 EXPORT_SYMBOL(scsi_free_host_dev);
drivers/scsi/scsi_transport_sas.c
View file @ 7d6322b4
...
...
@@ -628,17 +628,16 @@ sas_rphy_delete(struct sas_rphy *rphy)
 	struct Scsi_Host *shost = dev_to_shost(parent->dev.parent);
 	struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);
 
-	transport_destroy_device(&rphy->dev);
-
-	scsi_remove_target(dev);
+	scsi_remove_target(&rphy->dev);
+
+	transport_remove_device(dev);
+	device_del(dev);
+	transport_destroy_device(dev);
 
 	spin_lock(&sas_host->lock);
 	list_del(&rphy->list);
 	spin_unlock(&sas_host->lock);
 
-	transport_remove_device(dev);
-	device_del(dev);
-	transport_destroy_device(dev);
-
 	put_device(&parent->dev);
 }
 EXPORT_SYMBOL(sas_rphy_delete);
...
...
include/linux/pci_ids.h
View file @ 7d6322b4
...
...
@@ -185,6 +185,7 @@
#define PCI_DEVICE_ID_LSI_61C102 0x0901
#define PCI_DEVICE_ID_LSI_63C815 0x1000
#define PCI_DEVICE_ID_LSI_SAS1064 0x0050
#define PCI_DEVICE_ID_LSI_SAS1064R 0x0411
#define PCI_DEVICE_ID_LSI_SAS1066 0x005E
#define PCI_DEVICE_ID_LSI_SAS1068 0x0054
#define PCI_DEVICE_ID_LSI_SAS1064A 0x005C
...
...
@@ -560,6 +561,7 @@
#define PCI_VENDOR_ID_DELL 0x1028
#define PCI_DEVICE_ID_DELL_RACIII 0x0008
#define PCI_DEVICE_ID_DELL_RAC4 0x0012
#define PCI_DEVICE_ID_DELL_PERC5 0x0015
#define PCI_VENDOR_ID_MATROX 0x102B
#define PCI_DEVICE_ID_MATROX_MGA_2 0x0518
...
...
include/scsi/scsi_device.h
View file @ 7d6322b4
...
...
@@ -163,6 +163,7 @@ struct scsi_target {
 	unsigned int		id; /* target id ... replace
 				     * scsi_device.id eventually */
 	unsigned long		create:1; /* signal that it needs to be added */
+	char			scsi_level;
 	void 			*hostdata; /* available to low-level driver */
 	unsigned long		starget_data[0]; /* for the transport */
 	/* starget_data must be the last element!!!! */
...
...