nexedi / linux, commit 2e9977c2
authored Apr 14, 2011 by Konrad Rzeszutek Wilk
xen/blkback: Fix checkpatch warnings in blkback.c
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parent d6091b21
Showing 1 changed file with 47 additions and 34 deletions.

drivers/xen/blkback/blkback.c (+47, -34)
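All of the changes below are mechanical checkpatch cleanups with no functional effect: the pending_req_t typedef becomes a plain struct pending_req, redundant = 0 initializers on static variables are dropped, an unnecessary cast of kzalloc()'s return value is removed, over-80-column lines are wrapped, and multi-line comments are reflowed so the closing */ sits on its own line.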
@@ -63,8 +63,8 @@ module_param_named(reqs, blkif_reqs, int, 0);
 MODULE_PARM_DESC(reqs, "Number of blkback requests to allocate");
 
 /* Run-time switchable: /sys/module/blkback/parameters/ */
-static unsigned int log_stats = 0;
-static unsigned int debug_lvl = 0;
+static unsigned int log_stats;
+static unsigned int debug_lvl;
 module_param(log_stats, int, 0644);
 module_param(debug_lvl, int, 0644);
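Checkpatch's complaint here is "do not initialise statics to 0 or NULL": objects with static storage duration are zero-initialized by the C standard (they land in .bss), so the explicit = 0 is redundant. A minimal stand-alone illustration, reusing the patch's identifiers but not taken from the patch:

    #include <stdio.h>

    static unsigned int log_stats = 0;  /* checkpatch: redundant initializer */
    static unsigned int debug_lvl;      /* preferred: implicitly zero (.bss) */

    int main(void)
    {
            printf("%u %u\n", log_stats, debug_lvl);    /* prints: 0 0 */
            return 0;
    }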
@@ -74,7 +74,7 @@ module_param(debug_lvl, int, 0644);
  * the pendcnt towards zero. When it hits zero, the specified domain has a
  * response queued for it, with the saved 'id' passed back.
  */
-typedef struct {
+struct pending_req {
         struct blkif_st       *blkif;
         u64                    id;
         int                    nr_pages;
@@ -82,12 +82,12 @@ typedef struct {
         unsigned short         operation;
         int                    status;
         struct list_head       free_list;
-} pending_req_t;
+};
 
 #define BLKBACK_INVALID_HANDLE (~0)
 
 struct xen_blkbk {
-        pending_req_t          *pending_reqs;
+        struct pending_req     *pending_reqs;
         /* List of all 'pending_req' available */
         struct list_head       pending_free;
         /* And its spinlock. */
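The struct rename above drives most of this patch: kernel style (and checkpatch) discourages new typedefs for plain structs, because keeping the struct tag visible at every use site makes it obvious the object is an aggregate rather than a scalar or pointer. The remaining hunks propagate struct pending_req through every declaration, prototype, and macro argument that used pending_req_t.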
@@ -106,14 +106,15 @@ static struct xen_blkbk *blkbk;
  * pending_pages[..]. For each 'pending_req' we have have up to
  * BLKIF_MAX_SEGMENTS_PER_REQUEST (11) pages. The seg would be from 0 through
  * 10 and would index in the pending_pages[..]. */
-static inline int vaddr_pagenr(pending_req_t *req, int seg)
+static inline int vaddr_pagenr(struct pending_req *req, int seg)
 {
-        return (req - blkbk->pending_reqs) * BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
+        return (req - blkbk->pending_reqs) * BLKIF_MAX_SEGMENTS_PER_REQUEST +
+                seg;
 }
 
 #define pending_page(req, seg) pending_pages[vaddr_pagenr(req, seg)]
 
-static inline unsigned long vaddr(pending_req_t *req, int seg)
+static inline unsigned long vaddr(struct pending_req *req, int seg)
 {
         unsigned long pfn = page_to_pfn(blkbk->pending_page(req, seg));
         return (unsigned long)pfn_to_kaddr(pfn);
@@ -126,21 +127,22 @@ static inline unsigned long vaddr(pending_req_t *req, int seg)
 
 static int do_block_io_op(struct blkif_st *blkif);
 static void dispatch_rw_block_io(struct blkif_st *blkif,
                                  struct blkif_request *req,
-                                 pending_req_t *pending_req);
+                                 struct pending_req *pending_req);
 static void make_response(struct blkif_st *blkif, u64 id,
                           unsigned short op, int st);
 
 /*
  * Retrieve from the 'pending_reqs' a free pending_req structure to be used.
  */
-static pending_req_t *alloc_req(void)
+static struct pending_req *alloc_req(void)
 {
-        pending_req_t *req = NULL;
+        struct pending_req *req = NULL;
         unsigned long flags;
 
         spin_lock_irqsave(&blkbk->pending_free_lock, flags);
         if (!list_empty(&blkbk->pending_free)) {
-                req = list_entry(blkbk->pending_free.next, pending_req_t, free_list);
+                req = list_entry(blkbk->pending_free.next, struct pending_req,
+                                 free_list);
                 list_del(&req->free_list);
         }
         spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
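The list_entry() change matters because list_entry is the kernel's container_of() in disguise: its second argument must name the containing type so the macro can subtract the member's offset. A simplified userspace sketch of the mechanism (paraphrasing the kernel macros; not code from this patch):

    #include <stddef.h>
    #include <stdio.h>

    /* Simplified forms of the kernel's container_of()/list_entry() macros. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))
    #define list_entry(ptr, type, member) container_of(ptr, type, member)

    struct list_head { struct list_head *next, *prev; };

    struct pending_req {
            int id;
            struct list_head free_list;     /* embedded list node */
    };

    int main(void)
    {
            struct pending_req req = { .id = 42 };
            struct list_head *node = &req.free_list;

            /* Recover the enclosing struct from a pointer to its list node. */
            struct pending_req *r = list_entry(node, struct pending_req, free_list);
            printf("%d\n", r->id);          /* prints: 42 */
            return 0;
    }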
@@ -151,7 +153,7 @@ static pending_req_t* alloc_req(void)
  * Return the 'pending_req' structure back to the freepool. We also
  * wake up the thread if it was waiting for a free page.
  */
-static void free_req(pending_req_t *req)
+static void free_req(struct pending_req *req)
 {
         unsigned long flags;
         int was_empty;
@@ -200,7 +202,7 @@ static void plug_queue(struct blkif_st *blkif, struct block_device *bdev)
  * Unmap the grant references, and also remove the M2P over-rides
  * used in the 'pending_req'.
  */
-static void fast_flush_area(pending_req_t *req)
+static void fast_flush_area(struct pending_req *req)
 {
         struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
         unsigned int i, invcount = 0;
@@ -221,7 +223,8 @@ static void fast_flush_area(pending_req_t *req)
                 GNTTABOP_unmap_grant_ref, unmap, invcount);
         BUG_ON(ret);
         /* Note, we use invcount, so nr->pages, so we can't index
-         * using vaddr(req, i). */
+         * using vaddr(req, i).
+         */
         for (i = 0; i < invcount; i++) {
                 ret = m2p_remove_override(
                         virt_to_page(unmap[i].host_addr), false);
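This hunk introduces the comment style checkpatch asks for, repeated in several hunks below: in a multi-line kernel comment, the closing */ goes on a line of its own rather than trailing the last line of text.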
@@ -233,7 +236,7 @@ static void fast_flush_area(pending_req_t *req)
         }
 }
 
-/******************************************************************
+/*
  * SCHEDULER FUNCTIONS
  */
 
@@ -269,7 +272,8 @@ int blkif_schedule(void *arg)
                         blkif->waiting_reqs || kthread_should_stop());
                 wait_event_interruptible(
                         blkbk->pending_free_wq,
-                        !list_empty(&blkbk->pending_free) || kthread_should_stop());
+                        !list_empty(&blkbk->pending_free) ||
+                        kthread_should_stop());
 
                 blkif->waiting_reqs = 0;
                 smp_mb(); /* clear flag *before* checking for work */
@@ -297,7 +301,7 @@ int blkif_schedule(void *arg)
 /*
  * Completion callback on the bio's. Called as bh->b_end_io()
  */
-static void __end_block_io_op(pending_req_t *pending_req, int error)
+static void __end_block_io_op(struct pending_req *pending_req, int error)
 {
         /* An error fails the entire request. */
         if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
@@ -313,7 +317,8 @@ static void __end_block_io_op(pending_req_t *pending_req, int error)
 
         /* If all of the bio's have completed it is time to unmap
          * the grant references associated with 'request' and provide
-         * the proper response on the ring. */
+         * the proper response on the ring.
+         */
         if (atomic_dec_and_test(&pending_req->pendcnt)) {
                 fast_flush_area(pending_req);
                 make_response(pending_req->blkif, pending_req->id,
@@ -360,7 +365,7 @@ static int do_block_io_op(struct blkif_st *blkif)
...
@@ -360,7 +365,7 @@ static int do_block_io_op(struct blkif_st *blkif)
{
{
union
blkif_back_rings
*
blk_rings
=
&
blkif
->
blk_rings
;
union
blkif_back_rings
*
blk_rings
=
&
blkif
->
blk_rings
;
struct
blkif_request
req
;
struct
blkif_request
req
;
pending_req_t
*
pending_req
;
struct
pending_req
*
pending_req
;
RING_IDX
rc
,
rp
;
RING_IDX
rc
,
rp
;
int
more_to_do
=
0
;
int
more_to_do
=
0
;
@@ -440,7 +445,7 @@ static int do_block_io_op(struct blkif_st *blkif)
  */
 static void dispatch_rw_block_io(struct blkif_st *blkif,
                                  struct blkif_request *req,
-                                 pending_req_t *pending_req)
+                                 struct pending_req *pending_req)
 {
         struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
         struct phys_req preq;
@@ -487,7 +492,8 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
 
         /* Fill out preq.nr_sects with proper amount of sectors, and setup
          * assign map[..] with the PFN of the page in our domain with the
-         * corresponding grant reference for each page.*/
+         * corresponding grant reference for each page.
+         */
         for (i = 0; i < nseg; i++) {
                 uint32_t flags;
 
@@ -509,8 +515,9 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
         BUG_ON(ret);
 
         /* Now swizzel the MFN in our domain with the MFN from the other domain
-         * so that when we access vaddr(pending_req,i) it has the contents of the
-         * page from the other domain. */
+         * so that when we access vaddr(pending_req,i) it has the contents of
+         * the page from the other domain.
+         */
         for (i = 0; i < nseg; i++) {
                 if (unlikely(map[i].status != 0)) {
                         DPRINTK("invalid buffer -- could not remap it\n");
@@ -522,12 +529,13 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
                 if (ret)
                         continue;
 
                 ret = m2p_add_override(PFN_DOWN(map[i].dev_bus_addr),
                         blkbk->pending_page(pending_req, i), false);
                 if (ret) {
-                        printk(KERN_ALERT "Failed to install M2P override for"\
-                                " %lx (ret: %d)\n", (unsigned long)map[i].dev_bus_addr, ret);
+                        printk(KERN_ALERT "Failed to install M2P override for"\
+                                " %lx (ret: %d)\n", (unsigned long)
+                                map[i].dev_bus_addr, ret);
                         /* We could switch over to GNTTABOP_copy */
                         continue;
                 }
@@ -536,9 +544,11 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
                         (req->u.rw.seg[i].first_sect << 9);
         }
 
-        /* If we have failed at this point, we need to undo the M2P override, set
-         * gnttab_set_unmap_op on all of the grant references and perform the
-         * hypercall to unmap the grants - that is all done in fast_flush_area. */
+        /* If we have failed at this point, we need to undo the M2P override,
+         * set gnttab_set_unmap_op on all of the grant references and perform
+         * the hypercall to unmap the grants - that is all done in
+         * fast_flush_area.
+         */
         if (ret)
                 goto fail_flush;
@@ -554,7 +564,8 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
         plug_queue(blkif, preq.bdev);
 
         /* We set it one so that the last submit_bio does not have to call
-         * atomic_inc. */
+         * atomic_inc.
+         */
         atomic_set(&pending_req->pendcnt, 1);
         blkif_get(blkif);
@@ -575,7 +586,7 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
                         atomic_inc(&pending_req->pendcnt);
                         submit_bio(operation, bio);
                 }
 
-                bio = bio_alloc(GFP_KERNEL, nseg-i);
+                bio = bio_alloc(GFP_KERNEL, nseg - i);
                 if (unlikely(bio == NULL))
                         goto fail_put_bio;
@@ -694,7 +705,7 @@ static int __init blkif_init(void)
         if (!xen_pv_domain())
                 return -ENODEV;
 
-        blkbk = (struct xen_blkbk *)kzalloc(sizeof(struct xen_blkbk), GFP_KERNEL);
+        blkbk = kzalloc(sizeof(struct xen_blkbk), GFP_KERNEL);
         if (!blkbk) {
                 printk(KERN_ALERT "%s: out of memory!\n", __func__);
                 return -ENOMEM;
@@ -709,7 +720,8 @@ static int __init blkif_init(void)
...
@@ -709,7 +720,8 @@ static int __init blkif_init(void)
blkbk
->
pending_pages
=
kzalloc
(
sizeof
(
blkbk
->
pending_pages
[
0
])
*
blkbk
->
pending_pages
=
kzalloc
(
sizeof
(
blkbk
->
pending_pages
[
0
])
*
mmap_pages
,
GFP_KERNEL
);
mmap_pages
,
GFP_KERNEL
);
if
(
!
blkbk
->
pending_reqs
||
!
blkbk
->
pending_grant_handles
||
!
blkbk
->
pending_pages
)
{
if
(
!
blkbk
->
pending_reqs
||
!
blkbk
->
pending_grant_handles
||
!
blkbk
->
pending_pages
)
{
rc
=
-
ENOMEM
;
rc
=
-
ENOMEM
;
goto
out_of_memory
;
goto
out_of_memory
;
}
}
@@ -733,7 +745,8 @@ static int __init blkif_init(void)
         init_waitqueue_head(&blkbk->pending_free_wq);
 
         for (i = 0; i < blkif_reqs; i++)
-                list_add_tail(&blkbk->pending_reqs[i].free_list, &blkbk->pending_free);
+                list_add_tail(&blkbk->pending_reqs[i].free_list,
+                              &blkbk->pending_free);
 
         rc = blkif_xenbus_init();
         if (rc)