nexedi / linux

Commit 3ffa3c0e authored Jun 24, 2012 by Al Viro
aio: now fput() is OK from interrupt context; get rid of manual delayed __fput()
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>

parent 4a9d4b02
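
The 70 deleted lines below are the manual delayed-__fput() machinery the title refers to: AIO completion can run in interrupt context, where dropping the last reference to a struct file used to be unsafe, so fs/aio.c parked such kiocbs on fput_head and had a workqueue item (fput_work, serviced by aio_fput_routine()) perform the real fput() later. The following is a minimal, self-contained sketch of that general pattern, not the fs/aio.c code itself; every identifier in it (deferred_put, deferred_head, deferred_lock, deferred_work, defer_fput) is hypothetical.

/*
 * Illustrative sketch only; hypothetical names, not the fs/aio.c
 * identifiers.  The pattern: a completion path that may hold the last
 * reference to a struct file queues the put and lets a workqueue,
 * which runs in process context, perform the real fput().
 */
#include <linux/file.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct deferred_put {
	struct file		*filp;
	struct list_head	list;
};

static LIST_HEAD(deferred_head);
static DEFINE_SPINLOCK(deferred_lock);

static void deferred_put_fn(struct work_struct *work)
{
	spin_lock_irq(&deferred_lock);
	while (!list_empty(&deferred_head)) {
		struct deferred_put *dp =
			list_first_entry(&deferred_head, struct deferred_put, list);

		list_del(&dp->list);
		spin_unlock_irq(&deferred_lock);

		fput(dp->filp);		/* safe here: process context */
		kfree(dp);

		spin_lock_irq(&deferred_lock);
	}
	spin_unlock_irq(&deferred_lock);
}
static DECLARE_WORK(deferred_work, deferred_put_fn);

/* Callable from IRQ context: only queues the put and kicks the work. */
static void defer_fput(struct deferred_put *dp)
{
	unsigned long flags;

	spin_lock_irqsave(&deferred_lock, flags);
	list_add(&dp->list, &deferred_head);
	spin_unlock_irqrestore(&deferred_lock, flags);
	schedule_work(&deferred_work);
}

Once fput() itself is safe to call from interrupt context, as the commit title states, this per-subsystem list and workqueue buy nothing, which is why the diff below can drop the file reference inline.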
Showing 1 changed file with 3 additions and 70 deletions

fs/aio.c  +3  -70
@@ -56,13 +56,6 @@ static struct kmem_cache *kioctx_cachep;
 
 static struct workqueue_struct *aio_wq;
 
-/* Used for rare fput completion. */
-static void aio_fput_routine(struct work_struct *);
-static DECLARE_WORK(fput_work, aio_fput_routine);
-
-static DEFINE_SPINLOCK(fput_lock);
-static LIST_HEAD(fput_head);
-
 static void aio_kick_handler(struct work_struct *);
 static void aio_queue_work(struct kioctx *);
 
@@ -479,7 +472,6 @@ static int kiocb_batch_refill(struct kioctx *ctx, struct kiocb_batch *batch)
 {
 	unsigned short allocated, to_alloc;
 	long avail;
-	bool called_fput = false;
 	struct kiocb *req, *n;
 	struct aio_ring *ring;
 
@@ -495,28 +487,11 @@ static int kiocb_batch_refill(struct kioctx *ctx, struct kiocb_batch *batch)
 	if (allocated == 0)
 		goto out;
 
-retry:
 	spin_lock_irq(&ctx->ctx_lock);
 	ring = kmap_atomic(ctx->ring_info.ring_pages[0]);
 
 	avail = aio_ring_avail(&ctx->ring_info, ring) - ctx->reqs_active;
 	BUG_ON(avail < 0);
-	if (avail == 0 && !called_fput) {
-		/*
-		 * Handle a potential starvation case. It is possible that
-		 * we hold the last reference on a struct file, causing us
-		 * to delay the final fput to non-irq context. In this case,
-		 * ctx->reqs_active is artificially high. Calling the fput
-		 * routine here may free up a slot in the event completion
-		 * ring, allowing this allocation to succeed.
-		 */
-		kunmap_atomic(ring);
-		spin_unlock_irq(&ctx->ctx_lock);
-		aio_fput_routine(NULL);
-		called_fput = true;
-		goto retry;
-	}
-
 	if (avail < allocated) {
 		/* Trim back the number of requests. */
 		list_for_each_entry_safe(req, n, &batch->head, ki_batch) {
@@ -570,36 +545,6 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
 		wake_up_all(&ctx->wait);
 }
 
-static void aio_fput_routine(struct work_struct *data)
-{
-	spin_lock_irq(&fput_lock);
-	while (likely(!list_empty(&fput_head))) {
-		struct kiocb *req = list_kiocb(fput_head.next);
-		struct kioctx *ctx = req->ki_ctx;
-
-		list_del(&req->ki_list);
-		spin_unlock_irq(&fput_lock);
-
-		/* Complete the fput(s) */
-		if (req->ki_filp != NULL)
-			fput(req->ki_filp);
-
-		/* Link the iocb into the context's free list */
-		rcu_read_lock();
-		spin_lock_irq(&ctx->ctx_lock);
-		really_put_req(ctx, req);
-		/*
-		 * at that point ctx might've been killed, but actual
-		 * freeing is RCU'd
-		 */
-		spin_unlock_irq(&ctx->ctx_lock);
-		rcu_read_unlock();
-
-		spin_lock_irq(&fput_lock);
-	}
-	spin_unlock_irq(&fput_lock);
-}
-
 /* __aio_put_req
  * Returns true if this put was the last user of the request.
  */
@@ -618,21 +563,9 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
 	req->ki_cancel = NULL;
 	req->ki_retry = NULL;
 
-	/*
-	 * Try to optimize the aio and eventfd file* puts, by avoiding to
-	 * schedule work in case it is not final fput() time. In normal cases,
-	 * we would not be holding the last reference to the file*, so
-	 * this function will be executed w/out any aio kthread wakeup.
-	 */
-	if (unlikely(!fput_atomic(req->ki_filp))) {
-		spin_lock(&fput_lock);
-		list_add(&req->ki_list, &fput_head);
-		spin_unlock(&fput_lock);
-		schedule_work(&fput_work);
-	} else {
-		req->ki_filp = NULL;
-		really_put_req(ctx, req);
-	}
+	fput(req->ki_filp);
+	req->ki_filp = NULL;
+	really_put_req(ctx, req);
 	return 1;
 }
 
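
Net effect of the diff: __aio_put_req() now drops the file reference with a plain fput(req->ki_filp), clears ki_filp and calls really_put_req() directly, instead of the fput_atomic()/fput_head/schedule_work() path; aio_fput_routine(), fput_work, fput_lock and fput_head disappear; and kiocb_batch_refill() loses its called_fput retry, since ctx->reqs_active can no longer be held artificially high by requests waiting for a deferred fput.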