Commit 2c66783a
authored May 02, 2002 by Jens Axboe
committed May 02, 2002 by Linus Torvalds
[PATCH] bio tagged command support
Add support for request tagging of the block queue.
parent 5da3d2ca
Showing 2 changed files with 238 additions and 4 deletions:

    drivers/block/ll_rw_blk.c   +207  -4
    include/linux/blkdev.h       +31  -0
drivers/block/ll_rw_blk.c
...
@@ -302,9 +302,203 @@ void blk_queue_assign_lock(request_queue_t *q, spinlock_t *lock)
        q->queue_lock = lock;
}
/**
* blk_queue_free_tags - release tag maintenance info
* @q: the request queue for the device
*
* Notes:
* blk_cleanup_queue() will take care of calling this function, if tagging
* has been used. So there's usually no need to call this directly, unless
 *  tagging is just being disabled while the queue remains in use.
**/
void blk_queue_free_tags(request_queue_t *q)
{
        struct blk_queue_tag *bqt = q->queue_tags;

        if (!bqt)
                return;

        BUG_ON(bqt->busy);
        BUG_ON(!list_empty(&bqt->busy_list));

        kfree(bqt->tag_index);
        bqt->tag_index = NULL;

        kfree(bqt->tag_map);
        bqt->tag_map = NULL;

        kfree(bqt);
        q->queue_tags = NULL;
        q->queue_flags &= ~(1 << QUEUE_FLAG_QUEUED);
}
/**
* blk_queue_init_tags - initialize the queue tag info
* @q: the request queue for the device
* @depth: the maximum queue depth supported
**/
int blk_queue_init_tags(request_queue_t *q, int depth)
{
        struct blk_queue_tag *tags;
        int bits, i;

        if (depth > queue_nr_requests)
                depth = queue_nr_requests;

        tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
        if (!tags)
                goto fail;

        tags->tag_index = kmalloc(depth * sizeof(struct request *), GFP_ATOMIC);
        if (!tags->tag_index)
                goto fail_index;

        bits = (depth / BLK_TAGS_PER_LONG) + 1;
        tags->tag_map = kmalloc(bits * sizeof(unsigned long), GFP_ATOMIC);
        if (!tags->tag_map)
                goto fail_map;

        /* zero the tag index and map (memset takes the fill byte
         * second and the length third) */
        memset(tags->tag_index, 0, depth * sizeof(struct request *));
        memset(tags->tag_map, 0, bits * sizeof(unsigned long));
        INIT_LIST_HEAD(&tags->busy_list);
        tags->busy = 0;
        tags->max_depth = depth;

        /*
         * set the upper bits if the depth isn't a multiple of the word size
         */
        for (i = depth; i < bits * BLK_TAGS_PER_LONG; i++)
                set_bit(i, tags->tag_map);

        /*
         * assign it, all done
         */
        q->queue_tags = tags;
        q->queue_flags |= (1 << QUEUE_FLAG_QUEUED);

        return 0;

fail_map:
        kfree(tags->tag_index);
fail_index:
        kfree(tags);
fail:
        return -ENOMEM;
}
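
/*
 * Example (hypothetical driver code, not part of this patch): a
 * driver would typically enable tagging once at queue setup time and
 * fall back to untagged operation if the allocation fails. The
 * mydev_* name and the depth of 32 are made up for illustration;
 * real drivers would use the depth the hardware advertises.
 */
static int mydev_setup_tags(request_queue_t *q)
{
        if (blk_queue_init_tags(q, 32)) {
                printk("mydev: no memory for tags, running untagged\n");
                return -ENOMEM;
        }
        return 0;
}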
/**
* blk_queue_end_tag - end tag operations for a request
* @q: the request queue for the device
 * @rq: the request that has completed
*
* Description:
* Typically called when end_that_request_first() returns 0, meaning
* all transfers have been done for a request. It's important to call
* this function before end_that_request_last(), as that will put the
* request back on the free list thus corrupting the internal tag list.
*
* Notes:
* queue lock must be held.
**/
void blk_queue_end_tag(request_queue_t *q, struct request *rq)
{
        struct blk_queue_tag *bqt = q->queue_tags;
        int tag = rq->tag;

        BUG_ON(tag == -1);

        if (unlikely(tag >= bqt->max_depth))
                return;

        if (unlikely(!__test_and_clear_bit(tag, bqt->tag_map))) {
                printk("attempt to clear non-busy tag (%d)\n", tag);
                return;
        }

        list_del(&rq->queuelist);
        rq->flags &= ~REQ_QUEUED;
        rq->tag = -1;

        if (unlikely(bqt->tag_index[tag] == NULL))
                printk("tag %d is missing\n", tag);

        bqt->tag_index[tag] = NULL;
        bqt->busy--;
}
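
/*
 * Example (hypothetical driver code, not part of this patch): the
 * ordering documented above, with blk_queue_end_tag() called after
 * end_that_request_first() reports all transfers done but before
 * end_that_request_last() recycles the request. Caller holds the
 * queue lock.
 */
static void mydev_finish_request(request_queue_t *q, struct request *rq,
                                 int uptodate)
{
        if (!end_that_request_first(rq, uptodate, rq->hard_nr_sectors)) {
                /* all transfers done: release the tag first ... */
                if (blk_rq_tagged(rq))
                        blk_queue_end_tag(q, rq);
                /* ... then hand the request back to the free list */
                end_that_request_last(rq);
        }
}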
/**
* blk_queue_start_tag - find a free tag and assign it
* @q: the request queue for the device
* @rq: the block request that needs tagging
*
* Description:
* This can either be used as a stand-alone helper, or possibly be
* assigned as the queue &prep_rq_fn (in which case &struct request
* automagically gets a tag assigned). Note that this function assumes
* that only REQ_CMD requests can be queued! The request will also be
 *  removed from the request queue, so it's the driver's responsibility to
 *  re-add it if it should need to be restarted for some reason.
*
* Notes:
* queue lock must be held.
**/
int blk_queue_start_tag(request_queue_t *q, struct request *rq)
{
        struct blk_queue_tag *bqt = q->queue_tags;
        unsigned long *map = bqt->tag_map;
        int tag = 0;

        if (unlikely(!(rq->flags & REQ_CMD)))
                return 1;

        for (map = bqt->tag_map; *map == -1UL; map++) {
                tag += BLK_TAGS_PER_LONG;

                if (tag >= bqt->max_depth)
                        return 1;
        }

        tag += ffz(*map);
        __set_bit(tag, bqt->tag_map);

        rq->flags |= REQ_QUEUED;
        rq->tag = tag;
        bqt->tag_index[tag] = rq;
        blkdev_dequeue_request(rq);
        list_add(&rq->queuelist, &bqt->busy_list);
        bqt->busy++;
        return 0;
}
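
/*
 * Example (hypothetical driver code, not part of this patch): using
 * blk_queue_start_tag() as a stand-alone helper in a request_fn.
 * mydev_issue() stands in for whatever sends rq->tag to the
 * hardware; note blk_queue_start_tag() has already dequeued the
 * request by the time it returns 0.
 */
static void mydev_request_fn(request_queue_t *q)
{
        struct request *rq;

        while ((rq = elv_next_request(q)) != NULL) {
                /* returns 1 if no tag is free or rq is not REQ_CMD */
                if (blk_queue_start_tag(q, rq))
                        break;
                mydev_issue(rq);
        }
}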
/**
* blk_queue_invalidate_tags - invalidate all pending tags
* @q: the request queue for the device
*
* Description:
* Hardware conditions may dictate a need to stop all pending requests.
* In this case, we will safely clear the block side of the tag queue and
 *  re-add all requests to the request queue in the right order.
*
* Notes:
* queue lock must be held.
**/
void blk_queue_invalidate_tags(request_queue_t *q)
{
        struct blk_queue_tag *bqt = q->queue_tags;
        struct list_head *tmp, *n;
        struct request *rq;

        /*
         * blk_queue_end_tag() unlinks rq from busy_list, so the
         * safe iterator is needed here
         */
        list_for_each_safe(tmp, n, &bqt->busy_list) {
                rq = list_entry_rq(tmp);

                blk_queue_end_tag(q, rq);
                rq->flags &= ~REQ_STARTED;
                elv_add_request(q, rq, 0);
        }
}
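
/*
 * Example (hypothetical driver code, not part of this patch): after
 * a controller or bus reset, one call requeues everything that was
 * tagged and in flight; the requests get fresh tags when reissued.
 * Caller holds the queue lock.
 */
static void mydev_reset(request_queue_t *q)
{
        blk_queue_invalidate_tags(q);
        q->request_fn(q);       /* kick the queue to reissue */
}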
static char *rq_flags[] = {
        "REQ_RW",
        "REQ_RW_AHEAD",
        "REQ_BARRIER",
        "REQ_CMD",
        "REQ_NOMERGE",
        "REQ_STARTED",
        "REQ_DONTPREP",
        "REQ_QUEUED",
        "REQ_DRIVE_CMD",
        "REQ_DRIVE_ACB",
        "REQ_PC",
        "REQ_BLOCK_PC",
        "REQ_SENSE",
        "REQ_SPECIAL"
};
...
@@ -724,6 +918,9 @@ void blk_cleanup_queue(request_queue_t * q)
        if (count)
                printk("blk_cleanup_queue: leaked requests (%d)\n", count);

        if (blk_queue_tagged(q))
                blk_queue_free_tags(q);

        elevator_exit(q, &q->elevator);

        memset(q, 0, sizeof(*q));
...
@@ -1744,3 +1941,9 @@ EXPORT_SYMBOL(blk_hw_contig_segment);
EXPORT_SYMBOL(ll_10byte_cmd_build);
EXPORT_SYMBOL(blk_queue_prep_rq);

EXPORT_SYMBOL(blk_queue_init_tags);
EXPORT_SYMBOL(blk_queue_free_tags);
EXPORT_SYMBOL(blk_queue_start_tag);
EXPORT_SYMBOL(blk_queue_end_tag);
EXPORT_SYMBOL(blk_queue_invalidate_tags);
include/linux/blkdev.h
...
@@ -56,6 +56,7 @@ struct request {
        unsigned int current_nr_sectors;
        unsigned int hard_cur_sectors;

        int tag;

        void *special;
        char *buffer;
        struct completion *waiting;
...
@@ -75,6 +76,7 @@ enum rq_flag_bits {
        __REQ_NOMERGE,          /* don't touch this for merging */
        __REQ_STARTED,          /* drive already may have started this one */
        __REQ_DONTPREP,         /* don't call prep for this one */
        __REQ_QUEUED,           /* uses queueing */

        /*
         * for ATA/ATAPI devices
         */
...
@@ -97,6 +99,7 @@ enum rq_flag_bits {
#define REQ_NOMERGE (1 << __REQ_NOMERGE)
#define REQ_STARTED (1 << __REQ_STARTED)
#define REQ_DONTPREP (1 << __REQ_DONTPREP)
#define REQ_QUEUED (1 << __REQ_QUEUED)
#define REQ_DRIVE_CMD (1 << __REQ_DRIVE_CMD)
#define REQ_DRIVE_ACB (1 << __REQ_DRIVE_ACB)
#define REQ_PC (1 << __REQ_PC)
...
@@ -121,6 +124,17 @@ enum blk_queue_state {
        Queue_up,
};
#define BLK_TAGS_PER_LONG (sizeof(unsigned long) * 8)
#define BLK_TAGS_MASK (BLK_TAGS_PER_LONG - 1)
struct blk_queue_tag {
        struct request **tag_index;     /* map of busy tags */
        unsigned long *tag_map;         /* bit map of free/busy tags */
        struct list_head busy_list;     /* fifo list of busy tags */
        int busy;                       /* current depth */
        int max_depth;
};
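
/*
 * Illustration (not part of this patch) of the tag_map sizing done
 * in blk_queue_init_tags(): with depth 70 on a 64-bit machine,
 * BLK_TAGS_PER_LONG is 64, so (70 / 64) + 1 = 2 longs = 128 map
 * bits are allocated. Bits 70..127 exceed max_depth and are set at
 * init time so ffz() can never hand them out; note that an exact
 * multiple of the word size still allocates one extra, fully-padded
 * long (64 -> 2).
 */
static inline int example_tag_map_longs(int depth)
{
        return (depth / BLK_TAGS_PER_LONG) + 1;
}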
/*
* Default nr free requests per queue, ll_rw_blk will scale it down
* according to available RAM at init time
...
@@ -193,6 +207,8 @@ struct request_queue
        unsigned long seg_boundary_mask;

        wait_queue_head_t queue_wait;

        struct blk_queue_tag *queue_tags;
};
#define RQ_INACTIVE (-1)
...
@@ -203,9 +219,11 @@ struct request_queue
#define QUEUE_FLAG_PLUGGED      0       /* queue is plugged */
#define QUEUE_FLAG_CLUSTER      1       /* cluster several segments into 1 */
#define QUEUE_FLAG_QUEUED       2       /* uses generic tag queueing */
#define blk_queue_plugged(q) test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
#define blk_mark_plugged(q) set_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_empty(q) elv_queue_empty(q)
#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)
...
@@ -316,6 +334,19 @@ extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist
extern void blk_dump_rq_flags(struct request *, char *);
extern void generic_unplug_device(void *);
/*
* tag stuff
*/
#define blk_queue_tag_request(q, tag) ((q)->queue_tags->tag_index[(tag)])
#define blk_queue_tag_depth(q) ((q)->queue_tags->busy)
#define blk_queue_tag_queue(q) ((q)->queue_tags->busy < (q)->queue_tags->max_depth)
#define blk_rq_tagged(rq) ((rq)->flags & REQ_QUEUED)
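
/*
 * Example (hypothetical driver code, not part of this patch): an
 * interrupt handler can map a hardware-reported tag number back to
 * its owning request with blk_queue_tag_request(), validating the
 * tag first.
 */
static struct request *mydev_tag_to_request(request_queue_t *q, int hw_tag)
{
        struct request *rq;

        if (hw_tag >= q->queue_tags->max_depth)
                return NULL;

        rq = blk_queue_tag_request(q, hw_tag);
        if (rq && blk_rq_tagged(rq))
                return rq;

        return NULL;    /* spurious or already-ended tag */
}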
extern int blk_queue_start_tag(request_queue_t *, struct request *);
extern void blk_queue_end_tag(request_queue_t *, struct request *);
extern int blk_queue_init_tags(request_queue_t *, int);
extern void blk_queue_free_tags(request_queue_t *);
extern void blk_queue_invalidate_tags(request_queue_t *);
extern int *blk_size[MAX_BLKDEV];       /* in units of 1024 bytes */
#define MAX_PHYS_SEGMENTS 128
...