Commit 4f990f49
Authored by Andrew Morton on Apr 17, 2004
Committed by Linus Torvalds on Apr 17, 2004
[PATCH] remove buffer_error()

From: Jeff Garzik <jgarzik@pobox.com>

It was debug code, no longer required.

Parent: d012f668
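For reference, the helper being deleted is a rate-limited debug report plus a stack dump, wrapped in a macro that records the call site. The sketch below is condensed from the deleted hunks that follow (the CONFIG_KALLSYMS hint printk is elided here):

	void __buffer_error(char *file, int line)
	{
		static int enough;

		if (enough > 10)	/* report at most ~10 times per boot */
			return;
		enough++;
		printk("buffer layer error at %s:%d\n", file, line);
		dump_stack();		/* backtrace of the offending caller */
	}
	#define buffer_error() __buffer_error(__FILE__, __LINE__)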
Changes: 6 changed files, 8 additions(+) and 111 deletions(-)

  fs/buffer.c                    +7  -93
  fs/ext3/inode.c                +0   -2
  fs/mpage.c                     +1   -2
  fs/ntfs/aops.c                 +0   -2
  fs/reiserfs/inode.c            +0   -5
  include/linux/buffer_head.h    +0   -7
fs/buffer.c  (+7 -93)

@@ -51,25 +51,6 @@ static struct bh_wait_queue_head {
 	wait_queue_head_t wqh;
 } ____cacheline_aligned_in_smp bh_wait_queue_heads[1 << BH_WAIT_TABLE_ORDER];
 
-/*
- * Debug/devel support stuff
- */
-
-void __buffer_error(char *file, int line)
-{
-	static int enough;
-
-	if (enough > 10)
-		return;
-	enough++;
-	printk("buffer layer error at %s:%d\n", file, line);
-#ifndef CONFIG_KALLSYMS
-	printk("Pass this trace through ksymoops for reporting\n");
-#endif
-	dump_stack();
-}
-EXPORT_SYMBOL(__buffer_error);
-
 inline void
 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
 {

@@ -99,17 +80,6 @@ EXPORT_SYMBOL(wake_up_buffer);
 void fastcall unlock_buffer(struct buffer_head *bh)
 {
-	/*
-	 * unlock_buffer against a zero-count bh is a bug, if the page
-	 * is not locked. Because then nothing protects the buffer's
-	 * waitqueue, which is used here. (Well. Other locked buffers
-	 * against the page will pin it. But complain anyway).
-	 */
-	if (atomic_read(&bh->b_count) == 0 &&
-			!PageLocked(bh->b_page) &&
-			!PageWriteback(bh->b_page))
-		buffer_error();
-
 	clear_buffer_locked(bh);
 	smp_mb__after_clear_bit();
 	wake_up_buffer(bh);

@@ -125,10 +95,6 @@ void __wait_on_buffer(struct buffer_head * bh)
 	wait_queue_head_t *wqh = bh_waitq_head(bh);
 	DEFINE_WAIT(wait);
 
-	if (atomic_read(&bh->b_count) == 0 &&
-			(!bh->b_page || !PageLocked(bh->b_page)))
-		buffer_error();
-
 	do {
 		prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
 		if (buffer_locked(bh)) {

@@ -146,8 +112,6 @@ void __wait_on_buffer(struct buffer_head * bh)
 static void
 __set_page_buffers(struct page *page, struct buffer_head *head)
 {
-	if (page_has_buffers(page))
-		buffer_error();
 	page_cache_get(page);
 	SetPagePrivate(page);
 	page->private = (unsigned long)head;

@@ -433,10 +397,12 @@ __find_get_block_slow(struct block_device *bdev, sector_t block, int unused)
 		}
 		bh = bh->b_this_page;
 	} while (bh != head);
-	buffer_error();
-	printk("block=%llu, b_blocknr=%llu\n",
+	printk("__find_get_block_slow() failed. "
+		"block=%llu, b_blocknr=%llu\n",
 		(unsigned long long)block, (unsigned long long)bh->b_blocknr);
 	printk("b_state=0x%08lx, b_size=%u\n", bh->b_state, bh->b_size);
+	printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
 out_unlock:
 	spin_unlock(&bd_mapping->private_lock);
 	page_cache_release(page);

@@ -847,10 +813,7 @@ int __set_page_dirty_buffers(struct page *page)
 		struct buffer_head *bh = head;
 		do {
-			if (buffer_uptodate(bh))
-				set_buffer_dirty(bh);
-			else
-				buffer_error();
+			set_buffer_dirty(bh);
 			bh = bh->b_this_page;
 		} while (bh != head);
 	}

@@ -1151,7 +1114,7 @@ grow_dev_page(struct block_device *bdev, sector_t block,
 	return page;
 
 failed:
-	buffer_error();
+	BUG();
 	unlock_page(page);
 	page_cache_release(page);
 	return NULL;

@@ -1247,8 +1210,6 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
  */
 void fastcall mark_buffer_dirty(struct buffer_head *bh)
 {
-	if (!buffer_uptodate(bh))
-		buffer_error();
 	if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
 		__set_page_dirty_nobuffers(bh->b_page);
 }

@@ -1267,7 +1228,7 @@ void __brelse(struct buffer_head * buf)
 		return;
 	}
 	printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
-	buffer_error();		/* For the stack backtrace */
+	WARN_ON(1);
 }
 
 /*

@@ -1294,8 +1255,6 @@ static struct buffer_head *__bread_slow(struct buffer_head *bh)
 		unlock_buffer(bh);
 		return bh;
 	} else {
-		if (buffer_dirty(bh))
-			buffer_error();
 		get_bh(bh);
 		bh->b_end_io = end_buffer_read_sync;
 		submit_bh(READ, bh);

@@ -1686,10 +1645,6 @@ void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
 	old_bh = __find_get_block_slow(bdev, block, 0);
 	if (old_bh) {
-#if 0	/* This happens.  Later. */
-		if (buffer_dirty(old_bh))
-			buffer_error();
-#endif
 		clear_buffer_dirty(old_bh);
 		wait_on_buffer(old_bh);
 		clear_buffer_req(old_bh);

@@ -1737,8 +1692,6 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
 	last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
 	if (!page_has_buffers(page)) {
-		if (!PageUptodate(page))
-			buffer_error();
 		create_empty_buffers(page, 1 << inode->i_blkbits,
 					(1 << BH_Dirty)|(1 << BH_Uptodate));
 	}

@@ -1767,9 +1720,6 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
 		 * mapped buffers outside i_size will occur, because
 		 * this page can be outside i_size when there is a
 		 * truncate in progress.
-		 *
-		 * if (buffer_mapped(bh))
-		 *	buffer_error();
 		 */
 		/*
 		 * The buffer was zeroed by block_write_full_page()

@@ -1777,8 +1727,6 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
 			clear_buffer_dirty(bh);
 			set_buffer_uptodate(bh);
 		} else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
-			if (buffer_new(bh))
-				buffer_error();
 			err = get_block(inode, block, bh, 1);
 			if (err)
 				goto recover;

@@ -1811,8 +1759,6 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
 			continue;
 		}
 		if (test_clear_buffer_dirty(bh)) {
-			if (!buffer_uptodate(bh))
-				buffer_error();
 			mark_buffer_async_write(bh);
 		} else {
 			unlock_buffer(bh);

@@ -1942,8 +1888,6 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
 				unmap_underlying_metadata(bh->b_bdev,
 							bh->b_blocknr);
 				if (PageUptodate(page)) {
-					if (!buffer_mapped(bh))
-						buffer_error();
 					set_buffer_uptodate(bh);
 					continue;
 				}

@@ -2001,8 +1945,6 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
 			void *kaddr;
 
 			clear_buffer_new(bh);
-			if (buffer_uptodate(bh))
-				buffer_error();
 			kaddr = kmap_atomic(page, KM_USER0);
 			memset(kaddr+block_start, 0, bh->b_size);
 			kunmap_atomic(kaddr, KM_USER0);

@@ -2068,8 +2010,6 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
 	if (!PageLocked(page))
 		PAGE_BUG(page);
-	if (PageUptodate(page))
-		buffer_error();
 	blocksize = 1 << inode->i_blkbits;
 	if (!page_has_buffers(page))
 		create_empty_buffers(page, blocksize, 0);

@@ -2692,13 +2632,6 @@ void submit_bh(int rw, struct buffer_head * bh)
 	BUG_ON(!buffer_mapped(bh));
 	BUG_ON(!bh->b_end_io);
 
-	if ((rw == READ || rw == READA) && buffer_uptodate(bh))
-		buffer_error();
-	if (rw == WRITE && !buffer_uptodate(bh))
-		buffer_error();
-	if (rw == READ && buffer_dirty(bh))
-		buffer_error();
-
 	/* Only clear out a write error when rewriting */
 	if (test_set_buffer_req(bh) && rw == WRITE)
 		clear_buffer_write_io_error(bh);

@@ -2797,21 +2730,6 @@ void sync_dirty_buffer(struct buffer_head *bh)
 	}
 }
 
-/*
- * Sanity checks for try_to_free_buffers.
- */
-static void check_ttfb_buffer(struct page *page, struct buffer_head *bh)
-{
-	if (!buffer_uptodate(bh) && !buffer_req(bh)) {
-		if (PageUptodate(page) && page->mapping
-			&& buffer_mapped(bh)	/* discard_buffer */
-			&& S_ISBLK(page->mapping->host->i_mode))
-		{
-			buffer_error();
-		}
-	}
-}
-
 /*
  * try_to_free_buffers() checks if all the buffers on this particular page
  * are unused, and releases them if so.

@@ -2847,7 +2765,6 @@ drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
 	bh = head;
 	do {
-		check_ttfb_buffer(page, bh);
 		if (buffer_write_io_error(bh))
 			set_bit(AS_EIO, &page->mapping->flags);
 		if (buffer_busy(bh))

@@ -2857,9 +2774,6 @@ drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
 		bh = bh->b_this_page;
 	} while (bh != head);
 
-	if (!was_uptodate && PageUptodate(page) && !PageError(page))
-		buffer_error();
-
 	do {
 		struct buffer_head *next = bh->b_this_page;
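Most fs/buffer.c call sites above simply lose the sanity check; the two that keep one are switched to the standard primitives: grow_dev_page()'s failure path now calls BUG(), and __brelse() reports a double release with WARN_ON(1). A rough, hypothetical sketch of the behavioral difference (not part of the patch; example_check() is an invented name):

	static void example_check(struct buffer_head *bh)	/* hypothetical */
	{
		if (bh == NULL)
			BUG();		/* fatal: oops, kills the current thread */

		if (atomic_read(&bh->b_count) == 0)
			WARN_ON(1);	/* non-fatal: warning plus backtrace, then
					 * execution continues, much as the old
					 * buffer_error() did */
	}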
fs/ext3/inode.c  (+0 -2)

@@ -1358,8 +1358,6 @@ static int ext3_ordered_writepage(struct page *page,
 	}
 
 	if (!page_has_buffers(page)) {
-		if (!PageUptodate(page))
-			buffer_error();
 		create_empty_buffers(page, inode->i_sb->s_blocksize,
 				(1 << BH_Dirty)|(1 << BH_Uptodate));
 	}
fs/mpage.c  (+1 -2)

@@ -485,8 +485,7 @@ mpage_writepage(struct bio *bio, struct page *page, get_block_t get_block,
 				break;
 			block_in_file++;
 		}
-		if (page_block == 0)
-			buffer_error();
+		BUG_ON(page_block == 0);
 
 		first_unmapped = page_block;
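fs/mpage.c is the one place where the check is tightened rather than dropped: a zero page_block used to print the debug report and continue; after this patch it is a hard assertion. In effect (BUG_ON is the standard kernel macro, shown only for orientation):

	/* before: complain, then keep going */
	if (page_block == 0)
		buffer_error();

	/* after: BUG_ON(cond) expands to roughly "if (cond) BUG();" */
	BUG_ON(page_block == 0);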
fs/ntfs/aops.c  (+0 -2)

@@ -1340,8 +1340,6 @@ static int ntfs_prepare_nonresident_write(struct page *page,
 			void *kaddr;
 
 			clear_buffer_new(bh);
-			if (buffer_uptodate(bh))
-				buffer_error();
 			kaddr = kmap_atomic(page, KM_USER0);
 			memset(kaddr + block_start, 0, bh->b_size);
 			kunmap_atomic(kaddr, KM_USER0);
fs/reiserfs/inode.c  (+0 -5)

@@ -1925,7 +1925,6 @@ static int map_block_for_writepage(struct inode *inode,
 	th.t_trans_id = 0;
 
 	if (!buffer_uptodate(bh_result)) {
-		buffer_error();
 		return -EIO;
 	}

@@ -2057,8 +2056,6 @@ static int reiserfs_write_full_page(struct page *page, struct writeback_control
 	 * in the BH_Uptodate is just a sanity check.
 	 */
 	if (!page_has_buffers(page)) {
-		if (!PageUptodate(page))
-			buffer_error();
 		create_empty_buffers(page, inode->i_sb->s_blocksize,
 				(1 << BH_Dirty) | (1 << BH_Uptodate));
 	}

@@ -2120,8 +2117,6 @@ static int reiserfs_write_full_page(struct page *page, struct writeback_control
 		}
 	}
 	if (test_clear_buffer_dirty(bh)) {
-		if (!buffer_uptodate(bh))
-			buffer_error();
 		mark_buffer_async_write(bh);
 	} else {
 		unlock_buffer(bh);
include/linux/buffer_head.h  (+0 -7)

@@ -61,13 +61,6 @@ struct buffer_head {
 	struct list_head b_assoc_buffers; /* associated with another mapping */
 };
 
-/*
- * Debug
- */
-
-void __buffer_error(char *file, int line);
-#define buffer_error() __buffer_error(__FILE__, __LINE__)
-
 /*
  * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
  * and buffer_foo() functions.
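With both the prototype and the macro removed from the header, any remaining buffer_error() caller now fails to compile instead of silently keeping a dead check. A hypothetical conversion for such a leftover call site, following what this patch does in __brelse() (the condition shown is only an example):

	/* old form: no longer builds after this patch */
	if (!buffer_uptodate(bh))
		buffer_error();

	/* hypothetical replacement: keep the complaint as a warning + backtrace */
	if (!buffer_uptodate(bh))
		WARN_ON(1);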