nexedi / MariaDB

Commit 8ab2897e
Authored 18 years ago by brian@zim.(none)
Merge zim.(none):/home/brian/mysql/tmp_merge
into zim.(none):/home/brian/mysql/merge-5.1

Parents: 7c4ddc8b 11ec75e3
Showing 2 changed files with 41 additions and 24 deletions (+41 -24)
sql/ha_archive.cc  +39 -24
sql/ha_archive.h   +2  -0
sql/ha_archive.cc @ 8ab2897e

@@ -459,12 +459,11 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name,
     share->table_name_length= length;
     share->table_name= tmp_name;
     share->crashed= FALSE;
+    share->archive_write_open= FALSE;
     fn_format(share->data_file_name, table_name, "",
               ARZ, MY_REPLACE_EXT | MY_UNPACK_FILENAME);
     fn_format(meta_file_name, table_name, "", ARM,
               MY_REPLACE_EXT | MY_UNPACK_FILENAME);
-    DBUG_PRINT("info", ("archive opening (1) up write at %s",
-                        share->data_file_name));
     strmov(share->table_name, table_name);
     /*
       We will use this lock for rows.
@@ -476,38 +475,20 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name,
                         share->data_file_name));
     /*
-      After we read, we set the file to dirty. When we close, we will do the
-      opposite. If the meta file will not open we assume it is crashed and
-      leave it up to the user to fix.
+      We read the meta file, but do not mark it dirty unless we actually do
+      a write.
     */
     if (read_meta_file(share->meta_file, &share->rows_recorded,
                        &share->auto_increment_value,
                        &share->forced_flushes,
                        share->real_path))
       share->crashed= TRUE;
-    else
-      (void)write_meta_file(share->meta_file, share->rows_recorded,
-                            share->auto_increment_value,
-                            share->forced_flushes,
-                            share->real_path,
-                            TRUE);
     /*
       Since we now possibly no real_path, we will use it instead if it exists.
     */
     if (*share->real_path)
       fn_format(share->data_file_name, share->real_path, "", ARZ,
                 MY_REPLACE_EXT | MY_UNPACK_FILENAME);
-    /*
-      It is expensive to open and close the data files and since you can't have
-      a gzip file that can be both read and written we keep a writer open
-      that is shared amoung all open tables.
-    */
-    if (!(azopen(&(share->archive_write), share->data_file_name,
-                 O_WRONLY|O_APPEND|O_BINARY)))
-    {
-      DBUG_PRINT("info", ("Could not open archive write file"));
-      share->crashed= TRUE;
-    }

     VOID(my_hash_insert(&archive_open_tables, (byte*) share));
     thr_lock_init(&share->lock);
   }
@@ -554,6 +535,7 @@ int ha_archive::free_share(ARCHIVE_SHARE *share)
                           share->forced_flushes,
                           share->real_path,
                           share->crashed ? TRUE : FALSE);
+    if (share->archive_write_open)
     if (azclose(&(share->archive_write)))
       rc= 1;
     if (my_close(share->meta_file, MYF(0)))
@@ -565,6 +547,32 @@ int ha_archive::free_share(ARCHIVE_SHARE *share)
   DBUG_RETURN(rc);
 }

+int ha_archive::init_archive_writer()
+{
+  DBUG_ENTER("ha_archive::init_archive_writer");
+  (void)write_meta_file(share->meta_file, share->rows_recorded,
+                        share->auto_increment_value,
+                        share->forced_flushes,
+                        share->real_path,
+                        TRUE);
+
+  /*
+    It is expensive to open and close the data files and since you can't have
+    a gzip file that can be both read and written we keep a writer open
+    that is shared amoung all open tables.
+  */
+  if (!(azopen(&(share->archive_write), share->data_file_name,
+               O_WRONLY|O_APPEND|O_BINARY)))
+  {
+    DBUG_PRINT("info", ("Could not open archive write file"));
+    share->crashed= TRUE;
+    DBUG_RETURN(1);
+  }
+  share->archive_write_open= TRUE;
+
+  DBUG_RETURN(0);
+}
+
 /*
   We just implement one additional file extension.
@@ -910,6 +918,9 @@ int ha_archive::write_row(byte *buf)
     Notice that the global auto_increment has been increased.
     In case of a failed row write, we will never try to reuse the value.
   */
+  if (!share->archive_write_open)
+    if (init_archive_writer())
+      DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
   share->rows_recorded++;
   rc= real_write_row(buf, &(share->archive_write));
@@ -1221,6 +1232,10 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
   azio_stream writer;
   char writer_filename[FN_REFLEN];

+  /* Open up the writer if we haven't yet */
+  if (!share->archive_write_open)
+    init_archive_writer();
+
   /* Flush any waiting data */
   azflush(&(share->archive_write), Z_SYNC_FLUSH);
   share->forced_flushes++;
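Taken together, the ha_archive.cc changes replace the eager azopen() in get_share() with a lazy-initialization pattern: the share records whether the writer is open, and init_archive_writer() is called only from paths that actually need to write (write_row(), optimize()). A minimal stand-alone sketch of the same idiom follows; ArchiveShare, open_writer() and append_row() are hypothetical names for illustration, not the engine's real API.

// Sketch only: lazily open a shared, append-only writer the first time a
// write is attempted, mirroring the archive_write_open flag added above.
// ArchiveShare, open_writer() and append_row() are hypothetical names.
#include <cstddef>
#include <cstdio>
#include <string>

struct ArchiveShare
{
  std::string data_file_name;
  FILE *archive_write= nullptr;    // stands in for the azio_stream writer
  bool archive_write_open= false;  // mirrors share->archive_write_open
  bool crashed= false;
};

// Open the writer once and remember that it is open (cf. init_archive_writer()).
static int open_writer(ArchiveShare *share)
{
  share->archive_write= fopen(share->data_file_name.c_str(), "ab");
  if (!share->archive_write)
  {
    share->crashed= true;          // cf. share->crashed= TRUE on azopen() failure
    return 1;
  }
  share->archive_write_open= true;
  return 0;
}

// Writer-dependent paths check the flag first (cf. write_row() and optimize()).
static int append_row(ArchiveShare *share, const char *row, size_t len)
{
  if (!share->archive_write_open)
    if (open_writer(share))
      return 1;                    // cf. DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE)
  return fwrite(row, 1, len, share->archive_write) == len ? 0 : 1;
}

The design rationale is the one the comment moved into init_archive_writer() gives: a gzip stream cannot be open for reading and writing at once, so a single append-only writer is shared by all open handlers, and this commit simply defers opening it until a write actually happens.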
sql/ha_archive.h @ 8ab2897e

@@ -35,6 +35,7 @@ typedef struct st_archive_share {
   THR_LOCK lock;
   File meta_file;                   /* Meta file we use */
   azio_stream archive_write;        /* Archive file we are working with */
+  bool archive_write_open;
   bool dirty;                       /* Flag for if a flush should occur */
   bool crashed;                     /* Meta file is crashed */
   ha_rows rows_recorded;            /* Number of rows in tables */
@@ -112,6 +113,7 @@ class ha_archive: public handler
                        bool dirty);
   ARCHIVE_SHARE *get_share(const char *table_name, TABLE *table, int *rc);
   int free_share(ARCHIVE_SHARE *share);
+  int init_archive_writer();
   bool auto_repair() const { return 1; } // For the moment we just do this
   int read_data_header(azio_stream *file_to_read);
   int write_data_header(azio_stream *file_to_write);
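The header changes are the other half of the pattern: both the archive_write_open flag and init_archive_writer() live on the shared structures, so every handler instance of the same table agrees on whether the writer exists. The matching teardown, guarded exactly like the new check in free_share(), could look like the following sketch; it reuses the hypothetical ArchiveShare type from the sketch above, and close_writer() is likewise a made-up name, not the engine's API.

// Sketch only: release the writer only if it was ever opened, mirroring the
// new `if (share->archive_write_open)` guard added to free_share().
static int close_writer(ArchiveShare *share)
{
  int rc= 0;
  if (share->archive_write_open)
  {
    if (fclose(share->archive_write))  // cf. azclose(&(share->archive_write))
      rc= 1;                           // cf. rc= 1 in free_share()
    share->archive_write= nullptr;
    share->archive_write_open= false;
  }
  return rc;
}

Keeping the guard in the shared free path means a table that was only ever read never pays for an open/close of the write stream at all.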