Commit 1b68aa10
authored Jan 06, 2005 by brian@zim.(none)

Merge zim.(none):/home/brian/mysql/mysql-5.0
into zim.(none):/home/brian/mysql/mysql-5.1

parents 2e006db4 2ee9d854
Showing 2 changed files with 26 additions and 20 deletions (+26 -20)

sql/examples/ha_archive.cc  +25 -19
sql/examples/ha_tina.cc     +1  -1
sql/examples/ha_archive.cc
...
@@ -520,7 +520,7 @@ int ha_archive::create(const char *name, TABLE *table_arg,
 int ha_archive::write_row(byte * buf)
 {
   z_off_t written;
-  Field_blob **field;
+  uint *ptr, *end;
   DBUG_ENTER("ha_archive::write_row");
 
   if (share->crashed)
...
@@ -530,25 +530,27 @@ int ha_archive::write_row(byte * buf)
   if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
     table->timestamp_field->set_time();
   pthread_mutex_lock(&share->mutex);
-  written= gzwrite(share->archive_write, buf, table->reclength);
-  DBUG_PRINT("ha_archive::write_row", ("Wrote %d bytes expected %d", written, table->reclength));
+  written= gzwrite(share->archive_write, buf, table->s->reclength);
+  DBUG_PRINT("ha_archive::write_row", ("Wrote %d bytes expected %d", written, table->s->reclength));
   if (!delayed_insert || !bulk_insert)
     share->dirty= TRUE;
 
-  if (written != table->reclength)
+  if (written != table->s->reclength)
     goto error;
   /*
     We should probably mark the table as damagaged if the record is written
     but the blob fails.
   */
-  for (field= table->blob_field; *field; field++)
+  for (ptr= table->s->blob_field, end= ptr + table->s->blob_fields ;
+       ptr != end ;
+       ptr++)
   {
     char *ptr;
-    uint32 size= (*field)->get_length();
+    uint32 size= ((Field_blob*) table->field[*ptr])->get_length();
 
     if (size)
     {
-      (*field)->get_ptr(&ptr);
+      ((Field_blob*) table->field[*ptr])->get_ptr(&ptr);
       written= gzwrite(share->archive_write, ptr, (unsigned)size);
       if (written != size)
         goto error;
...
@@ -614,13 +616,13 @@ int ha_archive::rnd_init(bool scan)
 int ha_archive::get_row(gzFile file_to_read, byte *buf)
 {
   int read; // Bytes read, gzread() returns int
+  uint *ptr, *end;
   char *last;
   size_t total_blob_length= 0;
-  Field_blob **field;
   DBUG_ENTER("ha_archive::get_row");
 
-  read= gzread(file_to_read, buf, table->reclength);
-  DBUG_PRINT("ha_archive::get_row", ("Read %d bytes expected %d", read, table->reclength));
+  read= gzread(file_to_read, buf, table->s->reclength);
+  DBUG_PRINT("ha_archive::get_row", ("Read %d bytes expected %d", read, table->s->reclength));
 
   if (read == Z_STREAM_ERROR)
     DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
...
@@ -633,27 +635,31 @@ int ha_archive::get_row(gzFile file_to_read, byte *buf)
     If the record is the wrong size, the file is probably damaged, unless
     we are dealing with a delayed insert or a bulk insert.
   */
-  if ((ulong) read != table->reclength)
+  if ((ulong) read != table->s->reclength)
     DBUG_RETURN(HA_ERR_END_OF_FILE);
 
   /* Calculate blob length, we use this for our buffer */
-  for (field= table->blob_field; *field; field++)
-    total_blob_length += (*field)->get_length();
+  for (ptr= table->s->blob_field, end= ptr + table->s->blob_fields ;
+       ptr != end ;
+       ptr++)
+    total_blob_length += ((Field_blob*) table->field[*ptr])->get_length();
 
   /* Adjust our row buffer if we need be */
   buffer.alloc(total_blob_length);
   last= (char *)buffer.ptr();
 
   /* Loop through our blobs and read them */
-  for (field= table->blob_field; *field; field++)
+  for (ptr= table->s->blob_field, end= ptr + table->s->blob_fields ;
+       ptr != end ;
+       ptr++)
   {
-    size_t size= (*field)->get_length();
+    size_t size= ((Field_blob*) table->field[*ptr])->get_length();
     if (size)
     {
       read= gzread(file_to_read, last, size);
       if ((size_t) read != size)
         DBUG_RETURN(HA_ERR_END_OF_FILE);
-      (*field)->set_ptr(size, last);
+      ((Field_blob*) table->field[*ptr])->set_ptr(size, last);
       last += size;
     }
   }
...
@@ -753,8 +759,8 @@ int ha_archive::repair(THD* thd, HA_CHECK_OPT* check_opt)
     I know, this malloc'ing memory but this should be a very
     rare event.
   */
-  if (!(buf= (byte*) my_malloc(table->rec_buff_length > sizeof(ulonglong) + 1 ?
-                               table->rec_buff_length : sizeof(ulonglong) + 1,
+  if (!(buf= (byte*) my_malloc(table->s->rec_buff_length > sizeof(ulonglong) + 1 ?
+                               table->s->rec_buff_length : sizeof(ulonglong) + 1,
                                MYF(MY_WME))))
   {
     rc= HA_ERR_CRASHED_ON_USAGE;
...
@@ -894,7 +900,7 @@ void ha_archive::info(uint flag)
     VOID(my_stat(share->data_file_name, &file_stat, MYF(MY_WME)));
-    mean_rec_length= table->reclength + buffer.alloced_length();
+    mean_rec_length= table->s->reclength + buffer.alloced_length();
     data_file_length= file_stat.st_size;
     create_time= file_stat.st_ctime;
     update_time= file_stat.st_mtime;
...
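
Every ha_archive.cc hunk above follows the same two patterns: per-definition metadata (reclength, rec_buff_length) is now read through table->s instead of directly from TABLE, and blob columns are walked through an index array (table->s->blob_field, with table->s->blob_fields entries) instead of a null-terminated Field_blob ** list. The sketch below illustrates only that iteration idiom; the *_stub types are invented stand-ins for the server's TABLE structure and the shared table-definition object it reaches through table->s (presumably TABLE_SHARE in sql/table.h), reduced to the members this diff touches.

#include <cstdio>

// Hypothetical stub types for illustration; the real structures carry far
// more members and live in the server's sql/table.h.
struct Field_blob_stub
{
  unsigned long length;
  unsigned long get_length() const { return length; }
};

struct TABLE_SHARE_stub                // shared per-definition data ("table->s")
{
  unsigned long reclength;             // was table->reclength before this merge
  unsigned *blob_field;                // column indexes of the blob fields
  unsigned blob_fields;                // how many blob columns there are
};

struct TABLE_stub
{
  TABLE_SHARE_stub *s;
  Field_blob_stub **field;             // per-instance field objects, by column index
};

// The new iteration idiom used by the patch: walk the index array in the
// share and look each blob up in table->field[], rather than walking a
// null-terminated Field_blob ** array.
static unsigned long total_blob_length(const TABLE_stub *table)
{
  unsigned long total= 0;
  for (unsigned *ptr= table->s->blob_field,
                *end= ptr + table->s->blob_fields;
       ptr != end; ptr++)
    total+= table->field[*ptr]->get_length();
  return total;
}

int main()
{
  Field_blob_stub c0= {0}, c1= {128}, c2= {4096};
  Field_blob_stub *fields[]= {&c0, &c1, &c2};
  unsigned blob_idx[]= {1, 2};                   // columns 1 and 2 are blobs
  TABLE_SHARE_stub share= {24, blob_idx, 2};
  TABLE_stub table= {&share, fields};
  printf("total blob length: %lu\n", total_blob_length(&table));
  return 0;
}
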
sql/examples/ha_tina.cc
...
@@ -375,7 +375,7 @@ int ha_tina::find_current_row(byte *buf)
   }
   next_position= (end_ptr - share->mapped_file)+1;
 
   /* Maybe use \N for null? */
-  memset(buf, 0, table->null_bytes); /* We do not implement nulls! */
+  memset(buf, 0, table->s->null_bytes); /* We do not implement nulls! */
   DBUG_RETURN(0);
 }
...