Commit a0bc3b7e authored by Michael Widenius's avatar Michael Widenius Committed by Monty

Change read_to_buffer to use ulong instead of uint

This is mostly to document that read_to_buffer can read more than 65K.
Also changed merge_buffers to return bool instead of int
parent 062a3176
......@@ -1508,21 +1508,21 @@ int merge_many_buff(Sort_param *param, uchar *sort_buffer,
Read data to buffer.
@retval Number of bytes read
(uint)-1 if something goes wrong
(ulong)-1 if something goes wrong
*/
uint read_to_buffer(IO_CACHE *fromfile, BUFFPEK *buffpek,
ulong read_to_buffer(IO_CACHE *fromfile, BUFFPEK *buffpek,
uint rec_length)
{
uint count;
uint length= 0;
register ulong count;
ulong length= 0;
if ((count=(uint) MY_MIN((ha_rows) buffpek->max_keys,buffpek->count)))
if ((count= (ulong) MY_MIN((ha_rows) buffpek->max_keys,buffpek->count)))
{
length= rec_length*count;
if (unlikely(my_b_pread(fromfile, (uchar*) buffpek->base, length,
buffpek->file_pos)))
return ((uint) -1);
return ((ulong) -1);
buffpek->key=buffpek->base;
buffpek->file_pos+= length; /* New filepos */
buffpek->count-= count;
......@@ -1582,18 +1582,18 @@ void reuse_freed_buff(QUEUE *queue, BUFFPEK *reuse, uint key_length)
@retval
0 OK
@retval
other error
1 ERROR
*/
int merge_buffers(Sort_param *param, IO_CACHE *from_file,
bool merge_buffers(Sort_param *param, IO_CACHE *from_file,
IO_CACHE *to_file, uchar *sort_buffer,
BUFFPEK *lastbuff, BUFFPEK *Fb, BUFFPEK *Tb,
int flag)
{
int error;
bool error= 0;
uint rec_length,res_length,offset;
size_t sort_length;
ulong maxcount;
ulong maxcount, bytes_read;
ha_rows max_rows,org_max_rows;
my_off_t to_start_filepos;
uchar *strpos;
......@@ -1611,7 +1611,6 @@ int merge_buffers(Sort_param *param, IO_CACHE *from_file,
thd->inc_status_sort_merge_passes();
thd->query_plan_fsort_passes++;
error=0;
rec_length= param->rec_length;
res_length= param->res_length;
sort_length= param->sort_length;
......@@ -1639,18 +1638,18 @@ int merge_buffers(Sort_param *param, IO_CACHE *from_file,
cmp= get_ptr_compare(sort_length);
first_cmp_arg= (void*) &sort_length;
}
if (init_queue(&queue, (uint) (Tb-Fb)+1, offsetof(BUFFPEK,key), 0,
(queue_compare) cmp, first_cmp_arg, 0, 0))
if (unlikely(init_queue(&queue, (uint) (Tb-Fb)+1, offsetof(BUFFPEK,key), 0,
(queue_compare) cmp, first_cmp_arg, 0, 0)))
DBUG_RETURN(1); /* purecov: inspected */
for (buffpek= Fb ; buffpek <= Tb ; buffpek++)
{
buffpek->base= strpos;
buffpek->max_keys= maxcount;
strpos+=
(uint) (error= (int) read_to_buffer(from_file, buffpek, rec_length));
if (unlikely(error == -1))
bytes_read= read_to_buffer(from_file, buffpek, rec_length);
if (unlikely(bytes_read == (ulong) -1))
goto err; /* purecov: inspected */
strpos+= bytes_read;
buffpek->max_keys= buffpek->mem_count; // If less data in buffers than expected
queue_insert(&queue, (uchar*) buffpek);
}
......@@ -1670,13 +1669,13 @@ int merge_buffers(Sort_param *param, IO_CACHE *from_file,
buffpek->key+= rec_length;
if (! --buffpek->mem_count)
{
if (unlikely(!(error= (int) read_to_buffer(from_file, buffpek,
if (unlikely(!(bytes_read= read_to_buffer(from_file, buffpek,
rec_length))))
{
(void) queue_remove_top(&queue);
reuse_freed_buff(&queue, buffpek, rec_length);
}
else if (unlikely(error == -1))
else if (unlikely(bytes_read == (ulong) -1))
goto err; /* purecov: inspected */
}
queue_replace_top(&queue); // Top element has been used
......@@ -1687,9 +1686,8 @@ int merge_buffers(Sort_param *param, IO_CACHE *from_file,
while (queue.elements > 1)
{
if (killable && unlikely(thd->check_killed()))
{
error= 1; goto err; /* purecov: inspected */
}
goto err; /* purecov: inspected */
for (;;)
{
buffpek= (BUFFPEK*) queue_top(&queue);
......@@ -1726,9 +1724,7 @@ int merge_buffers(Sort_param *param, IO_CACHE *from_file,
if (!check_dupl_count || dupl_count >= min_dupl_count)
{
if (my_b_write(to_file, src+wr_offset, wr_len))
{
error=1; goto err; /* purecov: inspected */
}
goto err; /* purecov: inspected */
}
if (cmp)
{
......@@ -1739,7 +1735,7 @@ int merge_buffers(Sort_param *param, IO_CACHE *from_file,
}
if (!--max_rows)
{
error= 0; /* purecov: inspected */
/* Nothing more to do */
goto end; /* purecov: inspected */
}
......@@ -1747,14 +1743,14 @@ int merge_buffers(Sort_param *param, IO_CACHE *from_file,
buffpek->key+= rec_length;
if (! --buffpek->mem_count)
{
if (unlikely(!(error= (int) read_to_buffer(from_file, buffpek,
if (unlikely(!(bytes_read= read_to_buffer(from_file, buffpek,
rec_length))))
{
(void) queue_remove_top(&queue);
reuse_freed_buff(&queue, buffpek, rec_length);
break; /* One buffer have been removed */
}
else if (error == -1)
else if (unlikely(bytes_read == (ulong) -1))
goto err; /* purecov: inspected */
}
queue_replace_top(&queue); /* Top element has been replaced */
......@@ -1790,16 +1786,11 @@ int merge_buffers(Sort_param *param, IO_CACHE *from_file,
{
src= unique_buff;
if (my_b_write(to_file, src+wr_offset, wr_len))
{
error=1; goto err; /* purecov: inspected */
}
goto err; /* purecov: inspected */
if (!--max_rows)
{
error= 0;
goto end;
}
}
}
do
{
......@@ -1813,9 +1804,7 @@ int merge_buffers(Sort_param *param, IO_CACHE *from_file,
{
if (my_b_write(to_file, (uchar*) buffpek->key,
(size_t)(rec_length*buffpek->mem_count)))
{
error= 1; goto err; /* purecov: inspected */
}
goto err; /* purecov: inspected */
}
else
{
......@@ -1832,21 +1821,26 @@ int merge_buffers(Sort_param *param, IO_CACHE *from_file,
continue;
}
if (my_b_write(to_file, src, wr_len))
{
error=1; goto err;
}
goto err;
}
}
}
while (likely((error=(int) read_to_buffer(from_file, buffpek, rec_length))
!= -1 && error != 0));
while (likely(!(error=
(bytes_read= read_to_buffer(from_file, buffpek,
rec_length)) == (ulong) -1)) &&
bytes_read != 0);
end:
lastbuff->count= MY_MIN(org_max_rows-max_rows, param->max_rows);
lastbuff->file_pos= to_start_filepos;
err:
cleanup:
delete_queue(&queue);
DBUG_RETURN(error);
err:
error= 1;
goto cleanup;
} /* merge_buffers */
......
......@@ -100,9 +100,9 @@ class Sort_param {
int merge_many_buff(Sort_param *param, uchar *sort_buffer,
BUFFPEK *buffpek,
uint *maxbuffer, IO_CACHE *t_file);
uint read_to_buffer(IO_CACHE *fromfile,BUFFPEK *buffpek,
ulong read_to_buffer(IO_CACHE *fromfile,BUFFPEK *buffpek,
uint sort_length);
int merge_buffers(Sort_param *param,IO_CACHE *from_file,
bool merge_buffers(Sort_param *param,IO_CACHE *from_file,
IO_CACHE *to_file, uchar *sort_buffer,
BUFFPEK *lastbuff,BUFFPEK *Fb,
BUFFPEK *Tb,int flag);
......
......@@ -509,7 +509,7 @@ static bool merge_walk(uchar *merge_buffer, size_t merge_buffer_size,
key_length);
/* if piece_size is aligned reuse_freed_buffer will always hit */
uint piece_size= max_key_count_per_piece * key_length;
uint bytes_read; /* to hold return value of read_to_buffer */
ulong bytes_read; /* to hold return value of read_to_buffer */
BUFFPEK *top;
int res= 1;
uint cnt_ofs= key_length - (with_counters ? sizeof(element_count) : 0);
......@@ -525,7 +525,7 @@ static bool merge_walk(uchar *merge_buffer, size_t merge_buffer_size,
top->base= merge_buffer + (top - begin) * piece_size;
top->max_keys= max_key_count_per_piece;
bytes_read= read_to_buffer(file, top, key_length);
if (unlikely(bytes_read == (uint) (-1)))
if (unlikely(bytes_read == (ulong) -1))
goto end;
DBUG_ASSERT(bytes_read);
queue_insert(&queue, (uchar *) top);
......@@ -554,9 +554,9 @@ static bool merge_walk(uchar *merge_buffer, size_t merge_buffer_size,
memcpy(save_key_buff, old_key, key_length);
old_key= save_key_buff;
bytes_read= read_to_buffer(file, top, key_length);
if (unlikely(bytes_read == (uint) (-1)))
if (unlikely(bytes_read == (ulong) -1))
goto end;
else if (bytes_read > 0) /* top->key, top->mem_count are reset */
else if (bytes_read) /* top->key, top->mem_count are reset */
queue_replace_top(&queue); /* in read_to_buffer */
else
{
......@@ -602,7 +602,7 @@ static bool merge_walk(uchar *merge_buffer, size_t merge_buffer_size,
}
while (--top->mem_count);
bytes_read= read_to_buffer(file, top, key_length);
if (unlikely(bytes_read == (uint) (-1)))
if (unlikely(bytes_read == (ulong) -1))
goto end;
}
while (bytes_read);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment