Commit f693b725 authored by Marko Mäkelä

MDEV-24270: Clarify some comments

parent 2de95f7a
@@ -264,7 +264,7 @@ struct os_file_size_t {
   os_offset_t m_alloc_size;
 };
-static const ulint OS_AIO_N_PENDING_IOS_PER_THREAD = 256;
+constexpr ulint OS_AIO_N_PENDING_IOS_PER_THREAD= 256;
 extern ulint os_n_file_reads;
 extern ulint os_n_file_writes;
...
@@ -22,13 +22,22 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111 - 1301 USA*/
 # include <libaio.h>
 # include <sys/syscall.h>
 
-/*
-  A hack, which so far seems to allow the getevents thread to be interrupted
-  by io_destroy() from another thread.
-
-  libaio's io_getevents() would sometimes crash when attempting this feat,
-  thus the raw syscall.
-*/
+/**
+  Invoke the io_getevents() system call.
+
+  @param ctx     context from io_setup()
+  @param min_nr  minimum number of completion events to wait for
+  @param nr      maximum number of completion events to collect
+  @param ev      the collected events
+
+  In https://pagure.io/libaio/c/7cede5af5adf01ad26155061cc476aad0804d3fc
+  the io_getevents() implementation in libaio was "optimized" so that it
+  would elide the system call when there are no outstanding requests
+  and a timeout was specified.
+
+  The libaio code for dereferencing ctx would occasionally trigger
+  SIGSEGV if io_destroy() was concurrently invoked from another thread.
+  Hence, we use the raw system call.
+*/
 static int my_getevents(io_context_t ctx, long min_nr, long nr, io_event *ev)
 {
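
The body of my_getevents() lies outside the changed range, so only its declaration appears in the hunk above. For context, here is a minimal sketch of such a raw-syscall wrapper; it is an assumption based on the new comment, not the committed body, and the convention of returning a negated errno value mirrors libaio.

#include <libaio.h>      /* io_context_t, io_event */
#include <sys/syscall.h> /* __NR_io_getevents */
#include <unistd.h>      /* syscall() */
#include <cerrno>        /* errno */

static int my_getevents(io_context_t ctx, long min_nr, long nr, io_event *ev)
{
  int saved_errno= errno;
  /* A null timeout blocks until at least min_nr completions arrive;
  invoking the kernel directly avoids libaio's dereference of ctx. */
  int ret= int(syscall(__NR_io_getevents, reinterpret_cast<long>(ctx),
                       min_nr, nr, ev, nullptr));
  if (ret < 0)
  {
    /* Report the error as a negative value, as libaio does,
    and restore errno for the caller. */
    ret= -errno;
    errno= saved_errno;
  }
  return ret;
}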
@@ -59,8 +68,6 @@ namespace tpool
 {
 #ifdef LINUX_NATIVE_AIO
 
-#define MAX_EVENTS 256
-
 class aio_linux final : public aio
 {
   thread_pool *m_pool;
@@ -70,6 +77,10 @@ class aio_linux final : public aio
   static void getevent_thread_routine(aio_linux *aio)
   {
+    /* We collect this many events at a time. os_aio_init() would multiply
+    OS_AIO_N_PENDING_IOS_PER_THREAD by the number of read and write threads
+    and ultimately pass it to io_setup() via thread_pool::configure_aio(). */
+    constexpr unsigned MAX_EVENTS= 256;
     io_event events[MAX_EVENTS];
     for (;;)
     {
...
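
The new MAX_EVENTS constant caps how many completions the getevents thread harvests per io_getevents() call. Below is a minimal sketch of such a collection loop, not the tpool implementation: the name collect_completions and the on_complete callback are illustrative stand-ins for the dispatch into the thread pool.

#include <libaio.h>
#include <cerrno>

/* Sketch only; assumes a raw-syscall wrapper like my_getevents() above. */
static void collect_completions(io_context_t ctx,
                                void (*on_complete)(iocb *request, long result))
{
  constexpr unsigned MAX_EVENTS= 256;
  io_event events[MAX_EVENTS];
  for (;;)
  {
    /* Block until at least one request completes; harvest up to
    MAX_EVENTS completions with a single system call. */
    int ret= my_getevents(ctx, 1, MAX_EVENTS, events);
    if (ret == -EINTR || ret == 0)
      continue;
    if (ret < 0)
      break; /* e.g. -EINVAL after io_destroy() tore down the context */
    for (int i= 0; i < ret; i++)
      /* events[i].obj is the iocb that was passed to io_submit();
      events[i].res holds the byte count or a negative errno. */
      on_complete(events[i].obj, long(events[i].res));
  }
}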