Commit b951fc4e authored by Marko Mäkelä

Merge 10.2 into 10.3

parents 33215edc 8fb39b2c
@@ -635,7 +635,7 @@ buf_buddy_relocate(
 	if (buf_page_can_relocate(bpage)) {
 		/* Relocate the compressed page. */
-		uintmax_t usec = ut_time_us(NULL);
+		const ulonglong ns = my_interval_timer();
 		ut_a(bpage->zip.data == src);
@@ -651,7 +651,7 @@ buf_buddy_relocate(
 		buf_buddy_stat_t* buddy_stat = &buf_pool->buddy_stat[i];
 		buddy_stat->relocated++;
-		buddy_stat->relocated_usec += ut_time_us(NULL) - usec;
+		buddy_stat->relocated_usec+= (my_interval_timer() - ns) / 1000;
 		return(true);
 	}
...
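
The change above shows the pattern this merge applies throughout: sample a monotonic nanosecond timer before and after the measured operation, then fold the difference, divided by 1000, into the existing microsecond counters. A minimal standalone sketch of that pattern, assuming only standard C++; monotonic_ns() is an illustrative stand-in, not a server function (my_interval_timer() plays that role in the code above):

#include <chrono>
#include <cstdint>

/* Illustrative stand-in for my_interval_timer(): a monotonic clock read
in nanoseconds, usable for measuring intervals but not wall-clock time. */
static uint64_t monotonic_ns()
{
	return std::chrono::duration_cast<std::chrono::nanoseconds>(
		std::chrono::steady_clock::now().time_since_epoch()).count();
}

/* Time one operation and fold it into a microsecond counter, mirroring
the buf_buddy_relocate() statistics update above. */
static void timed_relocate(uint64_t& relocated_usec)
{
	const uint64_t ns = monotonic_ns();
	/* ... perform the relocation ... */
	relocated_usec += (monotonic_ns() - ns) / 1000;
}
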
@@ -1618,19 +1618,18 @@ fil_crypt_get_page_throttle_func(
 	state->crypt_stat.pages_read_from_disk++;
-	uintmax_t start = ut_time_us(NULL);
+	const ulonglong start = my_interval_timer();
 	block = buf_page_get_gen(page_id, page_size,
				 RW_X_LATCH,
				 NULL, BUF_GET_POSSIBLY_FREED,
				 file, line, mtr, &err);
-	uintmax_t end = ut_time_us(NULL);
-	if (end < start) {
-		end = start; // safety...
-	}
+	const ulonglong end = my_interval_timer();
 	state->cnt_waited++;
-	state->sum_waited_us += (end - start);
+	if (end > start) {
+		state->sum_waited_us += (end - start) / 1000;
+	}
 	/* average page load */
 	ulint add_sleeptime_ms = 0;
@@ -1954,7 +1953,7 @@ fil_crypt_flush_space(
 	bool success = false;
 	ulint n_pages = 0;
 	ulint sum_pages = 0;
-	uintmax_t start = ut_time_us(NULL);
+	const ulonglong start = my_interval_timer();
 	do {
 		success = buf_flush_lists(ULINT_MAX, end_lsn, &n_pages);
@@ -1962,11 +1961,11 @@ fil_crypt_flush_space(
 		sum_pages += n_pages;
 	} while (!success && !space->is_stopping());
-	uintmax_t end = ut_time_us(NULL);
+	const ulonglong end = my_interval_timer();
 	if (sum_pages && end > start) {
 		state->cnt_waited += sum_pages;
-		state->sum_waited_us += (end - start);
+		state->sum_waited_us += (end - start) / 1000;
 		/* statistics */
 		state->crypt_stat.pages_flushed += sum_pages;
...
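
In both fil_crypt_ functions above the bookkeeping is the same: cnt_waited counts timed events (or flushed pages) and sum_waited_us accumulates elapsed microseconds, so an average wait per page can be derived from the two. A reduced sketch of that accounting with hypothetical names (in the server the counters live in the crypt thread state object):

#include <cstdint>

struct wait_stats {
	uint64_t cnt_waited = 0;	/* number of timed events */
	uint64_t sum_waited_us = 0;	/* total waiting time, in microseconds */
};

/* Record one timed interval (nanosecond endpoints) and return the
running average wait in microseconds. */
static uint64_t record_wait(wait_stats& s, uint64_t start_ns, uint64_t end_ns)
{
	s.cnt_waited++;
	if (end_ns > start_ns) {
		s.sum_waited_us += (end_ns - start_ns) / 1000;
	}
	return s.sum_waited_us / s.cnt_waited;
}
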
@@ -1719,12 +1719,13 @@ innobase_srv_conc_enter_innodb(
 	} else if (trx->mysql_thd != NULL
 		   && thd_is_replication_slave_thread(trx->mysql_thd)) {
-		UT_WAIT_FOR(
-			srv_conc_get_active_threads()
-			< srv_thread_concurrency,
-			srv_replication_delay * 1000);
+		const ulonglong end = my_interval_timer()
+			+ ulonglong(srv_replication_delay) * 1000000;
+		while (srv_conc_get_active_threads()
+		       >= srv_thread_concurrency
+		       || my_interval_timer() >= end) {
+			os_thread_sleep(2000 /* 2 ms */);
+		}
 	} else {
 		srv_conc_enter_innodb(prebuilt);
 	}
...
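
The removed UT_WAIT_FOR macro (its definition disappears in a later hunk of this commit) polled a condition every 2 ms for at most a given number of microseconds. Expressed against a monotonic deadline, as the replacement loop above is, the general technique looks roughly like this sketch; wait_for_or_timeout() is a hypothetical helper written with standard C++, not a server API:

#include <chrono>
#include <cstdint>
#include <functional>
#include <thread>

/* Poll cond every 2 ms until it becomes true or max_wait_us microseconds
have elapsed. Returns whether the condition was met before the deadline. */
static bool wait_for_or_timeout(const std::function<bool()>& cond,
				uint64_t max_wait_us)
{
	using clock = std::chrono::steady_clock;
	const auto deadline = clock::now()
		+ std::chrono::microseconds(max_wait_us);
	while (!cond()) {
		if (clock::now() >= deadline) {
			return false;	/* timed out */
		}
		std::this_thread::sleep_for(std::chrono::milliseconds(2));
	}
	return true;
}
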
@@ -2,7 +2,7 @@
 Copyright (c) 2010, 2015, Oracle and/or its affiliates. All Rights Reserved.
 Copyright (c) 2012, Facebook Inc.
-Copyright (c) 2013, 2018, MariaDB Corporation.
+Copyright (c) 2013, 2019, MariaDB Corporation.
 This program is free software; you can redistribute it and/or modify it
 under the terms of the GNU General Public License as published by the
@@ -720,8 +720,8 @@ monitor counter
 #define MONITOR_INC_TIME_IN_MICRO_SECS(monitor, value)	\
 	MONITOR_CHECK_DEFINED(value);			\
 	if (MONITOR_IS_ON(monitor)) {			\
-		uintmax_t old_time = (value);		\
-		value = ut_time_us(NULL);		\
+		uintmax_t old_time = value;		\
+		value = microsecond_interval_timer();	\
 		MONITOR_VALUE(monitor) += (mon_type_t) (value - old_time);\
 	}
...
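
The macro above is a resettable stopwatch: the caller keeps a microsecond timestamp in `value`, and each invocation adds the elapsed time to the monitor while refreshing `value` so the next measurement starts from now (the srv_master_do_active_tasks()/srv_master_do_idle_tasks() hunks below use it exactly this way). Stripped of the monitor plumbing, the pattern is roughly this sketch; now_us() is an illustrative stand-in for microsecond_interval_timer():

#include <chrono>
#include <cstdint>

/* Illustrative stand-in for microsecond_interval_timer(). */
static uint64_t now_us()
{
	return std::chrono::duration_cast<std::chrono::microseconds>(
		std::chrono::steady_clock::now().time_since_epoch()).count();
}

/* Add the time elapsed since *counter_time to *total_us and restart the
stopwatch, mirroring the macro body above. */
static void add_elapsed_and_restart(uint64_t* counter_time, uint64_t* total_us)
{
	const uint64_t old_time = *counter_time;
	*counter_time = now_us();
	*total_us += *counter_time - old_time;
}
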
@@ -54,22 +54,6 @@ Created 1/20/1994 Heikki Tuuri
 /** Time stamp */
 typedef time_t ib_time_t;
-/*********************************************************************//**
-Delays execution for at most max_wait_us microseconds or returns earlier
-if cond becomes true.
-@param cond in: condition to wait for; evaluated every 2 ms
-@param max_wait_us in: maximum delay to wait, in microseconds */
-# define UT_WAIT_FOR(cond, max_wait_us)				\
-do {								\
-	uintmax_t	start_us;				\
-	start_us = ut_time_us(NULL);				\
-	while (!(cond)						\
-	       && ut_time_us(NULL) - start_us < (max_wait_us)) {\
-								\
-		os_thread_sleep(2000 /* 2 ms */);		\
-	}							\
-} while (0)
 #define ut_max std::max
 #define ut_min std::min
@@ -192,15 +176,6 @@ ut_usectime(
 	ulint*	ms);	/*!< out: microseconds since the Epoch+*sec */
 /**********************************************************//**
-Returns the number of microseconds since epoch. Similar to
-time(3), the return value is also stored in *tloc, provided
-that tloc is non-NULL.
-@return us since epoch */
-uintmax_t
-ut_time_us(
-/*=======*/
-	uintmax_t*	tloc);	/*!< out: us since epoch, if non-NULL */
-/**********************************************************//**
 Returns the number of milliseconds since some epoch. The
 value may wrap around. It should only be used for heuristic
 purposes.
...
@@ -41,7 +41,7 @@ number between 0 and 2^64-1 inclusive. The formula and the constants
 being used are:
 X[n+1] = (a * X[n] + c) mod m
 where:
-X[0] = ut_time_us(NULL)
+X[0] = my_interval_timer()
 a = 1103515245 (3^5 * 5 * 7 * 129749)
 c = 12345 (3 * 5 * 823)
 m = 18446744073709551616 (2^64)
@@ -54,12 +54,10 @@ page_cur_lcg_prng(void)
 {
 #define LCG_a	1103515245
 #define LCG_c	12345
-	static ib_uint64_t	lcg_current = 0;
-	static ibool		initialized = FALSE;
-	if (!initialized) {
-		lcg_current = (ib_uint64_t) ut_time_us(NULL);
-		initialized = TRUE;
+	static uint64_t	lcg_current;
+	if (!lcg_current) {
+		lcg_current = my_interval_timer();
 	}
 	/* no need to "% 2^64" explicitly because lcg_current is
...
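
The comment in the hunk above fully specifies the generator: a 64-bit linear congruential sequence X[n+1] = a * X[n] + c with a = 1103515245 and c = 12345, where the modulus 2^64 is implicit in unsigned overflow, seeded lazily on first use from the interval timer. A self-contained sketch of the same recurrence (the seed source is swapped for std::chrono, since my_interval_timer() belongs to the server's portability layer):

#include <chrono>
#include <cstdint>

/* 64-bit LCG; the "mod 2^64" is implicit in uint64_t wraparound. */
static uint64_t lcg_next()
{
	static uint64_t lcg_current;	/* X[n]; 0 means "not yet seeded" */
	if (!lcg_current) {
		/* lazy seeding from a monotonic clock tick count */
		lcg_current = uint64_t(std::chrono::steady_clock::now()
				       .time_since_epoch().count());
	}
	lcg_current = 1103515245ULL * lcg_current + 12345ULL;
	return lcg_current;
}

Folding the old `initialized` flag into the seed value, as the hunk does, relies on a zero timer reading being practically impossible; in the worst case a zero value only causes one extra re-seed on the next call.
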
@@ -1274,7 +1274,7 @@ page_zip_compress(
 	byte*		storage;	/* storage of uncompressed
					columns */
 	index_id_t	ind_id;
-	uintmax_t	usec = ut_time_us(NULL);
+	const ulonglong	ns = my_interval_timer();
 #ifdef PAGE_ZIP_COMPRESS_DBG
 	FILE*		logfile = NULL;
 #endif
@@ -1561,7 +1561,7 @@ page_zip_compress(
 		dict_index_zip_failure(index);
 	}
-	uintmax_t	time_diff = ut_time_us(NULL) - usec;
+	const uint64_t	time_diff = (my_interval_timer() - ns) / 1000;
 	page_zip_stat[page_zip->ssize - 1].compressed_usec
 		+= time_diff;
 	if (cmp_per_index_enabled) {
@@ -1627,7 +1627,7 @@ page_zip_compress(
 		fclose(logfile);
 	}
 #endif /* PAGE_ZIP_COMPRESS_DBG */
-	uintmax_t	time_diff = ut_time_us(NULL) - usec;
+	const uint64_t	time_diff = (my_interval_timer() - ns) / 1000;
 	page_zip_stat[page_zip->ssize - 1].compressed_ok++;
 	page_zip_stat[page_zip->ssize - 1].compressed_usec += time_diff;
 	if (cmp_per_index_enabled) {
@@ -3250,13 +3250,13 @@ page_zip_decompress(
 				page header fields that should not change
 				after page creation */
 {
-	uintmax_t	usec = ut_time_us(NULL);
+	const ulonglong	ns = my_interval_timer();
 	if (!page_zip_decompress_low(page_zip, page, all)) {
 		return(FALSE);
 	}
-	uintmax_t	time_diff = ut_time_us(NULL) - usec;
+	const uint64_t	time_diff = (my_interval_timer() - ns) / 1000;
 	page_zip_stat[page_zip->ssize - 1].decompressed++;
 	page_zip_stat[page_zip->ssize - 1].decompressed_usec += time_diff;
...
@@ -897,7 +897,8 @@ row_vers_old_has_index_entry(
 	ut_ad(mtr_memo_contains_page_flagged(mtr, rec, MTR_MEMO_PAGE_X_FIX
					     | MTR_MEMO_PAGE_S_FIX));
-	ut_ad(!rw_lock_own(&(purge_sys.latch), RW_LOCK_S));
+	ut_ad(!rw_lock_own(&purge_sys.latch, RW_LOCK_S));
+	ut_ad(also_curr || !vcol_info);
 	clust_index = dict_table_get_first_index(index->table);
@@ -964,7 +965,7 @@ row_vers_old_has_index_entry(
 			entry = row_build_index_entry(
 				row, ext, index, heap);
 			if (entry && !dtuple_coll_cmp(ientry, entry)) {
-				goto safe_to_purge;
+				goto unsafe_to_purge;
 			}
 		} else {
 			/* Build index entry out of row */
@@ -985,7 +986,7 @@ row_vers_old_has_index_entry(
 				    clust_index, clust_offsets,
 				    index, ientry, roll_ptr,
 				    trx_id, NULL, &vrow, mtr)) {
-				goto safe_to_purge;
+				goto unsafe_to_purge;
 			}
 		}
 		clust_offsets = rec_get_offsets(rec, clust_index, NULL,
@@ -1018,7 +1019,7 @@ row_vers_old_has_index_entry(
 			a different binary value in a char field, but the
 			collation identifies the old and new value anyway! */
 			if (entry && !dtuple_coll_cmp(ientry, entry)) {
-safe_to_purge:
+unsafe_to_purge:
 				mem_heap_free(heap);
 				if (v_heap) {
@@ -1058,7 +1059,6 @@ row_vers_old_has_index_entry(
 		if (!prev_version) {
 			/* Versions end here */
-unsafe_to_purge:
 			mem_heap_free(heap);
 			if (v_heap) {
@@ -1120,7 +1120,7 @@ row_vers_old_has_index_entry(
 			and new value anyway! */
 			if (entry && !dtuple_coll_cmp(ientry, entry)) {
-				goto safe_to_purge;
+				goto unsafe_to_purge;
 			}
 		}
...
@@ -2129,7 +2129,7 @@ srv_master_do_active_tasks(void)
 /*============================*/
 {
 	ib_time_t	cur_time = ut_time();
-	uintmax_t	counter_time = ut_time_us(NULL);
+	ulonglong	counter_time = microsecond_interval_timer();
 	/* First do the tasks that we are suppose to do at each
 	invocation of this function. */
@@ -2159,7 +2159,7 @@ srv_master_do_active_tasks(void)
 	/* Do an ibuf merge */
 	srv_main_thread_op_info = "doing insert buffer merge";
-	counter_time = ut_time_us(NULL);
+	counter_time = microsecond_interval_timer();
 	ibuf_merge_in_background(false);
 	MONITOR_INC_TIME_IN_MICRO_SECS(
		MONITOR_SRV_IBUF_MERGE_MICROSECOND, counter_time);
@@ -2224,8 +2224,6 @@ void
 srv_master_do_idle_tasks(void)
 /*==========================*/
 {
-	uintmax_t	counter_time;
 	++srv_main_idle_loops;
 	MONITOR_INC(MONITOR_MASTER_IDLE_LOOPS);
@@ -2234,7 +2232,7 @@ srv_master_do_idle_tasks(void)
 	/* ALTER TABLE in MySQL requires on Unix that the table handler
 	can drop tables lazily after there no longer are SELECT
 	queries to them. */
-	counter_time = ut_time_us(NULL);
+	ulonglong counter_time = microsecond_interval_timer();
 	srv_main_thread_op_info = "doing background drop tables";
 	row_drop_tables_for_mysql_in_background();
 	MONITOR_INC_TIME_IN_MICRO_SECS(
@@ -2253,7 +2251,7 @@ srv_master_do_idle_tasks(void)
 	log_free_check();
 	/* Do an ibuf merge */
-	counter_time = ut_time_us(NULL);
+	counter_time = microsecond_interval_timer();
 	srv_main_thread_op_info = "doing insert buffer merge";
 	ibuf_merge_in_background(true);
 	MONITOR_INC_TIME_IN_MICRO_SECS(
...
@@ -140,9 +140,8 @@ struct i_s_table_cache_t {
 struct trx_i_s_cache_t {
 	rw_lock_t	rw_lock;	/*!< read-write lock protecting
					the rest of this structure */
-	uintmax_t	last_read;	/*!< last time the cache was read;
-					measured in microseconds since
-					epoch */
+	ulonglong	last_read;	/*!< last time the cache was read;
+					measured in nanoseconds */
 	ib_mutex_t	last_read_mutex;/*!< mutex protecting the
					last_read member - it is updated
					inside a shared lock of the
@@ -1183,22 +1182,16 @@ add_trx_relevant_locks_to_cache(
 }
 /** The minimum time that a cache must not be updated after it has been
-read for the last time; measured in microseconds. We use this technique
+read for the last time; measured in nanoseconds. We use this technique
 to ensure that SELECTs which join several INFORMATION SCHEMA tables read
 the same version of the cache. */
-#define CACHE_MIN_IDLE_TIME_US	100000	/* 0.1 sec */
+#define CACHE_MIN_IDLE_TIME_NS	100000000 /* 0.1 sec */
 /*******************************************************************//**
 Checks if the cache can safely be updated.
-@return TRUE if can be updated */
-static
-ibool
-can_cache_be_updated(
-/*=================*/
-	trx_i_s_cache_t*	cache)	/*!< in: cache */
+@return whether the cache can be updated */
+static bool can_cache_be_updated(trx_i_s_cache_t* cache)
 {
-	uintmax_t	now;
 	/* Here we read cache->last_read without acquiring its mutex
 	because last_read is only updated when a shared rw lock on the
 	whole cache is being held (see trx_i_s_cache_end_read()) and
@@ -1208,13 +1201,7 @@ can_cache_be_updated(
 	ut_ad(rw_lock_own(&cache->rw_lock, RW_LOCK_X));
-	now = ut_time_us(NULL);
-	if (now - cache->last_read > CACHE_MIN_IDLE_TIME_US) {
-		return(TRUE);
-	}
-	return(FALSE);
+	return my_interval_timer() - cache->last_read > CACHE_MIN_IDLE_TIME_NS;
 }
 /*******************************************************************//**
@@ -1314,8 +1301,7 @@ trx_i_s_possibly_fetch_data_into_cache(
 	lock_mutex_exit();
 	/* update cache last read time */
-	time_t now = ut_time_us(NULL);
-	cache->last_read = now;
+	cache->last_read = my_interval_timer();
 	return(0);
 }
@@ -1405,12 +1391,10 @@ trx_i_s_cache_end_read(
 /*===================*/
 	trx_i_s_cache_t*	cache)	/*!< in: cache */
 {
-	uintmax_t	now;
 	ut_ad(rw_lock_own(&cache->rw_lock, RW_LOCK_S));
 	/* update cache last read time */
-	now = ut_time_us(NULL);
+	const ulonglong now = my_interval_timer();
 	mutex_enter(&cache->last_read_mutex);
 	cache->last_read = now;
 	mutex_exit(&cache->last_read_mutex);
...
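
The technique documented above is a read-side throttle: every reader stamps last_read with the interval timer when it finishes, and a writer may rebuild the cache only after it has been idle for CACHE_MIN_IDLE_TIME_NS, so a SELECT joining several INFORMATION_SCHEMA tables keeps seeing one snapshot. A reduced sketch of that logic with the locking omitted and the nanosecond source swapped for std::chrono (names are illustrative, not the server's):

#include <atomic>
#include <chrono>
#include <cstdint>

static constexpr uint64_t CACHE_MIN_IDLE_TIME_NS = 100000000;	/* 0.1 s */

static uint64_t interval_ns()
{
	return std::chrono::duration_cast<std::chrono::nanoseconds>(
		std::chrono::steady_clock::now().time_since_epoch()).count();
}

struct i_s_cache {
	std::atomic<uint64_t> last_read{0};	/* timer value at the last read */

	/* Reader side, like trx_i_s_cache_end_read() above. */
	void end_read() { last_read = interval_ns(); }

	/* Writer side, like can_cache_be_updated() above: refresh only
	after the cache has been left alone long enough. */
	bool can_be_updated() const
	{
		return interval_ns() - last_read > CACHE_MIN_IDLE_TIME_NS;
	}
};
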
@@ -618,23 +618,6 @@ ut_crc32_init()
 	ut_cpuid(vend, &model, &family, &stepping,
		 &features_ecx, &features_edx);
-	/* Valgrind does not understand the CRC32 instructions:
-	vex amd64->IR: unhandled instruction bytes: 0xF2 0x48 0xF 0x38 0xF0 0xA
-	valgrind: Unrecognised instruction at address 0xad3db5.
-	Your program just tried to execute an instruction that Valgrind
-	did not recognise. There are two possible reasons for this.
-	1. Your program has a bug and erroneously jumped to a non-code
-	   location. If you are running Memcheck and you just saw a
-	   warning about a bad jump, it's probably your program's fault.
-	2. The instruction is legitimate but Valgrind doesn't handle it,
-	   i.e. it's Valgrind's fault. If you think this is the case or
-	   you are not sure, please let us know and we'll try to fix it.
-	Either way, Valgrind will now raise a SIGILL signal which will
-	probably kill your program.
-	*/
 	if (features_ecx & 1 << 20) {
 		ut_crc32 = ut_crc32_hw;
 #ifdef INNODB_BUG_ENDIAN_CRC32
...
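
The bit tested above, features_ecx & 1 << 20, is the SSE4.2 feature flag in ECX of CPUID leaf 1, which is what gates the hardware CRC32 path selected by ut_crc32_init(). For illustration only, the same feature test written against the GCC/Clang <cpuid.h> helper on x86; this is an assumption-laden sketch, not the server's detection code:

#include <cpuid.h>
#include <cstdint>

/* True if the CPU advertises SSE4.2 (and therefore the CRC32 instructions):
CPUID leaf 1, ECX bit 20 -- the same bit ut_crc32_init() checks. */
static bool have_sse42()
{
	unsigned eax, ebx, ecx, edx;
	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx)) {
		return false;	/* CPUID leaf 1 not supported */
	}
	return (ecx & (1U << 20)) != 0;
}
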
@@ -145,30 +145,6 @@ ut_usectime(
 	return(ret);
 }
-/**********************************************************//**
-Returns the number of microseconds since epoch. Similar to
-time(3), the return value is also stored in *tloc, provided
-that tloc is non-NULL.
-@return us since epoch */
-uintmax_t
-ut_time_us(
-/*=======*/
-	uintmax_t*	tloc)	/*!< out: us since epoch, if non-NULL */
-{
-	struct timeval	tv;
-	uintmax_t	us;
-	ut_gettimeofday(&tv, NULL);
-	us = uintmax_t(tv.tv_sec) * 1000000 + uintmax_t(tv.tv_usec);
-	if (tloc != NULL) {
-		*tloc = us;
-	}
-	return(us);
-}
 /**********************************************************//**
 Returns the number of milliseconds since some epoch. The
 value may wrap around. It should only be used for heuristic
@@ -178,11 +154,7 @@ ulint
 ut_time_ms(void)
 /*============*/
 {
-	struct timeval	tv;
-	ut_gettimeofday(&tv, NULL);
-	return(ulint(tv.tv_sec) * 1000 + ulint(tv.tv_usec / 1000));
+	return static_cast<ulint>(my_interval_timer() / 1000000);
 }
...
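
ut_time_us() was a wall-clock source built on gettimeofday(), so its values could jump when the system clock was adjusted; my_interval_timer() is an interval (monotonic) timer that counts nanoseconds, which is why the callers earlier in this commit divide by 1000 for microseconds and, as above, by 1000000 for milliseconds. A rough POSIX approximation of such a timer, shown only to make the unit conversions concrete (the server's real implementation may differ):

#include <cstdint>
#include <ctime>

/* Approximation of a monotonic nanosecond interval timer on POSIX. */
static uint64_t interval_timer_ns()
{
	timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return uint64_t(ts.tv_sec) * 1000000000ULL + uint64_t(ts.tv_nsec);
}

static uint64_t interval_timer_us() { return interval_timer_ns() / 1000; }
static uint64_t interval_timer_ms() { return interval_timer_ns() / 1000000; }
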