From 87f5e1ce8db1b4da4821a95fa0278a96201fa566 Mon Sep 17 00:00:00 2001
From: Krunal Bauskar
Date: Mon, 6 Jul 2020 14:59:47 +0800
Subject: [PATCH] Switch to relaxed memory ordering for redo-log counters

- The redo log flow maintains multiple counters to track statistics such as
  the redo-log size, free space, flushed_upto lsn, checkpoint_upto lsn, etc.

- All of these counters track the state of the redo log; they are not used
  to synchronize access to critical resources.

- That makes them natural candidates for relaxed memory ordering, which is
  what this patch switches them to.
---
 include/my_atomic.h                  | 42 ++++++++++++++++++++++++++++
 storage/innobase/include/log0types.h |  7 +++--
 storage/innobase/log/log0write.cc    |  4 +--
 3 files changed, 48 insertions(+), 5 deletions(-)

diff --git a/include/my_atomic.h b/include/my_atomic.h
index ce8927816f6..ae1d54bd451 100644
--- a/include/my_atomic.h
+++ b/include/my_atomic.h
@@ -60,4 +60,46 @@ static inline int my_yield_processor() {
 #endif
 
+#include <atomic>
+#ifdef __powerpc__
+#define CACHE_LINE_SIZE 128
+#else
+#define CACHE_LINE_SIZE 64
+#endif /* __powerpc__ */
+
+template <typename T>
+class atomic_counter_t {
+ private:
+  char m_pad[CACHE_LINE_SIZE - sizeof(std::atomic<T>)];
+  std::atomic<T> m_counter;
+
+ public:
+  atomic_counter_t(T n) : m_counter(n) {}
+  atomic_counter_t() {}
+
+  atomic_counter_t(const atomic_counter_t &rhs) { m_counter.store(rhs.load()); }
+
+  T fetch_add(T n) { return m_counter.fetch_add(n, std::memory_order_relaxed); }
+  T fetch_sub(T n) { return m_counter.fetch_sub(n, std::memory_order_relaxed); }
+
+  T add(T n) { return fetch_add(n); }
+  T sub(T n) { return fetch_sub(n); }
+  T load() const { return m_counter.load(std::memory_order_relaxed); }
+  void store(T n) { m_counter.store(n, std::memory_order_relaxed); }
+
+  T operator++(int) { return add(1); }
+  T operator--(int) { return sub(1); }
+  T operator++() { return add(1) + 1; }
+  T operator--() { return sub(1) - 1; }
+  T operator+=(T n) { return add(n) + n; }
+  T operator-=(T n) { return sub(n) - n; }
+
+  operator T() const { return m_counter.load(); }
+
+  T operator=(T n) {
+    store(n);
+    return n;
+  }
+};
+
 
 #endif /* MY_ATOMIC_INCLUDED */
diff --git a/storage/innobase/include/log0types.h b/storage/innobase/include/log0types.h
index a9e252c2577..599c4c6ad79 100644
--- a/storage/innobase/include/log0types.h
+++ b/storage/innobase/include/log0types.h
@@ -54,6 +54,7 @@ this program; if not, write to the Free Software Foundation, Inc.,
 #include "univ.i"
 #include "ut0link_buf.h"
 #include "ut0mutex.h"
+#include "my_atomic.h"
 
 /** Type used for all log sequence number storage and arithmetics. */
 typedef uint64_t lsn_t;
@@ -62,14 +63,14 @@ typedef uint64_t lsn_t;
 #define LSN_PF UINT64PF
 
 /** Alias for atomic based on lsn_t. */
-using atomic_lsn_t = std::atomic<lsn_t>;
+using atomic_lsn_t = atomic_counter_t<lsn_t>;
 
 /** Type used for sn values, which enumerate bytes of data stored in the log.
 Note that these values skip bytes of headers and footers of log blocks. */
 typedef uint64_t sn_t;
 
 /** Alias for atomic based on sn_t. */
-using atomic_sn_t = std::atomic<sn_t>;
+using atomic_sn_t = atomic_counter_t<sn_t>;
 
 /** Type used for checkpoint numbers (consecutive checkpoints receive
 a number which is increased by one). */
@@ -77,7 +78,7 @@ typedef uint64_t checkpoint_no_t;
 
 /** Type used for counters in log_t: flushes_requested and flushes_expected.
 They represent number of requests to flush the redo log to disk. */
-typedef std::atomic log_flushes_t;
+typedef atomic_counter_t log_flushes_t;
 
 /** Function used to calculate checksums of log blocks. */
 typedef std::atomic log_checksum_func_t;
diff --git a/storage/innobase/log/log0write.cc b/storage/innobase/log/log0write.cc
index d55b010e1ee..9dfdb1f5698 100644
--- a/storage/innobase/log/log0write.cc
+++ b/storage/innobase/log/log0write.cc
@@ -858,7 +858,7 @@ static Wait_stats log_wait_for_write(const log_t &log, lsn_t lsn) {
 @param[in]  lsn   wait until log.flushed_to_disk_lsn >= lsn
 @return statistics related to waiting inside */
 static Wait_stats log_wait_for_flush(const log_t &log, lsn_t lsn) {
-  if (log.write_lsn.load(std::memory_order_relaxed) < lsn) {
+  if (log.write_lsn.load() < lsn) {
     os_event_set(log.writer_event);
   }
   os_event_set(log.flusher_event);
@@ -878,7 +878,7 @@ static Wait_stats log_wait_for_flush(const log_t &log, lsn_t lsn) {
     }
 
     if (wait) {
-      if (log.write_lsn.load(std::memory_order_relaxed) < lsn) {
+      if (log.write_lsn.load() < lsn) {
        os_event_set(log.writer_event);
      }
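
Illustration only, not part of the patch: a minimal, self-contained sketch of the pattern the patch relies on. The relaxed_counter class below is a hypothetical, pared-down stand-in for the atomic_counter_t wrapper added to my_atomic.h (no cache-line padding, no operator overloads); it demonstrates that relaxed increments are still atomic read-modify-writes, so counts are never lost, while giving up only the cross-thread ordering guarantees that pure statistics counters do not need.

#include <atomic>
#include <cstdint>
#include <iostream>
#include <thread>
#include <vector>

// Hypothetical stand-in for the patch's atomic_counter_t: same idea
// (every access uses std::memory_order_relaxed), stripped to the minimum.
template <typename T>
class relaxed_counter {
 public:
  explicit relaxed_counter(T n = 0) : m_counter(n) {}

  T add(T n) { return m_counter.fetch_add(n, std::memory_order_relaxed); }
  T load() const { return m_counter.load(std::memory_order_relaxed); }

 private:
  std::atomic<T> m_counter;
};

int main() {
  relaxed_counter<uint64_t> flushes;  // plays the role of a stats counter

  // Several threads bump the counter concurrently. Relaxed ordering is
  // sufficient because the value is only ever read as a statistic; it is
  // never used to publish other data between threads.
  std::vector<std::thread> threads;
  for (int i = 0; i < 4; ++i) {
    threads.emplace_back([&flushes] {
      for (int j = 0; j < 100000; ++j) {
        flushes.add(1);
      }
    });
  }
  for (auto &t : threads) {
    t.join();
  }

  // Relaxed operations are still atomic, so no increment is lost:
  // this always prints 400000.
  std::cout << flushes.load() << std::endl;
  return 0;
}

Built with, for example, g++ -std=c++11 -pthread, the program always prints the exact total. That atomicity is all the redo-log statistics counters require; any real synchronization in the patched code paths (such as waking the writer and flusher threads in log0write.cc) still goes through os_event_set rather than through the ordering of these counters.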