diff --git a/storage/innobase/include/ib0mutex.h b/storage/innobase/include/ib0mutex.h
index 2e77e191dab..0f8c33cd8ad 100644
--- a/storage/innobase/include/ib0mutex.h
+++ b/storage/innobase/include/ib0mutex.h
@@ -1,6 +1,6 @@
 /*****************************************************************************
 
-Copyright (c) 2013, 2019, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2013, 2020, Oracle and/or its affiliates. All Rights Reserved.
 
 This program is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License, version 2.0, as published by the
@@ -537,10 +537,9 @@ struct TTASEventMutex {
   /** Release the mutex. */
   void exit() UNIV_NOTHROW {
-    m_lock_word.store(false);
-    std::atomic_thread_fence(std::memory_order_acquire);
+    m_lock_word.store(false, std::memory_order_release);
 
-    if (m_waiters.load(std::memory_order_acquire)) {
+    if (m_waiters.exchange(false)) {
       signal();
     }
   }
@@ -672,20 +671,8 @@ struct TTASEventMutex {
     m_policy.add(n_spins, n_waits);
   }
 
-  /** @return the value of the m_waiters flag */
-  lock_word_t waiters() UNIV_NOTHROW {
-    return (m_waiters.load(std::memory_order_relaxed));
-  }
-
   /** Note that there are threads waiting on the mutex */
-  void set_waiters() UNIV_NOTHROW {
-    m_waiters.store(true, std::memory_order_release);
-  }
-
-  /** Note that there are no threads waiting on the mutex */
-  void clear_waiters() UNIV_NOTHROW {
-    m_waiters.store(false, std::memory_order_release);
-  }
+  void set_waiters() UNIV_NOTHROW { m_waiters.exchange(true); }
 
   /** Wakeup any waiting thread(s). */
   void signal() UNIV_NOTHROW;
diff --git a/storage/innobase/include/ut0mutex.ic b/storage/innobase/include/ut0mutex.ic
index 6b3ec99c617..082d7370b5b 100644
--- a/storage/innobase/include/ut0mutex.ic
+++ b/storage/innobase/include/ut0mutex.ic
@@ -1,6 +1,6 @@
 /*****************************************************************************
 
-Copyright (c) 2013, 2018, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2013, 2020, Oracle and/or its affiliates. All Rights Reserved.
 
 Portions of this file contain modifications contributed and copyrighted by
 Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -49,6 +49,70 @@ bool TTASEventMutex<Policy>::wait(const char *filename, uint32_t line,
   sync_cell_t *cell;
   sync_array_t *sync_arr;
 
+  /* We shall prove that in C++14, as defined by the draft N4140, the
+  following holds:
+
+  Theorem 1. If thread X decides to call os_event_wait(), and each thread
+  which calls enter() eventually calls exit(), then X will eventually be
+  woken up.
+
+  Proof: By contradiction.
+  We assume the contrary, that thread X goes into an endless sleep, so the
+  following operations are in `sequenced-before` relation in X:
+  X1 epoch_counter = os_event_reset()
+  X2 m_waiters.exchange(true)
+  X3 unsuccessful m_lock_word.compare_exchange_strong(false, true)
+  X4 os_event_wait(epoch_counter)
+
+  There are two possibilities:
+  1) in the modification order of `m_waiters` there is some
+  `m_waiters.exchange(false)` after X2,
+  2) all `m_waiters.exchange(false)` are before X2 in the modification order
+  of `m_waiters`.
+
+  In case 1) let Y be the first thread to perform `m_waiters.exchange(false)`
+  after X2 in the modification order of `m_waiters`. Y's
+  `m_waiters.exchange(false)` must read `true`, thus Y will call
+  os_event_set(..), and X2 must `synchronize-with` Y's
+  `m_waiters.exchange(false)` (every modification of `m_waiters` is a
+  read-modify-write, so Y reads a value from the release sequence headed by
+  X2). Thus X1 `happens-before` os_event_set() in Y, and thus X will be
+  woken up, which contradicts our assumption.
+
+  In case 2) As X went to sleep, X3 must have failed, which means it has
+  loaded `true`, which must be a value stored by some thread Z performing
+  `m_lock_word.compare_exchange_strong(false, true)` during enter().
+  As we assume that each thread calling enter() eventually calls exit(), Z
+  eventually calls exit() and thus performs:
+  Z1 m_lock_word.store(false, std::memory_order_release);
+  Z2 if (m_waiters.exchange(false)) {
+  Z3   signal();
+     }
+  In case 2) we know that Z2 must appear before X2 in the modification order
+  of `m_waiters`, thus Z2 synchronizes-with X2 (again, every modification of
+  `m_waiters` is a read-modify-write, so X2 reads a value from the release
+  sequence headed by Z2). This in turn means that operations sequenced after
+  X2 must see the effects of modifications sequenced before the exchange in
+  Z's exit(). In particular X3 (which is sequenced after X2) should see the
+  effect of Z1 (which is sequenced before Z2). But this violates the premise
+  that X3 saw Z's store of `true`! Formally, we can invoke § 1.10.20:
+
+  If a side effect X on an atomic object M happens before a value computation
+  B of M, then the evaluation B shall take its value from X or from a side
+  effect Y that follows X in the modification order of M.
+
+  which translates in our case to:
+  If Z1 happens before X3, then the evaluation X3 shall take its value from
+  Z1 or from a side effect that follows Z1 in the modification order of
+  `m_lock_word`. Clearly X3 cannot take its value from Z's
+  `m_lock_word.compare_exchange_strong(false, true)`, because that store does
+  not follow Z1 in the modification order of `m_lock_word` - quite the
+  contrary, it must precede Z1, because the modification order must be
+  consistent with `happens-before`, which must be consistent with
+  `sequenced-before`, and we know that Z's enter() is sequenced before Z's
+  exit(). See § 1.10.17:
+
+  If an operation A that modifies an atomic object M happens before an
+  operation B that modifies M, then A shall be earlier than B in the
+  modification order of M.
+
+  The contradiction obtained in each of the two possible cases completes the
+  proof. */
+
   sync_arr = sync_array_get_and_reserve_cell(
       this, (m_policy.get_id() == LATCH_ID_BUF_BLOCK_MUTEX ||
@@ -95,10 +159,8 @@ bool TTASEventMutex<Policy>::wait(const char *filename, uint32_t line,
 template
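
For readers who want the protocol of Theorem 1 outside the diff, here is a minimal, self-contained C++14 sketch. It is an illustration, not the InnoDB implementation: the real TTASEventMutex adds a spin phase, sync-array instrumentation and policy hooks, and SimpleEvent/EventMutex below are hypothetical stand-ins (SimpleEvent models the assumed os_event_t behavior: reset() observes the current epoch, set() advances it and wakes sleepers, wait(epoch) sleeps only while the epoch is unchanged).

#include <atomic>
#include <condition_variable>
#include <cstdint>
#include <mutex>
#include <thread>
#include <vector>

// Hypothetical stand-in for os_event_t, just enough for the sketch.
class SimpleEvent {
 public:
  // Stand-in for os_event_reset(): observe the current epoch.
  uint64_t reset() {
    std::lock_guard<std::mutex> guard(m_mutex);
    return m_epoch;
  }

  // Stand-in for os_event_set(): advance the epoch and wake all sleepers.
  void set() {
    {
      std::lock_guard<std::mutex> guard(m_mutex);
      ++m_epoch;
    }
    m_cond.notify_all();
  }

  // Stand-in for os_event_wait(): sleep only while the epoch is unchanged,
  // so a set() ordered after our reset() cannot be missed.
  void wait(uint64_t old_epoch) {
    std::unique_lock<std::mutex> guard(m_mutex);
    m_cond.wait(guard, [&] { return m_epoch != old_epoch; });
  }

 private:
  std::mutex m_mutex;
  std::condition_variable m_cond;
  uint64_t m_epoch{0};
};

class EventMutex {
 public:
  void enter() {
    // No spin phase for brevity: a failed try_lock() goes straight to the
    // slow path, which performs exactly the X1..X4 steps from the proof.
    while (!try_lock()) {
      uint64_t epoch = m_event.reset();  // X1
      m_waiters.exchange(true);          // X2, cf. set_waiters()
      if (try_lock()) {                  // X3: recheck after publishing X2
        return;  // m_waiters may stay true; a later exit() signals spuriously
      }
      m_event.wait(epoch);  // X4: sleep, but not forever (Theorem 1)
    }
  }

  void exit() {
    m_lock_word.store(false, std::memory_order_release);  // Z1
    if (m_waiters.exchange(false)) {                      // Z2
      m_event.set();                                      // Z3, cf. signal()
    }
  }

 private:
  bool try_lock() {
    bool expected = false;
    return m_lock_word.compare_exchange_strong(expected, true);
  }

  std::atomic<bool> m_lock_word{false};
  std::atomic<bool> m_waiters{false};
  SimpleEvent m_event;
};

// Usage: four threads increment a counter under the mutex. Exit code 0
// means every increment happened under mutual exclusion and no waiter was
// lost along the way.
int main() {
  EventMutex mutex;
  uint64_t counter = 0;

  std::vector<std::thread> threads;
  for (int t = 0; t < 4; ++t) {
    threads.emplace_back([&]() {
      for (int i = 0; i < 100000; ++i) {
        mutex.enter();
        ++counter;  // protected by the mutex
        mutex.exit();
      }
    });
  }
  for (auto &thread : threads) {
    thread.join();
  }
  return counter == 400000 ? 0 : 1;
}

The sketch mirrors the design point the patch makes: every modification of m_waiters is a read-modify-write, so the release sequences the proof relies on are never broken, and exit() pairs the release store of m_lock_word with an exchange of m_waiters. Replacing that exchange with a separate load and store would reopen the lost-wakeup window the patch closes.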