From 557c246779a4b91e63a3a838554c3a2fbecc794c Mon Sep 17 00:00:00 2001
From: Yibo Cai
Date: Thu, 14 Nov 2019 09:27:04 +0000
Subject: [PATCH] innobase/dict: refine zip_pad_info->pad with c++11 atomics

Though only atomicity is required, the current code uses gcc __sync_xxx
builtins, which introduce unnecessary memory barriers. Refine with C++11
relaxed memory order.
---
 storage/innobase/dict/dict0dict.cc  | 20 +++++++++++---------
 storage/innobase/include/dict0mem.h |  2 +-
 2 files changed, 12 insertions(+), 10 deletions(-)

diff --git a/storage/innobase/dict/dict0dict.cc b/storage/innobase/dict/dict0dict.cc
index 0134185..bed4248 100644
--- a/storage/innobase/dict/dict0dict.cc
+++ b/storage/innobase/dict/dict0dict.cc
@@ -6092,6 +6092,7 @@ static void dict_index_zip_pad_update(
 {
   ulint total;
   ulint fail_pct;
+  ulint pad;
 
   ut_ad(info);
 
@@ -6109,6 +6110,8 @@ static void dict_index_zip_pad_update(
     return;
   }
 
+  pad = info->pad.load(order_relaxed);
+
   /* We are at a 'round' boundary. Reset the values but first
   calculate fail rate for our heuristic. */
   fail_pct = (info->failure * 100) / total;
@@ -6119,15 +6122,15 @@ static void dict_index_zip_pad_update(
     /* Compression failures are more then user defined threshold.
     Increase the pad size to reduce chances of compression failures. */
 
-    ut_ad(info->pad % ZIP_PAD_INCR == 0);
+    ut_ad(pad % ZIP_PAD_INCR == 0);
 
     /* Only do increment if it won't increase padding
     beyond max pad size. */
-    if (info->pad + ZIP_PAD_INCR < (UNIV_PAGE_SIZE * zip_pad_max) / 100) {
+    if (pad + ZIP_PAD_INCR < (UNIV_PAGE_SIZE * zip_pad_max) / 100) {
       /* Use atomics even though we have the mutex.
       This is to ensure that we are able to read
       info->pad atomically. */
-      os_atomic_increment_ulint(&info->pad, ZIP_PAD_INCR);
+      info->pad.fetch_add(ZIP_PAD_INCR, order_relaxed);
 
       MONITOR_INC(MONITOR_PAD_INCREMENTS);
     }
@@ -6142,12 +6145,12 @@ static void dict_index_zip_pad_update(
   /* If enough successful rounds are completed with
   compression failure rate in control, decrease the
   padding. */
-  if (info->n_rounds >= ZIP_PAD_SUCCESSFUL_ROUND_LIMIT && info->pad > 0) {
-    ut_ad(info->pad % ZIP_PAD_INCR == 0);
+  if (info->n_rounds >= ZIP_PAD_SUCCESSFUL_ROUND_LIMIT && pad > 0) {
+    ut_ad(pad % ZIP_PAD_INCR == 0);
 
     /* Use atomics even though we have the mutex.
     This is to ensure that we are able to read
     info->pad atomically. */
-    os_atomic_decrement_ulint(&info->pad, ZIP_PAD_INCR);
+    info->pad.fetch_sub(ZIP_PAD_INCR, order_relaxed);
 
     info->n_rounds = 0;
@@ -6211,10 +6214,9 @@ ulint dict_index_zip_pad_optimal_page_size(
     return (UNIV_PAGE_SIZE);
   }
 
-  /* We use atomics to read index->zip_pad.pad. Here we use zero
-  as increment as are not changing the value of the 'pad'. */
+  /* We use atomics to read index->zip_pad.pad. */
 
-  pad = os_atomic_increment_ulint(&index->zip_pad.pad, 0);
+  pad = index->zip_pad.pad.load(order_relaxed);
 
   ut_ad(pad < UNIV_PAGE_SIZE);
   sz = UNIV_PAGE_SIZE - pad;
diff --git a/storage/innobase/include/dict0mem.h b/storage/innobase/include/dict0mem.h
index 70fb93a..fe25bd8 100644
--- a/storage/innobase/include/dict0mem.h
+++ b/storage/innobase/include/dict0mem.h
@@ -768,7 +768,7 @@ an uncompressed page should be left as padding to avoid compression
 failures. This estimate is based on a self-adapting heuristic. */
 struct zip_pad_info_t {
   SysMutex *mutex; /*!< mutex protecting the info */
-  ulint pad;       /*!< number of bytes used as pad */
+  os_atomic_t pad; /*!< number of bytes used as pad */
   ulint success;   /*!< successful compression ops during
                    current round */
   ulint failure;   /*!< failed compression ops during
-- 
2.7.4