Index: btr/btr0sea.c
===================================================================
--- btr/btr0sea.c	(revision 50)
+++ btr/btr0sea.c	(working copy)
@@ -904,6 +904,7 @@
 	ulint*		folds;
 	ulint		i;
 	mem_heap_t*	heap;
+	dict_index_t*	index;
 	ulint*		offsets;
 
 #ifdef UNIV_SYNC_DEBUG
@@ -932,11 +933,16 @@
 	n_fields = block->curr_n_fields;
 	n_bytes = block->curr_n_bytes;
+	index = block->index;
 
-	ut_a(n_fields + n_bytes > 0);
+	/* NOTE: The fields of block must not be accessed after
+	releasing btr_search_latch, as the index page might only
+	be s-latched! */
 
 	rw_lock_s_unlock(&btr_search_latch);
 
+	ut_a(n_fields + n_bytes > 0);
+
 	n_recs = page_get_n_recs(page);
 
 	/* Calculate and cache fold values into an array for fast deletion
 	from the hash index */
@@ -949,14 +955,6 @@
 	rec = page_get_infimum_rec(page);
 	rec = page_rec_get_next(rec);
 
-	if (!page_rec_is_supremum(rec)) {
-		ut_a(n_fields <= rec_get_n_fields(rec, block->index));
-
-		if (n_bytes > 0) {
-			ut_a(n_fields < rec_get_n_fields(rec, block->index));
-		}
-	}
-
 	tree_id = btr_page_get_index_id(page);
 
 	prev_fold = 0;
@@ -964,18 +962,12 @@
 	heap = NULL;
 	offsets = NULL;
 
-	if (block->index == NULL) {
-
-		mem_analyze_corruption((byte*)block);
-
-		ut_a(block->index != NULL);
-	}
-
 	while (!page_rec_is_supremum(rec)) {
 		/* FIXME: in a mixed tree, not all records may have enough
 		ordering fields: */
-		offsets = rec_get_offsets(rec, block->index,
-				offsets, n_fields + (n_bytes > 0), &heap);
+		offsets = rec_get_offsets(rec, index, offsets,
+				n_fields + (n_bytes > 0), &heap);
+		ut_a(rec_offs_n_fields(offsets) == n_fields + (n_bytes > 0));
 		fold = rec_fold(rec, offsets, n_fields, n_bytes, tree_id);
 
 		if (fold == prev_fold && prev_fold != 0) {
Index: include/buf0buf.h
===================================================================
--- include/buf0buf.h	(revision 50)
+++ include/buf0buf.h	(working copy)
@@ -745,8 +745,6 @@
 					buffer pool which are index pages,
 					but this flag is not set because
 					we do not keep track of all pages */
-	dict_index_t*	index;		/* index for which the adaptive
-					hash index has been created */
 	/* 2. Page flushing fields */
 
 	UT_LIST_NODE_T(buf_block_t) flush_list;
@@ -833,7 +831,7 @@
 					records with the same prefix should be
 					indexed in the hash index */
 
-	/* The following 4 fields are protected by btr_search_latch: */
+	/* The following 6 fields are protected by btr_search_latch: */
 
 	ibool		is_hashed;	/* TRUE if hash index has already been
 					built on this page; note that it does
@@ -850,6 +848,12 @@
 	ulint		curr_side;	/* BTR_SEARCH_LEFT_SIDE or
 					BTR_SEARCH_RIGHT_SIDE in hash
 					indexing */
+	dict_index_t*	index;		/* Index for which the adaptive
+					hash index has been created.
+					This field may only be modified
+					while holding an s-latch or x-latch
+					on block->lock and an x-latch on
+					btr_search_latch. */
 	/* 6. Debug fields */
#ifdef UNIV_SYNC_DEBUG
 	rw_lock_t	debug_latch;	/* in the debug version, each thread
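
Illustration only, not part of the patch: a minimal standalone sketch of the rule the
btr0sea.c hunk and the new buf0buf.h comment spell out. Everything needed after the
latch is released is copied into local variables while the latch is still held, and only
the copies are used afterwards. The struct, field, and function names below are
hypothetical, and a POSIX rwlock stands in for btr_search_latch; this is a sketch of the
pattern, not InnoDB code.

#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-in for buf_block_t: the latch protects the
three fields declared below it. */
struct block {
	pthread_rwlock_t latch;
	unsigned         curr_n_fields;
	unsigned         curr_n_bytes;
	const char      *index_name;
};

static void drop_hash_entries(struct block *b)
{
	unsigned    n_fields;
	unsigned    n_bytes;
	const char *index_name;

	pthread_rwlock_rdlock(&b->latch);

	/* Cache everything needed later while the latch is held... */
	n_fields   = b->curr_n_fields;
	n_bytes    = b->curr_n_bytes;
	index_name = b->index_name;

	pthread_rwlock_unlock(&b->latch);

	/* ...because after the unlock another thread may change b.
	From here on, only the local copies are used. */
	printf("dropping hash entries for %s: %u fields, %u bytes\n",
	       index_name, n_fields, n_bytes);
}

int main(void)
{
	struct block b;

	pthread_rwlock_init(&b.latch, NULL);
	b.curr_n_fields = 1;
	b.curr_n_bytes  = 0;
	b.index_name    = "t1_idx";

	drop_hash_entries(&b);

	pthread_rwlock_destroy(&b.latch);
	return 0;
}

The same reasoning is consistent with the hunk moving ut_a(n_fields + n_bytes > 0) below
rw_lock_s_unlock(): the assertion reads only local variables, so it no longer needs the
latch to be held.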