bug#57032 a01_patch.diff
check cases in "hash index scan"

 mysql-test/suite/ndb/r/ndb_index_unique.result |  152 ++++++++++++++++++++++
 mysql-test/suite/ndb/t/ndb_index_unique.test   |   83 ++++++++++++
 sql/ha_ndbcluster.cc                           |   12 -
 sql/ha_ndbcluster.h                            |    4
 sql/ha_ndbcluster_cond.cc                      |  169 +++++++++++++++++++++----
 sql/ha_ndbcluster_cond.h                       |    4
 6 files changed, 388 insertions(+), 36 deletions(-)

Index: mysql-test/suite/ndb/r/ndb_index_unique.result
===================================================================
--- mysql-test/suite/ndb/r/ndb_index_unique.result.orig
+++ mysql-test/suite/ndb/r/ndb_index_unique.result
@@ -737,3 +737,155 @@ drop table t1;
 alter tablespace ts1 drop datafile 'datafile.dat' engine=ndb;
 drop tablespace ts1 engine=ndb;
 drop logfile group lg1 engine=ndb;
+# bug#57032
+create table t1 (
+a int not null,
+b int,
+primary key using hash (a),
+unique key using hash (b)
+)
+engine ndb;
+Warnings:
+Warning	1121	Ndb does not support unique index on NULL valued attributes, index access with NULL value will become full table scan
+insert into t1 values
+(0,0),(1,1),(2,2),(3,3),(4,4),
+(5,null),(6,null),(7,null),(8,null),(9,null);
+set @old_ecpd = @@session.engine_condition_pushdown;
+set engine_condition_pushdown = 0;
+select a from t1 where b is not null order by a;
+a
+0
+1
+2
+3
+4
+select a from t1 where b is null order by a;
+a
+5
+6
+7
+8
+9
+set engine_condition_pushdown = 1;
+select a from t1 where b is not null order by a;
+a
+0
+1
+2
+3
+4
+select a from t1 where b is null order by a;
+a
+5
+6
+7
+8
+9
+set engine_condition_pushdown = @old_ecpd;
+drop table t1;
+create table t1 (
+a int not null,
+b int,
+c int,
+primary key using hash (a),
+unique key using hash (b,c)
+)
+engine ndb;
+Warnings:
+Warning	1121	Ndb does not support unique index on NULL valued attributes, index access with NULL value will become full table scan
+insert into t1 values
+(0,0,0),(1,1,1),(2,2,1),(3,3,1),(4,4,2),
+(5,null,0),(6,null,1),(7,null,1),(8,null,1),(9,null,2),
+(10,0,null),(11,1,null),(12,1,null),(13,1,null),(14,2,null),
+(15,null,null),(16,null,null),(17,null,null),(18,null,null),(19,null,null);
+set @old_ecpd = @@session.engine_condition_pushdown;
+set engine_condition_pushdown = 0;
+select a from t1 where b is not null and c = 1 order by a;
+a
+1
+2
+3
+select a from t1 where b is null and c = 1 order by a;
+a
+6
+7
+8
+select a from t1 where b = 1 and c is null order by a;
+a
+11
+12
+13
+select a from t1 where b is null and c is null order by a;
+a
+15
+16
+17
+18
+19
+select a from t1 where b is not null and c is null order by a;
+a
+10
+11
+12
+13
+14
+select a from t1 where b is null and c is not null order by a;
+a
+5
+6
+7
+8
+9
+select a from t1 where b is not null and c is not null order by a;
+a
+0
+1
+2
+3
+4
+set engine_condition_pushdown = 1;
+select a from t1 where b is not null and c = 1 order by a;
+a
+1
+2
+3
+select a from t1 where b is null and c = 1 order by a;
+a
+6
+7
+8
+select a from t1 where b = 1 and c is null order by a;
+a
+11
+12
+13
+select a from t1 where b is null and c is null order by a;
+a
+15
+16
+17
+18
+19
+select a from t1 where b is not null and c is null order by a;
+a
+10
+11
+12
+13
+14
+select a from t1 where b is null and c is not null order by a;
+a
+5
+6
+7
+8
+9
+select a from t1 where b is not null and c is not null order by a;
+a
+0
+1
+2
+3
+4
+set engine_condition_pushdown = @old_ecpd;
+drop table t1;
Index: mysql-test/suite/ndb/t/ndb_index_unique.test
===================================================================
--- mysql-test/suite/ndb/t/ndb_index_unique.test.orig
+++ mysql-test/suite/ndb/t/ndb_index_unique.test
@@ -433,4 +433,85 @@ alter tablespace ts1 drop datafile 'data
 drop tablespace ts1 engine=ndb;
 drop logfile group lg1 engine=ndb;
 
-# end of tests
\ No newline at end of file
+# bug#57032 'NOT NULL' evaluation is incorrect when using a unique index
+
+--echo # bug#57032
+
+create table t1 (
+  a int not null,
+  b int,
+  primary key using hash (a),
+  unique key using hash (b)
+)
+engine ndb;
+
+insert into t1 values
+  (0,0),(1,1),(2,2),(3,3),(4,4),
+  (5,null),(6,null),(7,null),(8,null),(9,null);
+
+set @old_ecpd = @@session.engine_condition_pushdown;
+
+set engine_condition_pushdown = 0;
+# failed, empty result
+select a from t1 where b is not null order by a;
+# worked
+select a from t1 where b is null order by a;
+
+set engine_condition_pushdown = 1;
+# failed, empty result
+select a from t1 where b is not null order by a;
+# worked
+select a from t1 where b is null order by a;
+
+set engine_condition_pushdown = @old_ecpd;
+
+drop table t1;
+
+create table t1 (
+  a int not null,
+  b int,
+  c int,
+  primary key using hash (a),
+  unique key using hash (b,c)
+)
+engine ndb;
+
+insert into t1 values
+  (0,0,0),(1,1,1),(2,2,1),(3,3,1),(4,4,2),
+  (5,null,0),(6,null,1),(7,null,1),(8,null,1),(9,null,2),
+  (10,0,null),(11,1,null),(12,1,null),(13,1,null),(14,2,null),
+  (15,null,null),(16,null,null),(17,null,null),(18,null,null),(19,null,null);
+
+set @old_ecpd = @@session.engine_condition_pushdown;
+
+set engine_condition_pushdown = 0;
+# worked
+select a from t1 where b is not null and c = 1 order by a;
+# failed, empty result
+select a from t1 where b is null and c = 1 order by a;
+# failed, empty result
+select a from t1 where b = 1 and c is null order by a;
+# worked
+select a from t1 where b is null and c is null order by a;
+select a from t1 where b is not null and c is null order by a;
+select a from t1 where b is null and c is not null order by a;
+select a from t1 where b is not null and c is not null order by a;
+
+set engine_condition_pushdown = 1;
+# worked
+select a from t1 where b is not null and c = 1 order by a;
+# failed, empty result
+select a from t1 where b is null and c = 1 order by a;
+# failed, empty result
+select a from t1 where b = 1 and c is null order by a;
+# worked
+select a from t1 where b is null and c is null order by a;
+select a from t1 where b is not null and c is null order by a;
+select a from t1 where b is null and c is not null order by a;
+select a from t1 where b is not null and c is not null order by a;
+
+set engine_condition_pushdown = @old_ecpd;
+
+drop table t1;
+
+# end of tests
Index: sql/ha_ndbcluster.cc
===================================================================
--- sql/ha_ndbcluster.cc.orig
+++ sql/ha_ndbcluster.cc
@@ -3042,8 +3042,8 @@ guess_scan_flags(NdbOperation::LockMode
 */
 
 int ha_ndbcluster::full_table_scan(const KEY* key_info,
-                                   const uchar *key,
-                                   uint key_len,
+                                   const key_range *start_key,
+                                   const key_range *end_key,
                                    uchar *buf)
 {
   int error;
@@ -3143,7 +3143,7 @@ int ha_ndbcluster::full_table_scan(const
       my_errno= HA_ERR_OUT_OF_MEM;
       DBUG_RETURN(my_errno);
     }
-    if (m_cond->generate_scan_filter_from_key(&code, &options, key_info, key, key_len, buf))
+    if (m_cond->generate_scan_filter_from_key(&code, &options, key_info, start_key, end_key, buf))
       ERR_RETURN(code.getNdbError());
   }
 
@@ -4909,8 +4909,8 @@ int ha_ndbcluster::read_range_first_to_b
   }
   else if (type == UNIQUE_INDEX)
     DBUG_RETURN(full_table_scan(key_info,
-                                start_key->key,
-                                start_key->length,
+                                start_key,
+                                end_key,
                                 buf));
     break;
   default:
@@ -5010,7 +5010,7 @@ int ha_ndbcluster::rnd_next(uchar *buf)
   ha_statistic_increment(&SSV::ha_read_rnd_next_count);
 
   if (!m_active_cursor)
-    DBUG_RETURN(full_table_scan(NULL, NULL, 0, buf));
+    DBUG_RETURN(full_table_scan(NULL, NULL, NULL, buf));
   DBUG_RETURN(next_result(buf));
 }
 
Index: sql/ha_ndbcluster.h
===================================================================
--- sql/ha_ndbcluster.h.orig
+++ sql/ha_ndbcluster.h
@@ -655,8 +655,8 @@ private:
   int unique_index_read(const uchar *key, uint key_len,
                         uchar *buf);
   int full_table_scan(const KEY* key_info,
-                      const uchar *key,
-                      uint key_len,
+                      const key_range *start_key,
+                      const key_range *end_key,
                       uchar *buf);
   int flush_bulk_insert(bool allow_batch= FALSE);
   int ndb_write_row(uchar *record, bool primary_key_update,
Index: sql/ha_ndbcluster_cond.cc
===================================================================
--- sql/ha_ndbcluster_cond.cc.orig
+++ sql/ha_ndbcluster_cond.cc
@@ -1431,44 +1431,163 @@ ha_ndbcluster_cond::generate_scan_filter
 }
 
+/*
+  The optimizer sometimes does a hash index lookup of a key where some
+  key parts are null.  The set of cases where this happens makes little
+  sense but cannot be ignored, since the optimizer may expect the result
+  to be filtered accordingly.  The scan is actually on the table and
+  the index bounds are pushed down as a scan filter.
+*/
 int
 ha_ndbcluster_cond::generate_scan_filter_from_key(NdbInterpretedCode* code,
                                                   NdbScanOperation::ScanOptions* options,
                                                   const KEY* key_info,
-                                                  const uchar *key,
-                                                  uint key_len,
+                                                  const key_range *start_key,
+                                                  const key_range *end_key,
                                                   uchar *buf)
 {
-  KEY_PART_INFO* key_part= key_info->key_part;
-  KEY_PART_INFO* end= key_part+key_info->key_parts;
-  NdbScanFilter filter(code);
-  int res;
   DBUG_ENTER("generate_scan_filter_from_key");
+#ifndef DBUG_OFF
+  {
+    DBUG_PRINT("info", ("key parts:%u length:%u",
+                        key_info->key_parts, key_info->key_length));
+    const key_range* keylist[2]={ start_key, end_key };
+    for (uint j=0; j <= 1; j++)
+    {
+      char buf[8192];
+      const key_range* key=keylist[j];
+      if (key == 0)
+      {
+        sprintf(buf, "key range %u: none", j);
+      }
+      else
+      {
+        sprintf(buf, "key range %u: flag:%u part", j, key->flag);
+        const KEY_PART_INFO* key_part=key_info->key_part;
+        const uchar* ptr=key->key;
+        for (uint i=0; i < key_info->key_parts; i++)
+        {
+          sprintf(buf+strlen(buf), " %u:", i);
+          for (uint k=0; k < key_part->store_length; k++)
+          {
+            sprintf(buf+strlen(buf), " %02x", ptr[k]);
+          }
+          ptr+=key_part->store_length;
+          if (ptr - key->key >= key->length)
+          {
+            /*
+              key_range has no count of parts so must test byte length.
+              But this is not the place for the following assert.
+            */
+            // DBUG_ASSERT(ptr - key->key == key->length);
+            break;
+          }
+          key_part++;
+        }
+      }
+      DBUG_PRINT("info", ("%s", buf));
+    }
+  }
+#endif
+
+  NdbScanFilter filter(code);
+  int res;
   filter.begin(NdbScanFilter::AND);
-  for (; key_part != end; key_part++)
+  do
   {
-    Field* field= key_part->field;
-    uint32 pack_len= field->pack_length();
-    const uchar* ptr= key;
-    DBUG_PRINT("info", ("Filtering value for %s", field->field_name));
-    DBUG_DUMP("key", ptr, pack_len);
-    if (key_part->null_bit)
+    /*
+      Case "x is not null".
+      Seen with index(x) where it becomes the range "null < x".
+      Not seen with index(x,y) for any combination of bounds
+      which include "is not null".
+    */
+    if (start_key != 0 &&
+        start_key->flag == HA_READ_AFTER_KEY &&
+        end_key == 0 &&
+        key_info->key_parts == 1)
     {
-      DBUG_PRINT("info", ("Generating ISNULL filter"));
-      if (filter.isnull(key_part->fieldnr-1) == -1)
-        DBUG_RETURN(1);
+      const KEY_PART_INFO* key_part=key_info->key_part;
+      if (key_part->null_bit != 0) // nullable (must be)
+      {
+        const Field* field=key_part->field;
+        const uchar* ptr= start_key->key;
+        if (ptr[0] != 0) // null (in "null < x")
+        {
+          DBUG_PRINT("info", ("Generating ISNOTNULL filter for nullable %s",
+                              field->field_name));
+          if (filter.isnotnull(key_part->fieldnr-1) == -1)
+            DBUG_RETURN(1);
+          break;
+        }
+      }
     }
-    else
+
+    /*
+      Case "x is null" in an EQ range.
+      Seen with index(x) for "x is null".
+      Seen with index(x,y) for "x is null and y = 1".
+      Not seen with index(x,y) for "x is null and y is null".
+      Seen only when all key parts are present (but there is
+      no reason to limit the code to this case).
+    */
+    if (start_key != 0 &&
+        start_key->flag == HA_READ_KEY_EXACT &&
+        end_key != 0 &&
+        end_key->flag == HA_READ_AFTER_KEY &&
+        start_key->length == end_key->length &&
+        memcmp(start_key->key, end_key->key, start_key->length) == 0)
     {
-      DBUG_PRINT("info", ("Generating EQ filter"));
-      if (filter.cmp(NdbScanFilter::COND_EQ,
-                     key_part->fieldnr-1,
-                     ptr,
-                     pack_len) == -1)
-        DBUG_RETURN(1);
+      const KEY_PART_INFO* key_part=key_info->key_part;
+      const uchar* ptr=start_key->key;
+      for (uint i=0; i < key_info->key_parts; i++)
+      {
+        const Field* field=key_part->field;
+        if (key_part->null_bit) // nullable
+        {
+          if (ptr[0] != 0) // null
+          {
+            DBUG_PRINT("info", ("Generating ISNULL filter for nullable %s",
+                                field->field_name));
+            if (filter.isnull(key_part->fieldnr-1) == -1)
+              DBUG_RETURN(1);
+          }
+          else
+          {
+            DBUG_PRINT("info", ("Generating EQ filter for nullable %s",
+                                field->field_name));
+            if (filter.cmp(NdbScanFilter::COND_EQ,
+                           key_part->fieldnr-1,
+                           ptr + 1, // skip null-indicator byte
+                           field->pack_length()) == -1)
+              DBUG_RETURN(1);
+          }
+        }
+        else
+        {
+          DBUG_PRINT("info", ("Generating EQ filter for non-nullable %s",
+                              field->field_name));
+          if (filter.cmp(NdbScanFilter::COND_EQ,
+                         key_part->fieldnr-1,
+                         ptr,
+                         field->pack_length()) == -1)
+            DBUG_RETURN(1);
+        }
+        ptr+=key_part->store_length;
+        if (ptr - start_key->key >= start_key->length)
+        {
+          break;
+        }
+        key_part++;
+      }
+      break;
     }
-    key += key_part->store_length;
-  }
+
+    DBUG_PRINT("info", ("Unknown hash index scan"));
+    // enable to catch new cases when the optimizer changes
+    // DBUG_ASSERT(false);
+  }
+  while (0);
+
   // Add any pushed condition
   if (m_cond_stack &&
      (res= generate_scan_filter_from_cond(filter)))
Index: sql/ha_ndbcluster_cond.h
===================================================================
--- sql/ha_ndbcluster_cond.h.orig
+++ sql/ha_ndbcluster_cond.h
@@ -661,8 +661,8 @@ public:
   int generate_scan_filter_from_key(NdbInterpretedCode* code,
                                     NdbScanOperation::ScanOptions* options,
                                     const KEY* key_info,
-                                    const uchar *key,
-                                    uint key_len,
+                                    const key_range *start_key,
+                                    const key_range *end_key,
                                     uchar *buf);
 private:
   bool serialize_cond(const COND *cond, Ndb_cond_stack *ndb_cond,
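
Note for reviewers (not part of the patch): the new filter code relies on the MySQL key image layout
for nullable key parts, where key_part->store_length covers a one-byte null indicator (non-zero means
NULL, checked via ptr[0]) followed by the packed value that filter.cmp() reads at ptr + 1. A minimal
standalone C++ sketch of that walk, with an illustrative KeyPartDesc type standing in for
KEY_PART_INFO (names here are assumptions, not server code):

#include <cstdint>
#include <cstdio>
#include <cstring>

// Illustrative stand-in for KEY_PART_INFO; only the fields the walk needs.
struct KeyPartDesc
{
  bool     nullable;            // corresponds to key_part->null_bit != 0
  unsigned pack_length;         // packed value length, e.g. 4 for INT
  unsigned store_length() const { return (nullable ? 1 : 0) + pack_length; }
};

// Walk a flattened key image the way generate_scan_filter_from_key() does:
// for a nullable part, byte 0 is the null indicator (non-zero means NULL)
// and the packed value starts at byte 1; for a NOT NULL part the value
// starts at byte 0.  Reading the value with memcpy is a simplification,
// since the real packed format depends on the field type.
static void walk_key_image(const unsigned char *ptr, unsigned key_length,
                           const KeyPartDesc *parts, unsigned key_parts)
{
  const unsigned char *start= ptr;
  for (unsigned i= 0; i < key_parts; i++)
  {
    if (parts[i].nullable && ptr[0] != 0)
      printf("part %u: NULL\n", i);
    else
    {
      int32_t val;
      memcpy(&val, ptr + (parts[i].nullable ? 1 : 0), sizeof(val));
      printf("part %u: %d\n", i, val);
    }
    ptr+= parts[i].store_length();
    if ((unsigned)(ptr - start) >= key_length)
      break;                    // key_range carries a byte length, not a part count
  }
}

For example, with the test table's unique key using hash (b,c) and the bound for
"b is null and c = 1", the key image would be two 5-byte parts (assuming 4-byte INTs): a non-zero
indicator for b, then a zero indicator followed by the packed value 1 for c.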