The target architecture will involve two NDB clusters with circular replication. However, even before setting up replication, I see the following message being reported every minute on the `mysqld` nodes of a single cluster:

```
# journalctl -f -u mysqld
2023-11-03T13:48:35.293539Z 0 [System] [MY-010866] [NDB] Metadata: Failed to submit table 'mysql.ndb_apply_status' for synchronization
...
```

I've tried searching for a more detailed description of this error in the documentation, but haven't found anything beyond:

https://dev.mysql.com/doc/mysql-errors/8.0/en/server-error-reference.html#error_er_ndb_log_entry_with_prefix
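As an aside, the once-a-minute cadence matches the default of the `ndb_metadata_check_interval` system variable (60 seconds), which together with `ndb_metadata_check` controls the metadata change detection timer. Both can be inspected from the client; on defaults this should report `ON` and `60` (I haven't captured the output from this cluster):

```
mysql> SHOW GLOBAL VARIABLES LIKE 'ndb_metadata_check%';
```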
So I've downloaded the source code and found that this message can only be generated from four call sites, all in `storage/ndb/plugin/ndb_metadata_change_monitor.cc`:

```
mysql-server-mysql-cluster-8.0.35 $ grep -rn 'Failed to submit table ' *
storage/ndb/plugin/ndb_metadata_change_monitor.cc:301: log_info("Failed to submit table '%s.%s' for synchronization",
storage/ndb/plugin/ndb_metadata_change_monitor.cc:317: log_info("Failed to submit table '%s.%s' for synchronization",
storage/ndb/plugin/ndb_metadata_change_monitor.cc:331: log_info("Failed to submit table '%s.%s' for synchronization",
storage/ndb/plugin/ndb_metadata_change_monitor.cc:345: log_info("Failed to submit table '%s.%s' for synchronization",
```

All four sit in `Ndb_metadata_change_monitor::detect_table_changes_in_schema`:

```
bool Ndb_metadata_change_monitor::detect_table_changes_in_schema(
    THD *thd, const Thd_ndb *thd_ndb, const std::string &schema_name) const {
  NdbDictionary::Dictionary *dict = thd_ndb->ndb->getDictionary();
  std::unordered_set<std::string> ndb_tables_in_NDB;
  // Fetch list of tables in NDB
  if (!ndb_get_table_names_in_schema(dict, schema_name, &ndb_tables_in_NDB)) {
    log_NDB_error(dict->getNdbError());
    log_info("Failed to get list of tables in schema '%s' from NDB",
             schema_name.c_str());
    return false;
  }

  // Lock the schema in DD
  Ndb_dd_client dd_client(thd);
  if (!dd_client.mdl_lock_schema(schema_name.c_str())) {
    log_and_clear_thd_conditions(thd, condition_logging_level::INFO);
    log_info("Failed to MDL lock schema '%s'", schema_name.c_str());
    return false;
  }

  // Fetch list of tables in DD, also acquire MDL lock on the tables
  std::unordered_set<std::string> ndb_tables_in_DD;
  std::unordered_set<std::string> local_tables_in_DD;
  if (!dd_client.get_table_names_in_schema(
          schema_name.c_str(), &ndb_tables_in_DD, &local_tables_in_DD)) {
    log_and_clear_thd_conditions(thd, condition_logging_level::INFO);
    log_info("Failed to get list of tables in schema '%s' from DD",
             schema_name.c_str());
    return false;
  }

  // Special case when all NDB tables belonging to a schema still exist in DD
  // but not in NDB
  if (ndb_tables_in_NDB.empty() && !ndb_tables_in_DD.empty()) {
    for (const auto &ndb_table_name : ndb_tables_in_DD) {
      // Exists in DD but not in NDB
      if (ndbcluster_binlog_check_table_async(schema_name, ndb_table_name)) {
        increment_metadata_detected_count();
      } else {
        log_info("Failed to submit table '%s.%s' for synchronization",
                 schema_name.c_str(), ndb_table_name.c_str());
      }
    }
    return true;
  }

  // Special case when all tables belonging to a schema still exist in NDB but
  // not in DD (as either NDB or shadow tables)
  if (!ndb_tables_in_NDB.empty() && ndb_tables_in_DD.empty() &&
      local_tables_in_DD.empty()) {
    for (const auto &ndb_table_name : ndb_tables_in_NDB) {
      // Exists in NDB but not in DD
      if (ndbcluster_binlog_check_table_async(schema_name, ndb_table_name)) {
        increment_metadata_detected_count();
      } else {
        log_info("Failed to submit table '%s.%s' for synchronization",
                 schema_name.c_str(), ndb_table_name.c_str());
      }
    }
    return true;
  }

  for (const auto &ndb_table_name : ndb_tables_in_NDB) {
    if (ndb_tables_in_DD.find(ndb_table_name) == ndb_tables_in_DD.end() &&
        local_tables_in_DD.find(ndb_table_name) == local_tables_in_DD.end()) {
      // Exists in NDB but not in DD
      if (ndbcluster_binlog_check_table_async(schema_name, ndb_table_name)) {
        increment_metadata_detected_count();
      } else {
        log_info("Failed to submit table '%s.%s' for synchronization",
                 schema_name.c_str(), ndb_table_name.c_str());
      }
    } else {
      // Exists in both NDB and DD
      ndb_tables_in_DD.erase(ndb_table_name);
    }
  }

  for (const auto &ndb_table_name : ndb_tables_in_DD) {
    // Exists in DD but not in NDB
    if (ndbcluster_binlog_check_table_async(schema_name, ndb_table_name)) {
      increment_metadata_detected_count();
    } else {
      log_info("Failed to submit table '%s.%s' for synchronization",
               schema_name.c_str(), ndb_table_name.c_str());
    }
  }
  return true;
}
```

Notice that all the lines logging 'Failed to submit table ' are triggered when `ndbcluster_binlog_check_table_async` returns `false`. But when you take a look at its implementation, it always explicitly returns `false` for the `ndb_apply_status` table:

mysql-server-mysql-cluster-8.0.35/storage/ndb/plugin/ha_ndbcluster_binlog.cc:5652

```
bool ndbcluster_binlog_check_table_async(const std::string &db_name,
                                         const std::string &table_name) {
  if (db_name.empty()) {
    ndb_log_error("Database name of object to be synchronized not set");
    return false;
  }

  if (table_name.empty()) {
    ndb_log_error("Table name of object to be synchronized not set");
    return false;
  }

  if (db_name == Ndb_apply_status_table::DB_NAME &&
      table_name == Ndb_apply_status_table::TABLE_NAME) {
    // Never check util tables which are managed by the Ndb_binlog_thread
    // NOTE! The other tables are filtered elsewhere but ndb_apply_status is
    // special since it's not hidden.
    return false;
  }

  return ndb_binlog_thread.add_table_to_check(db_name, table_name);
}
```

So it would seem that `Ndb_metadata_change_monitor` somehow detects that the `ndb_apply_status` table is present in NDB but not in DD (the data dictionary), or the other way around. As far as I can tell, this should not be the case: the table is present in NDB, and if I understand correctly, the DD side can be checked via `information_schema`, and `ndb_apply_status` is indeed present in `information_schema.tables`:

```
mysql> select * from information_schema.tables where TABLE_NAME = 'ndb_apply_status'\G
*************************** 1. row ***************************
  TABLE_CATALOG: def
   TABLE_SCHEMA: mysql
     TABLE_NAME: ndb_apply_status
     TABLE_TYPE: BASE TABLE
         ENGINE: ndbcluster
        VERSION: 10
     ROW_FORMAT: Dynamic
     TABLE_ROWS: 2
 AVG_ROW_LENGTH: 0
    DATA_LENGTH: 0
MAX_DATA_LENGTH: 0
   INDEX_LENGTH: 0
      DATA_FREE: 0
 AUTO_INCREMENT: NULL
    CREATE_TIME: 2023-11-02 17:03:50
    UPDATE_TIME: NULL
     CHECK_TIME: NULL
TABLE_COLLATION: latin1_swedish_ci
       CHECKSUM: NULL
 CREATE_OPTIONS:
  TABLE_COMMENT:
1 row in set (0.00 sec)

mysql> show create table mysql.ndb_apply_status\G
*************************** 1. row ***************************
       Table: ndb_apply_status
Create Table: CREATE TABLE `ndb_apply_status` (
  `server_id` int unsigned NOT NULL,
  `epoch` bigint unsigned NOT NULL,
  `log_name` varchar(255) NOT NULL,
  `start_pos` bigint unsigned NOT NULL,
  `end_pos` bigint unsigned NOT NULL,
  PRIMARY KEY (`server_id`) USING HASH
) ENGINE=ndbcluster DEFAULT CHARSET=latin1
1 row in set (0.00 sec)

mysql> select * from mysql.ndb_apply_status;
Empty set (0.00 sec)
```
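For completeness, the NDB dictionary side can also be checked directly, bypassing `mysqld`, with the `ndb_show_tables` utility (the connect string below is a placeholder for the management node; if the table exists in NDB, it should be listed as a `UserTable` in database `mysql`):

```
$ ndb_show_tables --ndb-connectstring=mgmd_host | grep ndb_apply_status
```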
Then `Ndb_metadata_change_monitor` invokes `ndbcluster_binlog_check_table_async` to synchronize it, but that call fails by design, since `ndb_apply_status` is supposed to be managed internally by the `Ndb_binlog_thread`, according to the comments in the source.
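If my reading of the code above is right, this loop should also be observable from SQL: a successful submission calls `increment_metadata_detected_count()`, which presumably backs the `Ndb_metadata_detected_count` status variable, so the message repeating every minute while that counter stays flat would be consistent with the submission being rejected before it is ever counted:

```
mysql> SHOW GLOBAL STATUS LIKE 'Ndb_metadata%';
```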