-- Setup 3 node cluster

hod03
=================
rm -rf data/
mkdir data
bin/mysqld --defaults-file=./89194.cnf --initialize-insecure --basedir=$PWD --datadir=$PWD/data/
bin/mysqld --defaults-file=./89194.cnf --log-error=hod03.err --core-file 2>&1 &

hod04
=================
rm -rf data/
mkdir data
bin/mysqld --defaults-file=./89194.cnf --initialize-insecure --basedir=$PWD --datadir=$PWD/data/
bin/mysqld --defaults-file=./89194.cnf --log-error=hod04.err --core-file 2>&1 &

hod06
=================
rm -rf data/
mkdir data
bin/mysqld --defaults-file=./89194.cnf --initialize-insecure --basedir=$PWD --datadir=$PWD/data/
bin/mysqld --defaults-file=./89194.cnf --log-error=hod06.err --core-file 2>&1 &

#################
Configure, Start GR

- hod03 (node 1)
CREATE USER 'rpl_user'@'%' IDENTIFIED BY 'rpl_pass';
GRANT REPLICATION SLAVE ON *.* TO rpl_user@'%';
SHOW GLOBAL VARIABLES LIKE 'group_replication%';
SET GLOBAL group_replication_bootstrap_group=1;
CHANGE MASTER TO MASTER_USER='rpl_user', MASTER_PASSWORD='rpl_pass' FOR CHANNEL 'group_replication_recovery';
START GROUP_REPLICATION;
SET GLOBAL group_replication_bootstrap_group=0;
SELECT * FROM performance_schema.replication_connection_status\G
SELECT * FROM performance_schema.replication_group_member_stats\G
SELECT * FROM performance_schema.replication_group_members;

- hod04/hod06 (nodes 2 and 3)
SHOW GLOBAL VARIABLES LIKE 'group_replication%';
CHANGE MASTER TO MASTER_USER='rpl_user', MASTER_PASSWORD='rpl_pass' FOR CHANNEL 'group_replication_recovery';
START GROUP_REPLICATION;
SELECT * FROM performance_schema.replication_connection_status\G
SELECT * FROM performance_schema.replication_group_member_stats\G
SELECT * FROM performance_schema.replication_group_members;

-- hod03
hod03> SELECT * FROM performance_schema.replication_group_members;
+---------------------------+--------------------------------------+-------------+-------------+--------------+
| CHANNEL_NAME              | MEMBER_ID                            | MEMBER_HOST | MEMBER_PORT | MEMBER_STATE |
+---------------------------+--------------------------------------+-------------+-------------+--------------+
| group_replication_applier | 634792ec-f9c9-11e7-b622-0010e05f3e06 | hod03       |        3306 | ONLINE       |
| group_replication_applier | 6989a9a5-f9c9-11e7-ac54-0010e05f4178 | hod04       |        3306 | ONLINE       |
| group_replication_applier | 6f6fabae-f9c9-11e7-8927-0010e0734b98 | hod06       |        3306 | ONLINE       |
+---------------------------+--------------------------------------+-------------+-------------+--------------+
3 rows in set (0.00 sec)

-- hod04
hod04> SELECT * FROM performance_schema.replication_group_members;
+---------------------------+--------------------------------------+-------------+-------------+--------------+
| CHANNEL_NAME              | MEMBER_ID                            | MEMBER_HOST | MEMBER_PORT | MEMBER_STATE |
+---------------------------+--------------------------------------+-------------+-------------+--------------+
| group_replication_applier | 634792ec-f9c9-11e7-b622-0010e05f3e06 | hod03       |        3306 | ONLINE       |
| group_replication_applier | 6989a9a5-f9c9-11e7-ac54-0010e05f4178 | hod04       |        3306 | ONLINE       |
| group_replication_applier | 6f6fabae-f9c9-11e7-8927-0010e0734b98 | hod06       |        3306 | ONLINE       |
+---------------------------+--------------------------------------+-------------+-------------+--------------+
3 rows in set (0.00 sec)

-- hod06
hod06> SELECT * FROM performance_schema.replication_group_members;
+---------------------------+--------------------------------------+-------------+-------------+--------------+
| CHANNEL_NAME              | MEMBER_ID                            | MEMBER_HOST | MEMBER_PORT | MEMBER_STATE |
+---------------------------+--------------------------------------+-------------+-------------+--------------+
| group_replication_applier | 634792ec-f9c9-11e7-b622-0010e05f3e06 | hod03       |        3306 | ONLINE       |
| group_replication_applier | 6989a9a5-f9c9-11e7-ac54-0010e05f4178 | hod04       |        3306 | ONLINE       |
| group_replication_applier | 6f6fabae-f9c9-11e7-8927-0010e0734b98 | hod06       |        3306 | ONLINE       |
+---------------------------+--------------------------------------+-------------+-------------+--------------+
3 rows in set (0.00 sec)

## hod03
## Run the following as initialization on one member of the group:

CREATE DATABASE test_jfg_ws;
CREATE TABLE test_jfg_ws.test_jfg_ws (id int(10) unsigned NOT NULL AUTO_INCREMENT PRIMARY KEY, str varchar(80) NOT NULL UNIQUE);
CREATE TABLE test_jfg_ws.test_jfg_ws2 (id INT UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY, str VARCHAR(80) NOT NULL);

hod03> CREATE DATABASE test_jfg_ws;
Query OK, 1 row affected (0.00 sec)

hod03> CREATE TABLE test_jfg_ws.test_jfg_ws (id int(10) unsigned NOT NULL AUTO_INCREMENT PRIMARY KEY, str varchar(80) NOT NULL UNIQUE);
Query OK, 0 rows affected (0.01 sec)

hod03> CREATE TABLE test_jfg_ws.test_jfg_ws2 (id INT UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY, str VARCHAR(80) NOT NULL);
Query OK, 0 rows affected (0.00 sec)

To trigger the false-positive certification (which leads to data inconsistency and group breakage), populate the second table with many rows (two million in this run: 100 transactions of 20,000 inserts each) by running the following commands in a Linux shell on one member of the group:

-- on hod03
[umshastr@hod03]/export/umesh/server/binaries/GABuilds/mysql-5.7.20: sql="INSERT INTO test_jfg_ws.test_jfg_ws2 (str) VALUES (RAND());"
[umshastr@hod03]/export/umesh/server/binaries/GABuilds/mysql-5.7.20: for i in $(seq 100); do
> ( echo "BEGIN;"; yes "$sql" | head -n 20000; echo "COMMIT;"; ) | bin/mysql -uroot -S /tmp/mysql_hod03.sock;
> done
[umshastr@hod03]/export/umesh/server/binaries/GABuilds/mysql-5.7.20:

hod03> ALTER TABLE test_jfg_ws.test_jfg_ws2 MODIFY COLUMN str VARCHAR(60);
Query OK, 2000000 rows affected (13.66 sec)
Records: 2000000  Duplicates: 0  Warnings: 0

## The following ALTER needs to take more than two seconds so the steps below have a chance to trigger the false-positive certification. If it does not take more than two seconds, add more rows to the table by running the above loop more times.
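## Optional sanity check (not part of the original run): before re-running the timed ALTER, confirm the table still holds enough rows; with roughly two million rows the ALTER above took 13.66 seconds, comfortably beyond the two-second window.
bin/mysql -uroot -S /tmp/mysql_hod03.sock <<< "SELECT COUNT(*) FROM test_jfg_ws.test_jfg_ws2;"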
-- again on hod03
hod03> ALTER TABLE test_jfg_ws.test_jfg_ws2 MODIFY COLUMN str VARCHAR(60);
Query OK, 2000000 rows affected (13.66 sec)
Records: 2000000  Duplicates: 0  Warnings: 0

## Then, with the help of the above table, trigger a false-positive certification by running the two scripts below in the Linux shell of two different members of the group:

-- hod03
### On one member, run this in a Linux shell:
# while sleep 1; do
> bin/mysql -uroot -S /tmp/mysql_hod03.sock <<< "SHOW PROCESSLIST" | grep -q "test_jfg_ws.test_jfg_ws2" || continue;
> bin/mysql -uroot -S /tmp/mysql_hod03.sock <<< "INSERT INTO test_jfg_ws.test_jfg_ws (str) VALUES ('B')";
> break;
> done
^^ running

-- hod06
### On a different member, run this in a Linux shell:
[umshastr@hod06]/export/home/ushastry/mysql-5.7.20: bin/mysql -uroot -S /tmp/mysql_hod06.sock <<< "
> ALTER TABLE test_jfg_ws.test_jfg_ws2 MODIFY COLUMN str VARCHAR(80);
> INSERT INTO test_jfg_ws.test_jfg_ws (str) VALUES ('b');"
[umshastr@hod06]/export/home/ushastry/mysql-5.7.20:

-- After some time, the following was observed:
[umshastr@hod03]/export/umesh/server/binaries/GABuilds/mysql-5.7.20: while sleep 1; do
> bin/mysql -uroot -S /tmp/mysql_hod03.sock <<< "SHOW PROCESSLIST" | grep -q "test_jfg_ws.test_jfg_ws2" || continue;
> bin/mysql -uroot -S /tmp/mysql_hod03.sock <<< "INSERT INTO test_jfg_ws.test_jfg_ws (str) VALUES ('B')";
> break;
> done
ERROR 1062 (23000) at line 1: Duplicate entry 'B' for key 'str'

## Both inserts ('B' on hod03, 'b' on hod06) passed certification even though they collide on the case-insensitive unique key str; each member then fails to apply the other member's row with a duplicate-key error and leaves the group, as shown below.

-- hod03
hod03> SELECT * FROM performance_schema.replication_group_members;
+---------------------------+--------------------------------------+-------------+-------------+--------------+
| CHANNEL_NAME              | MEMBER_ID                            | MEMBER_HOST | MEMBER_PORT | MEMBER_STATE |
+---------------------------+--------------------------------------+-------------+-------------+--------------+
| group_replication_applier | 634792ec-f9c9-11e7-b622-0010e05f3e06 | hod03       |        3306 | ERROR        |
+---------------------------+--------------------------------------+-------------+-------------+--------------+
1 row in set (0.00 sec)

hod03> SELECT * FROM test_jfg_ws.test_jfg_ws;
+----+-----+
| id | str |
+----+-----+
|  7 | B   |
+----+-----+
1 row in set (0.00 sec)

hod03> SELECT * FROM performance_schema.replication_applier_status_by_worker WHERE CHANNEL_NAME = 'group_replication_applier'\G
*************************** 1. row ***************************
         CHANNEL_NAME: group_replication_applier
            WORKER_ID: 0
            THREAD_ID: NULL
        SERVICE_STATE: OFF
LAST_SEEN_TRANSACTION: 9d7f8c28-c02c-11e6-9829-08002715584a:1000010
    LAST_ERROR_NUMBER: 1062
   LAST_ERROR_MESSAGE: Could not execute Write_rows event on table test_jfg_ws.test_jfg_ws; Duplicate entry 'b' for key 'str', Error_code: 1062; handler error HA_ERR_FOUND_DUPP_KEY; the event's master log FIRST, end_log_pos 241
 LAST_ERROR_TIMESTAMP: 2018-01-15 09:58:54
1 row in set (0.00 sec)

-- extract from error log
2018-01-15T08:58:54.445935Z 8 [ERROR] Slave SQL for channel 'group_replication_applier': Could not execute Write_rows event on table test_jfg_ws.test_jfg_ws; Duplicate entry 'b' for key 'str', Error_code: 1062; handler error HA_ERR_FOUND_DUPP_KEY; the event's master log FIRST, end_log_pos 241, Error_code: 1062
2018-01-15T08:58:54.445953Z 8 [Warning] Slave: Duplicate entry 'b' for key 'str' Error_code: 1062
2018-01-15T08:58:54.445957Z 8 [ERROR] Error running query, slave SQL thread aborted. Fix the problem, and restart the slave SQL thread with "SLAVE START". We stopped at log 'FIRST' position 144
2018-01-15T08:58:54.445968Z 8 [ERROR] Plugin group_replication reported: 'The applier thread execution was aborted. Unable to process more transactions, this member will now leave the group.'
2018-01-15T08:58:54.446032Z 5 [ERROR] Plugin group_replication reported: 'Fatal error during execution on the Applier process of Group Replication. The server will now leave the group.'
2018-01-15T08:58:54.446130Z 5 [ERROR] Plugin group_replication reported: 'The server was automatically set into read only mode after an error was detected.'
2018-01-15T08:58:54.446492Z 0 [Note] Plugin group_replication reported: 'getstart group_id 1b10d81'
2018-01-15T08:58:54.500648Z 5 [Note] Plugin group_replication reported: 'The group replication applier thread was killed'
2018-01-15T08:58:54.752182Z 0 [Note] Plugin group_replication reported: 'getstart group_id 1b10d81'
2018-01-15T08:58:57.752479Z 0 [Note] Plugin group_replication reported: 'state 4410 action xa_terminate'
2018-01-15T08:58:57.780512Z 0 [Note] Plugin group_replication reported: 'new state x_start'
2018-01-15T08:58:57.780531Z 0 [Note] Plugin group_replication reported: 'state 4337 action xa_exit'
2018-01-15T08:58:57.808468Z 0 [Note] Plugin group_replication reported: 'Exiting xcom thread'
2018-01-15T08:58:57.808483Z 0 [Note] Plugin group_replication reported: 'new state x_start'
2018-01-15T08:58:57.825845Z 0 [Note] Plugin group_replication reported: 'Group membership changed: This member has left the group.'
[umshastr@hod03]/export/umesh/server/binaries/GABuilds/mysql-5.7.20:

-- hod04
hod04> SELECT * FROM test_jfg_ws.test_jfg_ws;
+----+-----+
| id | str |
+----+-----+
|  2 | b   |
+----+-----+
1 row in set (0.00 sec)

hod04> SELECT * FROM performance_schema.replication_group_members;
+---------------------------+--------------------------------------+-------------+-------------+--------------+
| CHANNEL_NAME              | MEMBER_ID                            | MEMBER_HOST | MEMBER_PORT | MEMBER_STATE |
+---------------------------+--------------------------------------+-------------+-------------+--------------+
| group_replication_applier | 6989a9a5-f9c9-11e7-ac54-0010e05f4178 | hod04       |        3306 | ERROR        |
+---------------------------+--------------------------------------+-------------+-------------+--------------+
1 row in set (0.00 sec)

hod04> SELECT * FROM performance_schema.replication_applier_status_by_worker WHERE CHANNEL_NAME = 'group_replication_applier'\G
*************************** 1. row ***************************
         CHANNEL_NAME: group_replication_applier
            WORKER_ID: 0
            THREAD_ID: NULL
        SERVICE_STATE: OFF
LAST_SEEN_TRANSACTION: 9d7f8c28-c02c-11e6-9829-08002715584a:109
    LAST_ERROR_NUMBER: 1062
   LAST_ERROR_MESSAGE: Could not execute Write_rows event on table test_jfg_ws.test_jfg_ws; Duplicate entry 'B' for key 'str', Error_code: 1062; handler error HA_ERR_FOUND_DUPP_KEY; the event's master log FIRST, end_log_pos 241
 LAST_ERROR_TIMESTAMP: 2018-01-15 09:58:54
1 row in set (0.00 sec)

-- extract from error log
2018-01-15T08:58:54.446602Z 0 [Note] Plugin group_replication reported: 'getstart group_id 1b10d81'
2018-01-15T08:58:54.751806Z 8 [ERROR] Slave SQL for channel 'group_replication_applier': Could not execute Write_rows event on table test_jfg_ws.test_jfg_ws; Duplicate entry 'B' for key 'str', Error_code: 1062; handler error HA_ERR_FOUND_DUPP_KEY; the event's master log FIRST, end_log_pos 241, Error_code: 1062
2018-01-15T08:58:54.751825Z 8 [Warning] Slave: Duplicate entry 'B' for key 'str' Error_code: 1062
2018-01-15T08:58:54.751829Z 8 [ERROR] Error running query, slave SQL thread aborted. Fix the problem, and restart the slave SQL thread with "SLAVE START". We stopped at log 'FIRST' position 268
2018-01-15T08:58:54.751840Z 8 [ERROR] Plugin group_replication reported: 'The applier thread execution was aborted. Unable to process more transactions, this member will now leave the group.'
2018-01-15T08:58:54.751875Z 5 [ERROR] Plugin group_replication reported: 'Fatal error during execution on the Applier process of Group Replication. The server will now leave the group.'
2018-01-15T08:58:54.751938Z 5 [ERROR] Plugin group_replication reported: 'The server was automatically set into read only mode after an error was detected.'
2018-01-15T08:58:54.752036Z 0 [Note] Plugin group_replication reported: 'getstart group_id 1b10d81'
2018-01-15T08:58:54.752305Z 5 [Note] Plugin group_replication reported: 'The group replication applier thread was killed'
2018-01-15T08:58:58.438548Z 0 [Note] Plugin group_replication reported: 'state 4410 action xa_terminate'
2018-01-15T08:58:58.466679Z 0 [Note] Plugin group_replication reported: 'new state x_start'
2018-01-15T08:58:58.466700Z 0 [Note] Plugin group_replication reported: 'state 4337 action xa_exit'
2018-01-15T08:58:58.494595Z 0 [Note] Plugin group_replication reported: 'Exiting xcom thread'
2018-01-15T08:58:58.494610Z 0 [Note] Plugin group_replication reported: 'new state x_start'
2018-01-15T08:58:58.511822Z 0 [Note] Plugin group_replication reported: 'Group membership changed: This member has left the group.'
[umshastr@hod04]/export/home/ushastry/mysql-5.7.20:

-- hod06
hod06> SELECT * FROM performance_schema.replication_group_members;
+---------------------------+--------------------------------------+-------------+-------------+--------------+
| CHANNEL_NAME              | MEMBER_ID                            | MEMBER_HOST | MEMBER_PORT | MEMBER_STATE |
+---------------------------+--------------------------------------+-------------+-------------+--------------+
| group_replication_applier | 6f6fabae-f9c9-11e7-8927-0010e0734b98 | hod06       |        3306 | ERROR        |
+---------------------------+--------------------------------------+-------------+-------------+--------------+
1 row in set (0.00 sec)

hod06> SELECT * FROM test_jfg_ws.test_jfg_ws;
+----+-----+
| id | str |
+----+-----+
|  2 | b   |
+----+-----+
1 row in set (0.00 sec)

hod06> SELECT * FROM performance_schema.replication_applier_status_by_worker WHERE CHANNEL_NAME = 'group_replication_applier'\G
*************************** 1. row ***************************
         CHANNEL_NAME: group_replication_applier
            WORKER_ID: 0
            THREAD_ID: NULL
        SERVICE_STATE: OFF
LAST_SEEN_TRANSACTION: 9d7f8c28-c02c-11e6-9829-08002715584a:109
    LAST_ERROR_NUMBER: 1062
   LAST_ERROR_MESSAGE: Could not execute Write_rows event on table test_jfg_ws.test_jfg_ws; Duplicate entry 'B' for key 'str', Error_code: 1062; handler error HA_ERR_FOUND_DUPP_KEY; the event's master log FIRST, end_log_pos 241
 LAST_ERROR_TIMESTAMP: 2018-01-15 09:58:40
1 row in set (0.00 sec)

-- extract from error log
2018-01-15T07:56:24.625409Z 0 [Note] Plugin group_replication reported: 'This server was declared online within the replication group'
2018-01-15T08:58:40.551275Z 8 [ERROR] Slave SQL for channel 'group_replication_applier': Could not execute Write_rows event on table test_jfg_ws.test_jfg_ws; Duplicate entry 'B' for key 'str', Error_code: 1062; handler error HA_ERR_FOUND_DUPP_KEY; the event's master log FIRST, end_log_pos 241, Error_code: 1062
2018-01-15T08:58:40.551298Z 8 [Warning] Slave: Duplicate entry 'B' for key 'str' Error_code: 1062
2018-01-15T08:58:40.551302Z 8 [ERROR] Error running query, slave SQL thread aborted. Fix the problem, and restart the slave SQL thread with "SLAVE START". We stopped at log 'FIRST' position 96
2018-01-15T08:58:40.551311Z 8 [ERROR] Plugin group_replication reported: 'The applier thread execution was aborted. Unable to process more transactions, this member will now leave the group.'
2018-01-15T08:58:40.551361Z 5 [ERROR] Plugin group_replication reported: 'Fatal error during execution on the Applier process of Group Replication. The server will now leave the group.'
2018-01-15T08:58:40.551405Z 5 [ERROR] Plugin group_replication reported: 'The server was automatically set into read only mode after an error was detected.'
2018-01-15T08:58:40.551733Z 0 [Note] Plugin group_replication reported: 'getstart group_id 1b10d81'
2018-01-15T08:58:40.551738Z 5 [Note] Plugin group_replication reported: 'The group replication applier thread was killed'
2018-01-15T08:58:43.894481Z 0 [Note] Plugin group_replication reported: 'state 4410 action xa_terminate'
2018-01-15T08:58:43.922665Z 0 [Note] Plugin group_replication reported: 'new state x_start'
2018-01-15T08:58:43.922683Z 0 [Note] Plugin group_replication reported: 'state 4337 action xa_exit'
2018-01-15T08:58:43.950639Z 0 [Note] Plugin group_replication reported: 'Exiting xcom thread'
2018-01-15T08:58:43.950654Z 0 [Note] Plugin group_replication reported: 'new state x_start'
2018-01-15T08:58:43.967311Z 0 [Note] Plugin group_replication reported: 'Group membership changed: This member has left the group.'
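## Optional check (not part of the original run; it reuses the socket paths from the conf files below): the divergence can be made explicit by comparing each member's executed GTID set and the contents of the conflicting table.
for s in /tmp/mysql_hod03.sock /tmp/mysql_hod04.sock /tmp/mysql_hod06.sock; do
  bin/mysql -uroot -S "$s" <<< "SELECT @@hostname, @@GLOBAL.gtid_executed; SELECT * FROM test_jfg_ws.test_jfg_ws;"
done
## hod03 returns 'B' while hod04 and hod06 return 'b', matching the SELECT outputs above.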
###############
### Conf files used for tests

# hod03
[mysqld]
basedir = /export/umesh/server/binaries/GABuilds/mysql-5.7.20
datadir = /export/umesh/server/binaries/GABuilds/mysql-5.7.20/data/
plugin-load = group_replication.so
port = 3306
socket = /tmp/mysql_hod03.sock
log_bin
disabled_storage_engines = MyISAM,BLACKHOLE,FEDERATED,ARCHIVE
ssl
skip_name_resolve
server_id = 571706
binlog_format = ROW
binlog_rows_query_log_events = ON
gtid_mode = ON
enforce_gtid_consistency = ON
log_slave_updates = ON
master_info_repository = TABLE
relay_log_info_repository = TABLE
relay_log_recovery = ON
transaction_write_set_extraction = XXHASH64
binlog_checksum = NONE
#group_replication = FORCE_PLUS_PERMANENT
group_replication_start_on_boot = ON
group_replication_group_name = 9d7f8c28-c02c-11e6-9829-08002715584a
group_replication_local_address = hod03.no.oracle.com:6606
group_replication_group_seeds = hod03.no.oracle.com:6606,hod04.no.oracle.com:6607,hod06.no.oracle.com:6608
group_replication_single_primary_mode = TRUE
group_replication_start_on_boot = FALSE

# hod04
[mysqld]
basedir = /export/home/ushastry/mysql-5.7.20
datadir = /export/home/ushastry/mysql-5.7.20/data/
plugin-load = group_replication.so
port = 3306
log_bin
socket = /tmp/mysql_hod04.sock
disabled_storage_engines = MyISAM,BLACKHOLE,FEDERATED,ARCHIVE
ssl
skip_name_resolve
server_id = 571707
binlog_format = ROW
binlog_rows_query_log_events = ON
gtid_mode = ON
enforce_gtid_consistency = ON
log_slave_updates = ON
master_info_repository = TABLE
relay_log_info_repository = TABLE
relay_log_recovery = ON
transaction_write_set_extraction = XXHASH64
binlog_checksum = NONE
#group_replication = FORCE_PLUS_PERMANENT
group_replication_start_on_boot = ON
group_replication_group_name = 9d7f8c28-c02c-11e6-9829-08002715584a
group_replication_local_address = hod04.no.oracle.com:6607
group_replication_group_seeds = hod03.no.oracle.com:6606,hod04.no.oracle.com:6607,hod06.no.oracle.com:6608
group_replication_single_primary_mode = TRUE
group_replication_start_on_boot = FALSE

# hod06
[mysqld]
basedir = /export/home/ushastry/mysql-5.7.20
datadir = /export/home/ushastry/mysql-5.7.20/data/
plugin-load = group_replication.so
port = 3306
log_bin
socket = /tmp/mysql_hod06.sock
disabled_storage_engines = MyISAM,BLACKHOLE,FEDERATED,ARCHIVE
ssl
skip_name_resolve
server_id = 571708
binlog_format = ROW
binlog_rows_query_log_events = ON
gtid_mode = ON
enforce_gtid_consistency = ON
log_slave_updates = ON
master_info_repository = TABLE
relay_log_info_repository = TABLE
relay_log_recovery = ON
transaction_write_set_extraction = XXHASH64
binlog_checksum = NONE
#group_replication = FORCE_PLUS_PERMANENT
group_replication_start_on_boot = ON
group_replication_group_name = 9d7f8c28-c02c-11e6-9829-08002715584a
group_replication_local_address = hod06.no.oracle.com:6608
group_replication_group_seeds = hod03.no.oracle.com:6606,hod04.no.oracle.com:6607,hod06.no.oracle.com:6608
group_replication_single_primary_mode = TRUE
group_replication_start_on_boot = FALSE

## 5.7.20 - Build used
cat docs/INFO_SRC
commit: 0441472e19e9a2e41df5d61098eb97c3e211547c
date: 2017-09-13 17:44:44 +0200
build-date: 2017-09-13 17:48:35 +0200
short: 0441472
branch: mysql-5.7.20-release

MySQL source 5.7.20

## OS details
# cat /etc/*release
Oracle Linux Server release 7.1
NAME="Oracle Linux Server"
VERSION="7.1"
ID="ol"
VERSION_ID="7.1"
PRETTY_NAME="Oracle Linux Server 7.1"
ANSI_COLOR="0;31"
CPE_NAME="cpe:/o:oracle:linux:7:1"
HOME_URL="https://linux.oracle.com/"
BUG_REPORT_URL="https://bugzilla.oracle.com/"
ORACLE_BUGZILLA_PRODUCT="Oracle Linux 7"
ORACLE_BUGZILLA_PRODUCT_VERSION=7.1
ORACLE_SUPPORT_PRODUCT="Oracle Linux"
ORACLE_SUPPORT_PRODUCT_VERSION=7.1
Red Hat Enterprise Linux Server release 7.1 (Maipo)
Oracle Linux Server release 7.1