diff --git a/tests/functional/replication/test_blob_not_found_in_rw_replica_if_target_row_exists.py b/tests/functional/replication/test_blob_not_found_in_rw_replica_if_target_row_exists.py
new file mode 100644
index 00000000..c1934898
--- /dev/null
+++ b/tests/functional/replication/test_blob_not_found_in_rw_replica_if_target_row_exists.py
@@ -0,0 +1,409 @@
+#coding:utf-8
+#
+# id: tests.functional.replication.blob_not_found_in_rw_replica_if_target_row_exists
+# title: Error "BLOB is not found" while replication converts INSERT into UPDATE for a conflicting record
+# description:
+#   See: https://github.com/FirebirdSQL/firebird/issues/7070
+#
+#   The test temporarily switches the replica to read-write mode using an external call: gfix -replica read_write ...
+#   Then we create table TEST(ID int, B blob) on the master, without adding any records to it, and WAIT
+#   until this table appears in the replica DB.
+#   The maximal waiting time is limited by the variable MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG.
+#   A message starting with the phrase 'POINT-1A ...' is printed when this table appears in the replica DB.
+#
+#   NB. Although we could create such a table on the replica at the same time, this ticket issue can be
+#   reproduced only when we create the table on the master and wait until it has been replicated to the replica.
+#
+#   After this we:
+#   * add one record with a non-empty blob into this table on the REPLICA database and commit;
+#   * add a record with the same ID on the MASTER database and commit.
+#
+#   The message "WARNING: Record being inserted into table TEST already exists, updating instead" must appear
+#   in the replication log at this point, followed by a message that the segment was replicated successfully.
+#   The message 'ERROR: Blob ... is not found for table TEST' must NOT appear.
+#   If these conditions are met, the script issues a message starting with 'POINT-1B ...'.
+#
+#   Further, we invoke ISQL, executing an auxiliary script that drops all DB objects on the master (with the '-nod' command switch).
+#   After all objects have been dropped, we have to wait again until the replica catches up with the master (see 'POINT-2').
+#
+#   Finally, we extract metadata for master and replica and compare them (see 'f_meta_diff').
+#   The only difference in metadata must be the 'CREATE DATABASE' statement with different DB names - we suppress it,
+#   thus no metadata difference must be issued.
+#
+#   ####################
+#   ### CRUCIAL NOTE ###
+#   ####################
+#   Currently, 25.06.2021, there is a bug in FB 4.x and 5.x which can be seen on the SECOND run of this test: a message with the text
+#   "ERROR: Record format with length 68 is not found for table TEST" will appear after inserting the 1st record on the master.
+#   The reason is "dirty" pages that remain in RDB$RELATION_FIELDS both on master and replica after dropping the table.
+#   The following query shows different data appearing in the replica DB on the 1st and 2nd runs (just after the table was created on the master):
+#   =======
+#   set blobdisplay 6;
+#   select rdb$descriptor as fmt_descr
+#   from rdb$formats natural join rdb$relations where rdb$relation_name = 'TEST';
+#   =======
+#   This bug was explained by dimitr, see letters 25.06.2021 11:49 and 25.06.2021 16:56.
+#   It will be fixed later.
+#
+#   The only workaround for this problem is to run SWEEP after all DB objects have been dropped.
+#   ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#   !NB! BOTH master and replica must be cleaned up by sweep!
+#   ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
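+#
+#   As an illustration of that workaround, a minimal sketch (assuming ISC_USER/ISC_PASSWORD are
+#   set in the environment, 'gfix' is on PATH, and db_main / db_repl are the database paths
+#   defined later in the legacy test body):
+#
+#       import subprocess
+#       for db in (db_main, db_repl):
+#           # sweep removes the stale record versions left behind by the dropped objects,
+#           # so the next run does not pick up an obsolete format descriptor:
+#           subprocess.check_call(['gfix', '-sweep', 'localhost:' + db])
+#
+#   The test itself performs the same step via runProgram('gfix', ['-sweep', ...]) near its end.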
+#
+#   ################
+#   ### N O T E  ###
+#   ################
+#   Test assumes that master and replica DB have been created beforehand.
+#   Also, it assumes that %FB_HOME%\replication.conf has been prepared with appropriate parameters for replication.
+#   Particularly, the names of directories and databases must contain info about the checked FB major version and ServerMode.
+#       * verbose = true // in order to find the line with the message that the required segment was replicated
+#       * section for master database with specified parameters:
+#           journal_directory = "!fbt_repo!\tmp\fb-replication.!fb_major!.!server_mode!.journal"
+#           journal_archive_directory = "!fbt_repo!\tmp\fb-replication.!fb_major!.!server_mode!.archive"
+#           journal_archive_command = "copy $(pathname) $(archivepathname)"
+#           journal_archive_timeout = 10
+#       * section for replica database with specified parameter:
+#           journal_source_directory = "!fbt_repo!\tmp\fb-replication.!fb_major!.!server_mode!.archive"
+#
+#   Master and replica databases must be created in the "!fbt_repo!\tmp" directory and have names like these:
+#       'fbt-main.fb40.SS.fdb'; 'fbt-repl.fb40.SS.fdb'; - for FB 4.x ('SS' = Super; 'CS' = Classic)
+#       'fbt-main.fb50.SS.fdb'; 'fbt-repl.fb50.SS.fdb'; - for FB 5.x ('SS' = Super; 'CS' = Classic)
+#   NB: a fixed numeric value ('40' or '50') must be used for any minor FB version (4.0; 4.0.1; 4.1; 5.0; 5.1 etc).
+#
+#   These two databases must NOT be dropped in any of the tests related to replication!
+#   They are created and dropped in the batch scenario which prepares the FB instance to be checked for each ServerMode
+#   and makes cleanup after it, i.e. when all tests have been completed.
+#
+#   NB. Currently this task is implemented only as a Windows batch, thus the test has attribute platform = 'Windows'.
+#
+#   Temporary comment. For debug purposes:
+#     1) find out the SUFFIX of the name of the FB service which is to be tested (e.g. 'DefaultInstance', '40SS' etc);
+#     2) copy file %fbt-repo%\tests\functional\tabloid\batches\setup-fb-for-replication.bat.txt
+#        to some place and rename it to "*.bat";
+#     3) open this .bat in an editor and adjust the value of the 'fbt_repo' variable;
+#     4) run: setup-fb-for-replication.bat [SUFFIX_OF_FB_SERVICE]
+#        where SUFFIX_OF_FB_SERVICE is the ending part of the FB service which you want to check:
+#        DefaultInstance ; 40ss ; 40cs ; 50ss ; 50cs etc
+#     5) the batch 'setup-fb-for-replication.bat' will:
+#        * stop the selected FB instance
+#        * create test databases (in !fbt_repo!\tmp\)
+#        * prepare journal/archive sub-folders for replication (also in !fbt_repo!\tmp\)
+#        * replace %fb_home%\replication.conf with an appropriate one
+#        * start the selected FB instance
+#     6) run this test (the FB instance will already be launched by setup-fb-for-replication.bat):
+#        %fbt_repo%\fbt-run2.bat blob-not-found-in-rw-replica-if-target-row-exists.fbt 50ss, etc
+#
+#   Confirmed bug on 4.0.1.2682 and 5.0.0.338; got in the replication.log:
+#       ERROR: Blob 128.0 is not found for table TEST
+#   Checked on:
+#       4.0.1.2691 SS/CS (32.3s/33.9s)
+#       5.0.0.351 SS/CS (28.5s/35.3s)
+#
+#
+# tracker_id:
+# min_versions: ['4.0.1']
+# versions: 4.0.1
+# qmid: None

+import pytest
+from firebird.qa import db_factory, python_act, Action

+# version: 4.0.1
+# resources: None

+substitutions_1 = [('Start removing objects in:.*', 'Start removing objects'), ('Finish. 
Total objects removed'), ('.* CREATE DATABASE .*', ''), ('FMT_DESCR .*', 'FMT_DESCR')] + +init_script_1 = """""" + +db_1 = db_factory(sql_dialect=3, init=init_script_1) + +# test_script_1 +#--- +# +# import os +# import subprocess +# import re +# import difflib +# import shutil +# import time +# +# os.environ["ISC_USER"] = user_name +# os.environ["ISC_PASSWORD"] = user_password +# +# ##################################### +# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 65 +# ##################################### +# +# svc = fdb.services.connect(host='localhost', user=user_name, password=user_password) +# FB_HOME = svc.get_home_directory() +# svc.close() +# +# engine = db_conn.engine_version # 4.0; 4.1; 5.0 etc -- type float +# fb_major = 'fb' + str(engine)[:1] + '0' # 'fb40'; 'fb50' +# +# cur = db_conn.cursor() +# cur.execute("select rdb$config_value from rdb$config where upper(rdb$config_name) = upper('ServerMode')") +# server_mode = 'XX' +# for r in cur: +# if r[0] == 'Super': +# server_mode = 'SS' +# elif r[0] == 'SuperClassic': +# server_mode = 'SC' +# elif r[0] == 'Classic': +# server_mode = 'CS' +# cur.close() +# +# # 'fbt-main.fb50.ss.fdb' etc: +# db_main = os.path.join( context['temp_directory'], 'fbt-main.' + fb_major + '.' + server_mode + '.fdb' ) +# db_repl = db_main.replace( 'fbt-main.', 'fbt-repl.') +# +# runProgram('gfix', ['-replica', 'read_write', 'localhost:' + db_repl]) +# +# # Folders for journalling and archieving segments. +# repl_journal_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.journal' ) +# repl_archive_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.archive' ) +# +# db_conn.close() +# +# #-------------------------------------------- +# +# def flush_and_close( file_handle ): +# # https://docs.python.org/2/library/os.html#os.fsync +# # If you're starting with a Python file object f, +# # first do f.flush(), and +# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. +# global os +# +# file_handle.flush() +# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: +# # otherwise: "OSError: [Errno 9] Bad file descriptor"! 
+# os.fsync(file_handle.fileno()) +# file_handle.close() +# +# #-------------------------------------------- +# +# def cleanup( f_names_list ): +# global os +# for i in range(len( f_names_list )): +# if type(f_names_list[i]) == file: +# del_name = f_names_list[i].name +# elif type(f_names_list[i]) == str: +# del_name = f_names_list[i] +# else: +# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.') +# del_name = None +# +# if del_name and os.path.isfile( del_name ): +# os.remove( del_name ) +# +# #-------------------------------------------- +# +# def wait_for_data_in_replica( fb_home, max_allowed_time_for_wait, db_main, prefix_msg = '' ): +# +# global re +# global difflib +# global time +# +# replold_lines = [] +# with open( os.path.join(fb_home,'replication.log'), 'r') as f: +# replold_lines = f.readlines() +# +# con = fdb.connect( dsn = 'localhost:' + db_main, no_db_triggers = 1) +# cur = con.cursor() +# cur.execute("select rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') from rdb$database") +# for r in cur: +# last_generated_repl_segment = r[0] +# cur.close() +# con.close() +# +# #print('last_generated_repl_segment:', last_generated_repl_segment) +# +# # VERBOSE: Segment 1 (2582 bytes) is replicated in 1 second(s), deleting the file +# p_expected=re.compile( '\\+\\s+verbose:\\s+segment\\s+%(last_generated_repl_segment)s\\s+\\(\\d+\\s+bytes\\)\\s+is\\s+replicated.*deleting' % locals(), re.IGNORECASE) +# p_unexpected = re.compile( '\\+\\s+ERROR: Blob .* not found', re.IGNORECASE) +# +# found_required_message, found_unexpected_message = False, False +# for i in range(0,max_allowed_time_for_wait): +# time.sleep(1) +# +# # Get content of fb_home replication.log _after_ isql finish: +# f_repllog_new = open( os.path.join(fb_home,'replication.log'), 'r') +# diff_data = difflib.unified_diff( +# replold_lines, +# f_repllog_new.readlines() +# ) +# f_repllog_new.close() +# +# for k,d in enumerate(diff_data): +# if p_unexpected.search(d): +# print( (prefix_msg + ' ' if prefix_msg else '') + 'UNEXPECTED message encountered:' ) +# print(d) +# found_unexpected_message = True +# break +# +# if p_expected.search(d): +# print( (prefix_msg + ' ' if prefix_msg else '') + 'FOUND message about replicated segment.' ) +# found_required_message = True +# break +# +# if found_required_message or found_unexpected_message: +# break +# +# if not found_required_message: +# print('UNEXPECTED RESULT: no message about replicated segment for %d seconds.' 
% max_allowed_time_for_wait) +# +# +# #-------------------------------------------- +# +# sql_ddl = ''' set bail on; +# set list on; +# --set blob all; +# set blobdisplay 6; +# +# recreate table test(id int primary key using index test_pk, bindata blob); +# commit; +# +# -- for debug only: +# select rdb$get_context('SYSTEM', 'DB_NAME'), rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') as last_generated_repl_segment from rdb$database; +# select +# RDB$DESCRIPTOR as fmt_descr +# from RDB$FORMATS natural join RDB$RELATIONS +# where RDB$RELATION_NAME = 'TEST'; +# quit; +# ''' % locals() +# +# +# f_sql_ddl = open( os.path.join(context['temp_directory'],'tmp_gh_7070_init.sql'), 'w') +# f_sql_ddl.write(sql_ddl) +# flush_and_close( f_sql_ddl ) +# +# f_ddl_log = open( ''.join( (os.path.splitext(f_sql_ddl.name)[0], '.log' ) ), 'w') +# f_ddl_err = open( ''.join( (os.path.splitext(f_sql_ddl.name)[0], '.err' ) ), 'w') +# subprocess.call( [ context['isql_path'], 'localhost:' + db_main, '-i', f_sql_ddl.name ], stdout = f_ddl_log, stderr = f_ddl_err) +# flush_and_close( f_ddl_log ) +# flush_and_close( f_ddl_err ) +# +# ############################################################################## +# ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### +# ############################################################################## +# wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-1A' ) +# +# con_repl = fdb.connect( dsn = 'localhost:' + db_repl) +# cur_repl = con_repl.cursor() +# cur_repl.execute('insert into test(id,bindata) values(?, ?)', (2, 'qwerty-on-replica')) +# con_repl.commit() +# cur_repl.close() +# con_repl.close() +# +# con_main = fdb.connect( dsn = 'localhost:' + db_main) +# cur_main = con_main.cursor() +# cur_main.execute('insert into test(id,bindata) values(?, ?)', (2, 'qwerty-on-master')) +# con_main.commit() +# cur_main.close() +# con_main.close() +# +# ############################################################################## +# ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### +# ############################################################################## +# wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-1B' ) +# +# +# runProgram('gfix', ['-replica', 'read_only', 'localhost:' + db_repl]) +# +# # return initial state of master DB: +# # remove all DB objects (tables, views, ...): +# # -------------------------------------------- +# sql_clean_ddl = os.path.join(context['files_location'],'drop-all-db-objects.sql') +# +# f_clean_log=open( os.path.join(context['temp_directory'],'drop-all-db-objects-gh_7070.log'), 'w') +# f_clean_err=open( ''.join( ( os.path.splitext(f_clean_log.name)[0], '.err') ), 'w') +# subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-i', sql_clean_ddl], stdout=f_clean_log, stderr=f_clean_err ) +# flush_and_close(f_clean_log) +# flush_and_close(f_clean_err) +# +# with open(f_clean_err.name,'r') as f: +# for line in f: +# print('UNEXPECTED STDERR in cleanup SQL: ' + line) +# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0 +# +# with open(f_clean_log.name,'r') as f: +# for line in f: +# # show number of dropped objects +# print(line) +# +# +# ############################################################################## +# ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### +# ############################################################################## +# wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, 
db_main, 'POINT-2' )
+#
+# f_main_meta_sql=open( os.path.join(context['temp_directory'],'db_main_meta_gh_7070.sql'), 'w')
+# subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_main_meta_sql, stderr=subprocess.STDOUT )
+# flush_and_close( f_main_meta_sql )
+#
+# f_repl_meta_sql=open( os.path.join(context['temp_directory'],'db_repl_meta_gh_7070.sql'), 'w')
+# subprocess.call( [context['isql_path'], 'localhost:' + db_repl, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_repl_meta_sql, stderr=subprocess.STDOUT )
+# flush_and_close( f_repl_meta_sql )
+#
+# db_main_meta=open(f_main_meta_sql.name, 'r')
+# db_repl_meta=open(f_repl_meta_sql.name, 'r')
+#
+# diffmeta = ''.join(difflib.unified_diff(
+#     db_main_meta.readlines(),
+#     db_repl_meta.readlines()
+# ))
+# db_main_meta.close()
+# db_repl_meta.close()
+#
+# f_meta_diff=open( os.path.join(context['temp_directory'],'db_meta_diff_gh_7070.txt'), 'w', buffering = 0)
+# f_meta_diff.write(diffmeta)
+# flush_and_close( f_meta_diff )
+#
+# # The following must issue only TWO rows:
+# #   UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_main]' ... */
+# #   UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_repl]' ... */
+# # Only these lines will be suppressed further (see subst. section):
+# with open(f_meta_diff.name, 'r') as f:
+#     for line in f:
+#         if line[:1] in ('-', '+') and line[:3] not in ('---','+++'):
+#             print('UNEXPECTED METADATA DIFF.: ' + line)
+#
+#
+# ######################
+# ### A C H T U N G ###
+# ######################
+# # MANDATORY, OTHERWISE REPLICATION GETS STUCK ON SECOND RUN OF THIS TEST
+# # WITH 'ERROR: Record format with length 68 is not found for table TEST':
+# runProgram('gfix', ['-sweep', 'localhost:' + db_repl])
+# runProgram('gfix', ['-sweep', 'localhost:' + db_main])
+# #######################
+#
+# # cleanup:
+# ##########
+# cleanup( (f_sql_ddl, f_ddl_log, f_ddl_err, f_clean_log, f_clean_err, f_main_meta_sql, f_repl_meta_sql, f_meta_diff) )
+#
+#
+#---
+act_1 = python_act('db_1', substitutions=substitutions_1)

+expected_stdout_1 = """
+    POINT-1A FOUND message about replicated segment.
+    POINT-1B FOUND message about replicated segment.
+    Start removing objects
+    Finish. Total objects removed
+    POINT-2 FOUND message about replicated segment.
+"""

+@pytest.mark.version('>=4.0.1')
+@pytest.mark.platform('Windows')
+def test_1(act_1: Action):
+    pytest.fail("Test not IMPLEMENTED")
+

diff --git a/tests/functional/replication/test_dblevel_triggers_must_not_fire_on_replica.py b/tests/functional/replication/test_dblevel_triggers_must_not_fire_on_replica.py
new file mode 100644
index 00000000..229696e6
--- /dev/null
+++ b/tests/functional/replication/test_dblevel_triggers_must_not_fire_on_replica.py
@@ -0,0 +1,482 @@
+#coding:utf-8
+#
+# id: tests.functional.replication.dblevel_triggers_must_not_fire_on_replica
+# title: Replica DB must not fire DB-level triggers, but the results of their activity on the master must eventually be seen in the replica.
+# description:
+#   https://github.com/FirebirdSQL/firebird/issues/6850
+#
+#   Test creates five DB-level triggers in the master DB (on connect/disconnect; on tx start/commit/rollback).
+#   Each of them registers the appropriate event in the table named 'log_db_triggers_activity'.
+#   This table must eventually have five records in BOTH databases (i.e. not only in master, but in replica also).
+#   After creating the metadata we make a test connect to the master DB to fire these triggers.
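+#
+#   A side note on observing both databases without distorting the log: the checking code below
+#   connects with no_db_triggers, so the check itself does not add DB_ATTACH/DB_DETACH rows.
+#   A minimal sketch (assuming the fdb driver and the db_main path defined later in the test body):
+#
+#       import fdb
+#       con = fdb.connect(dsn='localhost:' + db_main, no_db_triggers=1)  # ON CONNECT trigger will not fire
+#       cur = con.cursor()
+#       cur.execute('select count(*) from log_db_triggers_activity')
+#       print(cur.fetchone()[0])  # rows logged by earlier, ordinary connections only
+#       con.close()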
+#
+#   Then we wait until the replica catches up with the master; this delay lasts no longer than the threshold
+#   defined by the MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG variable (measured in seconds).
+#   During this delay, we check the replication log every second and search there for the line with the number
+#   of the last generated segment (which was replicated and finally deleted).
+#   We can assume that replication finished OK only when such a line is found (see 'POINT-1').
+#
+#   After this, we query the master and replica databases and obtain data from the 'log_db_triggers_activity' table: it must
+#   have records about every fired trigger. The content of this table must be identical on master and replica, see the queries
+#   to v_log_db_triggers_activity (both on master and replica DB).
+#
+#   Then we invoke ISQL, executing an auxiliary script that drops all DB objects on the master (with the '-nod' command switch).
+#   After all objects have been dropped, we have to wait again until the replica catches up with the master (see 'POINT-2').
+#
+#   Finally, we extract metadata for master and replica and compare them (see 'f_meta_diff').
+#   The only difference in metadata must be the 'CREATE DATABASE' statement with different DB names - we suppress it,
+#   thus no metadata difference must be issued.
+#
+#   ################
+#   ### N O T E  ###
+#   ################
+#   Test assumes that master and replica DB have been created beforehand.
+#   Also, it assumes that %FB_HOME%\replication.conf has been prepared with appropriate parameters for replication.
+#   Particularly, the names of directories and databases must contain info about the checked FB major version and ServerMode.
+#       * verbose = true // in order to find the line with the message that the required segment was replicated
+#       * section for master database with specified parameters:
+#           journal_directory = "!fbt_repo!\tmp\fb-replication.!fb_major!.!server_mode!.journal"
+#           journal_archive_directory = "!fbt_repo!\tmp\fb-replication.!fb_major!.!server_mode!.archive"
+#           journal_archive_command = "copy $(pathname) $(archivepathname)"
+#           journal_archive_timeout = 10
+#       * section for replica database with specified parameter:
+#           journal_source_directory = "!fbt_repo!\tmp\fb-replication.!fb_major!.!server_mode!.archive"
+#
+#   Master and replica databases must be created in the "!fbt_repo!\tmp" directory and have names like these:
+#       'fbt-main.fb40.SS.fdb'; 'fbt-repl.fb40.SS.fdb'; - for FB 4.x ('SS' = Super; 'CS' = Classic)
+#       'fbt-main.fb50.SS.fdb'; 'fbt-repl.fb50.SS.fdb'; - for FB 5.x ('SS' = Super; 'CS' = Classic)
+#   NB: a fixed numeric value ('40' or '50') must be used for any minor FB version (4.0; 4.0.1; 4.1; 5.0; 5.1 etc).
+#
+#   These two databases must NOT be dropped in any of the tests related to replication!
+#   They are created and dropped in the batch scenario which prepares the FB instance to be checked for each ServerMode
+#   and makes cleanup after it, i.e. when all tests have been completed.
+#
+#   NB. Currently this task is implemented only as a Windows batch, thus the test has attribute platform = 'Windows'.
+#
+#   Temporary comment. For debug purposes:
+#     1) find out the SUFFIX of the name of the FB service which is to be tested (e.g. 'DefaultInstance', '40SS' etc);
+#     2) copy file %fbt-repo%\tests\functional\tabloid\batches\setup-fb-for-replication.bat.txt
+#        to some place and rename it to "*.bat";
+#     3) open this .bat in an editor and adjust the value of the 'fbt_repo' variable;
+#     4) run: setup-fb-for-replication.bat [SUFFIX_OF_FB_SERVICE]
+#        where SUFFIX_OF_FB_SERVICE is the ending part of the FB service which you want to check:
+#        DefaultInstance ; 40ss ; 40cs ; 50ss ; 50cs etc
+#     5) the batch 'setup-fb-for-replication.bat' will:
+#        * stop the selected FB instance
+#        * create test databases (in !fbt_repo!\tmp\)
+#        * prepare journal/archive sub-folders for replication (also in !fbt_repo!\tmp\)
+#        * replace %fb_home%\replication.conf with an appropriate one
+#        * start the selected FB instance
+#     6) run this test (the FB instance will already be launched by setup-fb-for-replication.bat):
+#        %fbt_repo%\fbt-run2.bat dblevel-triggers-must-not-fire-on-replica.fbt 50ss, etc
+#
+#   Checked on:
+#       4.0.1.2519 SS: 56.48s, CS: 99.31s
+#       5.0.0.82 SS: 20.63s, CS: 21.39s
+#
+# tracker_id:
+# min_versions: ['4.0']
+# versions: 4.0
+# qmid: None

+import pytest
+from firebird.qa import db_factory, python_act, Action

+# version: 4.0
+# resources: None

+substitutions_1 = [('Start removing objects in:.*', 'Start removing objects'), ('Finish. Total objects removed: [1-9]\\d*', 'Finish. Total objects removed'), ('.* CREATE DATABASE .*', '')]

+init_script_1 = """"""

+db_1 = db_factory(sql_dialect=3, init=init_script_1)

+# test_script_1
+#---
+#
+# import os
+# import subprocess
+# import re
+# import difflib
+# import shutil
+# import time
+#
+# os.environ["ISC_USER"] = user_name
+# os.environ["ISC_PASSWORD"] = user_password
+#
+# #####################################
+# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 65
+# #####################################
+#
+# svc = fdb.services.connect(host='localhost', user=user_name, password=user_password)
+# FB_HOME = svc.get_home_directory()
+# svc.close()
+#
+# engine = db_conn.engine_version   # 4.0; 4.1; 5.0 etc -- type float
+# fb_major = 'fb' + str(engine)[:1] + '0'   # 'fb40'; 'fb50'
+#
+# cur = db_conn.cursor()
+# cur.execute("select rdb$config_value from rdb$config where upper(rdb$config_name) = upper('ServerMode')")
+# server_mode = 'XX'
+# for r in cur:
+#     if r[0] == 'Super':
+#         server_mode = 'SS'
+#     elif r[0] == 'SuperClassic':
+#         server_mode = 'SC'
+#     elif r[0] == 'Classic':
+#         server_mode = 'CS'
+# cur.close()
+#
+# # 'fbt-main.fb50.ss.fdb' etc:
+# db_main = os.path.join( context['temp_directory'], 'fbt-main.' + fb_major + '.' + server_mode + '.fdb' )
+# db_repl = db_main.replace( 'fbt-main.', 'fbt-repl.')
+#
+# # Folders for journalling and archiving segments:
+# repl_journal_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.journal' )
+# repl_archive_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.archive' )
+#
+# db_conn.close()
+#
+# #--------------------------------------------
+#
+# def flush_and_close( file_handle ):
+#     # https://docs.python.org/2/library/os.html#os.fsync
+#     # If you're starting with a Python file object f,
+#     # first do f.flush(), and
+#     # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
+#     global os
+#
+#     file_handle.flush()
+#     if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
+#         # otherwise: "OSError: [Errno 9] Bad file descriptor"!
+#         os.fsync(file_handle.fileno())
+#     file_handle.close()
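+#
+# # An equivalent context-manager form of flush_and_close() - just a sketch for
+# # illustration, not used by this test:
+# #
+# #     import os
+# #     from contextlib import contextmanager
+# #
+# #     @contextmanager
+# #     def durable_file(path, mode='w'):
+# #         f = open(path, mode)
+# #         try:
+# #             yield f
+# #         finally:
+# #             f.flush()
+# #             if f.mode not in ('r', 'rb') and f.name != os.devnull:
+# #                 os.fsync(f.fileno())  # push OS buffers to disk before the file is read back
+# #             f.close()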
+#
+# #--------------------------------------------
+#
+# def cleanup( f_names_list ):
+#     global os
+#     for i in range(len( f_names_list )):
+#         if type(f_names_list[i]) == file:
+#             del_name = f_names_list[i].name
+#         elif type(f_names_list[i]) == str:
+#             del_name = f_names_list[i]
+#         else:
+#             print('Unrecognized type of element:', f_names_list[i], ' - cannot be treated as a file.')
+#             del_name = None
+#
+#         if del_name and os.path.isfile( del_name ):
+#             os.remove( del_name )
+#
+# #--------------------------------------------
+#
+# def wait_for_data_in_replica( fb_home, max_allowed_time_for_wait, db_main, prefix_msg = '' ):
+#
+#     global re
+#     global difflib
+#     global time
+#
+#     replold_lines = []
+#     with open( os.path.join(fb_home,'replication.log'), 'r') as f:
+#         replold_lines = f.readlines()
+#
+#     con = fdb.connect( dsn = 'localhost:' + db_main, no_db_triggers = 1)
+#     cur = con.cursor()
+#     cur.execute("select rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') from rdb$database")
+#     for r in cur:
+#         last_generated_repl_segment = r[0]
+#     cur.close()
+#     con.close()
+#
+#     #print('last_generated_repl_segment:', last_generated_repl_segment)
+#
+#     # Sample of the lines we are looking for:
+#     # +IMAGE-PC1 (replica) Fri Jun 11 17:57:01 2021
+#     # +    Database: C:\\FBTESTING\\QA\\FBT-REPO\\TMP\\FBT-REPL.FB50.FDB
+#     # +    VERBOSE: Added 1 segment(s) to the processing queue
+#     # +
+#     # +IMAGE-PC1 (replica) Fri Jun 11 17:57:04 2021
+#     # +    Database: C:\\FBTESTING\\QA\\FBT-REPO\\TMP\\FBT-REPL.FB50.FDB
+#     # +    VERBOSE: Segment 1 (2582 bytes) is replicated in 1 second(s), deleting the file
+#
+#     p=re.compile( '\\+\\s+verbose:\\s+segment\\s+%(last_generated_repl_segment)s\\s+\\(\\d+\\s+bytes\\)\\s+is\\s+replicated.*deleting' % locals(), re.IGNORECASE)
+#
+#     found_required_message = False
+#     for i in range(0,max_allowed_time_for_wait):
+#         time.sleep(1)
+#
+#         # Get content of fb_home replication.log _after_ isql finished:
+#         f_repllog_new = open( os.path.join(fb_home,'replication.log'), 'r')
+#         diff_data = difflib.unified_diff(
+#             replold_lines,
+#             f_repllog_new.readlines()
+#         )
+#         f_repllog_new.close()
+#
+#         for k,d in enumerate(diff_data):
+#             if p.search(d):
+#                 print( (prefix_msg + ' ' if prefix_msg else '') + 'FOUND message about replicated segment.' )
+#                 found_required_message = True
+#                 break
+#
+#         if found_required_message:
+#             break
+#
+#     if not found_required_message:
+#         print('UNEXPECTED RESULT: no message about replicated segment for %d seconds.' % max_allowed_time_for_wait)
+#
+# #--------------------------------------------
+#
+# sql_ddl = '''    set bail on;
+#     set list on;
+#
+#     select mon$database_name from mon$database;
+#
+#     set term ^;
+#     execute block as
+#     begin
+#         -- Define context variable in order to prevent
+#         -- DB-level triggers from firing during this execution:
+#         rdb$set_context('USER_SESSION', 'SKIP_DBLEVEL_TRG','1');
+#     end
+#     ^
+#     set term ;^
+#
+#     -- ::: NB :::
+#     -- We cannot start this script from a 'zero point', i.e. 'create table ...; create view ...;' etc,
+#     -- because it would fail if the master or replica DB contains objects that remained there
+#     -- due to a failure of some previous test that also dealt with replication and used these databases.
+# -- Here we must remove all dependencies and only after this table can be recreated: +# create or alter trigger trg_tx_start on transaction start as begin end; +# create or alter trigger trg_tx_commit on transaction commit as begin end; +# create or alter trigger trg_tx_rollback on transaction rollback as begin end; +# create or alter trigger trg_connect active on connect as begin end; +# create or alter trigger trg_disconnect active on disconnect as begin end; +# create or alter procedure sp_log_dblevel_trg_event as begin end; +# create or alter view v_log_db_triggers_activity as select 1 x from rdb$database; +# +# -- result: no more objects that depend on table 'log_db_triggers_activity', now we can recreate it. +# +# recreate table log_db_triggers_activity ( +# id int generated by default as identity constraint pk_log_db_triggers_activity primary key +# ,dts timestamp default 'now' +# ,att integer default current_connection +# ,trn integer default current_transaction +# ,app varchar(80) +# ,evt varchar(80) +# ); +# +# create or alter view v_log_db_triggers_activity as select * from log_db_triggers_activity; +# +# set term ^; +# create or alter procedure sp_log_dblevel_trg_event ( +# a_event_type varchar(80) -- type of column log_db_triggers_activity.evt +# ,a_working_tx int default null +# ) +# as +# declare v_app varchar(255); +# declare p smallint; +# declare back_slash char(1); +# begin +# v_app = reverse( right(rdb$get_context('SYSTEM','CLIENT_PROCESS'), 80) ); +# back_slash = ascii_char(92); -- backward slash; do NOT specify it literally otherwise Python will handle it as empty string! +# p = maxvalue(position(back_slash in v_app ), position('/' in v_app )); +# v_app = reverse(substring(v_app from 1 for p-1)); +# execute statement( 'insert into v_log_db_triggers_activity( trn, app, evt) values( ?, ?, ? 
)' ) ( coalesce(:a_working_tx, current_transaction), :v_app, :a_event_type) ; +# +# end +# ^ +# +# create or alter trigger trg_tx_start on transaction start as +# begin +# if (rdb$get_context('USER_SESSION', 'SKIP_DBLEVEL_TRG') is null ) then +# -- execute procedure sp_log_dblevel_trg_event( 'TX_START, TIL=' || coalesce( rdb$get_context('SYSTEM', 'ISOLATION_LEVEL'), '[null]' ) ); +# execute procedure sp_log_dblevel_trg_event( 'TX_START' ); +# end +# ^ +# +# create or alter trigger trg_tx_commit on transaction commit as +# begin +# if (rdb$get_context('USER_SESSION', 'SKIP_DBLEVEL_TRG') is null ) then +# -- execute procedure sp_log_dblevel_trg_event( 'TX_COMMIT, TIL=' || coalesce( rdb$get_context('SYSTEM', 'ISOLATION_LEVEL'), '[null]' ) ); +# execute procedure sp_log_dblevel_trg_event( 'TX_COMMIT' ); +# end +# ^ +# +# create or alter trigger trg_tx_rollback on transaction rollback as +# declare v_current_tx int; +# begin +# v_current_tx = current_transaction; +# if (rdb$get_context('USER_SESSION', 'SKIP_DBLEVEL_TRG') is null ) then +# in autonomous transaction do +# -- execute procedure sp_log_dblevel_trg_event( 'TX_ROLLBACK, TIL=' || coalesce( rdb$get_context('SYSTEM', 'ISOLATION_LEVEL'), '[null]' ), v_current_tx ); +# execute procedure sp_log_dblevel_trg_event( 'TX_ROLLBACK' ); +# end +# ^ +# +# create or alter trigger trg_connect active on connect position 0 as +# begin +# if (rdb$get_context('USER_SESSION', 'SKIP_DBLEVEL_TRG') is null ) then +# execute procedure sp_log_dblevel_trg_event( 'DB_ATTACH' ); +# end +# ^ +# +# create or alter trigger trg_disconnect active on disconnect position 0 as +# declare v_current_tx int; +# begin +# if (rdb$get_context('USER_SESSION', 'SKIP_DBLEVEL_TRG') is null ) then +# execute procedure sp_log_dblevel_trg_event( 'DB_DETACH'); +# end +# ^ +# set term ;^ +# commit; +# +# select rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') as last_generated_repl_segment from rdb$database; +# quit; +# ''' % locals() +# +# +# f_sql_chk = open( os.path.join(context['temp_directory'],'tmp_repltest_skip_db_trg.sql'), 'w') +# f_sql_chk.write(sql_ddl) +# flush_and_close( f_sql_chk ) +# +# # Get content of FB_HOME replication.log _before_ launching ISQL: +# ############# +# +# replold_lines = [] +# with open( os.path.join(FB_HOME,'replication.log'), 'r') as f: +# replold_lines = f.readlines() +# +# f_sql_log = open( ''.join( (os.path.splitext(f_sql_chk.name)[0], '.log' ) ), 'w') +# f_sql_err = open( ''.join( (os.path.splitext(f_sql_chk.name)[0], '.err' ) ), 'w') +# subprocess.call( [ context['isql_path'], 'localhost:' + db_main, '-i', f_sql_chk.name ], stdout = f_sql_log, stderr = f_sql_err) +# flush_and_close( f_sql_log ) +# flush_and_close( f_sql_err ) +# +# last_generated_repl_segment = 0 +# +# with open(f_sql_err.name,'r') as f: +# for line in f: +# print('UNEXPECTED STDERR in initial SQL: ' + line) +# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0 +# +# if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> initial SQL script finished w/o errors +# +# # Test connect to master DB, just to fire DB-level triggers: +# ########################### +# con1 = fdb.connect( dsn = 'localhost:' + db_main) +# con1.execute_immediate('recreate table test(id int)') +# con1.close() +# +# ############################################################################## +# ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### +# ############################################################################## +# wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-1' ) +# 
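+#     # The same check could be done without spawning ISQL - a sketch using the fdb driver
+#     # directly (v_log_db_triggers_activity is the view created by the DDL script above):
+#     #
+#     #     for dsn in ('localhost:' + db_main, 'localhost:' + db_repl):
+#     #         con = fdb.connect(dsn=dsn, no_db_triggers=1)  # do not log extra rows while reading
+#     #         cur = con.cursor()
+#     #         cur.execute('select evt from v_log_db_triggers_activity order by id')
+#     #         print(dsn, [r[0] for r in cur.fetchall()])    # expected: the same five events on BOTH databases
+#     #         con.close()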
+# runProgram('isql', ['localhost:' + db_main, '-nod'], 'set count on; set list on; select evt as main_db_trigger_fired from v_log_db_triggers_activity order by id;') +# runProgram('isql', ['localhost:' + db_repl, '-nod'], 'set count on; set list on; select evt as repl_db_trigger_fired from v_log_db_triggers_activity order by id;') +# +# +# # return initial state of master DB: +# # remove all DB objects (tables, views, ...): +# # -------------------------------------------- +# sql_clean_ddl = os.path.join(context['files_location'],'drop-all-db-objects.sql') +# +# f_clean_log=open( os.path.join(context['temp_directory'],'drop-all-db-objects.log'), 'w') +# f_clean_err=open( ''.join( ( os.path.splitext(f_clean_log.name)[0], '.err') ), 'w') +# subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-i', sql_clean_ddl], stdout=f_clean_log, stderr=f_clean_err ) +# flush_and_close(f_clean_log) +# flush_and_close(f_clean_err) +# +# with open(f_clean_err.name,'r') as f: +# for line in f: +# print('UNEXPECTED STDERR in cleanup SQL: ' + line) +# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0 +# +# with open(f_clean_log.name,'r') as f: +# for line in f: +# # show number of dropped objects +# print(line) +# +# if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> initial SQL script finished w/o errors +# +# ############################################################################## +# ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### +# ############################################################################## +# wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-2' ) +# +# f_main_meta_sql=open( os.path.join(context['temp_directory'],'db_main_meta.sql'), 'w') +# subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_main_meta_sql, stderr=subprocess.STDOUT ) +# flush_and_close( f_main_meta_sql ) +# +# f_repl_meta_sql=open( os.path.join(context['temp_directory'],'db_repl_meta.sql'), 'w') +# subprocess.call( [context['isql_path'], 'localhost:' + db_repl, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_repl_meta_sql, stderr=subprocess.STDOUT ) +# flush_and_close( f_repl_meta_sql ) +# +# db_main_meta=open(f_main_meta_sql.name, 'r') +# db_repl_meta=open(f_repl_meta_sql.name, 'r') +# +# diffmeta = ''.join(difflib.unified_diff( +# db_main_meta.readlines(), +# db_repl_meta.readlines() +# )) +# db_main_meta.close() +# db_repl_meta.close() +# +# f_meta_diff=open( os.path.join(context['temp_directory'],'db_meta_diff.txt'), 'w', buffering = 0) +# f_meta_diff.write(diffmeta) +# flush_and_close( f_meta_diff ) +# +# # Following must issue only TWO rows: +# # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_main]' ... */ +# # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_repl]' ... */ +# # Only thes lines will be suppressed further (see subst. section): +# with open(f_meta_diff.name, 'r') as f: +# for line in f: +# if line[:1] in ('-', '+') and line[:3] not in ('---','+++'): +# print('UNEXPECTED METADATA DIFF.: ' + line) +# +# +# # cleanup: +# ########## +# cleanup( (f_sql_chk, f_sql_log, f_sql_err,f_clean_log,f_clean_err,f_main_meta_sql,f_repl_meta_sql,f_meta_diff) ) +# +#--- +act_1 = python_act('db_1', substitutions=substitutions_1) + +expected_stdout_1 = """ + POINT-1 FOUND message about replicated segment. 
+
+    MAIN_DB_TRIGGER_FIRED DB_ATTACH
+    MAIN_DB_TRIGGER_FIRED TX_START
+    MAIN_DB_TRIGGER_FIRED TX_ROLLBACK
+    MAIN_DB_TRIGGER_FIRED TX_COMMIT
+    MAIN_DB_TRIGGER_FIRED DB_DETACH
+    Records affected: 5
+
+    REPL_DB_TRIGGER_FIRED DB_ATTACH
+    REPL_DB_TRIGGER_FIRED TX_START
+    REPL_DB_TRIGGER_FIRED TX_ROLLBACK
+    REPL_DB_TRIGGER_FIRED TX_COMMIT
+    REPL_DB_TRIGGER_FIRED DB_DETACH
+    Records affected: 5
+
+    Start removing objects in: C:\\FBTESTING\\QA\\FBT-REPO\\TMP\\FBT-MAIN.FB50.FDB
+    Finish. Total objects removed: 9
+
+    POINT-2 FOUND message about replicated segment.
+"""

+@pytest.mark.version('>=4.0')
+@pytest.mark.platform('Windows')
+def test_1(act_1: Action):
+    pytest.fail("Test not IMPLEMENTED")
+

diff --git a/tests/functional/replication/test_ddl_triggers_must_not_fire_on_replica.py b/tests/functional/replication/test_ddl_triggers_must_not_fire_on_replica.py
new file mode 100644
index 00000000..050b55fa
--- /dev/null
+++ b/tests/functional/replication/test_ddl_triggers_must_not_fire_on_replica.py
@@ -0,0 +1,1305 @@
+#coding:utf-8
+#
+# id: tests.functional.replication.ddl_triggers_must_not_fire_on_replica
+# title: DDL-triggers must fire only on master DB.
+# description:
+#   Test creates all kinds of DDL triggers in the master DB.
+#   Each of them registers the appropriate event in the table named 'log_ddl_triggers_activity'.
+#   After this we create all kinds of DB objects (tables, procedures, functions, etc.) in the master DB to fire these triggers.
+#
+#   Then we wait until the replica catches up with the master; this delay lasts no longer than the threshold
+#   defined by the MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG variable (measured in seconds).
+#   During this delay, we check the replication log every second and search there for the line with the number
+#   of the last generated segment (which was replicated and finally deleted).
+#   We can assume that replication finished OK only when such a line is found (see 'POINT-1').
+#
+#   After this, we do the following:
+#   1) compare metadata of master and replica DB; they must be equal (except file names);
+#   2) obtain data from the 'log_ddl_triggers_activity' table:
+#      2.1) on master it must have a record about every DDL trigger that fired;
+#      2.2) on replica this table must be EMPTY (because DDL triggers must not fire on replica).
+#
+#   Then we invoke ISQL, executing an auxiliary script that drops all DB objects on the master (with the '-nod' command switch).
+#   After all objects have been dropped, we have to wait again until the replica catches up with the master (see 'POINT-2').
+#
+#   Finally, we extract metadata for master and replica after this cleanup and compare them.
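+#
+#   For illustration, each generated trigger has the same shape; a minimal sketch of one of them,
+#   executed via the fdb driver (db_main as defined later in the legacy test body):
+#
+#       import fdb
+#       con = fdb.connect(dsn='localhost:' + db_main)
+#       con.execute_immediate("""
+#           create or alter trigger trg_create_table_before active before create table as
+#           begin
+#               if (rdb$get_context('USER_SESSION', 'SKIP_DDL_TRIGGER') is null) then
+#                   insert into log_ddl_triggers_activity(ddl_trigger_name, event_type, object_type, ddl_event, object_name)
+#                   values ('CREATE_TABLE_BEFORE',
+#                           rdb$get_context('DDL_TRIGGER', 'EVENT_TYPE'),
+#                           rdb$get_context('DDL_TRIGGER', 'OBJECT_TYPE'),
+#                           rdb$get_context('DDL_TRIGGER', 'DDL_EVENT'),
+#                           rdb$get_context('DDL_TRIGGER', 'OBJECT_NAME'));
+#           end""")
+#       con.commit()
+#       con.close()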
+#
+#   ################
+#   ### N O T E  ###
+#   ################
+#   Test assumes that master and replica DB have been created beforehand.
+#   Also, it assumes that %FB_HOME%\replication.conf has been prepared with appropriate parameters for replication.
+#   Particularly, the names of directories and databases must contain info about the checked FB major version and ServerMode.
+#       * verbose = true // in order to find the line with the message that the required segment was replicated
+#       * section for master database with specified parameters:
+#           journal_directory = "!fbt_repo!\tmp\fb-replication.!fb_major!.!server_mode!.journal"
+#           journal_archive_directory = "!fbt_repo!\tmp\fb-replication.!fb_major!.!server_mode!.archive"
+#           journal_archive_command = "copy $(pathname) $(archivepathname)"
+#           journal_archive_timeout = 10
+#       * section for replica database with specified parameter:
+#           journal_source_directory = "!fbt_repo!\tmp\fb-replication.!fb_major!.!server_mode!.archive"
+#
+#   Master and replica databases must be created in the "!fbt_repo!\tmp" directory and have names like these:
+#       'fbt-main.fb40.SS.fdb'; 'fbt-repl.fb40.SS.fdb'; - for FB 4.x ('SS' = Super; 'CS' = Classic)
+#       'fbt-main.fb50.SS.fdb'; 'fbt-repl.fb50.SS.fdb'; - for FB 5.x ('SS' = Super; 'CS' = Classic)
+#   NB: a fixed numeric value ('40' or '50') must be used for any minor FB version (4.0; 4.0.1; 4.1; 5.0; 5.1 etc).
+#
+#   These two databases must NOT be dropped in any of the tests related to replication!
+#   They are created and dropped in the batch scenario which prepares the FB instance to be checked for each ServerMode
+#   and makes cleanup after it, i.e. when all tests have been completed.
+#
+#   NB. Currently this task is implemented only as a Windows batch, thus the test has attribute platform = 'Windows'.
+#
+#   Temporary comment. For debug purposes:
+#     1) find out the SUFFIX of the name of the FB service which is to be tested (e.g. 'DefaultInstance', '40SS' etc);
+#     2) copy file %fbt-repo%\tests\functional\tabloid\batches\setup-fb-for-replication.bat.txt
+#        to some place and rename it to "*.bat";
+#     3) open this .bat in an editor and adjust the value of the 'fbt_repo' variable;
+#     4) run: setup-fb-for-replication.bat [SUFFIX_OF_FB_SERVICE]
+#        where SUFFIX_OF_FB_SERVICE is the ending part of the FB service which you want to check:
+#        DefaultInstance ; 40ss ; 40cs ; 50ss ; 50cs etc
+#     5) the batch 'setup-fb-for-replication.bat' will:
+#        * stop the selected FB instance
+#        * create test databases (in !fbt_repo!\tmp\)
+#        * prepare journal/archive sub-folders for replication (also in !fbt_repo!\tmp\)
+#        * replace %fb_home%\replication.conf with an appropriate one
+#        * start the selected FB instance
+#     6) run this test (the FB instance will already be launched by setup-fb-for-replication.bat):
+#        %fbt_repo%\fbt-run2.bat ddl-triggers-must-not-fire-on-replica.fbt 50ss, etc
+#
+#   Checked on:
+#       4.0.1.2547 (SS/CS), 5.0.0.120 (SS/CS).
+#
+# tracker_id:
+# min_versions: ['4.0']
+# versions: 4.0
+# qmid: None

+import pytest
+from firebird.qa import db_factory, python_act, Action

+# version: 4.0
+# resources: None

+substitutions_1 = [('Start removing objects in:.*', 'Start removing objects'), ('Finish. Total objects removed: [1-9]\\d*', 'Finish. Total objects removed'), ('.* CREATE DATABASE .*', '')]

+init_script_1 = """"""

+db_1 = db_factory(charset='UTF8', sql_dialect=3, init=init_script_1)

+# test_script_1
+#---
+#
+# import os
+# import subprocess
+# import re
+# import difflib
+# import shutil
+# import time
+#
+# os.environ["ISC_USER"] = user_name
+# os.environ["ISC_PASSWORD"] = user_password
+#
+# #####################################
+# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 75
+# KEEP_LOGS_FOR_DEBUG = 0
+# #####################################
+#
+# svc = fdb.services.connect(host='localhost', user=user_name, password=user_password)
+# FB_HOME = svc.get_home_directory()
+# svc.close()
+#
+# engine = db_conn.engine_version   # 4.0; 4.1; 5.0 etc -- type float
+# fb_major = 'fb' + str(engine)[:1] + '0'   # 'fb40'; 'fb50'
+#
+# cur = db_conn.cursor()
+# cur.execute("select rdb$config_value from rdb$config where upper(rdb$config_name) = upper('ServerMode')")
+# server_mode = 'XX'
+# for r in cur:
+#     if r[0] == 'Super':
+#         server_mode = 'SS'
+#     elif r[0] == 'SuperClassic':
+#         server_mode = 'SC'
+#     elif r[0] == 'Classic':
+#         server_mode = 'CS'
+# cur.close()
+#
+# # 'fbt-main.fb50.ss.fdb' etc:
+# db_main = os.path.join( context['temp_directory'], 'fbt-main.' + fb_major + '.' 
+ server_mode + '.fdb' ) +# db_repl = db_main.replace( 'fbt-main.', 'fbt-repl.') +# +# # Folders for journalling and archieving segments. +# repl_journal_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.journal' ) +# repl_archive_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.archive' ) +# +# db_conn.close() +# +# #-------------------------------------------- +# +# def flush_and_close( file_handle ): +# # https://docs.python.org/2/library/os.html#os.fsync +# # If you're starting with a Python file object f, +# # first do f.flush(), and +# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. +# global os +# +# file_handle.flush() +# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: +# # otherwise: "OSError: [Errno 9] Bad file descriptor"! +# os.fsync(file_handle.fileno()) +# file_handle.close() +# +# #-------------------------------------------- +# +# def cleanup( f_names_list ): +# global os +# for i in range(len( f_names_list )): +# if type(f_names_list[i]) == file: +# del_name = f_names_list[i].name +# elif type(f_names_list[i]) == str: +# del_name = f_names_list[i] +# else: +# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.') +# del_name = None +# +# if del_name and os.path.isfile( del_name ): +# os.remove( del_name ) +# +# #-------------------------------------------- +# +# def wait_for_data_in_replica( fb_home, max_allowed_time_for_wait, db_main, prefix_msg = '' ): +# +# global re +# global difflib +# global time +# +# replold_lines = [] +# with open( os.path.join(fb_home,'replication.log'), 'r') as f: +# replold_lines = f.readlines() +# +# con = fdb.connect( dsn = 'localhost:' + db_main, no_db_triggers = 1) +# cur = con.cursor() +# cur.execute("select rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') from rdb$database") +# for r in cur: +# last_generated_repl_segment = r[0] +# cur.close() +# con.close() +# +# #print('last_generated_repl_segment:', last_generated_repl_segment) +# +# # VERBOSE: Segment 1 (2582 bytes) is replicated in 1 second(s), deleting the file +# p=re.compile( '\\+\\s+verbose:\\s+segment\\s+%(last_generated_repl_segment)s\\s+\\(\\d+\\s+bytes\\)\\s+is\\s+replicated.*deleting' % locals(), re.IGNORECASE) +# +# found_required_message = False +# for i in range(0,max_allowed_time_for_wait): +# time.sleep(1) +# +# # Get content of fb_home replication.log _after_ isql finish: +# f_repllog_new = open( os.path.join(fb_home,'replication.log'), 'r') +# diff_data = difflib.unified_diff( +# replold_lines, +# f_repllog_new.readlines() +# ) +# f_repllog_new.close() +# +# for k,d in enumerate(diff_data): +# if p.search(d): +# print( (prefix_msg + ' ' if prefix_msg else '') + 'FOUND message about replicated segment.' ) +# found_required_message = True +# break +# +# if found_required_message: +# break +# +# if not found_required_message: +# print('UNEXPECTED RESULT: no message about replicated segment for %d seconds.' 
% max_allowed_time_for_wait) +# +# #-------------------------------------------- +# +# def compare_metadata(db_main, db_repl, nm_suffix = '', keep4dbg = 0): +# +# global subprocess, difflib, flush_and_close, cleanup +# +# f_main_meta_sql=open( os.path.join(context['temp_directory'],'db_main_meta'+nm_suffix+'.sql'), 'w') +# subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_main_meta_sql, stderr=subprocess.STDOUT ) +# flush_and_close( f_main_meta_sql ) +# +# f_repl_meta_sql=open( os.path.join(context['temp_directory'],'db_repl_meta'+nm_suffix+'.sql'), 'w') +# subprocess.call( [context['isql_path'], 'localhost:' + db_repl, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_repl_meta_sql, stderr=subprocess.STDOUT ) +# flush_and_close( f_repl_meta_sql ) +# +# db_main_meta=open(f_main_meta_sql.name, 'r') +# db_repl_meta=open(f_repl_meta_sql.name, 'r') +# +# diffmeta = ''.join(difflib.unified_diff( +# db_main_meta.readlines(), +# db_repl_meta.readlines() +# )) +# db_main_meta.close() +# db_repl_meta.close() +# +# f_meta_diff=open( os.path.join(context['temp_directory'],'db_meta_diff'+nm_suffix+'.txt'), 'w') +# f_meta_diff.write(diffmeta) +# flush_and_close( f_meta_diff ) +# +# # Following must issue only TWO rows: +# # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_main]' ... */ +# # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_repl]' ... */ +# # Only thes lines will be suppressed further (see subst. section): +# with open(f_meta_diff.name, 'r') as f: +# for line in f: +# if line[:1] in ('-', '+') and line[:3] not in ('---','+++'): +# print('UNEXPECTED METADATA DIFF.: ' + line) +# +# if not keep4dbg: +# cleanup( (f_main_meta_sql,f_repl_meta_sql,f_meta_diff ) ) +# +# #-------------------------------------------- +# +# sql_ddl = ''' set bail on; +# set list on; +# +# select mon$database_name from mon$database; +# +# recreate table log_ddl_triggers_activity ( +# id int generated by default as identity constraint pk_log_ddl_triggers_activity primary key +# ,ddl_trigger_name varchar(64) +# ,event_type varchar(25) not null +# ,object_type varchar(25) not null +# ,ddl_event varchar(25) not null +# ,object_name varchar(64) not null +# ); +# +# +# set term ^; +# execute block as +# declare v_lf char(1) = x'0A'; +# begin +# rdb$set_context('USER_SESSION', 'SKIP_DDL_TRIGGER', '1'); +# +# for +# with +# a as ( +# select 'ANY DDL STATEMENT' x from rdb$database union all +# select 'CREATE TABLE' from rdb$database union all +# select 'ALTER TABLE' from rdb$database union all +# select 'DROP TABLE' from rdb$database union all +# select 'CREATE PROCEDURE' from rdb$database union all +# select 'ALTER PROCEDURE' from rdb$database union all +# select 'DROP PROCEDURE' from rdb$database union all +# select 'CREATE FUNCTION' from rdb$database union all +# select 'ALTER FUNCTION' from rdb$database union all +# select 'DROP FUNCTION' from rdb$database union all +# select 'CREATE TRIGGER' from rdb$database union all +# select 'ALTER TRIGGER' from rdb$database union all +# select 'DROP TRIGGER' from rdb$database union all +# select 'CREATE EXCEPTION' from rdb$database union all +# select 'ALTER EXCEPTION' from rdb$database union all +# select 'DROP EXCEPTION' from rdb$database union all +# select 'CREATE VIEW' from rdb$database union all +# select 'ALTER VIEW' from rdb$database union all +# select 'DROP VIEW' from rdb$database union all +# select 'CREATE DOMAIN' from rdb$database union all +# select 'ALTER DOMAIN' from rdb$database union 
all +# select 'DROP DOMAIN' from rdb$database union all +# select 'CREATE ROLE' from rdb$database union all +# select 'ALTER ROLE' from rdb$database union all +# select 'DROP ROLE' from rdb$database union all +# select 'CREATE SEQUENCE' from rdb$database union all +# select 'ALTER SEQUENCE' from rdb$database union all +# select 'DROP SEQUENCE' from rdb$database union all +# select 'CREATE USER' from rdb$database union all +# select 'ALTER USER' from rdb$database union all +# select 'DROP USER' from rdb$database union all +# select 'CREATE INDEX' from rdb$database union all +# select 'ALTER INDEX' from rdb$database union all +# select 'DROP INDEX' from rdb$database union all +# select 'CREATE COLLATION' from rdb$database union all +# select 'DROP COLLATION' from rdb$database union all +# select 'ALTER CHARACTER SET' from rdb$database union all +# select 'CREATE PACKAGE' from rdb$database union all +# select 'ALTER PACKAGE' from rdb$database union all +# select 'DROP PACKAGE' from rdb$database union all +# select 'CREATE PACKAGE BODY' from rdb$database union all +# select 'DROP PACKAGE BODY' from rdb$database +# ) +# ,e as ( +# select 'before' w from rdb$database union all select 'after' from rdb$database +# ) +# ,t as ( +# select upper(trim(replace(trim(a.x),' ','_')) || iif(e.w='before', '_before', '_after')) as trg_name, a.x, e.w +# from e, a +# ) +# +# select +# 'create or alter trigger trg_' || t.trg_name +# || ' active ' || t.w || ' ' || trim(t.x) || ' as ' +# || :v_lf +# || 'begin' +# || :v_lf +# || q'{ if (rdb$get_context('USER_SESSION', 'SKIP_DDL_TRIGGER') is null) then}' +# || :v_lf +# || ' insert into log_ddl_triggers_activity(ddl_trigger_name, event_type, object_type, ddl_event, object_name) values(' +# || :v_lf +# || q'{'}' || trim(t.trg_name) || q'{'}' +# || :v_lf +# || q'{, rdb$get_context('DDL_TRIGGER', 'EVENT_TYPE')}' +# || :v_lf +# || q'{, rdb$get_context('DDL_TRIGGER', 'OBJECT_TYPE')}' +# || :v_lf +# || q'{, rdb$get_context('DDL_TRIGGER', 'DDL_EVENT')}' +# || :v_lf +# || q'{, rdb$get_context('DDL_TRIGGER', 'OBJECT_NAME')}' +# || :v_lf +# || ');' +# || :v_lf +# || ' end' +# as sttm +# from t +# as cursor c +# do begin +# execute statement(c.sttm) with autonomous transaction; +# end +# +# rdb$set_context('USER_SESSION', 'SKIP_DDL_TRIGGER', null); +# end +# ^ +# set term ;^ +# commit; +# +# /* +# select rt.rdb$trigger_name,rt.rdb$relation_name,rt.rdb$trigger_type,rt.rdb$trigger_source +# from rdb$triggers rt +# where +# rt.rdb$system_flag is distinct from 1 +# and rt.rdb$trigger_inactive is distinct from 1; +# +# select * from log_ddl_triggers_activity; +# */ +# +# -- set count on; +# -- set echo on; +# +# set term ^; +# +# create table test(id int not null, name varchar(10)) +# ^ +# alter table test add constraint test_pk primary key(id) +# ^ +# ---------- +# create procedure sp_test as begin end +# ^ +# alter procedure sp_test as declare x int; begin x=1; end +# ^ +# ---------- +# create function fn_test(a_id int) returns bigint as +# begin +# return a_id * a_id; +# end +# ^ +# alter function fn_test(a_id int) returns int128 as +# begin +# return a_id * a_id * a_id; +# end +# ^ +# ---------- +# create trigger trg_connect_test on connect as +# begin +# end +# ^ +# alter trigger trg_connect_test as +# declare x int; +# begin +# x = 1; +# end +# ^ +# ---------- +# create exception exc_test 'Invalud value: @1' +# ^ +# alter exception exc_test 'Bad values: @1 and @2' +# ^ +# ---------- +# create view v_test as select 1 x from rdb$database +# ^ +# alter view v_test as select 1 x, 
2 y from rdb$database +# ^ +# ---------- +# create domain dm_test int +# ^ +# alter domain dm_test set not null +# ^ +# ---------- +# create role r_test +# ^ +# alter role r_test set system privileges to use_gstat_utility, ignore_db_triggers +# ^ +# ---------- +# create sequence g_test +# ^ +# alter sequence g_test restart with 123 +# ^ +# ---------- +# /* +# create or alter user u_test password '123' using plugin Srp +# ^ +# alter user u_test password '456' +# ^ +# */ +# ---------- +# create index test_name on test(name) +# ^ +# alter index test_name inactive +# ^ +# ---------- +# create collation name_coll for utf8 from unicode case insensitive +# ^ +# ---------- +# alter character set iso8859_1 set default collation pt_br +# ^ +# ---------- +# create or alter package pg_test as +# begin +# function pg_fn1 returns int; +# end +# ^ +# alter package pg_test as +# begin +# function pg_fn1(a_x int) returns int128; +# end +# ^ +# +# create package body pg_test as +# begin +# function pg_fn1(a_x int) returns int128 as +# begin +# return a_x * a_x * a_x; +# end +# end +# ^ +# set term ;^ +# commit; +# +# +# select rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') as last_generated_repl_segment from rdb$database; +# quit; +# ''' % locals() +# +# +# f_sql_chk = open( os.path.join(context['temp_directory'],'tmp_repltest_skip_ddl_trg.sql'), 'w') +# f_sql_chk.write(sql_ddl) +# flush_and_close( f_sql_chk ) +# +# f_sql_log = open( ''.join( (os.path.splitext(f_sql_chk.name)[0], '.log' ) ), 'w') +# f_sql_err = open( ''.join( (os.path.splitext(f_sql_chk.name)[0], '.err' ) ), 'w') +# subprocess.call( [ context['isql_path'], 'localhost:' + db_main, '-i', f_sql_chk.name ], stdout = f_sql_log, stderr = f_sql_err) +# flush_and_close( f_sql_log ) +# flush_and_close( f_sql_err ) +# +# with open(f_sql_err.name,'r') as f: +# for line in f: +# print('UNEXPECTED STDERR in initial SQL: ' + line) +# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0 +# +# +# if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> initial SQL script finished w/o errors +# +# ############################################################################## +# ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### +# ############################################################################## +# wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-1' ) +# +# compare_metadata(db_main, db_repl, '.1', KEEP_LOGS_FOR_DEBUG) +# +# sql_get_result = ''' +# set list on; +# set count on; +# select +# iif(coalesce(rdb$get_context('SYSTEM','REPLICA_MODE'),'') = '', 'MASTER', 'REPLICA') as replication_mode +# ,a.id +# ,a.ddl_trigger_name +# ,a.event_type +# ,a.object_type +# ,a.ddl_event +# ,a.object_name +# from rdb$database r +# left join log_ddl_triggers_activity a on 1=1 +# order by a.id; +# ''' +# +# runProgram('isql', ['localhost:' + db_main, '-nod'], sql_get_result) +# runProgram('isql', ['localhost:' + db_repl, '-nod'], sql_get_result) +# +# +# # return initial state of master DB: +# # remove all DB objects (tables, views, ...): +# # -------------------------------------------- +# sql_clean_ddl = os.path.join(context['files_location'],'drop-all-db-objects.sql') +# +# f_clean_log=open( os.path.join(context['temp_directory'],'drop-all-db-objects.log'), 'w') +# f_clean_err=open( ''.join( ( os.path.splitext(f_clean_log.name)[0], '.err') ), 'w') +# subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-i', sql_clean_ddl], stdout=f_clean_log, stderr=f_clean_err ) +# flush_and_close(f_clean_log) +# 
flush_and_close(f_clean_err) +# +# with open(f_clean_err.name,'r') as f: +# for line in f: +# print('UNEXPECTED STDERR in cleanup SQL: ' + line) +# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0 +# +# with open(f_clean_log.name,'r') as f: +# for line in f: +# # show number of dropped objects +# print(line) +# +# if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> initial SQL script finished w/o errors +# +# ############################################################################## +# ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### +# ############################################################################## +# wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-2' ) +# +# compare_metadata(db_main, db_repl, '.2', KEEP_LOGS_FOR_DEBUG) +# +# # cleanup: +# ########## +# if not KEEP_LOGS_FOR_DEBUG: +# cleanup( (f_sql_chk, f_sql_log, f_sql_err, f_clean_log, f_clean_err) ) +# +#--- +act_1 = python_act('db_1', substitutions=substitutions_1) + +expected_stdout_1 = """ + POINT-1 FOUND message about replicated segment. + + REPLICATION_MODE MASTER + ID 1 + DDL_TRIGGER_NAME CREATE_TABLE_BEFORE + EVENT_TYPE CREATE + OBJECT_TYPE TABLE + DDL_EVENT CREATE TABLE + OBJECT_NAME TEST + REPLICATION_MODE MASTER + ID 2 + DDL_TRIGGER_NAME ANY_DDL_STATEMENT_BEFORE + EVENT_TYPE CREATE + OBJECT_TYPE TABLE + DDL_EVENT CREATE TABLE + OBJECT_NAME TEST + REPLICATION_MODE MASTER + ID 3 + DDL_TRIGGER_NAME CREATE_TABLE_AFTER + EVENT_TYPE CREATE + OBJECT_TYPE TABLE + DDL_EVENT CREATE TABLE + OBJECT_NAME TEST + REPLICATION_MODE MASTER + ID 4 + DDL_TRIGGER_NAME ANY_DDL_STATEMENT_AFTER + EVENT_TYPE CREATE + OBJECT_TYPE TABLE + DDL_EVENT CREATE TABLE + OBJECT_NAME TEST + REPLICATION_MODE MASTER + ID 5 + DDL_TRIGGER_NAME ALTER_TABLE_BEFORE + EVENT_TYPE ALTER + OBJECT_TYPE TABLE + DDL_EVENT ALTER TABLE + OBJECT_NAME TEST + REPLICATION_MODE MASTER + ID 6 + DDL_TRIGGER_NAME ANY_DDL_STATEMENT_BEFORE + EVENT_TYPE ALTER + OBJECT_TYPE TABLE + DDL_EVENT ALTER TABLE + OBJECT_NAME TEST + REPLICATION_MODE MASTER + ID 7 + DDL_TRIGGER_NAME ALTER_TABLE_AFTER + EVENT_TYPE ALTER + OBJECT_TYPE TABLE + DDL_EVENT ALTER TABLE + OBJECT_NAME TEST + REPLICATION_MODE MASTER + ID 8 + DDL_TRIGGER_NAME ANY_DDL_STATEMENT_AFTER + EVENT_TYPE ALTER + OBJECT_TYPE TABLE + DDL_EVENT ALTER TABLE + OBJECT_NAME TEST + REPLICATION_MODE MASTER + ID 9 + DDL_TRIGGER_NAME CREATE_PROCEDURE_BEFORE + EVENT_TYPE CREATE + OBJECT_TYPE PROCEDURE + DDL_EVENT CREATE PROCEDURE + OBJECT_NAME SP_TEST + REPLICATION_MODE MASTER + ID 10 + DDL_TRIGGER_NAME ANY_DDL_STATEMENT_BEFORE + EVENT_TYPE CREATE + OBJECT_TYPE PROCEDURE + DDL_EVENT CREATE PROCEDURE + OBJECT_NAME SP_TEST + REPLICATION_MODE MASTER + ID 11 + DDL_TRIGGER_NAME CREATE_PROCEDURE_AFTER + EVENT_TYPE CREATE + OBJECT_TYPE PROCEDURE + DDL_EVENT CREATE PROCEDURE + OBJECT_NAME SP_TEST + REPLICATION_MODE MASTER + ID 12 + DDL_TRIGGER_NAME ANY_DDL_STATEMENT_AFTER + EVENT_TYPE CREATE + OBJECT_TYPE PROCEDURE + DDL_EVENT CREATE PROCEDURE + OBJECT_NAME SP_TEST + REPLICATION_MODE MASTER + ID 13 + DDL_TRIGGER_NAME ALTER_PROCEDURE_BEFORE + EVENT_TYPE ALTER + OBJECT_TYPE PROCEDURE + DDL_EVENT ALTER PROCEDURE + OBJECT_NAME SP_TEST + REPLICATION_MODE MASTER + ID 14 + DDL_TRIGGER_NAME ANY_DDL_STATEMENT_BEFORE + EVENT_TYPE ALTER + OBJECT_TYPE PROCEDURE + DDL_EVENT ALTER PROCEDURE + OBJECT_NAME SP_TEST + REPLICATION_MODE MASTER + ID 15 + DDL_TRIGGER_NAME ALTER_PROCEDURE_AFTER + EVENT_TYPE ALTER + OBJECT_TYPE PROCEDURE + DDL_EVENT ALTER PROCEDURE + OBJECT_NAME SP_TEST + REPLICATION_MODE MASTER + ID 16 + 
DDL_TRIGGER_NAME ANY_DDL_STATEMENT_AFTER + EVENT_TYPE ALTER + OBJECT_TYPE PROCEDURE + DDL_EVENT ALTER PROCEDURE + OBJECT_NAME SP_TEST + REPLICATION_MODE MASTER + ID 17 + DDL_TRIGGER_NAME CREATE_FUNCTION_BEFORE + EVENT_TYPE CREATE + OBJECT_TYPE FUNCTION + DDL_EVENT CREATE FUNCTION + OBJECT_NAME FN_TEST + REPLICATION_MODE MASTER + ID 18 + DDL_TRIGGER_NAME ANY_DDL_STATEMENT_BEFORE + EVENT_TYPE CREATE + OBJECT_TYPE FUNCTION + DDL_EVENT CREATE FUNCTION + OBJECT_NAME FN_TEST + REPLICATION_MODE MASTER + ID 19 + DDL_TRIGGER_NAME CREATE_FUNCTION_AFTER + EVENT_TYPE CREATE + OBJECT_TYPE FUNCTION + DDL_EVENT CREATE FUNCTION + OBJECT_NAME FN_TEST + REPLICATION_MODE MASTER + ID 20 + DDL_TRIGGER_NAME ANY_DDL_STATEMENT_AFTER + EVENT_TYPE CREATE + OBJECT_TYPE FUNCTION + DDL_EVENT CREATE FUNCTION + OBJECT_NAME FN_TEST + REPLICATION_MODE MASTER + ID 21 + DDL_TRIGGER_NAME ALTER_FUNCTION_BEFORE + EVENT_TYPE ALTER + OBJECT_TYPE FUNCTION + DDL_EVENT ALTER FUNCTION + OBJECT_NAME FN_TEST + REPLICATION_MODE MASTER + ID 22 + DDL_TRIGGER_NAME ANY_DDL_STATEMENT_BEFORE + EVENT_TYPE ALTER + OBJECT_TYPE FUNCTION + DDL_EVENT ALTER FUNCTION + OBJECT_NAME FN_TEST + REPLICATION_MODE MASTER + ID 23 + DDL_TRIGGER_NAME ALTER_FUNCTION_AFTER + EVENT_TYPE ALTER + OBJECT_TYPE FUNCTION + DDL_EVENT ALTER FUNCTION + OBJECT_NAME FN_TEST + REPLICATION_MODE MASTER + ID 24 + DDL_TRIGGER_NAME ANY_DDL_STATEMENT_AFTER + EVENT_TYPE ALTER + OBJECT_TYPE FUNCTION + DDL_EVENT ALTER FUNCTION + OBJECT_NAME FN_TEST + REPLICATION_MODE MASTER + ID 25 + DDL_TRIGGER_NAME CREATE_TRIGGER_BEFORE + EVENT_TYPE CREATE + OBJECT_TYPE TRIGGER + DDL_EVENT CREATE TRIGGER + OBJECT_NAME TRG_CONNECT_TEST + REPLICATION_MODE MASTER + ID 26 + DDL_TRIGGER_NAME ANY_DDL_STATEMENT_BEFORE + EVENT_TYPE CREATE + OBJECT_TYPE TRIGGER + DDL_EVENT CREATE TRIGGER + OBJECT_NAME TRG_CONNECT_TEST + REPLICATION_MODE MASTER + ID 27 + DDL_TRIGGER_NAME CREATE_TRIGGER_AFTER + EVENT_TYPE CREATE + OBJECT_TYPE TRIGGER + DDL_EVENT CREATE TRIGGER + OBJECT_NAME TRG_CONNECT_TEST + REPLICATION_MODE MASTER + ID 28 + DDL_TRIGGER_NAME ANY_DDL_STATEMENT_AFTER + EVENT_TYPE CREATE + OBJECT_TYPE TRIGGER + DDL_EVENT CREATE TRIGGER + OBJECT_NAME TRG_CONNECT_TEST + REPLICATION_MODE MASTER + ID 29 + DDL_TRIGGER_NAME ALTER_TRIGGER_BEFORE + EVENT_TYPE ALTER + OBJECT_TYPE TRIGGER + DDL_EVENT ALTER TRIGGER + OBJECT_NAME TRG_CONNECT_TEST + REPLICATION_MODE MASTER + ID 30 + DDL_TRIGGER_NAME ANY_DDL_STATEMENT_BEFORE + EVENT_TYPE ALTER + OBJECT_TYPE TRIGGER + DDL_EVENT ALTER TRIGGER + OBJECT_NAME TRG_CONNECT_TEST + REPLICATION_MODE MASTER + ID 31 + DDL_TRIGGER_NAME ALTER_TRIGGER_AFTER + EVENT_TYPE ALTER + OBJECT_TYPE TRIGGER + DDL_EVENT ALTER TRIGGER + OBJECT_NAME TRG_CONNECT_TEST + REPLICATION_MODE MASTER + ID 32 + DDL_TRIGGER_NAME ANY_DDL_STATEMENT_AFTER + EVENT_TYPE ALTER + OBJECT_TYPE TRIGGER + DDL_EVENT ALTER TRIGGER + OBJECT_NAME TRG_CONNECT_TEST + REPLICATION_MODE MASTER + ID 33 + DDL_TRIGGER_NAME CREATE_EXCEPTION_BEFORE + EVENT_TYPE CREATE + OBJECT_TYPE EXCEPTION + DDL_EVENT CREATE EXCEPTION + OBJECT_NAME EXC_TEST + REPLICATION_MODE MASTER + ID 34 + DDL_TRIGGER_NAME ANY_DDL_STATEMENT_BEFORE + EVENT_TYPE CREATE + OBJECT_TYPE EXCEPTION + DDL_EVENT CREATE EXCEPTION + OBJECT_NAME EXC_TEST + REPLICATION_MODE MASTER + ID 35 + DDL_TRIGGER_NAME CREATE_EXCEPTION_AFTER + EVENT_TYPE CREATE + OBJECT_TYPE EXCEPTION + DDL_EVENT CREATE EXCEPTION + OBJECT_NAME EXC_TEST + REPLICATION_MODE MASTER + ID 36 + DDL_TRIGGER_NAME ANY_DDL_STATEMENT_AFTER + EVENT_TYPE CREATE + OBJECT_TYPE EXCEPTION + DDL_EVENT CREATE EXCEPTION + 
OBJECT_NAME EXC_TEST + REPLICATION_MODE MASTER + ID 37 + DDL_TRIGGER_NAME ALTER_EXCEPTION_BEFORE + EVENT_TYPE ALTER + OBJECT_TYPE EXCEPTION + DDL_EVENT ALTER EXCEPTION + OBJECT_NAME EXC_TEST + REPLICATION_MODE MASTER + ID 38 + DDL_TRIGGER_NAME ANY_DDL_STATEMENT_BEFORE + EVENT_TYPE ALTER + OBJECT_TYPE EXCEPTION + DDL_EVENT ALTER EXCEPTION + OBJECT_NAME EXC_TEST + REPLICATION_MODE MASTER + ID 39 + DDL_TRIGGER_NAME ALTER_EXCEPTION_AFTER + EVENT_TYPE ALTER + OBJECT_TYPE EXCEPTION + DDL_EVENT ALTER EXCEPTION + OBJECT_NAME EXC_TEST + REPLICATION_MODE MASTER + ID 40 + DDL_TRIGGER_NAME ANY_DDL_STATEMENT_AFTER + EVENT_TYPE ALTER + OBJECT_TYPE EXCEPTION + DDL_EVENT ALTER EXCEPTION + OBJECT_NAME EXC_TEST + REPLICATION_MODE MASTER + ID 41 + DDL_TRIGGER_NAME CREATE_VIEW_BEFORE + EVENT_TYPE CREATE + OBJECT_TYPE VIEW + DDL_EVENT CREATE VIEW + OBJECT_NAME V_TEST + REPLICATION_MODE MASTER + ID 42 + DDL_TRIGGER_NAME ANY_DDL_STATEMENT_BEFORE + EVENT_TYPE CREATE + OBJECT_TYPE VIEW + DDL_EVENT CREATE VIEW + OBJECT_NAME V_TEST + REPLICATION_MODE MASTER + ID 43 + DDL_TRIGGER_NAME CREATE_VIEW_AFTER + EVENT_TYPE CREATE + OBJECT_TYPE VIEW + DDL_EVENT CREATE VIEW + OBJECT_NAME V_TEST + REPLICATION_MODE MASTER + ID 44 + DDL_TRIGGER_NAME ANY_DDL_STATEMENT_AFTER + EVENT_TYPE CREATE + OBJECT_TYPE VIEW + DDL_EVENT CREATE VIEW + OBJECT_NAME V_TEST + REPLICATION_MODE MASTER + ID 45 + DDL_TRIGGER_NAME ALTER_VIEW_BEFORE + EVENT_TYPE ALTER + OBJECT_TYPE VIEW + DDL_EVENT ALTER VIEW + OBJECT_NAME V_TEST + REPLICATION_MODE MASTER + ID 46 + DDL_TRIGGER_NAME ANY_DDL_STATEMENT_BEFORE + EVENT_TYPE ALTER + OBJECT_TYPE VIEW + DDL_EVENT ALTER VIEW + OBJECT_NAME V_TEST + REPLICATION_MODE MASTER + ID 47 + DDL_TRIGGER_NAME ALTER_VIEW_AFTER + EVENT_TYPE ALTER + OBJECT_TYPE VIEW + DDL_EVENT ALTER VIEW + OBJECT_NAME V_TEST + REPLICATION_MODE MASTER + ID 48 + DDL_TRIGGER_NAME ANY_DDL_STATEMENT_AFTER + EVENT_TYPE ALTER + OBJECT_TYPE VIEW + DDL_EVENT ALTER VIEW + OBJECT_NAME V_TEST + REPLICATION_MODE MASTER + ID 49 + DDL_TRIGGER_NAME CREATE_DOMAIN_BEFORE + EVENT_TYPE CREATE + OBJECT_TYPE DOMAIN + DDL_EVENT CREATE DOMAIN + OBJECT_NAME DM_TEST + REPLICATION_MODE MASTER + ID 50 + DDL_TRIGGER_NAME ANY_DDL_STATEMENT_BEFORE + EVENT_TYPE CREATE + OBJECT_TYPE DOMAIN + DDL_EVENT CREATE DOMAIN + OBJECT_NAME DM_TEST + REPLICATION_MODE MASTER + ID 51 + DDL_TRIGGER_NAME CREATE_DOMAIN_AFTER + EVENT_TYPE CREATE + OBJECT_TYPE DOMAIN + DDL_EVENT CREATE DOMAIN + OBJECT_NAME DM_TEST + REPLICATION_MODE MASTER + ID 52 + DDL_TRIGGER_NAME ANY_DDL_STATEMENT_AFTER + EVENT_TYPE CREATE + OBJECT_TYPE DOMAIN + DDL_EVENT CREATE DOMAIN + OBJECT_NAME DM_TEST + REPLICATION_MODE MASTER + ID 53 + DDL_TRIGGER_NAME ALTER_DOMAIN_BEFORE + EVENT_TYPE ALTER + OBJECT_TYPE DOMAIN + DDL_EVENT ALTER DOMAIN + OBJECT_NAME DM_TEST + REPLICATION_MODE MASTER + ID 54 + DDL_TRIGGER_NAME ANY_DDL_STATEMENT_BEFORE + EVENT_TYPE ALTER + OBJECT_TYPE DOMAIN + DDL_EVENT ALTER DOMAIN + OBJECT_NAME DM_TEST + REPLICATION_MODE MASTER + ID 55 + DDL_TRIGGER_NAME ALTER_DOMAIN_AFTER + EVENT_TYPE ALTER + OBJECT_TYPE DOMAIN + DDL_EVENT ALTER DOMAIN + OBJECT_NAME DM_TEST + REPLICATION_MODE MASTER + ID 56 + DDL_TRIGGER_NAME ANY_DDL_STATEMENT_AFTER + EVENT_TYPE ALTER + OBJECT_TYPE DOMAIN + DDL_EVENT ALTER DOMAIN + OBJECT_NAME DM_TEST + REPLICATION_MODE MASTER + ID 57 + DDL_TRIGGER_NAME CREATE_ROLE_BEFORE + EVENT_TYPE CREATE + OBJECT_TYPE ROLE + DDL_EVENT CREATE ROLE + OBJECT_NAME R_TEST + REPLICATION_MODE MASTER + ID 58 + DDL_TRIGGER_NAME ANY_DDL_STATEMENT_BEFORE + EVENT_TYPE CREATE + OBJECT_TYPE ROLE + DDL_EVENT 
CREATE ROLE + OBJECT_NAME R_TEST + REPLICATION_MODE MASTER + ID 59 + DDL_TRIGGER_NAME CREATE_ROLE_AFTER + EVENT_TYPE CREATE + OBJECT_TYPE ROLE + DDL_EVENT CREATE ROLE + OBJECT_NAME R_TEST + REPLICATION_MODE MASTER + ID 60 + DDL_TRIGGER_NAME ANY_DDL_STATEMENT_AFTER + EVENT_TYPE CREATE + OBJECT_TYPE ROLE + DDL_EVENT CREATE ROLE + OBJECT_NAME R_TEST + REPLICATION_MODE MASTER + ID 61 + DDL_TRIGGER_NAME ALTER_ROLE_BEFORE + EVENT_TYPE ALTER + OBJECT_TYPE ROLE + DDL_EVENT ALTER ROLE + OBJECT_NAME R_TEST + REPLICATION_MODE MASTER + ID 62 + DDL_TRIGGER_NAME ANY_DDL_STATEMENT_BEFORE + EVENT_TYPE ALTER + OBJECT_TYPE ROLE + DDL_EVENT ALTER ROLE + OBJECT_NAME R_TEST + REPLICATION_MODE MASTER + ID 63 + DDL_TRIGGER_NAME ALTER_ROLE_AFTER + EVENT_TYPE ALTER + OBJECT_TYPE ROLE + DDL_EVENT ALTER ROLE + OBJECT_NAME R_TEST + REPLICATION_MODE MASTER + ID 64 + DDL_TRIGGER_NAME ANY_DDL_STATEMENT_AFTER + EVENT_TYPE ALTER + OBJECT_TYPE ROLE + DDL_EVENT ALTER ROLE + OBJECT_NAME R_TEST + REPLICATION_MODE MASTER + ID 65 + DDL_TRIGGER_NAME CREATE_SEQUENCE_BEFORE + EVENT_TYPE CREATE + OBJECT_TYPE SEQUENCE + DDL_EVENT CREATE SEQUENCE + OBJECT_NAME G_TEST + REPLICATION_MODE MASTER + ID 66 + DDL_TRIGGER_NAME ANY_DDL_STATEMENT_BEFORE + EVENT_TYPE CREATE + OBJECT_TYPE SEQUENCE + DDL_EVENT CREATE SEQUENCE + OBJECT_NAME G_TEST + REPLICATION_MODE MASTER + ID 67 + DDL_TRIGGER_NAME CREATE_SEQUENCE_AFTER + EVENT_TYPE CREATE + OBJECT_TYPE SEQUENCE + DDL_EVENT CREATE SEQUENCE + OBJECT_NAME G_TEST + REPLICATION_MODE MASTER + ID 68 + DDL_TRIGGER_NAME ANY_DDL_STATEMENT_AFTER + EVENT_TYPE CREATE + OBJECT_TYPE SEQUENCE + DDL_EVENT CREATE SEQUENCE + OBJECT_NAME G_TEST + REPLICATION_MODE MASTER + ID 69 + DDL_TRIGGER_NAME ALTER_SEQUENCE_BEFORE + EVENT_TYPE ALTER + OBJECT_TYPE SEQUENCE + DDL_EVENT ALTER SEQUENCE + OBJECT_NAME G_TEST + REPLICATION_MODE MASTER + ID 70 + DDL_TRIGGER_NAME ANY_DDL_STATEMENT_BEFORE + EVENT_TYPE ALTER + OBJECT_TYPE SEQUENCE + DDL_EVENT ALTER SEQUENCE + OBJECT_NAME G_TEST + REPLICATION_MODE MASTER + ID 71 + DDL_TRIGGER_NAME ALTER_SEQUENCE_AFTER + EVENT_TYPE ALTER + OBJECT_TYPE SEQUENCE + DDL_EVENT ALTER SEQUENCE + OBJECT_NAME G_TEST + REPLICATION_MODE MASTER + ID 72 + DDL_TRIGGER_NAME ANY_DDL_STATEMENT_AFTER + EVENT_TYPE ALTER + OBJECT_TYPE SEQUENCE + DDL_EVENT ALTER SEQUENCE + OBJECT_NAME G_TEST + REPLICATION_MODE MASTER + ID 73 + DDL_TRIGGER_NAME CREATE_INDEX_BEFORE + EVENT_TYPE CREATE + OBJECT_TYPE INDEX + DDL_EVENT CREATE INDEX + OBJECT_NAME TEST_NAME + REPLICATION_MODE MASTER + ID 74 + DDL_TRIGGER_NAME ANY_DDL_STATEMENT_BEFORE + EVENT_TYPE CREATE + OBJECT_TYPE INDEX + DDL_EVENT CREATE INDEX + OBJECT_NAME TEST_NAME + REPLICATION_MODE MASTER + ID 75 + DDL_TRIGGER_NAME CREATE_INDEX_AFTER + EVENT_TYPE CREATE + OBJECT_TYPE INDEX + DDL_EVENT CREATE INDEX + OBJECT_NAME TEST_NAME + REPLICATION_MODE MASTER + ID 76 + DDL_TRIGGER_NAME ANY_DDL_STATEMENT_AFTER + EVENT_TYPE CREATE + OBJECT_TYPE INDEX + DDL_EVENT CREATE INDEX + OBJECT_NAME TEST_NAME + REPLICATION_MODE MASTER + ID 77 + DDL_TRIGGER_NAME ALTER_INDEX_BEFORE + EVENT_TYPE ALTER + OBJECT_TYPE INDEX + DDL_EVENT ALTER INDEX + OBJECT_NAME TEST_NAME + REPLICATION_MODE MASTER + ID 78 + DDL_TRIGGER_NAME ANY_DDL_STATEMENT_BEFORE + EVENT_TYPE ALTER + OBJECT_TYPE INDEX + DDL_EVENT ALTER INDEX + OBJECT_NAME TEST_NAME + REPLICATION_MODE MASTER + ID 79 + DDL_TRIGGER_NAME ALTER_INDEX_AFTER + EVENT_TYPE ALTER + OBJECT_TYPE INDEX + DDL_EVENT ALTER INDEX + OBJECT_NAME TEST_NAME + REPLICATION_MODE MASTER + ID 80 + DDL_TRIGGER_NAME ANY_DDL_STATEMENT_AFTER + EVENT_TYPE ALTER + 
OBJECT_TYPE INDEX + DDL_EVENT ALTER INDEX + OBJECT_NAME TEST_NAME + REPLICATION_MODE MASTER + ID 81 + DDL_TRIGGER_NAME CREATE_COLLATION_BEFORE + EVENT_TYPE CREATE + OBJECT_TYPE COLLATION + DDL_EVENT CREATE COLLATION + OBJECT_NAME NAME_COLL + REPLICATION_MODE MASTER + ID 82 + DDL_TRIGGER_NAME ANY_DDL_STATEMENT_BEFORE + EVENT_TYPE CREATE + OBJECT_TYPE COLLATION + DDL_EVENT CREATE COLLATION + OBJECT_NAME NAME_COLL + REPLICATION_MODE MASTER + ID 83 + DDL_TRIGGER_NAME CREATE_COLLATION_AFTER + EVENT_TYPE CREATE + OBJECT_TYPE COLLATION + DDL_EVENT CREATE COLLATION + OBJECT_NAME NAME_COLL + REPLICATION_MODE MASTER + ID 84 + DDL_TRIGGER_NAME ANY_DDL_STATEMENT_AFTER + EVENT_TYPE CREATE + OBJECT_TYPE COLLATION + DDL_EVENT CREATE COLLATION + OBJECT_NAME NAME_COLL + REPLICATION_MODE MASTER + ID 85 + DDL_TRIGGER_NAME ALTER_CHARACTER_SET_BEFORE + EVENT_TYPE ALTER + OBJECT_TYPE CHARACTER SET + DDL_EVENT ALTER CHARACTER SET + OBJECT_NAME ISO8859_1 + REPLICATION_MODE MASTER + ID 86 + DDL_TRIGGER_NAME ANY_DDL_STATEMENT_BEFORE + EVENT_TYPE ALTER + OBJECT_TYPE CHARACTER SET + DDL_EVENT ALTER CHARACTER SET + OBJECT_NAME ISO8859_1 + REPLICATION_MODE MASTER + ID 87 + DDL_TRIGGER_NAME ALTER_CHARACTER_SET_AFTER + EVENT_TYPE ALTER + OBJECT_TYPE CHARACTER SET + DDL_EVENT ALTER CHARACTER SET + OBJECT_NAME ISO8859_1 + REPLICATION_MODE MASTER + ID 88 + DDL_TRIGGER_NAME ANY_DDL_STATEMENT_AFTER + EVENT_TYPE ALTER + OBJECT_TYPE CHARACTER SET + DDL_EVENT ALTER CHARACTER SET + OBJECT_NAME ISO8859_1 + REPLICATION_MODE MASTER + ID 89 + DDL_TRIGGER_NAME CREATE_PACKAGE_BEFORE + EVENT_TYPE CREATE + OBJECT_TYPE PACKAGE + DDL_EVENT CREATE PACKAGE + OBJECT_NAME PG_TEST + REPLICATION_MODE MASTER + ID 90 + DDL_TRIGGER_NAME ANY_DDL_STATEMENT_BEFORE + EVENT_TYPE CREATE + OBJECT_TYPE PACKAGE + DDL_EVENT CREATE PACKAGE + OBJECT_NAME PG_TEST + REPLICATION_MODE MASTER + ID 91 + DDL_TRIGGER_NAME CREATE_PACKAGE_AFTER + EVENT_TYPE CREATE + OBJECT_TYPE PACKAGE + DDL_EVENT CREATE PACKAGE + OBJECT_NAME PG_TEST + REPLICATION_MODE MASTER + ID 92 + DDL_TRIGGER_NAME ANY_DDL_STATEMENT_AFTER + EVENT_TYPE CREATE + OBJECT_TYPE PACKAGE + DDL_EVENT CREATE PACKAGE + OBJECT_NAME PG_TEST + REPLICATION_MODE MASTER + ID 93 + DDL_TRIGGER_NAME ALTER_PACKAGE_BEFORE + EVENT_TYPE ALTER + OBJECT_TYPE PACKAGE + DDL_EVENT ALTER PACKAGE + OBJECT_NAME PG_TEST + REPLICATION_MODE MASTER + ID 94 + DDL_TRIGGER_NAME ANY_DDL_STATEMENT_BEFORE + EVENT_TYPE ALTER + OBJECT_TYPE PACKAGE + DDL_EVENT ALTER PACKAGE + OBJECT_NAME PG_TEST + REPLICATION_MODE MASTER + ID 95 + DDL_TRIGGER_NAME ALTER_PACKAGE_AFTER + EVENT_TYPE ALTER + OBJECT_TYPE PACKAGE + DDL_EVENT ALTER PACKAGE + OBJECT_NAME PG_TEST + REPLICATION_MODE MASTER + ID 96 + DDL_TRIGGER_NAME ANY_DDL_STATEMENT_AFTER + EVENT_TYPE ALTER + OBJECT_TYPE PACKAGE + DDL_EVENT ALTER PACKAGE + OBJECT_NAME PG_TEST + REPLICATION_MODE MASTER + ID 97 + DDL_TRIGGER_NAME CREATE_PACKAGE_BODY_BEFORE + EVENT_TYPE CREATE + OBJECT_TYPE PACKAGE BODY + DDL_EVENT CREATE PACKAGE BODY + OBJECT_NAME PG_TEST + REPLICATION_MODE MASTER + ID 98 + DDL_TRIGGER_NAME ANY_DDL_STATEMENT_BEFORE + EVENT_TYPE CREATE + OBJECT_TYPE PACKAGE BODY + DDL_EVENT CREATE PACKAGE BODY + OBJECT_NAME PG_TEST + REPLICATION_MODE MASTER + ID 99 + DDL_TRIGGER_NAME CREATE_PACKAGE_BODY_AFTER + EVENT_TYPE CREATE + OBJECT_TYPE PACKAGE BODY + DDL_EVENT CREATE PACKAGE BODY + OBJECT_NAME PG_TEST + REPLICATION_MODE MASTER + ID 100 + DDL_TRIGGER_NAME ANY_DDL_STATEMENT_AFTER + EVENT_TYPE CREATE + OBJECT_TYPE PACKAGE BODY + DDL_EVENT CREATE PACKAGE BODY + OBJECT_NAME PG_TEST + Records affected: 
100
+
+
+    REPLICATION_MODE REPLICA
+    ID
+    DDL_TRIGGER_NAME
+    EVENT_TYPE
+    OBJECT_TYPE
+    DDL_EVENT
+    OBJECT_NAME
+    Records affected: 1
+
+    Start removing objects
+    Finish. Total objects removed
+
+    POINT-2 FOUND message about replicated segment.
+
+"""
+
+@pytest.mark.version('>=4.0')
+@pytest.mark.platform('Windows')
+def test_1(act_1: Action):
+    pytest.fail("Test not IMPLEMENTED")
+
+
diff --git a/tests/functional/replication/test_duplicates_in_rw_replica_after_conflicting_insert.py b/tests/functional/replication/test_duplicates_in_rw_replica_after_conflicting_insert.py
new file mode 100644
index 00000000..b357a46d
--- /dev/null
+++ b/tests/functional/replication/test_duplicates_in_rw_replica_after_conflicting_insert.py
@@ -0,0 +1,450 @@
+#coding:utf-8
+#
+# id: tests.functional.replication.duplicates_in_rw_replica_after_conflicting_insert
+# title: Conflicting INSERT propagated into a read-write replica may cause duplicate records to appear
+# decription:
+# See: https://github.com/FirebirdSQL/firebird/issues/6849
+#
+# Test temporarily changes the mode of the replica using an external call: gfix -replica read_write ...
+# We create a table on master with an integer column (PK) and a text field that shows who is the "author" of each record.
+# Then we add one record (1,'master, initially') and do commit.
+#
+# After this we wait until the replica becomes actual to master; this delay lasts no more than the threshold
+# defined by the MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG variable (measured in seconds).
+# During this delay, we check the replication log every second and search it for the line with the number of the last
+# generated segment (which was replicated and finally deleted).
+# We can assume that replication finished OK only when such a line is found (see 'POINT-1').
+#
+# Then we open two FDB-connections and add records:
+# 1) first, in replica: (2, 'RW-replica') + commit;
+# 2) second, in master, with an ID that conflicts with the record just added in replica: (2, 'master, finally') + commit.
+#
+# Message "Record being inserted into table TEST already exists, updating instead" will appear after this in the replication log.
+# We have to wait again until the replica becomes actual to master (see above).
+# After this we query data from table 'TEST' on the *replica* DB. This table must have only two records:
+# (ID = 1, WHO_MADE = 'master, initially') and (ID = 2, WHO_MADE = 'master, finally').
+# Record (2, 'RW-replica') must be overwritten!
+#
+# Further, we invoke ISQL to execute an auxiliary script that drops all DB objects on master (with the '-nod' command switch).
+# After all objects are dropped, we have to wait again until the replica becomes actual with master (see 'POINT-2').
+#
+# Finally, we extract metadata for master and replica and compare them (see 'f_meta_diff').
+# The only difference in metadata must be the 'CREATE DATABASE' statement with different DB names - we suppress it,
+# thus no metadata difference must be issued.
+#
+# ####################
+# ### CRUCIAL NOTE ###
+# ####################
+# Currently, 25.06.2021, there is a bug in FB 4.x and 5.x which can be seen on the SECOND run of this test: a message with text
+# "ERROR: Record format with length 68 is not found for table TEST" will appear in it after inserting the 1st record in master.
+# The reason for that is "dirty" pages that remain in RDB$RELATION_FIELDS both on master and replica after dropping the table.
+# The following query shows different data that appear in the replica DB on the 1st and 2nd run (just after the table was created on master):
+# =======
+# set blobdisplay 6;
+# select rdb$descriptor as fmt_descr
+# from rdb$formats natural join rdb$relations where rdb$relation_name = 'TEST';
+# =======
+# This bug was explained by dimitr, see letters 25.06.2021 11:49 and 25.06.2021 16:56.
+# It will be fixed later.
+#
+# The only workaround to solve this problem is to make SWEEP after all DB objects have been dropped.
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# !NB! BOTH master and replica must be cleaned up by sweep!
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# ################
+# ### N O T E  ###
+# ################
+# Test assumes that master and replica DB have been created beforehand.
+# Also, it assumes that %FB_HOME%\replication.conf has been prepared with appropriate parameters for replication.
+# Particularly, names of directories and databases must have info about the checked FB major version and ServerMode.
+# * verbose = true // in order to find out the line with the message that the required segment was replicated
+# * section for master database with specified parameters:
+# journal_directory = "!fbt_repo!\tmp\fb-replication.!fb_major!.!server_mode!.journal"
+# journal_archive_directory = "!fbt_repo!\tmp\fb-replication.!fb_major!.!server_mode!.archive"
+# journal_archive_command = "copy $(pathname) $(archivepathname)"
+# journal_archive_timeout = 10
+# * section for replica database with specified parameter:
+# journal_source_directory = "!fbt_repo!\tmp\fb-replication.!fb_major!.!server_mode!.archive"
+#
+# Master and replica databases must be created in the "!fbt_repo!\tmp" directory and have names like these:
+# 'fbt-main.fb40.SS.fdb'; 'fbt-repl.fb40.SS.fdb'; - for FB 4.x ('SS' = Super; 'CS' = Classic)
+# 'fbt-main.fb50.SS.fdb'; 'fbt-repl.fb50.SS.fdb'; - for FB 5.x ('SS' = Super; 'CS' = Classic)
+# NB: the fixed numeric value ('40' or '50') must be used for any minor FB version (4.0; 4.0.1; 4.1; 5.0; 5.1 etc)
+#
+# These two databases must NOT be dropped in any of the tests related to replication!
+# They are created and dropped in the batch scenario which prepares the FB instance to be checked for each ServerMode
+# and makes cleanup after it, i.e. when all tests are completed.
+#
+# NB. Currently this task was implemented only in a Windows batch, thus the test has attribute platform = 'Windows'.
+#
+# Temporary comment. For debug purposes:
+# 1) find out the SUFFIX of the name of the FB service which is to be tested (e.g. 'DefaultInstance', '40SS' etc);
+# 2) copy file %fbt-repo%\tests\functional\tabloid\batches\setup-fb-for-replication.bat.txt
+#    to some place and rename it "*.bat";
+# 3) open this .bat in an editor and adjust the value of the 'fbt_repo' variable;
+# 4) run: setup-fb-for-replication.bat [SUFFIX_OF_FB_SERVICE]
+#    where SUFFIX_OF_FB_SERVICE is the ending part of the FB service which you want to check:
+#    DefaultInstance ; 40ss ; 40cs ; 50ss ; 50cs etc
+# 5) batch 'setup-fb-for-replication.bat' will:
+#    * stop the selected FB instance
+#    * create test databases (in !fbt_repo!\tmp\);
+#    * prepare journal/archive sub-folders for replication (also in !fbt_repo!\tmp\);
+#    * replace %fb_home%\replication.conf with an appropriate one
+#    * start the selected FB instance
+# 6) run this test (the FB instance will already be launched by setup-fb-for-replication.bat):
+#    %fpt_repo%\fbt-run2.bat dblevel-triggers-must-not-fire-on-replica.fbt 50ss, etc
+#
+# Confirmed bug on 5.0.0.63, 4.0.0.2508 (date of both snapshots: 08-jun-2021, i.e. just before the fix).
+# Checked on:
+# 5.0.0.85 SS: 34.951s.
+# 5.0.0.85 CS: 36.813s.
+# 4.0.1.2520 SS: 38.939s.
+# 4.0.1.2519 CS: 32.376s.
+#
+# tracker_id:
+# min_versions: ['4.0']
+# versions: 4.0
+# qmid: None
+
+import pytest
+from firebird.qa import db_factory, python_act, Action
+
+# version: 4.0
+# resources: None
+
+substitutions_1 = [('Start removing objects in:.*', 'Start removing objects'), ('Finish. Total objects removed: [1-9]\\d*', 'Finish. Total objects removed'), ('.* CREATE DATABASE .*', ''), ('FMT_DESCR .*', 'FMT_DESCR')]
+
+init_script_1 = """"""
+
+db_1 = db_factory(sql_dialect=3, init=init_script_1)
+
+# test_script_1
+#---
+#
+#  import os
+#  import subprocess
+#  import re
+#  import difflib
+#  import shutil
+#  import time
+#
+#  os.environ["ISC_USER"] = user_name
+#  os.environ["ISC_PASSWORD"] = user_password
+#
+#  #####################################
+#  MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 65
+#  #####################################
+#
+#  svc = fdb.services.connect(host='localhost', user=user_name, password=user_password)
+#  FB_HOME = svc.get_home_directory()
+#  svc.close()
+#
+#  engine = db_conn.engine_version    # 4.0; 4.1; 5.0 etc -- type float
+#  fb_major = 'fb' + str(engine)[:1] + '0'    # 'fb40'; 'fb50'
+#
+#  cur = db_conn.cursor()
+#  cur.execute("select rdb$config_value from rdb$config where upper(rdb$config_name) = upper('ServerMode')")
+#  server_mode = 'XX'
+#  for r in cur:
+#      if r[0] == 'Super':
+#          server_mode = 'SS'
+#      elif r[0] == 'SuperClassic':
+#          server_mode = 'SC'
+#      elif r[0] == 'Classic':
+#          server_mode = 'CS'
+#  cur.close()
+#
+#  # 'fbt-main.fb50.ss.fdb' etc:
+#  db_main = os.path.join( context['temp_directory'], 'fbt-main.' + fb_major + '.' + server_mode + '.fdb' )
+#  db_repl = db_main.replace( 'fbt-main.', 'fbt-repl.')
+#
+#  runProgram('gfix', ['-replica', 'read_write', 'localhost:' + db_repl])
+#
+#  # Folders for journalling and archiving segments.
+#  repl_journal_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.journal' )
+#  repl_archive_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.archive' )
+#
+#  db_conn.close()
+#
+#  #--------------------------------------------
+#
+#  def flush_and_close( file_handle ):
+#      # https://docs.python.org/2/library/os.html#os.fsync
+#      # If you're starting with a Python file object f,
+#      # first do f.flush(), and
+#      # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
+#      global os
+#
+#      file_handle.flush()
+#      if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
+#          # otherwise: "OSError: [Errno 9] Bad file descriptor"!
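+#          # (descriptive note: fsync is deliberately skipped for read-only handles and for
+#          # os.devnull - there is nothing buffered to sync there, and calling fsync on such
+#          # handles is what raises the error quoted above on some platforms)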
+# os.fsync(file_handle.fileno()) +# file_handle.close() +# +# #-------------------------------------------- +# +# def cleanup( f_names_list ): +# global os +# for i in range(len( f_names_list )): +# if type(f_names_list[i]) == file: +# del_name = f_names_list[i].name +# elif type(f_names_list[i]) == str: +# del_name = f_names_list[i] +# else: +# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.') +# del_name = None +# +# if del_name and os.path.isfile( del_name ): +# os.remove( del_name ) +# +# #-------------------------------------------- +# +# def wait_for_data_in_replica( fb_home, max_allowed_time_for_wait, db_main, prefix_msg = '' ): +# +# global re +# global difflib +# global time +# +# replold_lines = [] +# with open( os.path.join(fb_home,'replication.log'), 'r') as f: +# replold_lines = f.readlines() +# +# con = fdb.connect( dsn = 'localhost:' + db_main, no_db_triggers = 1) +# cur = con.cursor() +# cur.execute("select rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') from rdb$database") +# for r in cur: +# last_generated_repl_segment = r[0] +# cur.close() +# con.close() +# +# #print('last_generated_repl_segment:', last_generated_repl_segment) +# +# # VERBOSE: Segment 1 (2582 bytes) is replicated in 1 second(s), deleting the file +# p=re.compile( '\\+\\s+verbose:\\s+segment\\s+%(last_generated_repl_segment)s\\s+\\(\\d+\\s+bytes\\)\\s+is\\s+replicated.*deleting' % locals(), re.IGNORECASE) +# +# found_required_message = False +# for i in range(0,max_allowed_time_for_wait): +# time.sleep(1) +# +# # Get content of fb_home replication.log _after_ isql finish: +# f_repllog_new = open( os.path.join(fb_home,'replication.log'), 'r') +# diff_data = difflib.unified_diff( +# replold_lines, +# f_repllog_new.readlines() +# ) +# f_repllog_new.close() +# +# for k,d in enumerate(diff_data): +# if p.search(d): +# print( (prefix_msg + ' ' if prefix_msg else '') + 'FOUND message about replicated segment.' ) +# found_required_message = True +# break +# +# if found_required_message: +# break +# +# if not found_required_message: +# print('UNEXPECTED RESULT: no message about replicated segment for %d seconds.' 
% max_allowed_time_for_wait) +# +# #-------------------------------------------- +# +# sql_ddl = ''' set bail on; +# set list on; +# --set blob all; +# set blobdisplay 6; +# +# recreate table test(id int primary key using index test_pk, dts timestamp default 'now', who_made varchar(50)); +# commit; +# insert into test(id, who_made) values(1,'master, initially'); +# commit; +# +# -- for debug only: +# select rdb$get_context('SYSTEM', 'DB_NAME'), rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') as last_generated_repl_segment from rdb$database; +# select +# RDB$DESCRIPTOR as fmt_descr +# from RDB$FORMATS natural join RDB$RELATIONS +# where RDB$RELATION_NAME = 'TEST'; +# quit; +# ''' % locals() +# +# +# f_sql_chk = open( os.path.join(context['temp_directory'],'tmp_gh_6849_init.sql'), 'w') +# f_sql_chk.write(sql_ddl) +# flush_and_close( f_sql_chk ) +# +# f_sql_log = open( ''.join( (os.path.splitext(f_sql_chk.name)[0], '.log' ) ), 'w') +# f_sql_err = open( ''.join( (os.path.splitext(f_sql_chk.name)[0], '.err' ) ), 'w') +# subprocess.call( [ context['isql_path'], 'localhost:' + db_main, '-i', f_sql_chk.name ], stdout = f_sql_log, stderr = f_sql_err) +# flush_and_close( f_sql_log ) +# flush_and_close( f_sql_err ) +# +# last_generated_repl_segment = 0 +# +# with open(f_sql_err.name,'r') as f: +# for line in f: +# print('UNEXPECTED STDERR in initial SQL: ' + line) +# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0 +# +# if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> initial SQL script finished w/o errors +# +# ############################################################################## +# ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### +# ############################################################################## +# wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-1A' ) +# +# con_repl = fdb.connect( dsn = 'localhost:' + db_repl) +# cur_repl = con_repl.cursor() +# cur_repl.execute('insert into test(id,who_made) values(?, ?)', (2, 'RW-replica')) +# +# # con_repl.execute_immediate( "insert into test(id,who_made) values( 2, )" ) +# con_repl.commit() +# cur_repl.close() +# con_repl.close() +# +# con_main = fdb.connect( dsn = 'localhost:' + db_main) +# cur_main = con_main.cursor() +# cur_main.execute('insert into test(id,who_made) values(?, ?)', (2, 'master, finally')) +# #con_main.execute_immediate( "insert into test(id,who_made) values( 2, 'master, finally')" ) +# con_main.commit() +# cur_main.close() +# con_main.close() +# +# ############################################################################## +# ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### +# ############################################################################## +# wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-1B' ) +# +# # Here we must check that replica has no duplicates in PK column test.id: +# #################### +# # for debug only: runProgram('isql', ['localhost:' + db_repl, '-nod'], "set list on; set blobdisplay 6; select id, who_made from test order by dts; select rdb$descriptor as fmt_descr from rdb$formats natural join rdb$relations where rdb$relation_name = 'TEST';") +# runProgram('isql', ['localhost:' + db_repl, '-nod'], "set list on; select id, who_made from test order by dts;") +# +# +# runProgram('gfix', ['-replica', 'read_only', 'localhost:' + db_repl]) +# +# # return initial state of master DB: +# # remove all DB objects (tables, views, ...): +# # -------------------------------------------- +# sql_clean_ddl = 
os.path.join(context['files_location'],'drop-all-db-objects.sql') +# +# f_clean_log=open( os.path.join(context['temp_directory'],'drop-all-db-objects-gh_6849.log'), 'w') +# f_clean_err=open( ''.join( ( os.path.splitext(f_clean_log.name)[0], '.err') ), 'w') +# subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-i', sql_clean_ddl], stdout=f_clean_log, stderr=f_clean_err ) +# flush_and_close(f_clean_log) +# flush_and_close(f_clean_err) +# +# with open(f_clean_err.name,'r') as f: +# for line in f: +# print('UNEXPECTED STDERR in cleanup SQL: ' + line) +# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0 +# +# with open(f_clean_log.name,'r') as f: +# for line in f: +# # show number of dropped objects +# print(line) +# +# if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> initial SQL script finished w/o errors +# +# ############################################################################## +# ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### +# ############################################################################## +# wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-2' ) +# +# f_main_meta_sql=open( os.path.join(context['temp_directory'],'db_main_meta_gh_6849.sql'), 'w') +# subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_main_meta_sql, stderr=subprocess.STDOUT ) +# flush_and_close( f_main_meta_sql ) +# +# f_repl_meta_sql=open( os.path.join(context['temp_directory'],'db_repl_meta_gh_6849.sql'), 'w') +# subprocess.call( [context['isql_path'], 'localhost:' + db_repl, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_repl_meta_sql, stderr=subprocess.STDOUT ) +# flush_and_close( f_repl_meta_sql ) +# +# db_main_meta=open(f_main_meta_sql.name, 'r') +# db_repl_meta=open(f_repl_meta_sql.name, 'r') +# +# diffmeta = ''.join(difflib.unified_diff( +# db_main_meta.readlines(), +# db_repl_meta.readlines() +# )) +# db_main_meta.close() +# db_repl_meta.close() +# +# f_meta_diff=open( os.path.join(context['temp_directory'],'db_meta_diff_gh_6849.txt'), 'w', buffering = 0) +# f_meta_diff.write(diffmeta) +# flush_and_close( f_meta_diff ) +# +# # Following must issue only TWO rows: +# # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_main]' ... */ +# # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_repl]' ... */ +# # Only thes lines will be suppressed further (see subst. 
section):
+#      with open(f_meta_diff.name, 'r') as f:
+#          for line in f:
+#              if line[:1] in ('-', '+') and line[:3] not in ('---','+++'):
+#                  print('UNEXPECTED METADATA DIFF.: ' + line)
+#
+#  ######################
+#  ### A C H T U N G ###
+#  ######################
+#  # MANDATORY, OTHERWISE REPLICATION GETS STUCK ON SECOND RUN OF THIS TEST
+#  # WITH 'ERROR: Record format with length 68 is not found for table TEST':
+#  runProgram('gfix', ['-sweep', 'localhost:' + db_repl])
+#  runProgram('gfix', ['-sweep', 'localhost:' + db_main])
+#  #######################
+#
+#  # If we skip 'gfix -sweep' then the following will be:
+#  #
+#  # MASTER and REPLICA, after 1st run:
+#  # ==================
+#  #    Fields:
+#  #    id   offset  type          length  sub_type  flags
+#  #    ---  ------  ------------  ------  --------  -----
+#  #    0    4       9 LONG        4       0         0x00
+#  #    1    8       16 TIMESTAMP  8       0         0x00
+#  #    2    16      3 VARCHAR     52      0         0x00
+#  #
+#  # REPLICA on 2nd run (after creating TEST table on master):
+#  # ===================
+#  #    Fields:
+#  #    id   offset  type          length  sub_type  flags
+#  #    ---  ------  ------------  ------  --------  -----
+#  #    0    8       16 TIMESTAMP  8       0         0x00
+#  #    1    16      3 VARCHAR     52      0         0x00
+#  #    2    68      9 LONG        4       0         0x00
+#
+#
+#  # cleanup:
+#  ##########
+#  cleanup( (f_sql_chk, f_sql_log, f_sql_err, f_clean_log, f_clean_err, f_main_meta_sql, f_repl_meta_sql, f_meta_diff) )
+#
+#
+#---
act_1 = python_act('db_1', substitutions=substitutions_1)

expected_stdout_1 = """
+    POINT-1A FOUND message about replicated segment.
+    POINT-1B FOUND message about replicated segment.
+
+    ID 1
+    WHO_MADE master, initially
+    ID 2
+    WHO_MADE master, finally
+
+    Start removing objects
+    Finish. Total objects removed
+
+    POINT-2 FOUND message about replicated segment.
+"""
+
+@pytest.mark.version('>=4.0')
+@pytest.mark.platform('Windows')
+def test_1(act_1: Action):
+    pytest.fail("Test not IMPLEMENTED")
+
+
diff --git a/tests/functional/replication/test_generator_could_not_be_transferred_to_replica.py b/tests/functional/replication/test_generator_could_not_be_transferred_to_replica.py
new file mode 100644
index 00000000..d98ed1c9
--- /dev/null
+++ b/tests/functional/replication/test_generator_could_not_be_transferred_to_replica.py
@@ -0,0 +1,383 @@
+#coding:utf-8
+#
+# id: tests.functional.replication.generator_could_not_be_transferred_to_replica
+# title: Generator values may not replicate after commit
+# decription:
+# https://github.com/FirebirdSQL/firebird/issues/6848
+#
+# Test creates a table and two sequences: 'g_1' and 'g_2'.
+# Then we add a record to the table and use 'g_2' (i.e. the sequence which was created LAST).
+# After this we update the record and use 'g_1' (the sequence which was created FIRST).
+#
+# Then we wait until the replica becomes actual to master; this delay lasts no more than the threshold
+# defined by the MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG variable (measured in seconds).
+# During this delay, we check the replication log every second and search it for the line with the number of the last
+# generated segment (which was replicated and finally deleted).
+# We can assume that replication finished OK only when such a line is found (see 'POINT-1').
+#
+# After this, we query the master and replica databases and obtain count() for the table and the values of the sequences.
+# Obtained values must be identical on master and replica.
+#
+# Further, we invoke ISQL to execute an auxiliary script that drops all DB objects on master (with the '-nod' command switch).
+# After all objects are dropped, we have to wait again until the replica becomes actual with master (see 'POINT-2').
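+#
+# The wait at POINT-1/POINT-2 is a plain polling loop over replication.log. A minimal
+# sketch of the idea (illustration only: 'FB_HOME', 'MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG' and
+# 'last_generated_repl_segment' are taken from the script below, which additionally diffs
+# the log against its content saved before the check):
+#
+#     p = re.compile(r'segment\s+%s\s+\(\d+\s+bytes\)\s+is\s+replicated' % last_generated_repl_segment, re.IGNORECASE)
+#     for _ in range(MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG):
+#         time.sleep(1)
+#         with open(os.path.join(FB_HOME, 'replication.log'), 'r') as f:
+#             if any(p.search(line) for line in f):
+#                 break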
+#
+# Finally, we extract metadata for master and replica and compare them (see 'f_meta_diff').
+# The only difference in metadata must be the 'CREATE DATABASE' statement with different DB names - we suppress it,
+# thus no metadata difference must be issued.
+#
+# ################
+# ### N O T E  ###
+# ################
+# Test assumes that master and replica DB have been created beforehand.
+# Also, it assumes that %FB_HOME%\replication.conf has been prepared with appropriate parameters for replication.
+# Particularly, names of directories and databases must have info about the checked FB major version and ServerMode.
+# * verbose = true // in order to find out the line with the message that the required segment was replicated
+# * section for master database with specified parameters:
+# journal_directory = "!fbt_repo!\tmp\fb-replication.!fb_major!.!server_mode!.journal"
+# journal_archive_directory = "!fbt_repo!\tmp\fb-replication.!fb_major!.!server_mode!.archive"
+# journal_archive_command = "copy $(pathname) $(archivepathname)"
+# journal_archive_timeout = 10
+# * section for replica database with specified parameter:
+# journal_source_directory = "!fbt_repo!\tmp\fb-replication.!fb_major!.!server_mode!.archive"
+#
+# Master and replica databases must be created in the "!fbt_repo!\tmp" directory and have names like these:
+# 'fbt-main.fb40.SS.fdb'; 'fbt-repl.fb40.SS.fdb'; - for FB 4.x ('SS' = Super; 'CS' = Classic)
+# 'fbt-main.fb50.SS.fdb'; 'fbt-repl.fb50.SS.fdb'; - for FB 5.x ('SS' = Super; 'CS' = Classic)
+# NB: the fixed numeric value ('40' or '50') must be used for any minor FB version (4.0; 4.0.1; 4.1; 5.0; 5.1 etc)
+#
+# These two databases must NOT be dropped in any of the tests related to replication!
+# They are created and dropped in the batch scenario which prepares the FB instance to be checked for each ServerMode
+# and makes cleanup after it, i.e. when all tests are completed.
+#
+# NB. Currently this task was implemented only in a Windows batch, thus the test has attribute platform = 'Windows'.
+#
+# Temporary comment. For debug purposes:
+# 1) find out the SUFFIX of the name of the FB service which is to be tested (e.g. 'DefaultInstance', '40SS' etc);
+# 2) copy file %fbt-repo%\tests\functional\tabloid\batches\setup-fb-for-replication.bat.txt
+#    to some place and rename it "*.bat";
+# 3) open this .bat in an editor and adjust the value of the 'fbt_repo' variable;
+# 4) run: setup-fb-for-replication.bat [SUFFIX_OF_FB_SERVICE]
+#    where SUFFIX_OF_FB_SERVICE is the ending part of the FB service which you want to check:
+#    DefaultInstance ; 40ss ; 40cs ; 50ss ; 50cs etc
+# 5) batch 'setup-fb-for-replication.bat' will:
+#    * stop the selected FB instance
+#    * create test databases (in !fbt_repo!\tmp\);
+#    * prepare journal/archive sub-folders for replication (also in !fbt_repo!\tmp\);
+#    * replace %fb_home%\replication.conf with an appropriate one
+#    * start the selected FB instance
+# 6) run this test (the FB instance will already be launched by setup-fb-for-replication.bat):
+#    %fpt_repo%\fbt-run2.bat dblevel-triggers-must-not-fire-on-replica.fbt 50ss, etc
+#
+# Confirmed bug on 4.0.0.2465: the sequence with the least ID ('g_1') is not replicated, its value on replica remains 0.
+# Checked on:
+# 4.0.1.2519 SS; 5.0.0.82 SS/CS.
+#
+# tracker_id:
+# min_versions: ['4.0']
+# versions: 4.0
+# qmid: None
+
+import pytest
+from firebird.qa import db_factory, python_act, Action
+
+# version: 4.0
+# resources: None
+
+substitutions_1 = [('Start removing objects in:.*', 'Start removing objects'), ('Finish. Total objects removed: [1-9]\\d*', 'Finish.
Total objects removed'), ('.* CREATE DATABASE .*', '')] + +init_script_1 = """""" + +db_1 = db_factory(sql_dialect=3, init=init_script_1) + +# test_script_1 +#--- +# +# import os +# import subprocess +# import re +# import difflib +# import shutil +# import time +# +# os.environ["ISC_USER"] = user_name +# os.environ["ISC_PASSWORD"] = user_password +# +# ##################################### +# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 65 +# ##################################### +# +# svc = fdb.services.connect(host='localhost', user=user_name, password=user_password) +# FB_HOME = svc.get_home_directory() +# svc.close() +# +# engine = db_conn.engine_version # 4.0; 4.1; 5.0 etc -- type float +# fb_major = 'fb' + str(engine)[:1] + '0' # 'fb40'; 'fb50' +# +# cur = db_conn.cursor() +# cur.execute("select rdb$config_value from rdb$config where upper(rdb$config_name) = upper('ServerMode')") +# server_mode = 'XX' +# for r in cur: +# if r[0] == 'Super': +# server_mode = 'SS' +# elif r[0] == 'SuperClassic': +# server_mode = 'SC' +# elif r[0] == 'Classic': +# server_mode = 'CS' +# cur.close() +# +# # 'fbt-main.fb50.ss.fdb' etc: +# db_main = os.path.join( context['temp_directory'], 'fbt-main.' + fb_major + '.' + server_mode + '.fdb' ) +# db_repl = db_main.replace( 'fbt-main.', 'fbt-repl.') +# +# # Folders for journalling and archieving segments. +# repl_journal_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.journal' ) +# repl_archive_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.archive' ) +# +# db_conn.close() +# +# #-------------------------------------------- +# +# def flush_and_close( file_handle ): +# # https://docs.python.org/2/library/os.html#os.fsync +# # If you're starting with a Python file object f, +# # first do f.flush(), and +# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. +# global os +# +# file_handle.flush() +# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: +# # otherwise: "OSError: [Errno 9] Bad file descriptor"! 
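+#          # (descriptive note: every ISQL/gfix log written below is closed via this helper
+#          # and then reopened for reading, so buffered data must really reach the disk
+#          # before the reader starts)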
+# os.fsync(file_handle.fileno()) +# file_handle.close() +# +# #-------------------------------------------- +# +# def cleanup( f_names_list ): +# global os +# for i in range(len( f_names_list )): +# if type(f_names_list[i]) == file: +# del_name = f_names_list[i].name +# elif type(f_names_list[i]) == str: +# del_name = f_names_list[i] +# else: +# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.') +# del_name = None +# +# if del_name and os.path.isfile( del_name ): +# os.remove( del_name ) +# +# #-------------------------------------------- +# +# def wait_for_data_in_replica( fb_home, max_allowed_time_for_wait, db_main, prefix_msg = '' ): +# +# global re +# global difflib +# global time +# +# replold_lines = [] +# with open( os.path.join(fb_home,'replication.log'), 'r') as f: +# replold_lines = f.readlines() +# +# con = fdb.connect( dsn = 'localhost:' + db_main, no_db_triggers = 1) +# cur = con.cursor() +# cur.execute("select rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') from rdb$database") +# for r in cur: +# last_generated_repl_segment = r[0] +# cur.close() +# con.close() +# +# #print('last_generated_repl_segment:', last_generated_repl_segment) +# +# +# # +IMAGE-PC1 (replica) Fri Jun 11 17:57:01 2021 +# # + Database: C:\\FBTESTING\\QA\\FBT-REPO\\TMP\\FBT-REPL.FB50.FDB +# # + VERBOSE: Added 1 segment(s) to the processing queue +# # + +# # +IMAGE-PC1 (replica) Fri Jun 11 17:57:04 2021 +# # + Database: C:\\FBTESTING\\QA\\FBT-REPO\\TMP\\FBT-REPL.FB50.FDB +# # + VERBOSE: Segment 1 (2582 bytes) is replicated in 1 second(s), deleting the file +# +# p=re.compile( '\\+\\s+verbose:\\s+segment\\s+%(last_generated_repl_segment)s\\s+\\(\\d+\\s+bytes\\)\\s+is\\s+replicated.*deleting' % locals(), re.IGNORECASE) +# +# found_required_message = False +# for i in range(0,max_allowed_time_for_wait): +# time.sleep(1) +# +# # Get content of fb_home replication.log _after_ isql finish: +# f_repllog_new = open( os.path.join(fb_home,'replication.log'), 'r') +# diff_data = difflib.unified_diff( +# replold_lines, +# f_repllog_new.readlines() +# ) +# f_repllog_new.close() +# +# for k,d in enumerate(diff_data): +# if p.search(d): +# print( (prefix_msg + ' ' if prefix_msg else '') + 'FOUND message about replicated segment.' ) +# found_required_message = True +# break +# +# if found_required_message: +# break +# +# if not found_required_message: +# print('UNEXPECTED RESULT: no message about replicated segment for %d seconds.' 
% max_allowed_time_for_wait) +# +# #-------------------------------------------- +# +# sql_ddl = ''' set bail on; +# set list on; +# +# select mon$database_name from mon$database; +# recreate sequence g_1; +# recreate sequence g_2; +# recreate table test(id int primary key); +# commit; +# +# select rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') as last_generated_repl_segment from rdb$database; +# quit; +# ''' % locals() +# +# +# f_sql_chk = open( os.path.join(context['temp_directory'],'tmp_chk_skip_gen_repl.sql'), 'w') +# f_sql_chk.write(sql_ddl) +# flush_and_close( f_sql_chk ) +# +# f_sql_log = open( ''.join( (os.path.splitext(f_sql_chk.name)[0], '.log' ) ), 'w') +# f_sql_err = open( ''.join( (os.path.splitext(f_sql_chk.name)[0], '.err' ) ), 'w') +# subprocess.call( [ context['isql_path'], 'localhost:' + db_main, '-i', f_sql_chk.name ], stdout = f_sql_log, stderr = f_sql_err) +# flush_and_close( f_sql_log ) +# flush_and_close( f_sql_err ) +# +# last_generated_repl_segment = 0 +# +# with open(f_sql_err.name,'r') as f: +# for line in f: +# print('UNEXPECTED STDERR in initial SQL: ' + line) +# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0 +# +# if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> initial SQL script finished w/o errors +# +# # Test connect to master DB, just to fire DB-level triggers: +# ########################### +# con1 = fdb.connect( dsn = 'localhost:' + db_main) +# con1.execute_immediate( 'insert into test(id) values( gen_id(g_2, 1) )' ) +# con1.execute_immediate( 'update test set id = id + gen_id(g_1, 1)' ) +# con1.commit() +# con1.close() +# +# ############################################################################## +# ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### +# ############################################################################## +# wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-1' ) +# +# runProgram('isql', ['localhost:' + db_main, '-nod'], 'set list on; select count(*) test_rows_on_main, gen_id(g_1,0) as g1_curr_val_on_main, gen_id(g_2,0) as g2_curr_val_on_main from test;') +# runProgram('isql', ['localhost:' + db_repl, '-nod'], 'set list on; select count(*) test_rows_on_repl, gen_id(g_1,0) as g1_curr_val_on_repl, gen_id(g_2,0) as g2_curr_val_on_repl from test;') +# +# +# # return initial state of master DB: +# # remove all DB objects (tables, views, ...): +# # -------------------------------------------- +# sql_clean_ddl = os.path.join(context['files_location'],'drop-all-db-objects.sql') +# +# f_clean_log=open( os.path.join(context['temp_directory'],'drop-all-db-objects.log'), 'w') +# f_clean_err=open( ''.join( ( os.path.splitext(f_clean_log.name)[0], '.err') ), 'w') +# subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-i', sql_clean_ddl], stdout=f_clean_log, stderr=f_clean_err ) +# flush_and_close(f_clean_log) +# flush_and_close(f_clean_err) +# +# with open(f_clean_err.name,'r') as f: +# for line in f: +# print('UNEXPECTED STDERR in cleanup SQL: ' + line) +# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0 +# +# with open(f_clean_log.name,'r') as f: +# for line in f: +# # show number of dropped objects +# print(line) +# +# if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> initial SQL script finished w/o errors +# +# ############################################################################## +# ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### +# ############################################################################## +# wait_for_data_in_replica( FB_HOME, 
MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-2' ) +# +# f_main_meta_sql=open( os.path.join(context['temp_directory'],'db_main_meta_skip_gen_repl.sql'), 'w') +# subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_main_meta_sql, stderr=subprocess.STDOUT ) +# flush_and_close( f_main_meta_sql ) +# +# f_repl_meta_sql=open( os.path.join(context['temp_directory'],'db_repl_meta_skip_gen_repl.sql'), 'w') +# subprocess.call( [context['isql_path'], 'localhost:' + db_repl, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_repl_meta_sql, stderr=subprocess.STDOUT ) +# flush_and_close( f_repl_meta_sql ) +# +# db_main_meta=open(f_main_meta_sql.name, 'r') +# db_repl_meta=open(f_repl_meta_sql.name, 'r') +# +# diffmeta = ''.join(difflib.unified_diff( +# db_main_meta.readlines(), +# db_repl_meta.readlines() +# )) +# db_main_meta.close() +# db_repl_meta.close() +# +# f_meta_diff=open( os.path.join(context['temp_directory'],'db_meta_diff_skip_gen_repl.txt'), 'w', buffering = 0) +# f_meta_diff.write(diffmeta) +# flush_and_close( f_meta_diff ) +# +# # Following must issue only TWO rows: +# # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_main]' ... */ +# # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_repl]' ... */ +# # Only thes lines will be suppressed further (see subst. section): +# with open(f_meta_diff.name, 'r') as f: +# for line in f: +# if line[:1] in ('-', '+') and line[:3] not in ('---','+++'): +# print('UNEXPECTED METADATA DIFF.: ' + line) +# +# ###################### +# ### A C H T U N G ### +# ###################### +# # MANDATORY, OTHERWISE REPLICATION MAY GET STUCK ON TESTS THAT ARE EXECUTED AFTER THIS +# # (e.g. on "updating-blob-with-empty-string-stops-replication") +# # with 'ERROR: Record format with length is not found for table TEST': +# runProgram('gfix', ['-sweep', 'localhost:' + db_repl]) +# runProgram('gfix', ['-sweep', 'localhost:' + db_main]) +# ####################### +# +# # cleanup: +# ########## +# cleanup( (f_sql_chk, f_sql_log, f_sql_err,f_clean_log,f_clean_err,f_main_meta_sql,f_repl_meta_sql,f_meta_diff) ) +# +# +#--- +act_1 = python_act('db_1', substitutions=substitutions_1) + +expected_stdout_1 = """ + POINT-1 FOUND message about replicated segment. + + TEST_ROWS_ON_MAIN 1 + G1_CURR_VAL_ON_MAIN 1 + G2_CURR_VAL_ON_MAIN 1 + + TEST_ROWS_ON_REPL 1 + G1_CURR_VAL_ON_REPL 1 + G2_CURR_VAL_ON_REPL 1 + + Start removing objects + Finish. Total objects removed + POINT-2 FOUND message about replicated segment. +""" + +@pytest.mark.version('>=4.0') +@pytest.mark.platform('Windows') +def test_1(act_1: Action): + pytest.fail("Test not IMPLEMENTED") + + diff --git a/tests/functional/replication/test_invalid_msg_if_target_db_has_no_replica_flag.py b/tests/functional/replication/test_invalid_msg_if_target_db_has_no_replica_flag.py new file mode 100644 index 00000000..94858160 --- /dev/null +++ b/tests/functional/replication/test_invalid_msg_if_target_db_has_no_replica_flag.py @@ -0,0 +1,445 @@ +#coding:utf-8 +# +# id: tests.functional.replication.invalid_msg_if_target_db_has_no_replica_flag +# title: Invalid message in replication.log (and possibly crash in the case of synchronous replication) when the target DB has no its "replica" flag set +# decription: +# See: https://github.com/FirebirdSQL/firebird/issues/6989 +# +# Test changes replica DB attribute (removes 'replica' flag). Then we do some trivial DDL on master (create and drop table). +# Log of replication must soon contain *two* phrases: +# 1. 
VERBOSE: Added 1 segment(s) to the processing queue +# 2. ERROR: Database is not in the replica mode +# If any of these phrases absent then we have bug. +# +# Otherwise we continue and return attribute 'replica' to the target DB. After this replication log must contain phrase: +# VERBOSE: Segment ( bytes) is replicated in ms, deleting the file. +# We can assume that replication finished OK only when such line is found see ('POINT-1'). +# +# Further, we invoke ISQL with executing auxiliary script for drop all DB objects on master (with '-nod' command switch). +# After all objects will be dropped, we have to wait again until replica becomes actual with master (see 'POINT-2'). +# +# Finally, we extract metadata for master and replica and compare them (see 'f_meta_diff'). +# The only difference in metadata must be 'CREATE DATABASE' statement with different DB names - we suppress it, +# thus metadata difference must not be issued. +# +# #################### +# ### CRUCIAL NOTE ### +# #################### +# Currently there is bug in FB 4.x and 5.x which can be seen on SECOND run of this test: message with text +# "ERROR: Record format with length 68 is not found for table TEST" will appear in it after inserting 1st record in master. +# The reason of that is "dirty" pages that remain in RDB$RELATION_FIELDS both on master and replica after dropping table. +# Following query show different data that appear in replica DB on 1st and 2nd run (just after table was created on master): +# ======= +# set blobdisplay 6; +# select rdb$descriptor as fmt_descr +# from rdb$formats natural join rdb$relations where rdb$relation_name = 'TEST'; +# ======= +# This bug was explained by dimitr, see letters 25.06.2021 11:49 and 25.06.2021 16:56. +# It will be fixed later. +# +# The only workaround to solve this problem is to make SWEEP after all DB objects have been dropped. +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# !NB! BOTH master and replica must be cleaned up by sweep! +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# +# ################ +# ### N O T E ### +# ################ +# Test assumes that master and replica DB have been created beforehand. +# Also, it assumes that %FB_HOME% +# eplication.conf has been prepared with apropriate parameters for replication. +# Particularly, name of directories and databases must have info about checked FB major version and ServerMode. +# * verbose = true // in order to find out line with message that required segment was replicated +# * section for master database with specified parameters: +# journal_directory = "!fbt_repo! mp +# b-replication.!fb_major!.!server_mode!.journal" +# journal_archive_directory = "!fbt_repo! mp +# b-replication.!fb_major!.!server_mode!.archive" +# journal_archive_command = "copy $(pathname) $(archivepathname)" +# journal_archive_timeout = 10 +# * section for replica database with specified parameter: +# journal_source_directory = "!fbt_repo! mp +# b-replication.!fb_major!.!server_mode!.archive" +# +# Master and replica databases must be created in "!fbt_repo! mp" directory and have names like these: +# 'fbt-main.fb40.SS.fdb'; 'fbt-repl.fb40.SS.fdb'; - for FB 4.x ('SS' = Super; 'CS' = Classic) +# 'fbt-main.fb50.SS.fdb'; 'fbt-repl.fb50.SS.fdb'; - for FB 5.x ('SS' = Super; 'CS' = Classic) +# NB: fixed numeric value ('40' or '50') must be used for any minor FB version (4.0; 4.0.1; 4.1; 5.0; 5.1 etc) +# +# These two databases must NOT be dropped in any of tests related to replication! 
+# They are created and dropped in the batch scenario which prepares the FB instance to be checked for each ServerMode
+# and makes cleanup after it, i.e. when all tests have been completed.
+#
+# NB. Currently this task was implemented only in Windows batch, thus the test has attribute platform = 'Windows'.
+#
+# Temporary comment. For debug purposes:
+# 1) find out the SUFFIX of the name of the FB service which is to be tested (e.g. 'DefaultInstance', '40SS' etc);
+# 2) copy file %fbt-repo%\tests\functional\tabloid\batches\setup-fb-for-replication.bat.txt
+# to some place and rename it "*.bat";
+# 3) open this .bat in an editor and adjust the value of the 'fbt_repo' variable;
+# 4) run: setup-fb-for-replication.bat [SUFFIX_OF_FB_SERVICE]
+# where SUFFIX_OF_FB_SERVICE is the ending part of the FB service which you want to check:
+# DefaultInstance ; 40ss ; 40cs ; 50ss ; 50cs etc
+# 5) batch 'setup-fb-for-replication.bat' will:
+# * stop the selected FB instance
+# * create test databases (in !fbt_repo!\tmp\);
+# * prepare journal/archive sub-folders for replication (also in !fbt_repo!\tmp\);
+# * replace %fb_home%\replication.conf with the appropriate one
+# * start the selected FB instance
+# 6) run this test (the FB instance will already be launched by setup-fb-for-replication.bat):
+# %fpt_repo%\fbt-run2.bat dblevel-triggers-must-not-fire-on-replica.fbt 50ss, etc
+#
+# Checked on: WI-T5.0.0.257; WI-V4.0.1.2631 (both SS/CS).
+#
+#
+# tracker_id:
+# min_versions: ['4.0.1']
+# versions: 4.0.1
+# qmid: None
+
+import pytest
+from firebird.qa import db_factory, python_act, Action
+
+# version: 4.0.1
+# resources: None
+
+substitutions_1 = [('Start removing objects in:.*', 'Start removing objects'), ('Finish. Total objects removed: [1-9]\\d*', 'Finish. Total objects removed'), ('.* CREATE DATABASE .*', ''), ('FMT_DESCR .*', 'FMT_DESCR'), ('[ \t]+', ' ')]
+
+init_script_1 = """"""
+
+db_1 = db_factory(sql_dialect=3, init=init_script_1)
+
+# test_script_1
+#---
+#
+# import os
+# import subprocess
+# import re
+# import difflib
+# import shutil
+# import time
+# from datetime import datetime
+# from datetime import timedelta
+#
+#
+# os.environ["ISC_USER"] = user_name
+# os.environ["ISC_PASSWORD"] = user_password
+#
+# # NB: with default values of 'apply_idle_timeout' and 'apply_error_timeout' (10 and 60 s)
+# # total time of this test is about 130...132s.
+# #####################################
+# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 65
+# MAX_TIME_FOR_WAIT_ERR_MSG_IN_LOG = 65
+# #####################################
+#
+# svc = fdb.services.connect(host='localhost', user=user_name, password=user_password)
+# FB_HOME = svc.get_home_directory()
+# svc.close()
+#
+# engine = db_conn.engine_version # 4.0; 4.1; 5.0 etc -- type float
+# fb_major = 'fb' + str(engine)[:1] + '0' # 'fb40'; 'fb50'
+#
+# cur = db_conn.cursor()
+# cur.execute("select rdb$config_value from rdb$config where upper(rdb$config_name) = upper('ServerMode')")
+# server_mode = 'XX'
+# for r in cur:
+#     if r[0] == 'Super':
+#         server_mode = 'SS'
+#     elif r[0] == 'SuperClassic':
+#         server_mode = 'SC'
+#     elif r[0] == 'Classic':
+#         server_mode = 'CS'
+# cur.close()
+#
+# # 'fbt-main.fb50.ss.fdb' etc:
+# db_main = os.path.join( context['temp_directory'], 'fbt-main.' + fb_major + '.' + server_mode + '.fdb' )
+# db_repl = db_main.replace( 'fbt-main.', 'fbt-repl.')
+#
+# # Folders for journalling and archiving segments.
+# repl_journal_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.'
+ server_mode + '.journal' ) +# repl_archive_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.archive' ) +# +# runProgram('gfix', ['-replica', 'none', 'localhost:' + db_repl]) +# +# db_conn.close() +# +# #-------------------------------------------- +# +# def flush_and_close( file_handle ): +# # https://docs.python.org/2/library/os.html#os.fsync +# # If you're starting with a Python file object f, +# # first do f.flush(), and +# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. +# global os +# +# file_handle.flush() +# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: +# # otherwise: "OSError: [Errno 9] Bad file descriptor"! +# os.fsync(file_handle.fileno()) +# file_handle.close() +# +# #-------------------------------------------- +# +# def cleanup( f_names_list ): +# global os +# for i in range(len( f_names_list )): +# if type(f_names_list[i]) == file: +# del_name = f_names_list[i].name +# elif type(f_names_list[i]) == str: +# del_name = f_names_list[i] +# else: +# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.') +# del_name = None +# +# if del_name and os.path.isfile( del_name ): +# os.remove( del_name ) +# +# #-------------------------------------------- +# +# def wait_for_data_in_replica( fb_home, max_allowed_time_for_wait, db_main, prefix_msg = '' ): +# global re +# global difflib +# global time +# +# # -:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:- +# def check_pattern_in_log( log_lines, pattern, prefix_msg = '' ): +# found_required_message = False +# for d in log_lines: +# if pattern.search(d): +# print( (prefix_msg + ' ' if prefix_msg else '') + 'FOUND message about replicated segment.' ) +# found_required_message = True +# break +# return found_required_message +# # -:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:- +# +# replold_lines = [] +# with open( os.path.join(fb_home,'replication.log'), 'r') as f: +# replold_lines = f.readlines() +# +# con = fdb.connect( dsn = 'localhost:' + db_main, no_db_triggers = 1) +# cur = con.cursor() +# cur.execute("select rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') from rdb$database") +# for r in cur: +# last_generated_repl_segment = r[0] +# cur.close() +# con.close() +# +# # VERBOSE: Segment 1 (2582 bytes) is replicated in 1 second(s), deleting the file +# segment_replicated_pattern=re.compile( 'verbose:\\s+segment\\s+%(last_generated_repl_segment)s\\s+\\(\\d+\\s+bytes\\)\\s+is\\s+replicated.*deleting' % locals(), re.IGNORECASE) +# +# # 08.09.2021: replication content can remain unchanged if there was no user-defined object in DB that must be dropped! +# # Because of this, it is crucial to check OLD content of replication log before loop. 
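# (Illustrative aside, not part of the original script.) The log-waiting technique used by
# wait_for_data_in_replica() in miniature: snapshot the log first, then repeatedly diff the
# snapshot against a fresh read and search only the newly added lines. A minimal sketch with
# hypothetical names (log_path, pattern):
#
#     import difflib, re, time
#
#     def wait_in_log(log_path, pattern, max_wait):
#         with open(log_path) as f:
#             old_lines = f.readlines()               # content BEFORE the tested action
#         rx = re.compile(pattern, re.IGNORECASE)
#         for _ in range(max_wait):
#             time.sleep(1)
#             with open(log_path) as f:
#                 new_lines = f.readlines()
#             # unified_diff() yields '+'-prefixed lines for text added since the snapshot:
#             if any(rx.search(d) for d in difflib.unified_diff(old_lines, new_lines)):
#                 return True
#         return False
#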
+# # Also, segment_replicated_pattern must NOT start from '\\+' because it can occur only for diff_data (within loop): +# # +# found_required_message = check_pattern_in_log( replold_lines, segment_replicated_pattern, prefix_msg ) +# +# if not found_required_message: +# +# for i in range(0,max_allowed_time_for_wait): +# time.sleep(1) +# +# # Get content of fb_home replication.log _after_ isql finish: +# f_repllog_new = open( os.path.join(fb_home,'replication.log'), 'r') +# diff_data = difflib.unified_diff( +# replold_lines, +# f_repllog_new.readlines() +# ) +# f_repllog_new.close() +# +# found_required_message = check_pattern_in_log( diff_data, segment_replicated_pattern, prefix_msg ) +# if found_required_message: +# break +# +# if not found_required_message: +# print('UNEXPECTED RESULT: no message about replicated segment No. %d for %d seconds.' % (int(last_generated_repl_segment), max_allowed_time_for_wait) ) +# +# +# #-------------------------------------------- +# +# def wait_for_required_msg_in_log( fb_home, required_pattern, db_main, max_allowed_time_for_wait, prefix_msg = '' ): +# +# global re +# global difflib +# global time +# global datetime +# +# +# # -:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:- +# def check_pattern_in_log( log_lines, pattern, min_timestamp, prefix_msg = '' ): +# found_required_message = False +# for i,r in enumerate(log_lines): +# if pattern.search(r): +# if i>=2 and log_lines[i-2]: +# # a = r.replace('(',' ').split() +# a = log_lines[i-2].split() +# if len(a)>=4: +# # s='replica_host_name (slave) Sun May 30 17:46:43 2021' +# # s.split()[-5:] ==> ['Sun', 'May', '30', '17:46:43', '2021'] +# # ' '.join( ...) ==> 'Sun May 30 17:46:43 2021' +# dts = ' '.join( log_lines[i-2].split()[-5:] ) +# msg_timestamp = datetime.strptime( dts, '%a %b %d %H:%M:%S %Y') +# if msg_timestamp >= min_timestamp: +# print( (prefix_msg + ' ' if prefix_msg else '') + 'FOUND required message after given timestamp.') #, 'msg_timestamp=%s' % msg_timestamp, '; min_timestamp=%s' % min_timestamp ) +# found_required_message = True +# break +# return found_required_message +# +# # -:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:- +# +# replold_lines = [] +# with open( os.path.join(fb_home,'replication.log'), 'r') as f: +# replold_lines = f.readlines() +# +# +# ################################## +# ### A.C.H.T.U.N.G ### +# ### do NOT use datetime.now() ### +# ### because of missed accuracy ### +# ### of timestamps in repl.log ### +# ### (it is HH:MM:SS only) ### +# ################################## +# current_date_with_hhmmss = datetime.today().replace(microsecond=0) +# +# runProgram('isql', ['localhost:' + db_main], 'create table test(id int primary key); drop table test;') +# +# for i in range(0,max_allowed_time_for_wait): +# time.sleep(1) +# +# # Get content of fb_home replication.log _after_ isql finish: +# f_repllog_new = open( os.path.join(fb_home,'replication.log'), 'r') +# diff_data = difflib.unified_diff( +# replold_lines, +# f_repllog_new.readlines() +# ) +# f_repllog_new.close() +# +# found_required_message = check_pattern_in_log( list(diff_data), required_pattern, current_date_with_hhmmss, prefix_msg ) +# if found_required_message: +# break +# +# if not found_required_message: +# print('UNEXPECTED RESULT: required message NOT found after %s for %d seconds.' 
% (current_date_with_hhmmss, max_allowed_time_for_wait))
+#
+#     return found_required_message
+# #--------------------------------------------
+#
+#
+# #######################################################################
+# ### Make trivial changes in the master (CREATE / DROP table) and   ###
+# ### check that "ERROR: Database is not in the replica mode" appears ###
+# ### in the replication log after this, waiting for up to            ###
+# ### MAX_TIME_FOR_WAIT_ERR_MSG_IN_LOG seconds.                       ###
+# #######################################################################
+# not_in_replica_mode_pattern=re.compile( 'ERROR: Database is not in the replica mode', re.IGNORECASE)
+# found_expected_err_msg = wait_for_required_msg_in_log( FB_HOME, not_in_replica_mode_pattern, db_main, MAX_TIME_FOR_WAIT_ERR_MSG_IN_LOG, 'POINT-A' )
+#
+# '''
+# # temp, 4debug only: try this if the framework will not be able to drop the database (Classic only):
+# fdb_tmp=os.path.join(context['temp_directory'],'tmp_gh_6989.tmp.fdb')
+# runProgram('gfix', ['-shut', 'full', '-force', '0', 'localhost:' + db_repl])
+# shutil.move(db_repl, fdb_tmp)
+# runProgram('gfix', ['-online', 'localhost:' + fdb_tmp])
+# runProgram('gfix', ['-replica', 'read_only', 'localhost:' + fdb_tmp])
+# shutil.move(fdb_tmp, db_repl)
+# '''
+#
+# runProgram('gfix', ['-replica', 'read_only', 'localhost:' + db_repl])
+#
+# if found_expected_err_msg:
+#     ##############################################################################
+#     ###  W A I T   U N T I L    R E P L I C A    B E C O M E S   A C T U A L  ###
+#     ##############################################################################
+#     wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-1' )
+#
+#
+#     # return initial state of master DB:
+#     # remove all DB objects (tables, views, ...):
+#     # --------------------------------------------
+#     sql_clean_ddl = os.path.join(context['files_location'],'drop-all-db-objects.sql')
+#
+#     f_clean_log=open( os.path.join(context['temp_directory'],'drop-all-db-objects-gh_6989.log'), 'w')
+#     f_clean_err=open( ''.join( ( os.path.splitext(f_clean_log.name)[0], '.err') ), 'w')
+#     subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-i', sql_clean_ddl], stdout=f_clean_log, stderr=f_clean_err )
+#     flush_and_close(f_clean_log)
+#     flush_and_close(f_clean_err)
+#
+#     with open(f_clean_err.name,'r') as f:
+#         for line in f:
+#             print('UNEXPECTED STDERR in cleanup SQL: ' + line)
+#             MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0
+#
+#     with open(f_clean_log.name,'r') as f:
+#         for line in f:
+#             # show number of dropped objects
+#             print(line)
+#
+# if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> previous SQL script finished w/o errors
+#
+#     ##############################################################################
+#     ###  W A I T   U N T I L    R E P L I C A    B E C O M E S   A C T U A L  ###
+#     ##############################################################################
+#     wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-2' )
+#
+#     f_main_meta_sql=open( os.path.join(context['temp_directory'],'db_main_meta_gh_6989.sql'), 'w')
+#     subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_main_meta_sql, stderr=subprocess.STDOUT )
+#     flush_and_close( f_main_meta_sql )
+#
+#     f_repl_meta_sql=open( os.path.join(context['temp_directory'],'db_repl_meta_gh_6989.sql'), 'w')
+#     subprocess.call( [context['isql_path'], 'localhost:' + db_repl, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_repl_meta_sql, stderr=subprocess.STDOUT )
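# (Illustrative aside, not part of the original test.) The substitutions_1 pairs declared near
# the top of this file are (regex, replacement) tuples that the QA framework applies to the
# captured output before comparing it with expected_stdout_1; conceptually (a hypothetical
# minimal model, not the framework's actual code):
#
#     import re
#     def apply_substitutions(text, subs):
#         out = []
#         for ln in text.splitlines():
#             for pattern, repl in subs:
#                 ln = re.sub(pattern, repl, ln)
#             out.append(ln)
#         return '\n'.join(out)
#
# This is why the two legitimate "CREATE DATABASE ..." metadata diff lines printed below are
# suppressed by the ('.* CREATE DATABASE .*', '') pair.
#     flush_and_close(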
f_repl_meta_sql ) +# +# db_main_meta=open(f_main_meta_sql.name, 'r') +# db_repl_meta=open(f_repl_meta_sql.name, 'r') +# +# diffmeta = ''.join(difflib.unified_diff( +# db_main_meta.readlines(), +# db_repl_meta.readlines() +# )) +# db_main_meta.close() +# db_repl_meta.close() +# +# f_meta_diff=open( os.path.join(context['temp_directory'],'db_meta_diff_gh_6989.txt'), 'w', buffering = 0) +# f_meta_diff.write(diffmeta) +# flush_and_close( f_meta_diff ) +# +# # Following must issue only TWO rows: +# # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_main]' ... */ +# # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_repl]' ... */ +# # Only thes lines will be suppressed further (see subst. section): +# with open(f_meta_diff.name, 'r') as f: +# for line in f: +# if line[:1] in ('-', '+') and line[:3] not in ('---','+++'): +# print('UNEXPECTED METADATA DIFF.: ' + line) +# +# ###################### +# ### A C H T U N G ### +# ###################### +# # MANDATORY, OTHERWISE REPLICATION GETS STUCK ON SECOND RUN OF THIS TEST +# # WITH 'ERROR: Record format with length 68 is not found for table TEST': +# runProgram('gfix', ['-sweep', 'localhost:' + db_repl]) +# runProgram('gfix', ['-sweep', 'localhost:' + db_main]) +# ####################### +# +# # cleanup: +# ########## +# cleanup( (f_clean_log,f_clean_err,f_main_meta_sql,f_repl_meta_sql,f_meta_diff) ) +# +#--- +act_1 = python_act('db_1', substitutions=substitutions_1) + +expected_stdout_1 = """ + POINT-A FOUND required message after given timestamp. + POINT-1 FOUND message about replicated segment. + Start removing objects + Finish. Total objects removed: 0 + POINT-2 FOUND message about replicated segment. +""" + +@pytest.mark.version('>=4.0.1') +@pytest.mark.platform('Windows') +def test_1(act_1: Action): + pytest.fail("Test not IMPLEMENTED") + + diff --git a/tests/functional/replication/test_oltp_emul_ddl.py b/tests/functional/replication/test_oltp_emul_ddl.py new file mode 100644 index 00000000..6982ce53 --- /dev/null +++ b/tests/functional/replication/test_oltp_emul_ddl.py @@ -0,0 +1,580 @@ +#coding:utf-8 +# +# id: tests.functional.replication.oltp_emul_ddl +# title: Applying full DDL from OLTP-EMUL test on master with further check replica +# decription: +# +# ################ +# ### N O T E ### +# ################ +# Test assumes that master and replica DB have been created beforehand. +# Also, it assumes that %FB_HOME% +# eplication.conf has been prepared with apropriate parameters for replication. +# Particularly, name of directories and databases must have info about checked FB major version and ServerMode. +# * verbose = true // in order to find out line with message that required segment was replicated +# * section for master database with specified parameters: +# journal_directory = "!fbt_repo! mp +# b-replication.!fb_major!.!server_mode!.journal" +# journal_archive_directory = "!fbt_repo! mp +# b-replication.!fb_major!.!server_mode!.archive" +# journal_archive_command = "copy $(pathname) $(archivepathname)" +# journal_archive_timeout = 10 +# * section for replica database with specified parameter: +# journal_source_directory = "!fbt_repo! mp +# b-replication.!fb_major!.!server_mode!.archive" +# +# Master and replica databases must be created in "!fbt_repo! 
mp" directory and have names like these: +# 'fbt-main.fb40.SS.fdb'; 'fbt-repl.fb40.SS.fdb'; - for FB 4.x ('SS' = Super; 'CS' = Classic) +# 'fbt-main.fb50.SS.fdb'; 'fbt-repl.fb50.SS.fdb'; - for FB 5.x ('SS' = Super; 'CS' = Classic) +# NB: fixed numeric value ('40' or '50') must be used for any minor FB version (4.0; 4.0.1; 4.1; 5.0; 5.1 etc) +# +# These two databases must NOT be dropped in any of tests related to replication! +# They are created and dropped in the batch scenario which prepares FB instance to be checked for each ServerMode +# and make cleanup after it, i.e. when all tests will be completed. +# +# NB. Currently this task was implemented only in Windows batch, thus test has attribute platform = 'Windows'. +# +# Temporary comment. For debug purpoces: +# 1) find out SUFFIX of the name of FB service which is to be tested (e.g. 'DefaultInstance', '40SS' etc); +# 2) copy file %fbt-repo% ests +# unctional abloidatches\\setup-fb-for-replication.bat.txt +# to some place and rename it "*.bat"; +# 3) open this .bat in editor and asjust value of 'fbt_repo' variable; +# 4) run: setup-fb-for-replication.bat [SUFFIX_OF_FB_SERVICE] +# where SUFFIX_OF_FB_SERVICE is ending part of FB service which you want to check: +# DefaultInstance ; 40ss ; 40cs ; 50ss ; 50cs etc +# 5) batch 'setup-fb-for-replication.bat' will: +# * stop selected FB instance +# * create test databases (in !fbt_repo! mp\\); +# * prepare journal/archive sub-folders for replication (also in !fbt_repo! mp\\); +# * replace %fb_home% +# eplication.conf with apropriate +# * start selected FB instance +# 6) run this test (FB instance will be already launched by setup-fb-for-replication.bat): +# %fpt_repo% +# bt-run2.bat dblevel-triggers-must-not-fire-on-replica.fbt 50ss, etc +# +# +# tracker_id: +# min_versions: ['4.0'] +# versions: 4.0 +# qmid: None + +import pytest +from firebird.qa import db_factory, python_act, Action + +# version: 4.0 +# resources: None + +substitutions_1 = [('Start removing objects in:.*', 'Start removing objects'), ('Finish. Total objects removed: [1-9]\\d*', 'Finish. Total objects removed'), ('.* CREATE DATABASE .*', '')] + +init_script_1 = """""" + +db_1 = db_factory(sql_dialect=3, init=init_script_1) + +# test_script_1 +#--- +# +# import os +# import subprocess +# import re +# import zipfile +# import difflib +# import shutil +# import time +# +# os.environ["ISC_USER"] = user_name +# os.environ["ISC_PASSWORD"] = user_password +# +# ##################################### +# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 65 +# ##################################### +# +# svc = fdb.services.connect(host='localhost', user=user_name, password=user_password) +# FB_HOME = svc.get_home_directory() +# svc.close() +# +# engine = db_conn.engine_version # 4.0; 4.1; 5.0 etc -- type float +# fb_major = 'fb' + str(engine)[:1] + '0' # 'fb40'; 'fb50' +# +# cur = db_conn.cursor() +# cur.execute("select rdb$config_value from rdb$config where upper(rdb$config_name) = upper('ServerMode')") +# server_mode = 'XX' +# for r in cur: +# if r[0] == 'Super': +# server_mode = 'SS' +# elif r[0] == 'SuperClassic': +# server_mode = 'SC' +# elif r[0] == 'Classic': +# server_mode = 'CS' +# cur.close() +# +# # 'fbt-main.fb50.ss.fdb' etc: +# db_main = os.path.join( context['temp_directory'], 'fbt-main.' + fb_major + '.' + server_mode + '.fdb' ) +# db_repl = db_main.replace( 'fbt-main.', 'fbt-repl.') +# +# # Folders for journalling and archieving segments. +# repl_journal_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' 
+ server_mode + '.journal' ) +# repl_archive_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.archive' ) +# +# fb_port = 0 +# cur = db_conn.cursor() +# cur.execute("select rdb$config_value from rdb$config where rdb$config_name = 'RemoteServicePort'") +# for r in cur: +# fb_port = int(r[0]) +# cur.close() +# +# db_conn.close() +# +# runProgram('gfix', ['-w', 'async', 'localhost:' + db_main]) +# +# #-------------------------------------------- +# +# def flush_and_close( file_handle ): +# # https://docs.python.org/2/library/os.html#os.fsync +# # If you're starting with a Python file object f, +# # first do f.flush(), and +# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. +# global os +# +# file_handle.flush() +# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: +# # otherwise: "OSError: [Errno 9] Bad file descriptor"! +# os.fsync(file_handle.fileno()) +# file_handle.close() +# +# #-------------------------------------------- +# +# def cleanup( f_names_list ): +# global os +# for i in range(len( f_names_list )): +# if type(f_names_list[i]) == file: +# del_name = f_names_list[i].name +# elif type(f_names_list[i]) == str: +# del_name = f_names_list[i] +# else: +# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.') +# del_name = None +# +# if del_name and os.path.isfile( del_name ): +# os.remove( del_name ) +# +# #-------------------------------------------- +# +# def wait_for_data_in_replica( fb_home, max_allowed_time_for_wait, db_main, prefix_msg = '' ): +# +# global re +# global difflib +# global time +# +# replold_lines = [] +# with open( os.path.join(fb_home,'replication.log'), 'r') as f: +# replold_lines = f.readlines() +# +# con = fdb.connect( dsn = 'localhost:' + db_main, no_db_triggers = 1) +# cur = con.cursor() +# cur.execute("select rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') from rdb$database") +# for r in cur: +# last_generated_repl_segment = r[0] +# cur.close() +# con.close() +# +# #print('last_generated_repl_segment:', last_generated_repl_segment) +# +# # VERBOSE: Segment 1 (2582 bytes) is replicated in 1 second(s), deleting the file +# p_successfully_replicated = re.compile( '\\+\\s+verbose:\\s+segment\\s+%(last_generated_repl_segment)s\\s+\\(\\d+\\s+bytes\\)\\s+is\\s+replicated.*deleting' % locals(), re.IGNORECASE) +# +# # VERBOSE: Segment 16 replication failure at offset 33628 +# p_replication_failure = re.compile('segment\\s+\\d+\\s+replication\\s+failure', re.IGNORECASE) +# +# # ERROR: Record format with length 56 is not found for table PERF_ESTIMATED +# p_rec_format_not_found = re.compile('record\\s+format\\s+with\\s+length\\s+\\d+\\s+is\\s+not\\s+found', re.IGNORECASE) +# +# found_required_message = False +# +# found_replfail_message = False +# found_recformat_message = False +# found_common_error_msg = False +# +# for i in range(0,max_allowed_time_for_wait): +# time.sleep(1) +# +# # Get content of fb_home replication.log _after_ isql finish: +# f_repllog_new = open( os.path.join(fb_home,'replication.log'), 'r') +# diff_data = difflib.unified_diff( +# replold_lines, +# f_repllog_new.readlines() +# ) +# f_repllog_new.close() +# +# for k,d in enumerate(diff_data): +# +# if p_replication_failure.search(d): +# print( (prefix_msg + ' ' if prefix_msg else '') + '@@@ SEGMENT REPLICATION FAILURE @@@ ' + d ) +# found_replfail_message = True +# break +# +# if p_rec_format_not_found.search(d): +# print( 
(prefix_msg + ' ' if prefix_msg else '') + '@@@ RECORD FORMAT NOT FOUND @@@ ' + d ) +# found_recformat_message = True +# break +# +# if 'ERROR:' in d: +# print( (prefix_msg + ' ' if prefix_msg else '') + '@@@ REPLICATION ERROR @@@ ' + d ) +# found_common_error_msg = True +# break +# +# if p_successfully_replicated.search(d): +# print( (prefix_msg + ' ' if prefix_msg else '') + 'FOUND message about replicated segment.' ) +# found_required_message = True +# break +# +# if found_required_message or found_replfail_message or found_recformat_message or found_common_error_msg: +# break +# +# if not found_required_message: +# print('UNEXPECTED RESULT: no message about replicated segment for %d seconds.' % max_allowed_time_for_wait) +# +# #-------------------------------------------- +# +# def generate_sync_settings_sql(db_main, fb_port): +# +# def generate_inject_setting_sql(working_mode, mcode, new_value, allow_insert_if_eof = 0): +# sql_inject_setting = '' +# if allow_insert_if_eof == 0: +# sql_inject_setting = ''' +# update settings set svalue = %(new_value)s +# where working_mode = upper('%(working_mode)s') and mcode = upper('%(mcode)s'); +# if (row_count = 0) then +# exception ex_record_not_found using('SETTINGS', q'{working_mode = upper('%(working_mode)s') and mcode = upper('%(mcode)s')}'); +# ''' % locals() +# else: +# sql_inject_setting = ''' +# update or insert into settings(working_mode, mcode, svalue) +# values( upper('%(working_mode)s'), upper('%(mcode)s'), %(new_value)s ) +# matching (working_mode, mcode); +# ''' % locals() +# +# return sql_inject_setting +# +# +# sql_adjust_settings_table = ''' +# set list on; +# select 'Adjust settings: start at ' || cast('now' as timestamp) as msg from rdb$database; +# set term ^; +# execute block as +# begin +# ''' +# +# sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'init', 'working_mode', "upper('small_03')" ) ) ) +# sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'common', 'enable_mon_query', "'0'" ) ) ) +# sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'common', 'unit_selection_method', "'random'" ) ) ) +# sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'common', 'build_with_split_heavy_tabs', "'1'" ) ) ) +# sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'common', 'build_with_qd_compound_ordr', "lower('most_selective_first')" ) ) ) +# +# sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'common', 'build_with_separ_qdistr_idx', "'0'" ) ) ) +# sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'common', 'used_in_replication', "'1'" ) ) ) +# sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'common', 'separate_workers', "'1'" ) ) ) +# sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'common', 'workers_count', "'100'" ) ) ) +# sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'common', 'update_conflict_percent', "'0'" ) ) ) +# sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'init', 'connect_str', "'connect ''localhost:%(db_main)s'' user ''SYSDBA'' password ''masterkey'';'" % locals(), 1 ) ) ) +# +# sql_adjust_settings_table = ''.join( 
(sql_adjust_settings_table, generate_inject_setting_sql( 'common', 'mon_unit_list', "'//'" ) ) ) +# sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'common', 'halt_test_on_errors', "'/CK/'" ) ) ) +# sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'common', 'qmism_verify_bitset', "'1'" ) ) ) +# sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'common', 'recalc_idx_min_interval', "'9999999'" ) ) ) +# +# sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'common', 'warm_time', "'0'", 1 ) ) ) +# sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'common', 'test_intervals', "'10'", 1 ) ) ) +# +# sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'init', 'tmp_worker_role_name', "upper('tmp$oemul$worker')", 1 ) ) ) +# sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'init', 'tmp_worker_user_prefix', "upper('tmp$oemul$user_')", 1 ) ) ) +# +# +# sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'common', 'use_es', "'2'", 1 ) ) ) +# sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'init', 'host', "'localhost'", 1 ) ) ) +# sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'init', 'port', "'%(fb_port)s'" % locals(), 1 ) ) ) +# sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'init', 'usr', "'SYSDBA'", 1 ) ) ) +# sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'init', 'pwd', "'masterkey'", 1 ) ) ) +# sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'init', 'tmp_worker_user_pswd', "'0Ltp-Emu1'", 1 ) ) ) +# +# sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'init', 'conn_pool_support', "'1'", 1 ) ) ) +# sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'init', 'resetting_support', "'1'", 1 ) ) ) +# +# sql_adjust_settings_table += ''' +# end ^ +# set term ;^ +# commit; +# select 'Adjust settings: finish at ' || cast('now' as timestamp) as msg from rdb$database; +# set list off; +# ''' +# +# return sql_adjust_settings_table +# +# #-------------------------------------------- +# +# # Extract .sql files with OLTP-EMUL DDL for applying +# # ZipFile.extractall(path=None, members=None, pwd=None) +# zf = zipfile.ZipFile( os.path.join(context['files_location'],'oltp-emul-ddl.zip') ) +# src_files = zf.namelist() +# zf.extractall(path = context['temp_directory']) +# zf.close() +# +# #-------------------------------------------- +# +# oltp_emul_initial_scripts = [ +# 'oltp-emul-01_initial_DDL' +# ,'oltp-emul-02_business_units' +# ,'oltp-emul-03_common_units' +# ,'oltp-emul-04_misc_debug_code' +# ,'oltp-emul-05_main_tabs_filling' +# ] +# +# +# #src_dir = context['files_location'] +# src_dir = context['temp_directory'] +# +# sql_apply = '\\n' +# for f in oltp_emul_initial_scripts: +# sql_apply += ' in ' + os.path.join(src_dir, f+'.sql;\\n') +# +# sql_ddl = ''' %(sql_apply)s +# ''' % locals() +# +# #-------------------------------------------- +# +# f_run_initial_ddl = open( os.path.join(context['temp_directory'],'tmp-oltp-emul-ddl.sql'), 'w') +# 
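(Illustrative aside, not part of the original script.) What generate_inject_setting_sql() above
# emits, for example for generate_inject_setting_sql('common', 'warm_time', "'0'", 1) - roughly:
#
#     update or insert into settings(working_mode, mcode, svalue)
#     values( upper('common'), upper('warm_time'), '0' )
#     matching (working_mode, mcode);
#
# whereas the default branch (allow_insert_if_eof = 0) emits a plain UPDATE followed by raising
# ex_record_not_found when no row was affected.
#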
f_run_initial_ddl.write(sql_ddl) +# +# # Add SQL code for adjust SETTINGS table with values which are commonly used in OLTP-EMUL config: +# f_run_initial_ddl.write( generate_sync_settings_sql(db_main, fb_port) ) +# flush_and_close( f_run_initial_ddl ) +# +# f_run_initial_log = open( ''.join( (os.path.splitext(f_run_initial_ddl.name)[0], '.log' ) ), 'w') +# f_run_initial_err = open( ''.join( (os.path.splitext(f_run_initial_ddl.name)[0], '.err' ) ), 'w') +# subprocess.call( [ context['isql_path'], 'localhost:' + db_main, '-i', f_run_initial_ddl.name ], stdout = f_run_initial_log, stderr = f_run_initial_err) +# +# flush_and_close( f_run_initial_log ) +# flush_and_close( f_run_initial_err ) +# +# with open(f_run_initial_err.name,'r') as f: +# for line in f: +# print('UNEXPECTED STDERR in initial SQL: ' + line) +# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0 +# +# if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> initial SQL script finished w/o errors +# +# post_handling_out = os.path.join( context['temp_directory'],'oltp_split_heavy_tabs.tmp' ) +# post_adjust_sep_wrk_out = os.path.join( context['temp_directory'], 'oltp_adjust_sep_wrk.tmp' ) +# post_adjust_replication = os.path.join( context['temp_directory'], 'post_adjust_repl_pk.tmp' ) +# post_adjust_ext_pool = os.path.join( context['temp_directory'], 'post_adjust_ext_pool.tmp' ) +# post_adjust_eds_perf = os.path.join( context['temp_directory'], 'post_adjust_eds_perf.tmp' ) +# +# oltp_emul_post_handling_scripts = [ +# 'oltp-emul-06_split_heavy_tabs' +# ,'oltp-emul-07_adjust_perf_tabs' +# ,'oltp-emul-08_adjust_repl_ddl' +# ,'oltp-emul-09_adjust_eds_calls' +# ,'oltp-emul-10_adjust_eds_perf' +# ] +# +# +# sql_post_handling = '\\n' +# for f in oltp_emul_post_handling_scripts: +# run_post_handling_sql = os.path.join( src_dir, f+'.sql' ) +# tmp_post_handling_sql = os.path.join( context['temp_directory'], f + '.tmp' ) +# cleanup( (tmp_post_handling_sql,) ) +# sql_post_handling += ''' +# out %(tmp_post_handling_sql)s; +# in %(run_post_handling_sql)s; +# out; +# in %(tmp_post_handling_sql)s; +# ----------------------------- +# ''' % locals() +# +# if 'adjust_eds_calls' in f: +# # We have to make RECONNECT here, otherwise get: +# # Statement failed, SQLSTATE = 2F000 +# # Error while parsing procedure SP_PERF_EDS_LOGGING's BLR +# # -attempted update of read-only column +# # After line 49 in file ... 
oltp-emul-10_adjust_eds_perf.sql +# +# sql_post_handling += " commit; connect 'localhost:%(db_main)s' user 'SYSDBA' password 'masterkey';" % locals() +# +# oltp_emul_final_actions_scripts = [ +# 'oltp-emul-11_ref_data_filling' +# ,'oltp-emul-12_activate_db_triggers' +# ] +# for f in oltp_emul_final_actions_scripts: +# sql_post_handling += ' in ' + os.path.join(src_dir, f+'.sql;\\n') +# +# f_post_handling_sql = open( os.path.join(context['temp_directory'],'tmp-oltp-emul-post.sql'), 'w') +# f_post_handling_sql.write(sql_post_handling) +# +# sql4debug_only = ''' +# set echo off; +# set list on; +# commit; +# set stat off; +# select +# rdb$get_context('SYSTEM','DB_NAME') as db_name +# ,rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') as last_generated_repl_segment +# from rdb$database; +# quit; +# ''' +# f_post_handling_sql.write( sql4debug_only ) +# +# flush_and_close( f_post_handling_sql ) +# +# f_post_handling_log = open( ''.join( (os.path.splitext(f_post_handling_sql.name)[0], '.log' ) ), 'w') +# f_post_handling_err = open( ''.join( (os.path.splitext(f_post_handling_sql.name)[0], '.err' ) ), 'w') +# subprocess.call( [ context['isql_path'], 'localhost:' + db_main, '-i', f_post_handling_sql.name ], stdout = f_post_handling_log, stderr = f_post_handling_err) +# flush_and_close( f_post_handling_log ) +# flush_and_close( f_post_handling_err ) +# +# with open(f_post_handling_err.name,'r') as f: +# for line in f: +# print('UNEXPECTED STDERR in post-handling SQL: ' + line) +# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0 +# +# cleanup( (f_post_handling_sql, f_post_handling_log, f_post_handling_err ) ) +# for f in oltp_emul_post_handling_scripts: +# tmp_post_handling_sql = os.path.join( context['temp_directory'], f + '.tmp' ) +# cleanup( (tmp_post_handling_sql,) ) +# +# if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> initial SQL script finished w/o errors +# +# # Test connect to master DB, call initial business operation: create client order +# ########################### +# custom_tpb = fdb.TPB() +# custom_tpb.lock_resolution = fdb.isc_tpb_nowait +# custom_tpb.isolation_level = fdb.isc_tpb_concurrency +# +# con1 = fdb.connect( dsn = 'localhost:' + db_main, isolation_level = custom_tpb) +# cur1 = con1.cursor() +# cur1.execute( 'select ware_id from sp_client_order order by ware_id' ) +# +# client_order_wares_main = [] +# for r in cur1: +# client_order_wares_main.append(r[0]) +# cur1.close() +# con1.commit() +# con1.close() +# +# ############################################################################## +# ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### +# ############################################################################## +# wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-1' ) +# +# con2 = fdb.connect( dsn = 'localhost:' + db_repl, no_db_triggers = 1) +# cur2 = con2.cursor() +# cur2.execute( 'select d.ware_id from doc_data d order by d.ware_id' ) +# client_order_wares_repl = [] +# for r in cur2: +# client_order_wares_repl.append(r[0]) +# cur2.close() +# con2.commit() +# con2.close() +# +# print('Master and replica data: %s ' % ( 'THE SAME.' 
if client_order_wares_main and sorted(client_order_wares_main) == sorted(client_order_wares_repl) else '### FAIL: DIFFERS ###' ) ) +# +# #print('client_order_wares_main=',client_order_wares_main) +# #print('client_order_wares_repl=',client_order_wares_repl) +# +# # return initial state of master DB: +# # remove all DB objects (tables, views, ...): +# # -------------------------------------------- +# sql_clean_ddl = os.path.join(context['files_location'],'drop-all-db-objects.sql') +# +# f_clean_log=open( os.path.join(context['temp_directory'],'drop-all-db-objects.log'), 'w') +# f_clean_err=open( ''.join( ( os.path.splitext(f_clean_log.name)[0], '.err') ), 'w') +# subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-i', sql_clean_ddl], stdout=f_clean_log, stderr=f_clean_err ) +# flush_and_close(f_clean_log) +# flush_and_close(f_clean_err) +# +# with open(f_clean_err.name,'r') as f: +# for line in f: +# print('UNEXPECTED STDERR in cleanup SQL: ' + line) +# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0 +# +# with open(f_clean_log.name,'r') as f: +# for line in f: +# # show number of dropped objects +# print(line) +# +# if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> initial SQL script finished w/o errors +# +# ############################################################################## +# ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### +# ############################################################################## +# wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-2' ) +# +# f_main_meta_sql=open( os.path.join(context['temp_directory'],'db_main_meta_skip_gen_repl.sql'), 'w') +# subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_main_meta_sql, stderr=subprocess.STDOUT ) +# flush_and_close( f_main_meta_sql ) +# +# f_repl_meta_sql=open( os.path.join(context['temp_directory'],'db_repl_meta_skip_gen_repl.sql'), 'w') +# subprocess.call( [context['isql_path'], 'localhost:' + db_repl, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_repl_meta_sql, stderr=subprocess.STDOUT ) +# flush_and_close( f_repl_meta_sql ) +# +# db_main_meta=open(f_main_meta_sql.name, 'r') +# db_repl_meta=open(f_repl_meta_sql.name, 'r') +# +# diffmeta = ''.join(difflib.unified_diff( +# db_main_meta.readlines(), +# db_repl_meta.readlines() +# )) +# db_main_meta.close() +# db_repl_meta.close() +# +# f_meta_diff=open( os.path.join(context['temp_directory'],'db_meta_diff_skip_gen_repl.txt'), 'w', buffering = 0) +# f_meta_diff.write(diffmeta) +# flush_and_close( f_meta_diff ) +# +# # Following must issue only TWO rows: +# # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_main]' ... */ +# # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_repl]' ... */ +# # Only thes lines will be suppressed further (see subst. 
section): +# with open(f_meta_diff.name, 'r') as f: +# for line in f: +# if line[:1] in ('-', '+') and line[:3] not in ('---','+++'): +# print('UNEXPECTED METADATA DIFF.: ' + line) +# +# +# cleanup( (f_main_meta_sql, f_repl_meta_sql, f_meta_diff) ) +# +# ###################### +# ### A C H T U N G ### +# ###################### +# # MANDATORY, OTHERWISE REPLICATION GETS STUCK ON SECOND RUN OF THIS TEST +# # WITH 'ERROR: Record format with length 68 is not found for table TEST': +# runProgram('gfix', ['-sweep', 'localhost:' + db_repl]) +# runProgram('gfix', ['-sweep', 'localhost:' + db_main]) +# ####################### +# +# # cleanup: +# ########## +# cleanup( (f_run_initial_ddl, f_run_initial_log, f_run_initial_err, f_clean_log, f_clean_err) ) +# +# # src_files - list of .sql files which were applied; got from zf.namelist(). +# # We have to remove all of them: +# cleanup( [ os.path.join(context['temp_directory'],f) for f in src_files ] ) +# +# +#--- +act_1 = python_act('db_1', substitutions=substitutions_1) + +expected_stdout_1 = """ + POINT-1 FOUND message about replicated segment. + Master and replica data: THE SAME. + Start removing objects + Finish. Total objects removed + POINT-2 FOUND message about replicated segment. +""" + +@pytest.mark.version('>=4.0') +@pytest.mark.platform('Windows') +def test_1(act_1: Action): + pytest.fail("Test not IMPLEMENTED") + + diff --git a/tests/functional/replication/test_permission_error_on_ddl_issued_by_non_sysdba.py b/tests/functional/replication/test_permission_error_on_ddl_issued_by_non_sysdba.py new file mode 100644 index 00000000..bc0e22de --- /dev/null +++ b/tests/functional/replication/test_permission_error_on_ddl_issued_by_non_sysdba.py @@ -0,0 +1,365 @@ +#coding:utf-8 +# +# id: tests.functional.replication.permission_error_on_ddl_issued_by_non_sysdba +# title: Permission error with replication +# decription: +# See: https://github.com/FirebirdSQL/firebird/issues/6856 +# +# Test run actions which are specified in the ticket (create user with granting admin access to him, etc). +# After this we wait until replica becomes actual to master, and this delay will last no more then threshold +# that is defined by MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG variable (measured in seconds). +# During this delay, we check every second for replication log and search there line with number of last generated +# segment (which was replicated and deleting finally). +# We can assume that replication finished OK only when such line is found see ('POINT-1'). +# +# Further, we invoke ISQL with executing auxiliary script for drop all DB objects on master (with '-nod' command switch). +# After all objects will be dropped, we have to wait again until replica becomes actual with master (see 'POINT-2'). +# +# Finally, we extract metadata for master and replica and compare them (see 'f_meta_diff'). +# The only difference in metadata must be 'CREATE DATABASE' statement with different DB names - we suppress it, +# thus metadata difference must not be issued. +# +# ################ +# ### N O T E ### +# ################ +# Test assumes that master and replica DB have been created beforehand. +# Also, it assumes that %FB_HOME% +# eplication.conf has been prepared with apropriate parameters for replication. +# Particularly, name of directories and databases must have info about checked FB major version and ServerMode. 
+# * verbose = true // in order to find out line with message that required segment was replicated +# * section for master database with specified parameters: +# journal_directory = "!fbt_repo! mp +# b-replication.!fb_major!.!server_mode!.journal" +# journal_archive_directory = "!fbt_repo! mp +# b-replication.!fb_major!.!server_mode!.archive" +# journal_archive_command = "copy $(pathname) $(archivepathname)" +# journal_archive_timeout = 10 +# * section for replica database with specified parameter: +# journal_source_directory = "!fbt_repo! mp +# b-replication.!fb_major!.!server_mode!.archive" +# +# Master and replica databases must be created in "!fbt_repo! mp" directory and have names like these: +# 'fbt-main.fb40.SS.fdb'; 'fbt-repl.fb40.SS.fdb'; - for FB 4.x ('SS' = Super; 'CS' = Classic) +# 'fbt-main.fb50.SS.fdb'; 'fbt-repl.fb50.SS.fdb'; - for FB 5.x ('SS' = Super; 'CS' = Classic) +# NB: fixed numeric value ('40' or '50') must be used for any minor FB version (4.0; 4.0.1; 4.1; 5.0; 5.1 etc) +# +# These two databases must NOT be dropped in any of tests related to replication! +# They are created and dropped in the batch scenario which prepares FB instance to be checked for each ServerMode +# and make cleanup after it, i.e. when all tests will be completed. +# +# NB. Currently this task was implemented only in Windows batch, thus test has attribute platform = 'Windows'. +# +# Temporary comment. For debug purpoces: +# 1) find out SUFFIX of the name of FB service which is to be tested (e.g. 'DefaultInstance', '40SS' etc); +# 2) copy file %fbt-repo% ests +# unctional abloidatches\\setup-fb-for-replication.bat.txt +# to some place and rename it "*.bat"; +# 3) open this .bat in editor and asjust value of 'fbt_repo' variable; +# 4) run: setup-fb-for-replication.bat [SUFFIX_OF_FB_SERVICE] +# where SUFFIX_OF_FB_SERVICE is ending part of FB service which you want to check: +# DefaultInstance ; 40ss ; 40cs ; 50ss ; 50cs etc +# 5) batch 'setup-fb-for-replication.bat' will: +# * stop selected FB instance +# * create test databases (in !fbt_repo! mp\\); +# * prepare journal/archive sub-folders for replication (also in !fbt_repo! mp\\); +# * replace %fb_home% +# eplication.conf with apropriate +# * start selected FB instance +# 6) run this test (FB instance will be already launched by setup-fb-for-replication.bat): +# %fpt_repo% +# bt-run2.bat dblevel-triggers-must-not-fire-on-replica.fbt 50ss, etc +# +# Confirmed bug on 4.0.1.2578 and 5.0.0.169: messages "ERROR: unsuccessful metadata update / CREATE OR ALTER VIEW V1 failed" +# will be added into replication log and after this replication gets stuck. +# +# Checked on: 4.0.1.2585 (SS/CS); 5.0.0.192 (SS/CS) +# +# +# +# tracker_id: +# min_versions: ['4.0.1'] +# versions: 4.0.1 +# qmid: None + +import pytest +from firebird.qa import db_factory, python_act, Action + +# version: 4.0.1 +# resources: None + +substitutions_1 = [('Start removing objects in:.*', 'Start removing objects'), ('Finish. Total objects removed: \\d+', 'Finish. 
Total objects removed'), ('.* CREATE DATABASE .*', ''), ('FMT_DESCR .*', 'FMT_DESCR')] + +init_script_1 = """""" + +db_1 = db_factory(sql_dialect=3, init=init_script_1) + +# test_script_1 +#--- +# +# import os +# import subprocess +# import re +# import difflib +# import shutil +# import time +# +# os.environ["ISC_USER"] = user_name +# os.environ["ISC_PASSWORD"] = user_password +# +# ##################################### +# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 65 +# ##################################### +# +# svc = fdb.services.connect(host='localhost', user=user_name, password=user_password) +# FB_HOME = svc.get_home_directory() +# svc.close() +# +# engine = db_conn.engine_version # 4.0; 4.1; 5.0 etc -- type float +# fb_major = 'fb' + str(engine)[:1] + '0' # 'fb40'; 'fb50' +# +# cur = db_conn.cursor() +# cur.execute("select rdb$config_value from rdb$config where upper(rdb$config_name) = upper('ServerMode')") +# server_mode = 'XX' +# for r in cur: +# if r[0] == 'Super': +# server_mode = 'SS' +# elif r[0] == 'SuperClassic': +# server_mode = 'SC' +# elif r[0] == 'Classic': +# server_mode = 'CS' +# cur.close() +# +# # 'fbt-main.fb50.ss.fdb' etc: +# db_main = os.path.join( context['temp_directory'], 'fbt-main.' + fb_major + '.' + server_mode + '.fdb' ) +# db_repl = db_main.replace( 'fbt-main.', 'fbt-repl.') +# +# # Folders for journalling and archieving segments. +# repl_journal_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.journal' ) +# repl_archive_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.archive' ) +# +# db_conn.close() +# +# #-------------------------------------------- +# +# def flush_and_close( file_handle ): +# # https://docs.python.org/2/library/os.html#os.fsync +# # If you're starting with a Python file object f, +# # first do f.flush(), and +# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. +# global os +# +# file_handle.flush() +# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: +# # otherwise: "OSError: [Errno 9] Bad file descriptor"! +# os.fsync(file_handle.fileno()) +# file_handle.close() +# +# #-------------------------------------------- +# +# def cleanup( f_names_list ): +# global os +# for i in range(len( f_names_list )): +# if type(f_names_list[i]) == file: +# del_name = f_names_list[i].name +# elif type(f_names_list[i]) == str: +# del_name = f_names_list[i] +# else: +# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.') +# del_name = None +# +# if del_name and os.path.isfile( del_name ): +# os.remove( del_name ) +# +# #-------------------------------------------- +# +# def wait_for_data_in_replica( fb_home, max_allowed_time_for_wait, db_main, prefix_msg = '' ): +# +# global re +# global difflib +# global time +# +# # -:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:- +# def check_pattern_in_log( log_lines, pattern, prefix_msg = '' ): +# found_required_message = False +# for d in log_lines: +# if pattern.search(d): +# print( (prefix_msg + ' ' if prefix_msg else '') + 'FOUND message about replicated segment.' 
) +# found_required_message = True +# break +# return found_required_message +# # -:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:- +# +# replold_lines = [] +# with open( os.path.join(fb_home,'replication.log'), 'r') as f: +# replold_lines = f.readlines() +# +# con = fdb.connect( dsn = 'localhost:' + db_main, no_db_triggers = 1) +# cur = con.cursor() +# cur.execute("select rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') from rdb$database") +# for r in cur: +# last_generated_repl_segment = r[0] +# cur.close() +# con.close() +# +# # VERBOSE: Segment 1 (2582 bytes) is replicated in 1 second(s), deleting the file +# segment_replicated_pattern=re.compile( 'verbose:\\s+segment\\s+%(last_generated_repl_segment)s\\s+\\(\\d+\\s+bytes\\)\\s+is\\s+replicated.*deleting' % locals(), re.IGNORECASE) +# +# # 08.09.2021: replication content can remain unchanged if there was no user-defined object in DB that must be dropped! +# # Because of this, it is crucial to check OLD content of replication log before loop. +# # Also, segment_replicated_pattern must NOT start from '\\+' because it can occur only for diff_data (within loop): +# # +# found_required_message = check_pattern_in_log( replold_lines, segment_replicated_pattern, prefix_msg ) +# +# if not found_required_message: +# +# for i in range(0,max_allowed_time_for_wait): +# time.sleep(1) +# +# # Get content of fb_home replication.log _after_ isql finish: +# f_repllog_new = open( os.path.join(fb_home,'replication.log'), 'r') +# diff_data = difflib.unified_diff( +# replold_lines, +# f_repllog_new.readlines() +# ) +# f_repllog_new.close() +# +# found_required_message = check_pattern_in_log( diff_data, segment_replicated_pattern, prefix_msg ) +# if found_required_message: +# break +# +# if not found_required_message: +# print('UNEXPECTED RESULT: no message about replicated segment No. %d for %d seconds.' 
% (int(last_generated_repl_segment), max_allowed_time_for_wait) ) +# +# #-------------------------------------------- +# +# sql_ddl = ''' create or alter user dba_helper password '123' using plugin Srp grant admin role; +# grant rdb$admin to user dba_helper; +# set term ^; +# execute block as +# begin +# execute statement 'create or alter view v1 as select 1 x from rdb$database' +# with autonomous transaction +# as user 'dba_helper' password '123' role 'rdb$admin'; +# end +# ^ +# set term ;^ +# commit; +# drop user dba_helper using plugin Srp; +# commit; +# drop view v1; +# commit; +# ''' % locals() +# +# +# f_sql_chk = open( os.path.join(context['temp_directory'],'tmp_gh_6856_init.sql'), 'w') +# f_sql_chk.write(sql_ddl) +# flush_and_close( f_sql_chk ) +# +# f_sql_log = open( ''.join( (os.path.splitext(f_sql_chk.name)[0], '.log' ) ), 'w') +# f_sql_err = open( ''.join( (os.path.splitext(f_sql_chk.name)[0], '.err' ) ), 'w') +# subprocess.call( [ context['isql_path'], 'localhost:' + db_main, '-i', f_sql_chk.name ], stdout = f_sql_log, stderr = f_sql_err) +# flush_and_close( f_sql_log ) +# flush_and_close( f_sql_err ) +# +# last_generated_repl_segment = 0 +# +# with open(f_sql_err.name,'r') as f: +# for line in f: +# print('UNEXPECTED STDERR in initial SQL: ' + line) +# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0 +# +# if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> initial SQL script finished w/o errors +# +# ############################################################################## +# ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### +# ############################################################################## +# wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-1' ) +# +# # return initial state of master DB: +# # remove all DB objects (tables, views, ...): +# # -------------------------------------------- +# sql_clean_ddl = os.path.join(context['files_location'],'drop-all-db-objects.sql') +# +# f_clean_log=open( os.path.join(context['temp_directory'],'drop-all-db-objects-gh_6856.log'), 'w') +# f_clean_err=open( ''.join( ( os.path.splitext(f_clean_log.name)[0], '.err') ), 'w') +# subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-i', sql_clean_ddl], stdout=f_clean_log, stderr=f_clean_err ) +# flush_and_close(f_clean_log) +# flush_and_close(f_clean_err) +# +# with open(f_clean_err.name,'r') as f: +# for line in f: +# print('UNEXPECTED STDERR in cleanup SQL: ' + line) +# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0 +# +# with open(f_clean_log.name,'r') as f: +# for line in f: +# # show number of dropped objects +# print(line) +# +# if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> previous SQL script finished w/o errors +# +# ############################################################################## +# ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### +# ############################################################################## +# wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-2' ) +# +# f_main_meta_sql=open( os.path.join(context['temp_directory'],'db_main_meta_gh_6856.sql'), 'w') +# subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_main_meta_sql, stderr=subprocess.STDOUT ) +# flush_and_close( f_main_meta_sql ) +# +# f_repl_meta_sql=open( os.path.join(context['temp_directory'],'db_repl_meta_gh_6856.sql'), 'w') +# subprocess.call( [context['isql_path'], 'localhost:' + db_repl, '-q', '-nod', '-ch', 'utf8', '-x'], 
stdout=f_repl_meta_sql, stderr=subprocess.STDOUT )
+#     flush_and_close( f_repl_meta_sql )
+#
+#     db_main_meta=open(f_main_meta_sql.name, 'r')
+#     db_repl_meta=open(f_repl_meta_sql.name, 'r')
+#
+#     diffmeta = ''.join(difflib.unified_diff(
+#         db_main_meta.readlines(),
+#         db_repl_meta.readlines()
+#     ))
+#     db_main_meta.close()
+#     db_repl_meta.close()
+#
+#     f_meta_diff=open( os.path.join(context['temp_directory'],'db_meta_diff_gh_6856.txt'), 'w', buffering = 0)
+#     f_meta_diff.write(diffmeta)
+#     flush_and_close( f_meta_diff )
+#
+#     # The following must issue only TWO rows:
+#     # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_main]' ... */
+#     # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_repl]' ... */
+#     # Only these lines will be suppressed further (see subst. section):
+#     with open(f_meta_diff.name, 'r') as f:
+#         for line in f:
+#             if line[:1] in ('-', '+') and line[:3] not in ('---','+++'):
+#                 print('UNEXPECTED METADATA DIFF.: ' + line)
+#
+# ######################
+# ### A C H T U N G ###
+# ######################
+# # MANDATORY, OTHERWISE REPLICATION GETS STUCK ON SECOND RUN OF THIS TEST
+# # WITH 'ERROR: Record format with length 68 is not found for table TEST':
+# runProgram('gfix', ['-sweep', 'localhost:' + db_repl])
+# runProgram('gfix', ['-sweep', 'localhost:' + db_main])
+# #######################
+#
+# # cleanup:
+# ##########
+# cleanup( (f_sql_chk, f_sql_log, f_sql_err,f_clean_log,f_clean_err,f_main_meta_sql,f_repl_meta_sql,f_meta_diff) )
+#
+#---
+act_1 = python_act('db_1', substitutions=substitutions_1)
+
+expected_stdout_1 = """
+    POINT-1 FOUND message about replicated segment.
+    Start removing objects
+    Finish. Total objects removed
+    POINT-2 FOUND message about replicated segment.
+"""
+
+@pytest.mark.version('>=4.0.1')
+@pytest.mark.platform('Windows')
+def test_1(act_1: Action):
+    pytest.fail("Test not IMPLEMENTED")
+
+
diff --git a/tests/functional/replication/test_shutdown_during_applying_segments_leads_to_crash.py b/tests/functional/replication/test_shutdown_during_applying_segments_leads_to_crash.py
new file mode 100644
index 00000000..fc85dfd1
--- /dev/null
+++ b/tests/functional/replication/test_shutdown_during_applying_segments_leads_to_crash.py
@@ -0,0 +1,506 @@
+#coding:utf-8
+#
+# id: tests.functional.replication.shutdown_during_applying_segments_leads_to_crash
+# title: Crash or hang while shutting down the replica database if segments are being applied
+# decription:
+# See: https://github.com/FirebirdSQL/firebird/issues/6975
+#
+# The bug was initially found during a heavy replication test performed by OLTP-EMUL, for FB 4.x
+# (see letters to dimitr 13.09.2021; reply from dimitr, 18.09.2021 08:42 - all in mailbox: pz at ibase.ru).
+#
+# It *can* be reproduced without heavy/concurrent workload, but we have to operate with data that are written
+# into the database 'slowly'. Such data can be a wide INDEXED column which has GUID-based values.
+#
+# The test creates a table with a 'wide' indexed field and adds data to it.
+# Then we save the current timestamp (with accuracy up to SECONDS, i.e. cut off milli- or microseconds) to a variable.
+# After this we start checking replication.log for appearance of the phrase 'Added segment(s) to the processing queue'.
+# After finding each such phrase we look two lines above it and parse the timestamp when this occurred.
+# If the timestamp in the log is less than the saved timestamp of our DML action then we go on to the next such phrase.
+# Otherwise we can assume that replication BEGINS to apply the just-generated segment.
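# (Illustrative aside, not part of the original script.) The timestamp parsing described above,
# in miniature: replication.log carries an asctime-style time a couple of lines above each message,
# e.g. a line ending with 'Sun May 30 17:46:43 2021' (the sample line is hypothetical), whose tail
# can be parsed like this:
#
#     from datetime import datetime
#     line = 'replica_host_name (replica) Sun May 30 17:46:43 2021'
#     dts = ' '.join(line.split()[-5:])                    # -> 'Sun May 30 17:46:43 2021'
#     ts = datetime.strptime(dts, '%a %b %d %H:%M:%S %Y')  # compare with the saved DML timestamp
#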
+# See function wait_for_add_queue_in_replica() which does this parsing of replication.log. +# +# Because we operate with a table which has a very 'wide' index and, moreover, data in this index are GUID-generated +# text strings, we can safely assume that applying the segment will take at least 5...10 seconds (actually this +# can take 30...35 seconds). +# +# During this time we change replica mode to full shutdown and (immediately after that) return it to online. +# NO message like 'error reading / writing from/to connection' must appear at this step. +# +# After this, we have to wait for the replica to finish applying the segment, and when this occurs we drop the table. +# +# Finally, we extract metadata for master and replica and compare them (see 'f_meta_diff'). +# The only difference in metadata must be the 'CREATE DATABASE' statement with different DB names - we suppress it, +# thus metadata difference must not be issued. +# +# ################ +# ### N O T E ### +# ################ +# Test assumes that master and replica DB have been created beforehand. +# Also, it assumes that %FB_HOME%\replication.conf has been prepared with appropriate parameters for replication. +# Particularly, names of directories and databases must contain info about the checked FB major version and ServerMode. +# * verbose = true // in order to find out line with message that required segment was replicated +# * section for master database with specified parameters: +# journal_directory = "!fbt_repo!\tmp\fb-replication.!fb_major!.!server_mode!.journal" +# journal_archive_directory = "!fbt_repo!\tmp\fb-replication.!fb_major!.!server_mode!.archive" +# journal_archive_command = "copy $(pathname) $(archivepathname)" +# journal_archive_timeout = 10 +# * section for replica database with specified parameter: +# journal_source_directory = "!fbt_repo!\tmp\fb-replication.!fb_major!.!server_mode!.archive" +# +# Master and replica databases must be created in the "!fbt_repo!\tmp" directory and have names like these: +# 'fbt-main.fb40.SS.fdb'; 'fbt-repl.fb40.SS.fdb'; - for FB 4.x ('SS' = Super; 'CS' = Classic) +# 'fbt-main.fb50.SS.fdb'; 'fbt-repl.fb50.SS.fdb'; - for FB 5.x ('SS' = Super; 'CS' = Classic) +# NB: fixed numeric value ('40' or '50') must be used for any minor FB version (4.0; 4.0.1; 4.1; 5.0; 5.1 etc) +# +# These two databases must NOT be dropped in any of the tests related to replication! +# They are created and dropped in the batch scenario which prepares the FB instance to be checked for each ServerMode +# and makes cleanup after it, i.e. when all tests are completed. +# +# NB. Currently this task was implemented only in Windows batch, thus the test has attribute platform = 'Windows'. +# +# Temporary comment. For debug purposes: +# 1) find out the SUFFIX of the name of the FB service which is to be tested (e.g. 'DefaultInstance', '40SS' etc); +# 2) copy file %fbt-repo%\tests\functional\tabloid\batches\setup-fb-for-replication.bat.txt +# to some place and rename it "*.bat"; +# 3) open this .bat in an editor and adjust the value of the 'fbt_repo' variable; +# 4) run: setup-fb-for-replication.bat [SUFFIX_OF_FB_SERVICE] +# where SUFFIX_OF_FB_SERVICE is the ending part of the FB service which you want to check: +# DefaultInstance ; 40ss ; 40cs ; 50ss ; 50cs etc +# 5) batch 'setup-fb-for-replication.bat' will: +# * stop the selected FB instance +# * create test databases (in !fbt_repo!\tmp\); +# * prepare journal/archive sub-folders for replication (also in !fbt_repo!\tmp\);
* replace %fb_home%\replication.conf with an appropriate one +# * start the selected FB instance +# 6) run this test (the FB instance will already be launched by setup-fb-for-replication.bat): +# %fpt_repo%\fbt-run2.bat dblevel-triggers-must-not-fire-on-replica.fbt 50ss, etc +# +# Confirmed bug on 5.0.0.215: server crashed when a segment was applied to the replica and at the same time we issued +# 'gfix -shut full -force 0 ...'. Regardless of that command, the replica DB remained in NORMAL mode, not in shutdown. +# If this command was issued again after that - the FB process hung (gfix could not return control to the OS). +# This is the same bug as described in the ticket (discussed with dimitr, letters 22.09.2021). +# +# Checked on: 4.0.1.2613 (SS/CS); 5.0.0.219 (SS/CS) +# +# tracker_id: +# min_versions: ['4.0.1'] +# versions: 4.0.1 +# qmid: None + +import pytest +from firebird.qa import db_factory, python_act, Action + +# version: 4.0.1 +# resources: None + +substitutions_1 = [('Start removing objects in:.*', 'Start removing objects'), ('Finish. Total objects removed: [1-9]\\d*', 'Finish. Total objects removed'), ('.* CREATE DATABASE .*', ''), ('FMT_DESCR .*', 'FMT_DESCR'), ('[ \t]+', ' ')] + +init_script_1 = """""" + +db_1 = db_factory(sql_dialect=3, init=init_script_1) + +# test_script_1 +#--- +# +# import os +# import subprocess +# import re +# import difflib +# import shutil +# import time +# from datetime import datetime +# from datetime import timedelta +# +# +# os.environ["ISC_USER"] = user_name +# os.environ["ISC_PASSWORD"] = user_password +# +# # NB: with default values of 'apply_idle_timeout' and 'apply_error_timeout' (10 and 60 s) +# # total time of this test is about 130...132s. +# ##################################### +# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 135 +# MAX_TIME_FOR_WAIT_ADDED_TO_QUEUE = 135 +# ##################################### +# +# svc = fdb.services.connect(host='localhost', user=user_name, password=user_password) +# FB_HOME = svc.get_home_directory() +# svc.close() +# +# engine = db_conn.engine_version # 4.0; 4.1; 5.0 etc -- type float +# fb_major = 'fb' + str(engine)[:1] + '0' # 'fb40'; 'fb50' +# +# cur = db_conn.cursor() +# cur.execute("select rdb$config_value from rdb$config where upper(rdb$config_name) = upper('ServerMode')") +# server_mode = 'XX' +# for r in cur: +# if r[0] == 'Super': +# server_mode = 'SS' +# elif r[0] == 'SuperClassic': +# server_mode = 'SC' +# elif r[0] == 'Classic': +# server_mode = 'CS' +# cur.close() +# +# # 'fbt-main.fb50.ss.fdb' etc: +# db_main = os.path.join( context['temp_directory'], 'fbt-main.' + fb_major + '.' + server_mode + '.fdb' ) +# db_repl = db_main.replace( 'fbt-main.', 'fbt-repl.') +# +# # Folders for journalling and archiving segments. +# repl_journal_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.journal' ) +# repl_archive_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.archive' ) +# +# runProgram('gfix', ['-w', 'async', 'localhost:' + db_main]) +# +# db_conn.close() +# +# #-------------------------------------------- +# +# def flush_and_close( file_handle ): +# # https://docs.python.org/2/library/os.html#os.fsync +# # If you're starting with a Python file object f, +# # first do f.flush(), and +# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
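+# # (In short: flush() pushes Python's internal buffers down to the OS, and os.fsync() then forces the OS to write its own buffers to the physical device; both steps are needed before the file is read back or compared.)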
+# global os +# +# file_handle.flush() +# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: +# # otherwise: "OSError: [Errno 9] Bad file descriptor"! +# os.fsync(file_handle.fileno()) +# file_handle.close() +# +# #-------------------------------------------- +# +# def cleanup( f_names_list ): +# global os +# for i in range(len( f_names_list )): +# if type(f_names_list[i]) == file: +# del_name = f_names_list[i].name +# elif type(f_names_list[i]) == str: +# del_name = f_names_list[i] +# else: +# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.') +# del_name = None +# +# if del_name and os.path.isfile( del_name ): +# os.remove( del_name ) +# +# #-------------------------------------------- +# +# def wait_for_data_in_replica( fb_home, max_allowed_time_for_wait, db_main, prefix_msg = '' ): +# global re +# global difflib +# global time +# +# # -:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:- +# def check_pattern_in_log( log_lines, pattern, prefix_msg = '' ): +# found_required_message = False +# for d in log_lines: +# if pattern.search(d): +# print( (prefix_msg + ' ' if prefix_msg else '') + 'FOUND message about replicated segment.' ) +# found_required_message = True +# break +# return found_required_message +# # -:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:- +# +# replold_lines = [] +# with open( os.path.join(fb_home,'replication.log'), 'r') as f: +# replold_lines = f.readlines() +# +# con = fdb.connect( dsn = 'localhost:' + db_main, no_db_triggers = 1) +# cur = con.cursor() +# cur.execute("select rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') from rdb$database") +# for r in cur: +# last_generated_repl_segment = r[0] +# cur.close() +# con.close() +# +# # VERBOSE: Segment 1 (2582 bytes) is replicated in 1 second(s), deleting the file +# segment_replicated_pattern=re.compile( 'verbose:\\s+segment\\s+%(last_generated_repl_segment)s\\s+\\(\\d+\\s+bytes\\)\\s+is\\s+replicated.*deleting' % locals(), re.IGNORECASE) +# +# # 08.09.2021: replication content can remain unchanged if there was no user-defined object in DB that must be dropped! +# # Because of this, it is crucial to check OLD content of replication log before loop. +# # Also, segment_replicated_pattern must NOT start from '\\+' because it can occur only for diff_data (within loop): +# # +# found_required_message = check_pattern_in_log( replold_lines, segment_replicated_pattern, prefix_msg ) +# +# if not found_required_message: +# +# for i in range(0,max_allowed_time_for_wait): +# time.sleep(1) +# +# # Get content of fb_home replication.log _after_ isql finish: +# f_repllog_new = open( os.path.join(fb_home,'replication.log'), 'r') +# diff_data = difflib.unified_diff( +# replold_lines, +# f_repllog_new.readlines() +# ) +# f_repllog_new.close() +# +# found_required_message = check_pattern_in_log( diff_data, segment_replicated_pattern, prefix_msg ) +# if found_required_message: +# break +# +# if not found_required_message: +# print('UNEXPECTED RESULT: no message about replicated segment No. %d for %d seconds.' % (int(last_generated_repl_segment), max_allowed_time_for_wait) ) +# +# +# #-------------------------------------------- +# +# def wait_for_add_queue_in_replica( fb_home, max_allowed_time_for_wait, min_timestamp, prefix_msg = '' ): +# +# global re +# global difflib +# global time +# global datetime +# +# # (replica) Tue Sep 21 20:24:57 2021 +# # Database: ... 
+# # Added 3 segment(s) to the processing queue +# +# # -:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:- +# def check_pattern_in_log( log_lines, pattern, min_timestamp, prefix_msg = '' ): +# found_required_message = False +# for i,r in enumerate(log_lines): +# if pattern.search(r): +# if i>=2 and log_lines[i-2]: +# # a = r.replace('(',' ').split() +# a = log_lines[i-2].split() +# if len(a)>=4: +# # s='replica_host_name (slave) Sun May 30 17:46:43 2021' +# # s.split()[-5:] ==> ['Sun', 'May', '30', '17:46:43', '2021'] +# # ' '.join( ...) ==> 'Sun May 30 17:46:43 2021' +# dts = ' '.join( log_lines[i-2].split()[-5:] ) +# segment_timestamp = datetime.strptime( dts, '%a %b %d %H:%M:%S %Y') +# if segment_timestamp >= min_timestamp: +# print( (prefix_msg + ' ' if prefix_msg else '') + 'FOUND message about segments added to queue after given timestamp.') #, 'segment_timestamp=%s' % segment_timestamp, '; min_timestamp=%s' % min_timestamp ) +# found_required_message = True +# break +# return found_required_message +# +# # -:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:- +# +# replold_lines = [] +# with open( os.path.join(fb_home,'replication.log'), 'r') as f: +# replold_lines = f.readlines() +# +# +# segments_to_queue_pattern=re.compile( 'verbose:\\s+added\\s+\\d+\\s+segment.*to.*queue', re.IGNORECASE) +# +# # 08.09.2021: replication content can remain unchanged if there was no user-defined object in DB that must be dropped! +# # Because of this, it is crucial to check OLD content of replication log before loop. +# # Also, segments_to_queue_pattern must NOT start from '\\+' because it can occur only for diff_data (within loop): +# # +# found_required_message = check_pattern_in_log( replold_lines, segments_to_queue_pattern, min_timestamp, prefix_msg ) +# +# if not found_required_message: +# +# for i in range(0,max_allowed_time_for_wait): +# time.sleep(1) +# +# # Get content of fb_home replication.log _after_ isql finish: +# f_repllog_new = open( os.path.join(fb_home,'replication.log'), 'r') +# diff_data = difflib.unified_diff( +# replold_lines, +# f_repllog_new.readlines() +# ) +# f_repllog_new.close() +# +# found_required_message = check_pattern_in_log( list(diff_data), segments_to_queue_pattern, min_timestamp, prefix_msg ) +# if found_required_message: +# break +# +# if not found_required_message: +# print('UNEXPECTED RESULT: no message about segments added to queue after %s.' 
% min_timestamp) +# +# #-------------------------------------------- +# +# sql_ddl = ''' set bail on; +# recreate table test(s varchar(700), constraint test_s_unq unique(s)); +# commit; +# +# set term ^; +# execute block as +# declare fld_len int; +# declare n int; +# begin +# select ff.rdb$field_length +# from rdb$relation_fields rf +# join rdb$fields ff on rf.rdb$field_source = ff.rdb$field_name +# where upper(rf.rdb$relation_name) = upper('test') and upper(rf.rdb$field_name) = upper('s') +# into fld_len; +# +# +# n = 10000; +# while (n > 0) do +# begin +# insert into test(s) values( lpad('', :fld_len, uuid_to_char(gen_uuid())) ); +# n = n - 1; +# end +# +# end +# ^ +# set term ;^ +# commit; +# ''' % locals() +# +# +# f_sql_chk = open( os.path.join(context['temp_directory'],'tmp_gh_6975_init.sql'), 'w') +# f_sql_chk.write(sql_ddl) +# flush_and_close( f_sql_chk ) +# +# f_sql_log = open( ''.join( (os.path.splitext(f_sql_chk.name)[0], '.log' ) ), 'w') +# f_sql_err = open( ''.join( (os.path.splitext(f_sql_chk.name)[0], '.err' ) ), 'w') +# subprocess.call( [ context['isql_path'], 'localhost:' + db_main, '-i', f_sql_chk.name ], stdout = f_sql_log, stderr = f_sql_err) +# flush_and_close( f_sql_log ) +# flush_and_close( f_sql_err ) +# +# last_generated_repl_segment = 0 +# +# with open(f_sql_err.name,'r') as f: +# for line in f: +# print('UNEXPECTED STDERR in initial SQL: ' + line) +# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0 +# +# if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> initial SQL script finished w/o errors +# +# +# ################################## +# ### A.C.H.T.U.N.G ### +# ### do NOT use datetime.now() ### +# ### because of limited accuracy ### +# ### of timestamps in repl.log ### +# ### (it is HH:MM:SS only) ### +# ################################## +# current_date_with_hhmmss = datetime.today().replace(microsecond=0) +# +# +# ############################################################################## +# ### W A I T F O R S E G M E N T S A D D E D T O Q U E U E ### +# ############################################################################## +# wait_for_add_queue_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_ADDED_TO_QUEUE, current_date_with_hhmmss, 'POINT-A' ) +# +# # The following shutdown led to a crash and the appearance of this message: +# # "Fatal lock manager error: invalid lock id (0), errno: 0" in firebird.log: +# # +# runProgram('gfix', ['-shut', 'full', '-force', '0', 'localhost:' + db_repl]) +# +# f_repl_hdr_log=open( os.path.join(context['temp_directory'],'db_repl_hdr.log'), 'w') +# subprocess.call( [context['gstat_path'], db_repl, '-h'], stdout=f_repl_hdr_log, stderr=subprocess.STDOUT ) +# flush_and_close( f_repl_hdr_log ) +# +# with open(f_repl_hdr_log.name,'r') as f: +# for line in f: +# if 'Attributes' in line: +# print('POINT-B ' + line.strip()) +# +# +# # This (issuing 'gfix -shu ...' a second time) led the FB process to hang!
+# # runProgram('gfix', ['-shut', 'full', '-force', '0', 'localhost:' + db_repl]) +# +# runProgram('gfix', ['-online', 'localhost:' + db_repl]) +# +# +# ############################################################################## +# ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### +# ############################################################################## +# wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-1' ) +# +# # return initial state of master DB: +# # remove all DB objects (tables, views, ...): +# # -------------------------------------------- +# sql_clean_ddl = os.path.join(context['files_location'],'drop-all-db-objects.sql') +# +# f_clean_log=open( os.path.join(context['temp_directory'],'drop-all-db-objects-gh_6975.log'), 'w') +# f_clean_err=open( ''.join( ( os.path.splitext(f_clean_log.name)[0], '.err') ), 'w') +# subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-i', sql_clean_ddl], stdout=f_clean_log, stderr=f_clean_err ) +# flush_and_close(f_clean_log) +# flush_and_close(f_clean_err) +# +# with open(f_clean_err.name,'r') as f: +# for line in f: +# print('UNEXPECTED STDERR in cleanup SQL: ' + line) +# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0 +# +# with open(f_clean_log.name,'r') as f: +# for line in f: +# # show number of dropped objects +# print(line) +# +# if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> previous SQL script finished w/o errors +# +# ############################################################################## +# ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### +# ############################################################################## +# wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-2' ) +# +# f_main_meta_sql=open( os.path.join(context['temp_directory'],'db_main_meta_gh_6975.sql'), 'w') +# subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_main_meta_sql, stderr=subprocess.STDOUT ) +# flush_and_close( f_main_meta_sql ) +# +# f_repl_meta_sql=open( os.path.join(context['temp_directory'],'db_repl_meta_gh_6975.sql'), 'w') +# subprocess.call( [context['isql_path'], 'localhost:' + db_repl, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_repl_meta_sql, stderr=subprocess.STDOUT ) +# flush_and_close( f_repl_meta_sql ) +# +# db_main_meta=open(f_main_meta_sql.name, 'r') +# db_repl_meta=open(f_repl_meta_sql.name, 'r') +# +# diffmeta = ''.join(difflib.unified_diff( +# db_main_meta.readlines(), +# db_repl_meta.readlines() +# )) +# db_main_meta.close() +# db_repl_meta.close() +# +# f_meta_diff=open( os.path.join(context['temp_directory'],'db_meta_diff_gh_6975.txt'), 'w', buffering = 0) +# f_meta_diff.write(diffmeta) +# flush_and_close( f_meta_diff ) +# +# # Following must issue only TWO rows: +# # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_main]' ... */ +# # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_repl]' ... */ +# # Only these lines will be suppressed further (see subst.
section): +# with open(f_meta_diff.name, 'r') as f: +# for line in f: +# if line[:1] in ('-', '+') and line[:3] not in ('---','+++'): +# print('UNEXPECTED METADATA DIFF.: ' + line) +# +# runProgram('gfix', ['-w', 'sync', 'localhost:' + db_main]) +# +# ###################### +# ### A C H T U N G ### +# ###################### +# # MANDATORY, OTHERWISE REPLICATION GETS STUCK ON SECOND RUN OF THIS TEST +# # WITH 'ERROR: Record format with length 68 is not found for table TEST': +# runProgram('gfix', ['-sweep', 'localhost:' + db_repl]) +# runProgram('gfix', ['-sweep', 'localhost:' + db_main]) +# ####################### +# +# # cleanup: +# ########## +# #cleanup( (f_sql_chk, f_sql_log, f_sql_err,f_clean_log,f_clean_err,f_main_meta_sql,f_repl_meta_sql,f_meta_diff) ) +# +#--- +act_1 = python_act('db_1', substitutions=substitutions_1) + +expected_stdout_1 = """ + POINT-A FOUND message about segments added to queue after given timestamp. + POINT-B Attributes force write, full shutdown, read-only replica + POINT-1 FOUND message about replicated segment. + Start removing objects + Finish. Total objects removed + POINT-2 FOUND message about replicated segment. +""" + +@pytest.mark.version('>=4.0.1') +@pytest.mark.platform('Windows') +def test_1(act_1: Action): + pytest.fail("Test not IMPLEMENTED") + + diff --git a/tests/functional/replication/test_some_updates_crash_server_on_replica_side.py b/tests/functional/replication/test_some_updates_crash_server_on_replica_side.py new file mode 100644 index 00000000..aafd15c9 --- /dev/null +++ b/tests/functional/replication/test_some_updates_crash_server_on_replica_side.py @@ -0,0 +1,382 @@ +#coding:utf-8 +# +# id: tests.functional.replication.some_updates_crash_server_on_replica_side +# title: Some updates can crash Firebird server on replica side +# decription: +# See: https://github.com/FirebirdSQL/firebird/issues/6909 +# +# We create a table and add data to it according to the ticket info. +# +# After this we wait until the replica becomes actual to master; this delay will last no more than the threshold +# that is defined by the MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG variable (measured in seconds). +# During this delay, we check the replication log every second and search there for a line with the number of the last +# generated segment (which was replicated and finally deleted). +# We can assume that replication finished OK only when such a line is found (see 'POINT-1'). +# +# Further, we invoke ISQL, executing an auxiliary script that drops all DB objects on master (with the '-nod' command switch). +# After all objects are dropped, we have to wait again until the replica becomes actual with master (see 'POINT-2'). +# +# Finally, we extract metadata for master and replica and compare them (see 'f_meta_diff'). +# The only difference in metadata must be the 'CREATE DATABASE' statement with different DB names - we suppress it, +# thus metadata difference must not be issued. +# +# #################### +# ### CRUCIAL NOTE ### +# #################### +# Currently, 25.06.2021, there is a bug in FB 4.x and 5.x which can be seen on the SECOND run of this test: a message with text +# "ERROR: Record format with length 68 is not found for table TEST" will appear in it after inserting the 1st record in master. +# The reason of that is "dirty" pages that remain in RDB$RELATION_FIELDS both on master and replica after dropping the table.
+# The following query shows different data that appear in the replica DB on the 1st and 2nd run (just after the table was created on master): +# ======= +# set blobdisplay 6; +# select rdb$descriptor as fmt_descr +# from rdb$formats natural join rdb$relations where rdb$relation_name = 'TEST'; +# ======= +# This bug was explained by dimitr, see letters 25.06.2021 11:49 and 25.06.2021 16:56. +# It will be fixed later. +# +# The only workaround to solve this problem is to make SWEEP after all DB objects have been dropped. +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# !NB! BOTH master and replica must be cleaned up by sweep! +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# +# ################ +# ### N O T E ### +# ################ +# Test assumes that master and replica DB have been created beforehand. +# Also, it assumes that %FB_HOME%\replication.conf has been prepared with appropriate parameters for replication. +# Particularly, names of directories and databases must contain info about the checked FB major version and ServerMode. +# * verbose = true // in order to find out line with message that required segment was replicated +# * section for master database with specified parameters: +# journal_directory = "!fbt_repo!\tmp\fb-replication.!fb_major!.!server_mode!.journal" +# journal_archive_directory = "!fbt_repo!\tmp\fb-replication.!fb_major!.!server_mode!.archive" +# journal_archive_command = "copy $(pathname) $(archivepathname)" +# journal_archive_timeout = 10 +# * section for replica database with specified parameter: +# journal_source_directory = "!fbt_repo!\tmp\fb-replication.!fb_major!.!server_mode!.archive" +# +# Master and replica databases must be created in the "!fbt_repo!\tmp" directory and have names like these: +# 'fbt-main.fb40.SS.fdb'; 'fbt-repl.fb40.SS.fdb'; - for FB 4.x ('SS' = Super; 'CS' = Classic) +# 'fbt-main.fb50.SS.fdb'; 'fbt-repl.fb50.SS.fdb'; - for FB 5.x ('SS' = Super; 'CS' = Classic) +# NB: fixed numeric value ('40' or '50') must be used for any minor FB version (4.0; 4.0.1; 4.1; 5.0; 5.1 etc) +# +# These two databases must NOT be dropped in any of the tests related to replication! +# They are created and dropped in the batch scenario which prepares the FB instance to be checked for each ServerMode +# and makes cleanup after it, i.e. when all tests are completed. +# +# NB. Currently this task was implemented only in Windows batch, thus the test has attribute platform = 'Windows'. +# +# Temporary comment. For debug purposes: +# 1) find out the SUFFIX of the name of the FB service which is to be tested (e.g. 'DefaultInstance', '40SS' etc); +# 2) copy file %fbt-repo%\tests\functional\tabloid\batches\setup-fb-for-replication.bat.txt +# to some place and rename it "*.bat"; +# 3) open this .bat in an editor and adjust the value of the 'fbt_repo' variable; +# 4) run: setup-fb-for-replication.bat [SUFFIX_OF_FB_SERVICE] +# where SUFFIX_OF_FB_SERVICE is the ending part of the FB service which you want to check: +# DefaultInstance ; 40ss ; 40cs ; 50ss ; 50cs etc +# 5) batch 'setup-fb-for-replication.bat' will: +# * stop the selected FB instance +# * create test databases (in !fbt_repo!\tmp\); +# * prepare journal/archive sub-folders for replication (also in !fbt_repo!\tmp\);
* replace %fb_home%\replication.conf with an appropriate one +# * start the selected FB instance +# 6) run this test (the FB instance will already be launched by setup-fb-for-replication.bat): +# %fpt_repo%\fbt-run2.bat dblevel-triggers-must-not-fire-on-replica.fbt 50ss, etc +# +# Confirmed bug on 5.0.0.126 (31.07.2021), 4.0.1.2547 (30.07.2021): +# FB crashes, segment is not delivered to replica. +# Initial fix was for FB 4.x 30-jul-2021 16:28 (44f48955c250193096c244bee9e5cd7ddf9a099b), +# front-ported to FB 5.x 04-aug-2021 12:48 (220ca99b85289fdd7a5257e576499a1b9c345cd9) +# +# Checked on: +# 5.0.0.126 SS: 27.290s // intermediate build, timestamp: 04-aug-2021 12:08. +# 5.0.0.126 CS: 26.239s. +# 4.0.1.2556 SS: 29.956s. +# 4.0.1.2556 CS: 26.108s. +# +# tracker_id: +# min_versions: ['4.0.1'] +# versions: 4.0.1 +# qmid: None + +import pytest +from firebird.qa import db_factory, python_act, Action + +# version: 4.0.1 +# resources: None + +substitutions_1 = [('Start removing objects in:.*', 'Start removing objects'), ('Finish. Total objects removed: [1-9]\\d*', 'Finish. Total objects removed'), ('.* CREATE DATABASE .*', ''), ('FMT_DESCR .*', 'FMT_DESCR')] + +init_script_1 = """""" + +db_1 = db_factory(sql_dialect=3, init=init_script_1) + +# test_script_1 +#--- +# +# import os +# import subprocess +# import re +# import difflib +# import shutil +# import time +# +# os.environ["ISC_USER"] = user_name +# os.environ["ISC_PASSWORD"] = user_password +# +# ##################################### +# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 65 +# ##################################### +# +# svc = fdb.services.connect(host='localhost', user=user_name, password=user_password) +# FB_HOME = svc.get_home_directory() +# svc.close() +# +# engine = db_conn.engine_version # 4.0; 4.1; 5.0 etc -- type float +# fb_major = 'fb' + str(engine)[:1] + '0' # 'fb40'; 'fb50' +# +# cur = db_conn.cursor() +# cur.execute("select rdb$config_value from rdb$config where upper(rdb$config_name) = upper('ServerMode')") +# server_mode = 'XX' +# for r in cur: +# if r[0] == 'Super': +# server_mode = 'SS' +# elif r[0] == 'SuperClassic': +# server_mode = 'SC' +# elif r[0] == 'Classic': +# server_mode = 'CS' +# cur.close() +# +# # 'fbt-main.fb50.ss.fdb' etc: +# db_main = os.path.join( context['temp_directory'], 'fbt-main.' + fb_major + '.' + server_mode + '.fdb' ) +# db_repl = db_main.replace( 'fbt-main.', 'fbt-repl.') +# +# # Folders for journalling and archiving segments. +# repl_journal_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.journal' ) +# repl_archive_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.archive' ) +# +# db_conn.close() +# +# #-------------------------------------------- +# +# def flush_and_close( file_handle ): +# # https://docs.python.org/2/library/os.html#os.fsync +# # If you're starting with a Python file object f, +# # first do f.flush(), and +# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. +# global os +# +# file_handle.flush() +# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: +# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
+# os.fsync(file_handle.fileno()) +# file_handle.close() +# +# #-------------------------------------------- +# +# def cleanup( f_names_list ): +# global os +# for i in range(len( f_names_list )): +# if type(f_names_list[i]) == file: +# del_name = f_names_list[i].name +# elif type(f_names_list[i]) == str: +# del_name = f_names_list[i] +# else: +# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.') +# del_name = None +# +# if del_name and os.path.isfile( del_name ): +# os.remove( del_name ) +# +# #-------------------------------------------- +# +# def wait_for_data_in_replica( fb_home, max_allowed_time_for_wait, db_main, prefix_msg = '' ): +# +# global re +# global difflib +# global time +# +# replold_lines = [] +# with open( os.path.join(fb_home,'replication.log'), 'r') as f: +# replold_lines = f.readlines() +# +# con = fdb.connect( dsn = 'localhost:' + db_main, no_db_triggers = 1) +# cur = con.cursor() +# cur.execute("select rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') from rdb$database") +# for r in cur: +# last_generated_repl_segment = r[0] +# cur.close() +# con.close() +# +# #print('last_generated_repl_segment:', last_generated_repl_segment) +# +# # VERBOSE: Segment 1 (2582 bytes) is replicated in 1 second(s), deleting the file +# p=re.compile( '\\+\\s+verbose:\\s+segment\\s+%(last_generated_repl_segment)s\\s+\\(\\d+\\s+bytes\\)\\s+is\\s+replicated.*deleting' % locals(), re.IGNORECASE) +# +# found_required_message = False +# for i in range(0,max_allowed_time_for_wait): +# time.sleep(1) +# +# # Get content of fb_home replication.log _after_ isql finish: +# f_repllog_new = open( os.path.join(fb_home,'replication.log'), 'r') +# diff_data = difflib.unified_diff( +# replold_lines, +# f_repllog_new.readlines() +# ) +# f_repllog_new.close() +# +# for k,d in enumerate(diff_data): +# if p.search(d): +# print( (prefix_msg + ' ' if prefix_msg else '') + 'FOUND message about replicated segment.' ) +# found_required_message = True +# break +# +# if found_required_message: +# break +# +# if not found_required_message: +# print('UNEXPECTED RESULT: no message about replicated segment for %d seconds.' 
% max_allowed_time_for_wait) +# +# #-------------------------------------------- +# +# sql_ddl = ''' set bail on; +# create table rep ( +# id_report integer not null, +# report_data blob sub_type binary not null, +# zvyrazneni_radku char(1) default 'n' not null, +# kriteria_dotazu char(1) default 'n' not null, +# stav char(1) default 'a' not null, +# constraint rep_pk primary key (id_report) +# ); +# +# insert into rep (id_report, report_data, zvyrazneni_radku, kriteria_dotazu, stav) +# values (0, 'report_data', 'n', 'n', 'a'); +# +# commit; +# +# update rep set zvyrazneni_radku='a' where id_report =0; +# update rep set kriteria_dotazu='a' where id_report =0; +# +# commit; +# ''' % locals() +# +# +# f_sql_chk = open( os.path.join(context['temp_directory'],'tmp_gh_6909_init.sql'), 'w') +# f_sql_chk.write(sql_ddl) +# flush_and_close( f_sql_chk ) +# +# f_sql_log = open( ''.join( (os.path.splitext(f_sql_chk.name)[0], '.log' ) ), 'w') +# f_sql_err = open( ''.join( (os.path.splitext(f_sql_chk.name)[0], '.err' ) ), 'w') +# subprocess.call( [ context['isql_path'], 'localhost:' + db_main, '-i', f_sql_chk.name ], stdout = f_sql_log, stderr = f_sql_err) +# flush_and_close( f_sql_log ) +# flush_and_close( f_sql_err ) +# +# last_generated_repl_segment = 0 +# +# with open(f_sql_err.name,'r') as f: +# for line in f: +# print('UNEXPECTED STDERR in initial SQL: ' + line) +# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0 +# +# if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> initial SQL script finished w/o errors +# +# ############################################################################## +# ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### +# ############################################################################## +# wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-1' ) +# +# # return initial state of master DB: +# # remove all DB objects (tables, views, ...): +# # -------------------------------------------- +# sql_clean_ddl = os.path.join(context['files_location'],'drop-all-db-objects.sql') +# +# f_clean_log=open( os.path.join(context['temp_directory'],'drop-all-db-objects-gh_6909.log'), 'w') +# f_clean_err=open( ''.join( ( os.path.splitext(f_clean_log.name)[0], '.err') ), 'w') +# subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-i', sql_clean_ddl], stdout=f_clean_log, stderr=f_clean_err ) +# flush_and_close(f_clean_log) +# flush_and_close(f_clean_err) +# +# with open(f_clean_err.name,'r') as f: +# for line in f: +# print('UNEXPECTED STDERR in cleanup SQL: ' + line) +# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0 +# +# with open(f_clean_log.name,'r') as f: +# for line in f: +# # show number of dropped objects +# print(line) +# +# if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> previous SQL script finished w/o errors +# +# ############################################################################## +# ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### +# ############################################################################## +# wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-2' ) +# +# f_main_meta_sql=open( os.path.join(context['temp_directory'],'db_main_meta_gh_6909.sql'), 'w') +# subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_main_meta_sql, stderr=subprocess.STDOUT ) +# flush_and_close( f_main_meta_sql ) +# +# f_repl_meta_sql=open( os.path.join(context['temp_directory'],'db_repl_meta_gh_6909.sql'), 'w') +# 
subprocess.call( [context['isql_path'], 'localhost:' + db_repl, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_repl_meta_sql, stderr=subprocess.STDOUT ) +# flush_and_close( f_repl_meta_sql ) +# +# db_main_meta=open(f_main_meta_sql.name, 'r') +# db_repl_meta=open(f_repl_meta_sql.name, 'r') +# +# diffmeta = ''.join(difflib.unified_diff( +# db_main_meta.readlines(), +# db_repl_meta.readlines() +# )) +# db_main_meta.close() +# db_repl_meta.close() +# +# f_meta_diff=open( os.path.join(context['temp_directory'],'db_meta_diff_gh_6909.txt'), 'w', buffering = 0) +# f_meta_diff.write(diffmeta) +# flush_and_close( f_meta_diff ) +# +# # Following must issue only TWO rows: +# # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_main]' ... */ +# # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_repl]' ... */ +# # Only these lines will be suppressed further (see subst. section): +# with open(f_meta_diff.name, 'r') as f: +# for line in f: +# if line[:1] in ('-', '+') and line[:3] not in ('---','+++'): +# print('UNEXPECTED METADATA DIFF.: ' + line) +# +# ###################### +# ### A C H T U N G ### +# ###################### +# # MANDATORY, OTHERWISE REPLICATION GETS STUCK ON SECOND RUN OF THIS TEST +# # WITH 'ERROR: Record format with length 68 is not found for table TEST': +# runProgram('gfix', ['-sweep', 'localhost:' + db_repl]) +# runProgram('gfix', ['-sweep', 'localhost:' + db_main]) +# ####################### +# +# # cleanup: +# ########## +# cleanup( (f_sql_chk, f_sql_log, f_sql_err,f_clean_log,f_clean_err,f_main_meta_sql,f_repl_meta_sql,f_meta_diff) ) +# +#--- +act_1 = python_act('db_1', substitutions=substitutions_1) + +expected_stdout_1 = """ + POINT-1 FOUND message about replicated segment. + Start removing objects + Finish. Total objects removed + POINT-2 FOUND message about replicated segment. +""" + +@pytest.mark.version('>=4.0.1') +@pytest.mark.platform('Windows')
def test_1(act_1: Action): + pytest.fail("Test not IMPLEMENTED") +
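+# NB. The metadata check used by all three tests above boils down to the following
+# pattern (a minimal sketch; the file names here are illustrative, not the actual
+# temp-directory names used by the tests):
+#
+# import difflib
+# with open('db_main_meta.sql') as a, open('db_repl_meta.sql') as b:
+#     diff = list(difflib.unified_diff(a.readlines(), b.readlines()))
+# for line in diff:
+#     # report only real '+'/'-' difference lines; skip the '---'/'+++' file headers
+#     if line[:1] in ('-', '+') and line[:3] not in ('---', '+++'):
+#         print('UNEXPECTED METADATA DIFF.: ' + line)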