mirror of https://github.com/FirebirdSQL/firebird-qa.git
synced 2025-01-22 21:43:06 +01:00

Missing functional tests, fresh conversion without adjustment

This commit is contained in:
parent 1b015de4fa
commit 2b19dc571c
@@ -0,0 +1,409 @@
#coding:utf-8
#
# id: tests.functional.replication.blob_not_found_in_rw_replica_if_target_row_exists
# title: Error "BLOB is not found" while replication converts INSERT into UPDATE for a conflicting record
# description:
#     See: https://github.com/FirebirdSQL/firebird/issues/7070
#
# Test temporarily changes the replica mode using an external call: gfix -replica read_write ...
# Then we create table TEST(ID int, b blob) on master, without adding any records to it, and WAIT
# until this table appears in the replica DB.
# Maximal waiting time is limited by the variable MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG.
# A message starting with the phrase 'POINT-1A ...' is printed when this table appears in the replica DB.
#
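# A minimal sketch of that replica mode switch as a plain subprocess call (an
# illustration only, assuming gfix is on PATH; the commented test body below
# does the same thing via the legacy runProgram helper):
#
#     import subprocess
#     def set_replica_mode(db_path: str, mode: str) -> None:
#         # mode is 'read_write' or 'read_only'
#         subprocess.run(['gfix', '-replica', mode, 'localhost:' + db_path], check=True)
#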
# NB. Even though we could create such a table on the replica at the same time, the issue from this ticket can be
# reproduced only when we create the table on master and wait until it has been replicated to the replica.
#
# After this we:
#     * add one record with ID = 1 and a non-empty blob into this table on the REPLICA database and commit;
#     * add a record with the same ID = 1 on the MASTER database and commit.
#
# The message "WARNING: Record being inserted into table TEST already exists, updating instead" must appear
# in the replication log at this point, but after that a message about a successfully replicated segment must also appear.
# The message 'ERROR: Blob ... is not found for table TEST' must NOT appear.
# If these conditions are met, the script issues a message starting with 'Point-1B ...'
#
# Further, we invoke ISQL, executing an auxiliary script that drops all DB objects on master (with the '-nod' command switch).
# After all objects have been dropped, we have to wait again until the replica catches up with master (see 'POINT-2').
#
# Finally, we extract metadata for master and replica and compare them (see 'f_meta_diff').
# The only difference in the metadata must be the 'CREATE DATABASE' statement with different DB names - we suppress it,
# thus no metadata difference must be issued.
#
# ####################
# ### CRUCIAL NOTE ###
# ####################
# Currently, 25.06.2021, there is a bug in FB 4.x and 5.x which can be seen on the SECOND run of this test: a message with text
# "ERROR: Record format with length 68 is not found for table TEST" will appear in the log after inserting the 1st record on master.
# The reason is "dirty" pages that remain in RDB$RELATION_FIELDS both on master and replica after dropping the table.
# The following query shows the different data that appear in the replica DB on the 1st and 2nd run (just after the table was created on master):
# =======
# set blobdisplay 6;
# select rdb$descriptor as fmt_descr
# from rdb$formats natural join rdb$relations where rdb$relation_name = 'TEST';
# =======
# This bug was explained by dimitr, see letters 25.06.2021 11:49 and 25.06.2021 16:56.
# It will be fixed later.
#
# The only workaround for this problem is to run SWEEP after all DB objects have been dropped.
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# !NB! BOTH master and replica must be cleaned up by sweep!
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
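# A sketch of that sweep workaround (illustration only; db_main / db_repl are the
# database paths defined later in the commented test body, which issues the same
# commands via the legacy runProgram helper):
#
#     import subprocess
#     for db in (db_main, db_repl):
#         # BOTH databases must be swept, per the note above.
#         subprocess.run(['gfix', '-sweep', 'localhost:' + db], check=True)
#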
# ################
# ### N O T E  ###
# ################
# Test assumes that master and replica DBs have been created beforehand.
# Also, it assumes that %FB_HOME%\replication.conf has been prepared with appropriate parameters for replication.
# Particularly, the names of directories and databases must carry info about the checked FB major version and ServerMode:
# * verbose = true // in order to find the line with the message that the required segment was replicated
# * section for master database with specified parameters:
#   journal_directory = "!fbt_repo!\tmp\fb-replication.!fb_major!.!server_mode!.journal"
#   journal_archive_directory = "!fbt_repo!\tmp\fb-replication.!fb_major!.!server_mode!.archive"
#   journal_archive_command = "copy $(pathname) $(archivepathname)"
#   journal_archive_timeout = 10
# * section for replica database with specified parameter:
#   journal_source_directory = "!fbt_repo!\tmp\fb-replication.!fb_major!.!server_mode!.archive"
#
# Master and replica databases must be created in the "!fbt_repo!\tmp" directory and have names like these:
# 'fbt-main.fb40.SS.fdb'; 'fbt-repl.fb40.SS.fdb'; - for FB 4.x ('SS' = Super; 'CS' = Classic)
# 'fbt-main.fb50.SS.fdb'; 'fbt-repl.fb50.SS.fdb'; - for FB 5.x ('SS' = Super; 'CS' = Classic)
# NB: a fixed numeric value ('40' or '50') must be used for any minor FB version (4.0; 4.0.1; 4.1; 5.0; 5.1 etc.)
#
# These two databases must NOT be dropped in any of the tests related to replication!
# They are created and dropped in the batch scenario which prepares the FB instance to be checked for each ServerMode
# and performs cleanup after it, i.e. when all tests have completed.
#
# NB. Currently this task is implemented only as a Windows batch, thus the test has the attribute platform = 'Windows'.
#
# Temporary comment. For debug purposes:
# 1) find out the SUFFIX of the name of the FB service to be tested (e.g. 'DefaultInstance', '40SS' etc.);
# 2) copy the file %fbt-repo%\tests\functional\tabloid\batches\setup-fb-for-replication.bat.txt
#    to some place and rename it "*.bat";
# 3) open this .bat in an editor and adjust the value of the 'fbt_repo' variable;
# 4) run: setup-fb-for-replication.bat [SUFFIX_OF_FB_SERVICE]
#    where SUFFIX_OF_FB_SERVICE is the ending part of the name of the FB service which you want to check:
#    DefaultInstance ; 40ss ; 40cs ; 50ss ; 50cs etc.
# 5) the batch 'setup-fb-for-replication.bat' will:
#    * stop the selected FB instance
#    * create test databases (in !fbt_repo!\tmp\);
#    * prepare journal/archive sub-folders for replication (also in !fbt_repo!\tmp\);
#    * replace %fb_home%\replication.conf with the appropriate one
#    * start the selected FB instance
# 6) run this test (the FB instance will already have been launched by setup-fb-for-replication.bat):
#    %fbt-repo%\fbt-run2.bat dblevel-triggers-must-not-fire-on-replica.fbt 50ss, etc.
#
# Confirmed bug on 4.0.1.2682 and 5.0.0.338; got in the replication.log:
#     ERROR: Blob 128.0 is not found for table TEST
# Checked on:
#     4.0.1.2691 SS/CS (32.3s/33.9s)
#     5.0.0.351 SS/CS (28.5s/35.3s)
#
#
# tracker_id:
# min_versions: ['4.0.1']
# versions: 4.0.1
# qmid: None

import pytest
from firebird.qa import db_factory, python_act, Action

# version: 4.0.1
# resources: None

substitutions_1 = [('Start removing objects in:.*', 'Start removing objects'),
                   ('Finish. Total objects removed: [1-9]\\d*', 'Finish. Total objects removed'),
                   ('.* CREATE DATABASE .*', ''),
                   ('FMT_DESCR .*', 'FMT_DESCR')]
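
# A rough sketch of how such (pattern, replacement) pairs are conventionally applied
# to captured output before it is compared with expected_stdout_1 (an assumption
# about the firebird-qa machinery, shown only to clarify what the tuples mean):
#
#     import re
#     def apply_substitutions(text: str, subs) -> str:
#         for pattern, replacement in subs:
#             text = re.sub(pattern, replacement, text)
#         return text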

init_script_1 = """"""

db_1 = db_factory(sql_dialect=3, init=init_script_1)

# test_script_1
#---
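# NB: names such as fdb, db_conn, user_name, user_password, context and runProgram
# in the block below are globals that were supplied by the legacy fbtest harness;
# they are not defined in this module, which is why the script stays commented out.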
#
# import os
# import subprocess
# import re
# import difflib
# import shutil
# import time
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
#
# #####################################
# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 65
# #####################################
#
# svc = fdb.services.connect(host='localhost', user=user_name, password=user_password)
# FB_HOME = svc.get_home_directory()
# svc.close()
#
# engine = db_conn.engine_version # 4.0; 4.1; 5.0 etc -- type float
# fb_major = 'fb' + str(engine)[:1] + '0' # 'fb40'; 'fb50'
#
# cur = db_conn.cursor()
# cur.execute("select rdb$config_value from rdb$config where upper(rdb$config_name) = upper('ServerMode')")
# server_mode = 'XX'
# for r in cur:
#     if r[0] == 'Super':
#         server_mode = 'SS'
#     elif r[0] == 'SuperClassic':
#         server_mode = 'SC'
#     elif r[0] == 'Classic':
#         server_mode = 'CS'
# cur.close()
#
# # 'fbt-main.fb50.ss.fdb' etc:
# db_main = os.path.join( context['temp_directory'], 'fbt-main.' + fb_major + '.' + server_mode + '.fdb' )
# db_repl = db_main.replace( 'fbt-main.', 'fbt-repl.')
#
# runProgram('gfix', ['-replica', 'read_write', 'localhost:' + db_repl])
#
# # Folders for journalling and archiving segments.
# repl_journal_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.journal' )
# repl_archive_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.archive' )
#
# db_conn.close()
#
# #--------------------------------------------
#
# def flush_and_close( file_handle ):
#     # https://docs.python.org/2/library/os.html#os.fsync
#     # If you're starting with a Python file object f,
#     # first do f.flush(), and
#     # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
#     global os
#
#     file_handle.flush()
#     if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
#         # otherwise: "OSError: [Errno 9] Bad file descriptor"!
#         os.fsync(file_handle.fileno())
#     file_handle.close()
#
# #--------------------------------------------
#
# def cleanup( f_names_list ):
#     global os
#     for i in range(len( f_names_list )):
#         if type(f_names_list[i]) == file:
#             del_name = f_names_list[i].name
#         elif type(f_names_list[i]) == str:
#             del_name = f_names_list[i]
#         else:
#             print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
#             del_name = None
#
#         if del_name and os.path.isfile( del_name ):
#             os.remove( del_name )
#
# #--------------------------------------------
#
# def wait_for_data_in_replica( fb_home, max_allowed_time_for_wait, db_main, prefix_msg = '' ):
#
#     global re
#     global difflib
#     global time
#
#     replold_lines = []
#     with open( os.path.join(fb_home,'replication.log'), 'r') as f:
#         replold_lines = f.readlines()
#
#     con = fdb.connect( dsn = 'localhost:' + db_main, no_db_triggers = 1)
#     cur = con.cursor()
#     cur.execute("select rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') from rdb$database")
#     for r in cur:
#         last_generated_repl_segment = r[0]
#     cur.close()
#     con.close()
#
#     #print('last_generated_repl_segment:', last_generated_repl_segment)
#
#     # VERBOSE: Segment 1 (2582 bytes) is replicated in 1 second(s), deleting the file
#     p_expected = re.compile( '\\+\\s+verbose:\\s+segment\\s+%(last_generated_repl_segment)s\\s+\\(\\d+\\s+bytes\\)\\s+is\\s+replicated.*deleting' % locals(), re.IGNORECASE)
#     p_unexpected = re.compile( '\\+\\s+ERROR: Blob .* not found', re.IGNORECASE)
#
#     found_required_message, found_unexpected_message = False, False
#     for i in range(0, max_allowed_time_for_wait):
#         time.sleep(1)
#
#         # Get content of fb_home replication.log _after_ isql finished:
#         f_repllog_new = open( os.path.join(fb_home,'replication.log'), 'r')
#         diff_data = difflib.unified_diff(
#             replold_lines,
#             f_repllog_new.readlines()
#           )
#         f_repllog_new.close()
#
#         for k,d in enumerate(diff_data):
#             if p_unexpected.search(d):
#                 print( (prefix_msg + ' ' if prefix_msg else '') + 'UNEXPECTED message encountered:' )
#                 print(d)
#                 found_unexpected_message = True
#                 break
#
#             if p_expected.search(d):
#                 print( (prefix_msg + ' ' if prefix_msg else '') + 'FOUND message about replicated segment.' )
#                 found_required_message = True
#                 break
#
#         if found_required_message or found_unexpected_message:
#             break
#
#     if not found_required_message:
#         print('UNEXPECTED RESULT: no message about replicated segment for %d seconds.' % max_allowed_time_for_wait)
#
#
# #--------------------------------------------
#
# sql_ddl = ''' set bail on;
#     set list on;
#     --set blob all;
#     set blobdisplay 6;
#
#     recreate table test(id int primary key using index test_pk, bindata blob);
#     commit;
#
#     -- for debug only:
#     select rdb$get_context('SYSTEM', 'DB_NAME'), rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') as last_generated_repl_segment from rdb$database;
#     select
#         RDB$DESCRIPTOR as fmt_descr
#     from RDB$FORMATS natural join RDB$RELATIONS
#     where RDB$RELATION_NAME = 'TEST';
#     quit;
# ''' % locals()
#
#
# f_sql_ddl = open( os.path.join(context['temp_directory'],'tmp_gh_7070_init.sql'), 'w')
# f_sql_ddl.write(sql_ddl)
# flush_and_close( f_sql_ddl )
#
# f_ddl_log = open( ''.join( (os.path.splitext(f_sql_ddl.name)[0], '.log' ) ), 'w')
# f_ddl_err = open( ''.join( (os.path.splitext(f_sql_ddl.name)[0], '.err' ) ), 'w')
# subprocess.call( [ context['isql_path'], 'localhost:' + db_main, '-i', f_sql_ddl.name ], stdout = f_ddl_log, stderr = f_ddl_err)
# flush_and_close( f_ddl_log )
# flush_and_close( f_ddl_err )
#
# ##############################################################################
# ### W A I T   U N T I L   R E P L I C A   B E C O M E S   A C T U A L ###
# ##############################################################################
# wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-1A' )
#
# con_repl = fdb.connect( dsn = 'localhost:' + db_repl)
# cur_repl = con_repl.cursor()
# cur_repl.execute('insert into test(id,bindata) values(?, ?)', (2, 'qwerty-on-replica'))
# con_repl.commit()
# cur_repl.close()
# con_repl.close()
#
# con_main = fdb.connect( dsn = 'localhost:' + db_main)
# cur_main = con_main.cursor()
# cur_main.execute('insert into test(id,bindata) values(?, ?)', (2, 'qwerty-on-master'))
# con_main.commit()
# cur_main.close()
# con_main.close()
#
# ##############################################################################
# ### W A I T   U N T I L   R E P L I C A   B E C O M E S   A C T U A L ###
# ##############################################################################
# wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-1B' )
#
#
# runProgram('gfix', ['-replica', 'read_only', 'localhost:' + db_repl])
#
# # return initial state of master DB:
# # remove all DB objects (tables, views, ...):
# # --------------------------------------------
# sql_clean_ddl = os.path.join(context['files_location'],'drop-all-db-objects.sql')
#
# f_clean_log = open( os.path.join(context['temp_directory'],'drop-all-db-objects-gh_7070.log'), 'w')
# f_clean_err = open( ''.join( ( os.path.splitext(f_clean_log.name)[0], '.err') ), 'w')
# subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-i', sql_clean_ddl], stdout=f_clean_log, stderr=f_clean_err )
# flush_and_close(f_clean_log)
# flush_and_close(f_clean_err)
#
# with open(f_clean_err.name,'r') as f:
#     for line in f:
#         print('UNEXPECTED STDERR in cleanup SQL: ' + line)
#         MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0
#
# with open(f_clean_log.name,'r') as f:
#     for line in f:
#         # show number of dropped objects
#         print(line)
#
#
# ##############################################################################
# ### W A I T   U N T I L   R E P L I C A   B E C O M E S   A C T U A L ###
# ##############################################################################
# wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-2' )
#
# f_main_meta_sql = open( os.path.join(context['temp_directory'],'db_main_meta_gh_7070.sql'), 'w')
# subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_main_meta_sql, stderr=subprocess.STDOUT )
# flush_and_close( f_main_meta_sql )
#
# f_repl_meta_sql = open( os.path.join(context['temp_directory'],'db_repl_meta_gh_7070.sql'), 'w')
# subprocess.call( [context['isql_path'], 'localhost:' + db_repl, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_repl_meta_sql, stderr=subprocess.STDOUT )
# flush_and_close( f_repl_meta_sql )
#
# db_main_meta = open(f_main_meta_sql.name, 'r')
# db_repl_meta = open(f_repl_meta_sql.name, 'r')
#
# diffmeta = ''.join(difflib.unified_diff(
#     db_main_meta.readlines(),
#     db_repl_meta.readlines()
#   ))
# db_main_meta.close()
# db_repl_meta.close()
#
# f_meta_diff = open( os.path.join(context['temp_directory'],'db_meta_diff_gh_7070.txt'), 'w', buffering = 0)
# f_meta_diff.write(diffmeta)
# flush_and_close( f_meta_diff )
#
# # The following must issue only TWO rows:
# #     UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_main]' ... */
# #     UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_repl]' ... */
# # Only these lines will be suppressed further (see subst. section):
# with open(f_meta_diff.name, 'r') as f:
#     for line in f:
#         if line[:1] in ('-', '+') and line[:3] not in ('---','+++'):
#             print('UNEXPECTED METADATA DIFF.: ' + line)
#
#
# ######################
# ### A C H T U N G ###
# ######################
# # MANDATORY, OTHERWISE REPLICATION GETS STUCK ON SECOND RUN OF THIS TEST
# # WITH 'ERROR: Record format with length 68 is not found for table TEST':
# runProgram('gfix', ['-sweep', 'localhost:' + db_repl])
# runProgram('gfix', ['-sweep', 'localhost:' + db_main])
# #######################
#
# # cleanup:
# ##########
# cleanup( (f_sql_ddl, f_ddl_log, f_ddl_err, f_clean_log, f_clean_err, f_main_meta_sql, f_repl_meta_sql, f_meta_diff) )
#
#
#---
act_1 = python_act('db_1', substitutions=substitutions_1)

expected_stdout_1 = """
POINT-1A FOUND message about replicated segment.
POINT-1B FOUND message about replicated segment.
Start removing objects
Finish. Total objects removed
POINT-2 FOUND message about replicated segment.
"""

@pytest.mark.version('>=4.0.1')
@pytest.mark.platform('Windows')
def test_1(act_1: Action):
    pytest.fail("Test not IMPLEMENTED")

@@ -0,0 +1,482 @@
#coding:utf-8
#
# id: tests.functional.replication.dblevel_triggers_must_not_fire_on_replica
# title: Replica DB must not fire DB-level triggers, but their activity on master must eventually be seen in the replica.
# description:
#     See: https://github.com/FirebirdSQL/firebird/issues/6850
#
# Test creates five DB-level triggers in the master DB (on connect/disconnect; on tx start/commit/rollback).
# Each of them registers the appropriate event in the table named 'log_db_triggers_activity'.
# This table must eventually have five records in BOTH databases (i.e. not only in master, but in the replica also).
# After creating the metadata we make a test connection to the master DB in order to fire these triggers.
#
# Then we wait until the replica catches up with master; this delay lasts no more than the threshold
# defined by the MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG variable (measured in seconds).
# During this delay, we check the replication log every second, searching for the line with the number of the
# last generated segment (which was replicated and finally deleted).
# We can assume that replication finished OK only when such a line is found (see 'POINT-1').
#
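# A compact sketch of that polling idea (an illustration only; the commented test
# body below implements it with difflib over old/new replication.log contents):
#
#     import re, time
#     def wait_for_log_line(log_path: str, pattern: str, timeout_sec: int) -> bool:
#         rx = re.compile(pattern, re.IGNORECASE)
#         for _ in range(timeout_sec):
#             with open(log_path) as f:
#                 if any(rx.search(line) for line in f):
#                     return True
#             time.sleep(1)  # re-check the log once per second
#         return False
#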
# After this, we query the master and replica databases and obtain data from the 'log_db_triggers_activity' table: it must
# have records about every fired trigger. The content of this table must be identical on master and replica; see the queries
# against v_log_db_triggers_activity (on both the master and replica DB).
#
# Then we invoke ISQL, executing an auxiliary script that drops all DB objects on master (with the '-nod' command switch).
# After all objects have been dropped, we have to wait again until the replica catches up with master (see 'POINT-2').
#
# Finally, we extract metadata for master and replica and compare them (see 'f_meta_diff').
# The only difference in the metadata must be the 'CREATE DATABASE' statement with different DB names - we suppress it,
# thus no metadata difference must be issued.
#
# ################
# ### N O T E  ###
# ################
# Test assumes that master and replica DBs have been created beforehand.
# Also, it assumes that %FB_HOME%\replication.conf has been prepared with appropriate parameters for replication.
# Particularly, the names of directories and databases must carry info about the checked FB major version and ServerMode:
# * verbose = true // in order to find the line with the message that the required segment was replicated
# * section for master database with specified parameters:
#   journal_directory = "!fbt_repo!\tmp\fb-replication.!fb_major!.!server_mode!.journal"
#   journal_archive_directory = "!fbt_repo!\tmp\fb-replication.!fb_major!.!server_mode!.archive"
#   journal_archive_command = "copy $(pathname) $(archivepathname)"
#   journal_archive_timeout = 10
# * section for replica database with specified parameter:
#   journal_source_directory = "!fbt_repo!\tmp\fb-replication.!fb_major!.!server_mode!.archive"
#
# Master and replica databases must be created in the "!fbt_repo!\tmp" directory and have names like these:
# 'fbt-main.fb40.SS.fdb'; 'fbt-repl.fb40.SS.fdb'; - for FB 4.x ('SS' = Super; 'CS' = Classic)
# 'fbt-main.fb50.SS.fdb'; 'fbt-repl.fb50.SS.fdb'; - for FB 5.x ('SS' = Super; 'CS' = Classic)
# NB: a fixed numeric value ('40' or '50') must be used for any minor FB version (4.0; 4.0.1; 4.1; 5.0; 5.1 etc.)
#
# These two databases must NOT be dropped in any of the tests related to replication!
# They are created and dropped in the batch scenario which prepares the FB instance to be checked for each ServerMode
# and performs cleanup after it, i.e. when all tests have completed.
#
# NB. Currently this task is implemented only as a Windows batch, thus the test has the attribute platform = 'Windows'.
#
# Temporary comment. For debug purposes:
# 1) find out the SUFFIX of the name of the FB service to be tested (e.g. 'DefaultInstance', '40SS' etc.);
# 2) copy the file %fbt-repo%\tests\functional\tabloid\batches\setup-fb-for-replication.bat.txt
#    to some place and rename it "*.bat";
# 3) open this .bat in an editor and adjust the value of the 'fbt_repo' variable;
# 4) run: setup-fb-for-replication.bat [SUFFIX_OF_FB_SERVICE]
#    where SUFFIX_OF_FB_SERVICE is the ending part of the name of the FB service which you want to check:
#    DefaultInstance ; 40ss ; 40cs ; 50ss ; 50cs etc.
# 5) the batch 'setup-fb-for-replication.bat' will:
#    * stop the selected FB instance
#    * create test databases (in !fbt_repo!\tmp\);
#    * prepare journal/archive sub-folders for replication (also in !fbt_repo!\tmp\);
#    * replace %fb_home%\replication.conf with the appropriate one
#    * start the selected FB instance
# 6) run this test (the FB instance will already have been launched by setup-fb-for-replication.bat):
#    %fbt-repo%\fbt-run2.bat dblevel-triggers-must-not-fire-on-replica.fbt 50ss, etc.
#
# Checked on:
#     4.0.1.2519 SS: 56.48s, CS: 99.31s
#     5.0.0.82 SS: 20.63s, CS: 21.39s
#
# tracker_id:
# min_versions: ['4.0']
# versions: 4.0
# qmid: None

import pytest
from firebird.qa import db_factory, python_act, Action

# version: 4.0
# resources: None

substitutions_1 = [('Start removing objects in:.*', 'Start removing objects'),
                   ('Finish. Total objects removed: [1-9]\\d*', 'Finish. Total objects removed'),
                   ('.* CREATE DATABASE .*', '')]

init_script_1 = """"""

db_1 = db_factory(sql_dialect=3, init=init_script_1)

# test_script_1
#---
#
# import os
# import subprocess
# import re
# import difflib
# import shutil
# import time
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
#
# #####################################
# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 65
# #####################################
#
# svc = fdb.services.connect(host='localhost', user=user_name, password=user_password)
# FB_HOME = svc.get_home_directory()
# svc.close()
#
# engine = db_conn.engine_version # 4.0; 4.1; 5.0 etc -- type float
# fb_major = 'fb' + str(engine)[:1] + '0' # 'fb40'; 'fb50'
#
# cur = db_conn.cursor()
# cur.execute("select rdb$config_value from rdb$config where upper(rdb$config_name) = upper('ServerMode')")
# server_mode = 'XX'
# for r in cur:
#     if r[0] == 'Super':
#         server_mode = 'SS'
#     elif r[0] == 'SuperClassic':
#         server_mode = 'SC'
#     elif r[0] == 'Classic':
#         server_mode = 'CS'
# cur.close()
#
# # 'fbt-main.fb50.ss.fdb' etc:
# db_main = os.path.join( context['temp_directory'], 'fbt-main.' + fb_major + '.' + server_mode + '.fdb' )
# db_repl = db_main.replace( 'fbt-main.', 'fbt-repl.')
#
# # Folders for journalling and archiving segments.
# repl_journal_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.journal' )
# repl_archive_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.archive' )
#
# db_conn.close()
#
# #--------------------------------------------
#
# def flush_and_close( file_handle ):
#     # https://docs.python.org/2/library/os.html#os.fsync
#     # If you're starting with a Python file object f,
#     # first do f.flush(), and
#     # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
#     global os
#
#     file_handle.flush()
#     if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
#         # otherwise: "OSError: [Errno 9] Bad file descriptor"!
#         os.fsync(file_handle.fileno())
#     file_handle.close()
#
# #--------------------------------------------
#
# def cleanup( f_names_list ):
#     global os
#     for i in range(len( f_names_list )):
#         if type(f_names_list[i]) == file:
#             del_name = f_names_list[i].name
#         elif type(f_names_list[i]) == str:
#             del_name = f_names_list[i]
#         else:
#             print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
#             del_name = None
#
#         if del_name and os.path.isfile( del_name ):
#             os.remove( del_name )
#
# #--------------------------------------------
#
# def wait_for_data_in_replica( fb_home, max_allowed_time_for_wait, db_main, prefix_msg = '' ):
#
#     global re
#     global difflib
#     global time
#
#     replold_lines = []
#     with open( os.path.join(fb_home,'replication.log'), 'r') as f:
#         replold_lines = f.readlines()
#
#     con = fdb.connect( dsn = 'localhost:' + db_main, no_db_triggers = 1)
#     cur = con.cursor()
#     cur.execute("select rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') from rdb$database")
#     for r in cur:
#         last_generated_repl_segment = r[0]
#     cur.close()
#     con.close()
#
#     #print('last_generated_repl_segment:', last_generated_repl_segment)
#
#
#     # +IMAGE-PC1 (replica) Fri Jun 11 17:57:01 2021
#     # +    Database: C:\\FBTESTING\\QA\\FBT-REPO\\TMP\\FBT-REPL.FB50.FDB
#     # +    VERBOSE: Added 1 segment(s) to the processing queue
#     # +
#     # +IMAGE-PC1 (replica) Fri Jun 11 17:57:04 2021
#     # +    Database: C:\\FBTESTING\\QA\\FBT-REPO\\TMP\\FBT-REPL.FB50.FDB
#     # +    VERBOSE: Segment 1 (2582 bytes) is replicated in 1 second(s), deleting the file
#
#     p = re.compile( '\\+\\s+verbose:\\s+segment\\s+%(last_generated_repl_segment)s\\s+\\(\\d+\\s+bytes\\)\\s+is\\s+replicated.*deleting' % locals(), re.IGNORECASE)
#
#     found_required_message = False
#     for i in range(0, max_allowed_time_for_wait):
#         time.sleep(1)
#
#         # Get content of fb_home replication.log _after_ isql finished:
#         f_repllog_new = open( os.path.join(fb_home,'replication.log'), 'r')
#         diff_data = difflib.unified_diff(
#             replold_lines,
#             f_repllog_new.readlines()
#           )
#         f_repllog_new.close()
#
#         for k,d in enumerate(diff_data):
#             if p.search(d):
#                 print( (prefix_msg + ' ' if prefix_msg else '') + 'FOUND message about replicated segment.' )
#                 found_required_message = True
#                 break
#
#         if found_required_message:
#             break
#
#     if not found_required_message:
#         print('UNEXPECTED RESULT: no message about replicated segment for %d seconds.' % max_allowed_time_for_wait)
#
# #--------------------------------------------
#
# sql_ddl = ''' set bail on;
#     set list on;
#
#     select mon$database_name from mon$database;
#
#     set term ^;
#     execute block as
#     begin
#         -- Define context variable in order to prevent
#         -- DB-level triggers from firing during this execution:
#         rdb$set_context('USER_SESSION', 'SKIP_DBLEVEL_TRG','1');
#     end
#     ^
#     set term ;^
#
#     -- ::: NB :::
#     -- We can not start this script from 'zero-point', i.e. 'create table ...; create view ... ;' etc,
#     -- because it will fail if master or replica DB contain some objects which could remain there
#     -- due to a fail of some previous test which also dealt with replication and used these databases.
#     -- Here we must remove all dependencies, and only after this can the table be recreated:
#     create or alter trigger trg_tx_start on transaction start as begin end;
#     create or alter trigger trg_tx_commit on transaction commit as begin end;
#     create or alter trigger trg_tx_rollback on transaction rollback as begin end;
#     create or alter trigger trg_connect active on connect as begin end;
#     create or alter trigger trg_disconnect active on disconnect as begin end;
#     create or alter procedure sp_log_dblevel_trg_event as begin end;
#     create or alter view v_log_db_triggers_activity as select 1 x from rdb$database;
#
#     -- result: no more objects that depend on table 'log_db_triggers_activity', now we can recreate it.
#
#     recreate table log_db_triggers_activity (
#         id int generated by default as identity constraint pk_log_db_triggers_activity primary key
#         ,dts timestamp default 'now'
#         ,att integer default current_connection
#         ,trn integer default current_transaction
#         ,app varchar(80)
#         ,evt varchar(80)
#     );
#
#     create or alter view v_log_db_triggers_activity as select * from log_db_triggers_activity;
#
#     set term ^;
#     create or alter procedure sp_log_dblevel_trg_event (
#         a_event_type varchar(80) -- type of column log_db_triggers_activity.evt
#         ,a_working_tx int default null
#     )
#     as
#         declare v_app varchar(255);
#         declare p smallint;
#         declare back_slash char(1);
#     begin
#         v_app = reverse( right(rdb$get_context('SYSTEM','CLIENT_PROCESS'), 80) );
#         back_slash = ascii_char(92); -- backward slash; do NOT specify it literally otherwise Python will handle it as empty string!
#         p = maxvalue(position(back_slash in v_app ), position('/' in v_app ));
#         v_app = reverse(substring(v_app from 1 for p-1));
#         execute statement( 'insert into v_log_db_triggers_activity( trn, app, evt) values( ?, ?, ? )' ) ( coalesce(:a_working_tx, current_transaction), :v_app, :a_event_type) ;
#     end
#     ^
#
#     create or alter trigger trg_tx_start on transaction start as
#     begin
#         if (rdb$get_context('USER_SESSION', 'SKIP_DBLEVEL_TRG') is null ) then
#             -- execute procedure sp_log_dblevel_trg_event( 'TX_START, TIL=' || coalesce( rdb$get_context('SYSTEM', 'ISOLATION_LEVEL'), '[null]' ) );
#             execute procedure sp_log_dblevel_trg_event( 'TX_START' );
#     end
#     ^
#
#     create or alter trigger trg_tx_commit on transaction commit as
#     begin
#         if (rdb$get_context('USER_SESSION', 'SKIP_DBLEVEL_TRG') is null ) then
#             -- execute procedure sp_log_dblevel_trg_event( 'TX_COMMIT, TIL=' || coalesce( rdb$get_context('SYSTEM', 'ISOLATION_LEVEL'), '[null]' ) );
#             execute procedure sp_log_dblevel_trg_event( 'TX_COMMIT' );
#     end
#     ^
#
#     create or alter trigger trg_tx_rollback on transaction rollback as
#         declare v_current_tx int;
#     begin
#         v_current_tx = current_transaction;
#         if (rdb$get_context('USER_SESSION', 'SKIP_DBLEVEL_TRG') is null ) then
#             in autonomous transaction do
#                 -- execute procedure sp_log_dblevel_trg_event( 'TX_ROLLBACK, TIL=' || coalesce( rdb$get_context('SYSTEM', 'ISOLATION_LEVEL'), '[null]' ), v_current_tx );
#                 execute procedure sp_log_dblevel_trg_event( 'TX_ROLLBACK' );
#     end
#     ^
#
#     create or alter trigger trg_connect active on connect position 0 as
#     begin
#         if (rdb$get_context('USER_SESSION', 'SKIP_DBLEVEL_TRG') is null ) then
#             execute procedure sp_log_dblevel_trg_event( 'DB_ATTACH' );
#     end
#     ^
#
#     create or alter trigger trg_disconnect active on disconnect position 0 as
#         declare v_current_tx int;
#     begin
#         if (rdb$get_context('USER_SESSION', 'SKIP_DBLEVEL_TRG') is null ) then
#             execute procedure sp_log_dblevel_trg_event( 'DB_DETACH');
#     end
#     ^
#     set term ;^
#     commit;
#
#     select rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') as last_generated_repl_segment from rdb$database;
#     quit;
# ''' % locals()
#
#
# f_sql_chk = open( os.path.join(context['temp_directory'],'tmp_repltest_skip_db_trg.sql'), 'w')
# f_sql_chk.write(sql_ddl)
# flush_and_close( f_sql_chk )
#
# # Get content of FB_HOME replication.log _before_ launching ISQL:
# #############
#
# replold_lines = []
# with open( os.path.join(FB_HOME,'replication.log'), 'r') as f:
#     replold_lines = f.readlines()
#
# f_sql_log = open( ''.join( (os.path.splitext(f_sql_chk.name)[0], '.log' ) ), 'w')
# f_sql_err = open( ''.join( (os.path.splitext(f_sql_chk.name)[0], '.err' ) ), 'w')
# subprocess.call( [ context['isql_path'], 'localhost:' + db_main, '-i', f_sql_chk.name ], stdout = f_sql_log, stderr = f_sql_err)
# flush_and_close( f_sql_log )
# flush_and_close( f_sql_err )
#
# last_generated_repl_segment = 0
#
# with open(f_sql_err.name,'r') as f:
#     for line in f:
#         print('UNEXPECTED STDERR in initial SQL: ' + line)
#         MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0
#
# if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> initial SQL script finished w/o errors
#
#     # Test connect to master DB, just to fire DB-level triggers:
#     ###########################
#     con1 = fdb.connect( dsn = 'localhost:' + db_main)
#     con1.execute_immediate('recreate table test(id int)')
#     con1.close()
#
#     ##############################################################################
#     ### W A I T   U N T I L   R E P L I C A   B E C O M E S   A C T U A L ###
#     ##############################################################################
#     wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-1' )
#
#     runProgram('isql', ['localhost:' + db_main, '-nod'], 'set count on; set list on; select evt as main_db_trigger_fired from v_log_db_triggers_activity order by id;')
#     runProgram('isql', ['localhost:' + db_repl, '-nod'], 'set count on; set list on; select evt as repl_db_trigger_fired from v_log_db_triggers_activity order by id;')
#
#
# # return initial state of master DB:
# # remove all DB objects (tables, views, ...):
# # --------------------------------------------
# sql_clean_ddl = os.path.join(context['files_location'],'drop-all-db-objects.sql')
#
# f_clean_log = open( os.path.join(context['temp_directory'],'drop-all-db-objects.log'), 'w')
# f_clean_err = open( ''.join( ( os.path.splitext(f_clean_log.name)[0], '.err') ), 'w')
# subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-i', sql_clean_ddl], stdout=f_clean_log, stderr=f_clean_err )
# flush_and_close(f_clean_log)
# flush_and_close(f_clean_err)
#
# with open(f_clean_err.name,'r') as f:
#     for line in f:
#         print('UNEXPECTED STDERR in cleanup SQL: ' + line)
#         MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0
#
# with open(f_clean_log.name,'r') as f:
#     for line in f:
#         # show number of dropped objects
#         print(line)
#
# if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> initial SQL script finished w/o errors
#
#     ##############################################################################
#     ### W A I T   U N T I L   R E P L I C A   B E C O M E S   A C T U A L ###
#     ##############################################################################
#     wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-2' )
#
#     f_main_meta_sql = open( os.path.join(context['temp_directory'],'db_main_meta.sql'), 'w')
#     subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_main_meta_sql, stderr=subprocess.STDOUT )
#     flush_and_close( f_main_meta_sql )
#
#     f_repl_meta_sql = open( os.path.join(context['temp_directory'],'db_repl_meta.sql'), 'w')
#     subprocess.call( [context['isql_path'], 'localhost:' + db_repl, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_repl_meta_sql, stderr=subprocess.STDOUT )
#     flush_and_close( f_repl_meta_sql )
#
#     db_main_meta = open(f_main_meta_sql.name, 'r')
#     db_repl_meta = open(f_repl_meta_sql.name, 'r')
#
#     diffmeta = ''.join(difflib.unified_diff(
#         db_main_meta.readlines(),
#         db_repl_meta.readlines()
#       ))
#     db_main_meta.close()
#     db_repl_meta.close()
#
#     f_meta_diff = open( os.path.join(context['temp_directory'],'db_meta_diff.txt'), 'w', buffering = 0)
#     f_meta_diff.write(diffmeta)
#     flush_and_close( f_meta_diff )
#
#     # The following must issue only TWO rows:
#     #     UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_main]' ... */
#     #     UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_repl]' ... */
#     # Only these lines will be suppressed further (see subst. section):
#     with open(f_meta_diff.name, 'r') as f:
#         for line in f:
#             if line[:1] in ('-', '+') and line[:3] not in ('---','+++'):
#                 print('UNEXPECTED METADATA DIFF.: ' + line)
#
#
# # cleanup:
# ##########
# cleanup( (f_sql_chk, f_sql_log, f_sql_err, f_clean_log, f_clean_err, f_main_meta_sql, f_repl_meta_sql, f_meta_diff) )
#
#---
act_1 = python_act('db_1', substitutions=substitutions_1)

expected_stdout_1 = """
POINT-1 FOUND message about replicated segment.

MAIN_DB_TRIGGER_FIRED DB_ATTACH
MAIN_DB_TRIGGER_FIRED TX_START
MAIN_DB_TRIGGER_FIRED TX_ROLLBACK
MAIN_DB_TRIGGER_FIRED TX_COMMIT
MAIN_DB_TRIGGER_FIRED DB_DETACH
Records affected: 5

REPL_DB_TRIGGER_FIRED DB_ATTACH
REPL_DB_TRIGGER_FIRED TX_START
REPL_DB_TRIGGER_FIRED TX_ROLLBACK
REPL_DB_TRIGGER_FIRED TX_COMMIT
REPL_DB_TRIGGER_FIRED DB_DETACH
Records affected: 5

Start removing objects in: C:\\FBTESTING\\QA\\FBT-REPO\\TMP\\FBT-MAIN.FB50.FDB
Finish. Total objects removed: 9

POINT-2 FOUND message about replicated segment.
"""

@pytest.mark.version('>=4.0')
@pytest.mark.platform('Windows')
def test_1(act_1: Action):
    pytest.fail("Test not IMPLEMENTED")

File diff suppressed because it is too large
@@ -0,0 +1,450 @@
#coding:utf-8
#
# id: tests.functional.replication.duplicates_in_rw_replica_after_conflicting_insert
# title: Conflicting INSERT propagated into a read-write replica may cause duplicate records to appear
# description:
#     See: https://github.com/FirebirdSQL/firebird/issues/6849
#
# Test temporarily changes the replica mode using an external call: gfix -replica read_write ...
# We create a table on master with an integer column (PK) and a text field that lets one see who is the "author" of a record.
# Then we add one record (1, 'master, initially') and commit.
#
# After this we wait until the replica catches up with master; this delay lasts no more than the threshold
# defined by the MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG variable (measured in seconds).
# During this delay, we check the replication log every second, searching for the line with the number of the
# last generated segment (which was replicated and finally deleted).
# We can assume that replication finished OK only when such a line is found (see 'POINT-1').
#
# Then we open two FDB connections and add records:
#     1) first, in the replica: (2, 'RW-replica') + commit;
#     2) second, in master, with an ID that conflicts with the record just added in the replica: (2, 'master, finally') + commit.
#
# The message "Record being inserted into table TEST already exists, updating instead" will appear after this in the replication log.
# We have to wait again until the replica catches up with master (see above).
# After this we query data from the table 'TEST' on the *replica* DB. This table must have only two records:
# (ID = 1, WHO_MADE = 'master, initially') and (ID = 2, WHO_MADE = 'master, finally').
# Record (2, 'RW-replica') must be overwritten!
#
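# A minimal sketch of checking that expected final state through the fdb cursor used
# later in this test (an illustration; values are taken from the description above):
#
#     cur = con_repl.cursor()
#     cur.execute('select id, who_made from test order by id')
#     assert cur.fetchall() == [(1, 'master, initially'), (2, 'master, finally')]
#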
|
||||
# Further, we invoke ISQL with executing auxiliary script for drop all DB objects on master (with '-nod' command switch).
|
||||
# After all objects will be dropped, we have to wait again until replica becomes actual with master (see 'POINT-2').
|
||||
#
|
||||
# Finally, we extract metadata for master and replica and compare them (see 'f_meta_diff').
|
||||
# The only difference in metadata must be 'CREATE DATABASE' statement with different DB names - we suppress it,
|
||||
# thus metadata difference must not be issued.
|
||||
#
|
||||
# ####################
|
||||
# ### CRUCIAL NOTE ###
|
||||
# ####################
|
||||
# Currently, 25.06.2021, there is bug in FB 4.x and 5.x which can be seen on SECOND run of this test: message with text
|
||||
# "ERROR: Record format with length 68 is not found for table TEST" will appear in it after inserting 1st record in master.
|
||||
# The reason of that is "dirty" pages that remain in RDB$RELATION_FIELDS both on master and replica after dropping table.
|
||||
# Following query show different data that appear in replica DB on 1st and 2nd run (just after table was created on master):
|
||||
# =======
|
||||
# set blobdisplay 6;
|
||||
# select rdb$descriptor as fmt_descr
|
||||
# from rdb$formats natural join rdb$relations where rdb$relation_name = 'TEST';
|
||||
# =======
|
||||
# This bug was explained by dimitr, see letters 25.06.2021 11:49 and 25.06.2021 16:56.
|
||||
# It will be fixed later.
|
||||
#
|
||||
# The only workaround to solve this problem is to make SWEEP after all DB objects have been dropped.
|
||||
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
# !NB! BOTH master and replica must be cleaned up by sweep!
|
||||
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
#
|
||||
# ################
|
||||
# ### N O T E ###
|
||||
# ################
|
||||
# Test assumes that master and replica DB have been created beforehand.
|
||||
# Also, it assumes that %FB_HOME%
|
||||
# eplication.conf has been prepared with apropriate parameters for replication.
|
||||
# Particularly, name of directories and databases must have info about checked FB major version and ServerMode.
|
||||
# * verbose = true // in order to find out line with message that required segment was replicated
|
||||
# * section for master database with specified parameters:
|
||||
# journal_directory = "!fbt_repo! mp
|
||||
# b-replication.!fb_major!.!server_mode!.journal"
|
||||
# journal_archive_directory = "!fbt_repo! mp
|
||||
# b-replication.!fb_major!.!server_mode!.archive"
|
||||
# journal_archive_command = "copy $(pathname) $(archivepathname)"
|
||||
# journal_archive_timeout = 10
|
||||
# * section for replica database with specified parameter:
|
||||
# journal_source_directory = "!fbt_repo! mp
|
||||
# b-replication.!fb_major!.!server_mode!.archive"
|
||||
#
|
||||
# Master and replica databases must be created in "!fbt_repo! mp" directory and have names like these:
|
||||
# 'fbt-main.fb40.SS.fdb'; 'fbt-repl.fb40.SS.fdb'; - for FB 4.x ('SS' = Super; 'CS' = Classic)
|
||||
# 'fbt-main.fb50.SS.fdb'; 'fbt-repl.fb50.SS.fdb'; - for FB 5.x ('SS' = Super; 'CS' = Classic)
|
||||
# NB: fixed numeric value ('40' or '50') must be used for any minor FB version (4.0; 4.0.1; 4.1; 5.0; 5.1 etc)
|
||||
#
|
||||
# These two databases must NOT be dropped in any of tests related to replication!
|
||||
# They are created and dropped in the batch scenario which prepares FB instance to be checked for each ServerMode
|
||||
# and make cleanup after it, i.e. when all tests will be completed.
|
||||
#
|
||||
# NB. Currently this task was implemented only in Windows batch, thus test has attribute platform = 'Windows'.
|
||||
#
|
||||
# Temporary comment. For debug purpoces:
|
||||
# 1) find out SUFFIX of the name of FB service which is to be tested (e.g. 'DefaultInstance', '40SS' etc);
|
||||
# 2) copy file %fbt-repo% ests
|
||||
# unctional abloidatches\\setup-fb-for-replication.bat.txt
|
||||
# to some place and rename it "*.bat";
|
||||
# 3) open this .bat in editor and asjust value of 'fbt_repo' variable;
|
||||
# 4) run: setup-fb-for-replication.bat [SUFFIX_OF_FB_SERVICE]
|
||||
# where SUFFIX_OF_FB_SERVICE is ending part of FB service which you want to check:
|
||||
# DefaultInstance ; 40ss ; 40cs ; 50ss ; 50cs etc
|
||||
# 5) batch 'setup-fb-for-replication.bat' will:
|
||||
# * stop selected FB instance
|
||||
# * create test databases (in !fbt_repo! mp\\);
|
||||
# * prepare journal/archive sub-folders for replication (also in !fbt_repo! mp\\);
|
||||
# * replace %fb_home%
|
||||
# eplication.conf with apropriate
|
||||
# * start selected FB instance
|
||||
# 6) run this test (FB instance will be already launched by setup-fb-for-replication.bat):
|
||||
# %fpt_repo%
|
||||
# bt-run2.bat dblevel-triggers-must-not-fire-on-replica.fbt 50ss, etc
|
||||
#
|
||||
# Confirmed bug on 5.0.0.63, 4.0.0.2508 (date of both snapshots: 08-jun-2021, i.e. just before fix).
|
||||
# Checked on:
|
||||
# 5.0.0.85 SS: 34.951s.
|
||||
# 5.0.0.85 CS: 36.813s.
|
||||
# 4.0.1.2520 SS: 38.939s.
|
||||
# 4.0.1.2519 CS: 32.376s.
|
||||
#
|
||||
# tracker_id:
|
||||
# min_versions: ['4.0']
|
||||
# versions: 4.0
|
||||
# qmid: None
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, python_act, Action
|
||||
|
||||
# version: 4.0
|
||||
# resources: None
|
||||
|
||||
substitutions_1 = [('Start removing objects in:.*', 'Start removing objects'), ('Finish. Total objects removed: [1-9]\\d*', 'Finish. Total objects removed'), ('.* CREATE DATABASE .*', ''), ('FMT_DESCR .*', 'FMT_DESCR')]
|
||||
|
||||
init_script_1 = """"""
|
||||
|
||||
db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
|
||||
# test_script_1
|
||||
#---
|
||||
#
|
||||
# import os
|
||||
# import subprocess
|
||||
# import re
|
||||
# import difflib
|
||||
# import shutil
|
||||
# import time
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
#
|
||||
# #####################################
|
||||
# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 65
|
||||
# #####################################
|
||||
#
|
||||
# svc = fdb.services.connect(host='localhost', user=user_name, password=user_password)
|
||||
# FB_HOME = svc.get_home_directory()
|
||||
# svc.close()
|
||||
#
|
||||
# engine = db_conn.engine_version # 4.0; 4.1; 5.0 etc -- type float
|
||||
# fb_major = 'fb' + str(engine)[:1] + '0' # 'fb40'; 'fb50'
|
||||
#
|
||||
# cur = db_conn.cursor()
|
||||
# cur.execute("select rdb$config_value from rdb$config where upper(rdb$config_name) = upper('ServerMode')")
|
||||
# server_mode = 'XX'
|
||||
# for r in cur:
|
||||
# if r[0] == 'Super':
|
||||
# server_mode = 'SS'
|
||||
# elif r[0] == 'SuperClassic':
|
||||
# server_mode = 'SC'
|
||||
# elif r[0] == 'Classic':
|
||||
# server_mode = 'CS'
|
||||
# cur.close()
|
||||
#
|
||||
# # 'fbt-main.fb50.ss.fdb' etc:
|
||||
# db_main = os.path.join( context['temp_directory'], 'fbt-main.' + fb_major + '.' + server_mode + '.fdb' )
|
||||
# db_repl = db_main.replace( 'fbt-main.', 'fbt-repl.')
|
||||
#
|
||||
# runProgram('gfix', ['-replica', 'read_write', 'localhost:' + db_repl])
|
||||
#
|
||||
# # Folders for journalling and archieving segments.
|
||||
# repl_journal_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.journal' )
|
||||
# repl_archive_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.archive' )
|
||||
#
|
||||
# db_conn.close()
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
# def flush_and_close( file_handle ):
|
||||
# # https://docs.python.org/2/library/os.html#os.fsync
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
|
||||
# global os
|
||||
#
|
||||
# file_handle.flush()
|
||||
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
|
||||
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
|
||||
# os.fsync(file_handle.fileno())
|
||||
# file_handle.close()
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
# def cleanup( f_names_list ):
|
||||
# global os
|
||||
# for i in range(len( f_names_list )):
|
||||
# if type(f_names_list[i]) == file:
|
||||
# del_name = f_names_list[i].name
|
||||
# elif type(f_names_list[i]) == str:
|
||||
# del_name = f_names_list[i]
|
||||
# else:
|
||||
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
|
||||
# del_name = None
|
||||
#
|
||||
# if del_name and os.path.isfile( del_name ):
|
||||
# os.remove( del_name )
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
# def wait_for_data_in_replica( fb_home, max_allowed_time_for_wait, db_main, prefix_msg = '' ):
|
||||
#
|
||||
# global re
|
||||
# global difflib
|
||||
# global time
|
||||
#
|
||||
# replold_lines = []
|
||||
# with open( os.path.join(fb_home,'replication.log'), 'r') as f:
|
||||
# replold_lines = f.readlines()
|
||||
#
|
||||
# con = fdb.connect( dsn = 'localhost:' + db_main, no_db_triggers = 1)
|
||||
# cur = con.cursor()
|
||||
# cur.execute("select rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') from rdb$database")
|
||||
# for r in cur:
|
||||
# last_generated_repl_segment = r[0]
|
||||
# cur.close()
|
||||
# con.close()
|
||||
#
|
||||
# #print('last_generated_repl_segment:', last_generated_repl_segment)
|
||||
#
|
||||
# # VERBOSE: Segment 1 (2582 bytes) is replicated in 1 second(s), deleting the file
|
||||
# p=re.compile( '\\+\\s+verbose:\\s+segment\\s+%(last_generated_repl_segment)s\\s+\\(\\d+\\s+bytes\\)\\s+is\\s+replicated.*deleting' % locals(), re.IGNORECASE)
|
||||
#
|
||||
# found_required_message = False
|
||||
# for i in range(0,max_allowed_time_for_wait):
|
||||
# time.sleep(1)
|
||||
#
|
||||
# # Get content of fb_home replication.log _after_ isql finish:
|
||||
# f_repllog_new = open( os.path.join(fb_home,'replication.log'), 'r')
|
||||
# diff_data = difflib.unified_diff(
|
||||
# replold_lines,
|
||||
# f_repllog_new.readlines()
|
||||
# )
|
||||
# f_repllog_new.close()
|
||||
#
|
||||
# for k,d in enumerate(diff_data):
|
||||
# if p.search(d):
|
||||
# print( (prefix_msg + ' ' if prefix_msg else '') + 'FOUND message about replicated segment.' )
|
||||
# found_required_message = True
|
||||
# break
|
||||
#
|
||||
# if found_required_message:
|
||||
# break
|
||||
#
|
||||
# if not found_required_message:
|
||||
# print('UNEXPECTED RESULT: no message about replicated segment for %d seconds.' % max_allowed_time_for_wait)
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
#  sql_ddl = '''    set bail on;
#      set list on;
#      --set blob all;
#      set blobdisplay 6;
#
#      recreate table test(id int primary key using index test_pk, dts timestamp default 'now', who_made varchar(50));
#      commit;
#      insert into test(id, who_made) values(1,'master, initially');
#      commit;
#
#      -- for debug only:
#      select rdb$get_context('SYSTEM', 'DB_NAME'), rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') as last_generated_repl_segment from rdb$database;
#      select
#          RDB$DESCRIPTOR as fmt_descr
#      from RDB$FORMATS natural join RDB$RELATIONS
#      where RDB$RELATION_NAME = 'TEST';
#      quit;
#  ''' % locals()
#
#
#  f_sql_chk = open( os.path.join(context['temp_directory'],'tmp_gh_6849_init.sql'), 'w')
#  f_sql_chk.write(sql_ddl)
#  flush_and_close( f_sql_chk )
#
#  f_sql_log = open( ''.join( (os.path.splitext(f_sql_chk.name)[0], '.log' ) ), 'w')
#  f_sql_err = open( ''.join( (os.path.splitext(f_sql_chk.name)[0], '.err' ) ), 'w')
#  subprocess.call( [ context['isql_path'], 'localhost:' + db_main, '-i', f_sql_chk.name ], stdout = f_sql_log, stderr = f_sql_err)
#  flush_and_close( f_sql_log )
#  flush_and_close( f_sql_err )
#
#  last_generated_repl_segment = 0
#
#  with open(f_sql_err.name,'r') as f:
#      for line in f:
#          print('UNEXPECTED STDERR in initial SQL: ' + line)
#          MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0
#
#  if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> initial SQL script finished w/o errors
#
#      ##############################################################################
#      ###  W A I T   U N T I L   R E P L I C A   B E C O M E S   A C T U A L    ###
#      ##############################################################################
#      wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-1A' )
#
#      con_repl = fdb.connect( dsn = 'localhost:' + db_repl)
#      cur_repl = con_repl.cursor()
#      cur_repl.execute('insert into test(id,who_made) values(?, ?)', (2, 'RW-replica'))
#
#      # con_repl.execute_immediate( "insert into test(id,who_made) values( 2, )" )
#      con_repl.commit()
#      cur_repl.close()
#      con_repl.close()
#
#      con_main = fdb.connect( dsn = 'localhost:' + db_main)
#      cur_main = con_main.cursor()
#      cur_main.execute('insert into test(id,who_made) values(?, ?)', (2, 'master, finally'))
#      #con_main.execute_immediate( "insert into test(id,who_made) values( 2, 'master, finally')" )
#      con_main.commit()
#      cur_main.close()
#      con_main.close()
#
#      ##############################################################################
#      ###  W A I T   U N T I L   R E P L I C A   B E C O M E S   A C T U A L    ###
#      ##############################################################################
#      wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-1B' )
#
#      # Here we must check that replica has no duplicates in PK column test.id:
#      ####################
#      # for debug only: runProgram('isql', ['localhost:' + db_repl, '-nod'], "set list on; set blobdisplay 6; select id, who_made from test order by dts; select rdb$descriptor as fmt_descr from rdb$formats natural join rdb$relations where rdb$relation_name = 'TEST';")
#      runProgram('isql', ['localhost:' + db_repl, '-nod'], "set list on; select id, who_made from test order by dts;")
#
#
#  runProgram('gfix', ['-replica', 'read_only', 'localhost:' + db_repl])
#
#  # return initial state of master DB:
#  # remove all DB objects (tables, views, ...):
#  # --------------------------------------------
#  sql_clean_ddl = os.path.join(context['files_location'],'drop-all-db-objects.sql')
#
#  f_clean_log=open( os.path.join(context['temp_directory'],'drop-all-db-objects-gh_6849.log'), 'w')
#  f_clean_err=open( ''.join( ( os.path.splitext(f_clean_log.name)[0], '.err') ), 'w')
#  subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-i', sql_clean_ddl], stdout=f_clean_log, stderr=f_clean_err )
#  flush_and_close(f_clean_log)
#  flush_and_close(f_clean_err)
#
#  with open(f_clean_err.name,'r') as f:
#      for line in f:
#          print('UNEXPECTED STDERR in cleanup SQL: ' + line)
#          MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0
#
#  with open(f_clean_log.name,'r') as f:
#      for line in f:
#          # show number of dropped objects
#          print(line)
#
#  if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> initial SQL script finished w/o errors
#
#      ##############################################################################
#      ###  W A I T   U N T I L   R E P L I C A   B E C O M E S   A C T U A L    ###
#      ##############################################################################
#      wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-2' )
#
#      f_main_meta_sql=open( os.path.join(context['temp_directory'],'db_main_meta_gh_6849.sql'), 'w')
#      subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_main_meta_sql, stderr=subprocess.STDOUT )
#      flush_and_close( f_main_meta_sql )
#
#      f_repl_meta_sql=open( os.path.join(context['temp_directory'],'db_repl_meta_gh_6849.sql'), 'w')
#      subprocess.call( [context['isql_path'], 'localhost:' + db_repl, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_repl_meta_sql, stderr=subprocess.STDOUT )
#      flush_and_close( f_repl_meta_sql )
#
#      db_main_meta=open(f_main_meta_sql.name, 'r')
#      db_repl_meta=open(f_repl_meta_sql.name, 'r')
#
#      diffmeta = ''.join(difflib.unified_diff(
#          db_main_meta.readlines(),
#          db_repl_meta.readlines()
#      ))
#      db_main_meta.close()
#      db_repl_meta.close()
#
#      f_meta_diff=open( os.path.join(context['temp_directory'],'db_meta_diff_gh_6849.txt'), 'w', buffering = 0)
#      f_meta_diff.write(diffmeta)
#      flush_and_close( f_meta_diff )
#
#      # Following must issue only TWO rows:
#      # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_main]' ... */
#      # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_repl]' ... */
#      # Only these lines will be suppressed further (see subst. section):
#      with open(f_meta_diff.name, 'r') as f:
#          for line in f:
#              if line[:1] in ('-', '+') and line[:3] not in ('---','+++'):
#                  print('UNEXPECTED METADATA DIFF.: ' + line)
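#      # A minimal sketch (sample diff lines are assumptions) of the filter above: it reports
#      # every added/removed line of the metadata diff while skipping the '---' / '+++'
#      # unified-diff file headers:
#      #
#      #     for line in ('--- main.sql', '+++ repl.sql', '-/* CREATE DATABASE ... */', ' COMMIT WORK;'):
#      #         if line[:1] in ('-', '+') and line[:3] not in ('---','+++'):
#      #             print('UNEXPECTED METADATA DIFF.: ' + line)   # only the CREATE DATABASE line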
#
#      ######################
#      ### A C H T U N G  ###
#      ######################
#      # MANDATORY, OTHERWISE REPLICATION GETS STUCK ON SECOND RUN OF THIS TEST
#      # WITH 'ERROR: Record format with length 68 is not found for table TEST':
#      runProgram('gfix', ['-sweep', 'localhost:' + db_repl])
#      runProgram('gfix', ['-sweep', 'localhost:' + db_main])
#      #######################
#
#  # If we skip 'gfix -sweep' then following will be:
#  #
#  # MASTER and REPLICA, after 1st run:
#  # ==================
#  #     Fields:
#  #      id  offset  type            length  sub_type  flags
#  #     ---  ------  --------------  ------  --------  -----
#  #       0       4  9 LONG               4         0   0x00
#  #       1       8  16 TIMESTAMP         8         0   0x00
#  #       2      16  3 VARCHAR           52         0   0x00
#  #
#  # REPLICA on 2nd run (after creating TEST table on master):
#  # ===================
#  #     Fields:
#  #      id  offset  type            length  sub_type  flags
#  #     ---  ------  --------------  ------  --------  -----
#  #       0       8  16 TIMESTAMP         8         0   0x00
#  #       1      16  3 VARCHAR           52         0   0x00
#  #       2      68  9 LONG               4         0   0x00
#
#
#  # cleanup:
#  ##########
#  cleanup( (f_sql_chk, f_sql_log, f_sql_err, f_clean_log, f_clean_err, f_main_meta_sql, f_repl_meta_sql, f_meta_diff) )
#
#
#---
act_1 = python_act('db_1', substitutions=substitutions_1)

expected_stdout_1 = """
|
||||
POINT-1A FOUND message about replicated segment.
|
||||
POINT-1B FOUND message about replicated segment.
|
||||
|
||||
ID 1
|
||||
WHO_MADE master, initially
|
||||
ID 2
|
||||
WHO_MADE master, finally
|
||||
|
||||
Start removing objects
|
||||
Finish. Total objects removed
|
||||
|
||||
POINT-2 FOUND message about replicated segment.
|
||||
"""
|
||||
|
||||
@pytest.mark.version('>=4.0')
|
||||
@pytest.mark.platform('Windows')
|
||||
def test_1(act_1: Action):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
|
||||
|
@ -0,0 +1,383 @@
#coding:utf-8
#
# id: tests.functional.replication.generator_could_not_be_transferred_to_replica
# title: Generator values may not replicate after commit
# decription:
#    https://github.com/FirebirdSQL/firebird/issues/6848
#
#    Test creates a table and two sequences: 'g_1' and 'g_2'.
#    Then we add a record to the table and use 'g_2' (i.e. the sequence which was created LAST).
#    After this we update the record and use 'g_1' (the sequence which was created FIRST).
#
#    Then we wait until the replica becomes actual to master; this delay lasts no more than the threshold
#    defined by the MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG variable (measured in seconds).
#    During this delay, we check the replication log every second, searching for a line with the number of the
#    last generated segment (which was replicated and finally deleted).
#    We can assume that replication finished OK only when such a line is found (see 'POINT-1').
#
#    After this, we query master and replica databases and obtain count() for the table and the values of both sequences.
#    Obtained values must be identical on master and replica.
#
#    Further, we invoke ISQL, executing an auxiliary script that drops all DB objects on master (with the '-nod' command switch).
#    After all objects are dropped, we wait again until the replica catches up with master (see 'POINT-2').
#
#    Finally, we extract metadata for master and replica and compare them (see 'f_meta_diff').
#    The only difference in metadata must be the 'CREATE DATABASE' statement with different DB names - we suppress it,
#    thus no metadata difference must be issued.
#
#    ################
#    ###  N O T E  ###
#    ################
#    Test assumes that master and replica DB have been created beforehand.
#    Also, it assumes that %FB_HOME%\replication.conf has been prepared with appropriate parameters for replication.
#    Particularly, the names of directories and databases must carry info about the checked FB major version and ServerMode:
#    * verbose = true // in order to find the line with the message that the required segment was replicated
#    * section for master database with specified parameters:
#          journal_directory = "!fbt_repo!\tmp\fb-replication.!fb_major!.!server_mode!.journal"
#          journal_archive_directory = "!fbt_repo!\tmp\fb-replication.!fb_major!.!server_mode!.archive"
#          journal_archive_command = "copy $(pathname) $(archivepathname)"
#          journal_archive_timeout = 10
#    * section for replica database with specified parameter:
#          journal_source_directory = "!fbt_repo!\tmp\fb-replication.!fb_major!.!server_mode!.archive"
#
#    Master and replica databases must be created in the "!fbt_repo!\tmp" directory and have names like these:
#    'fbt-main.fb40.SS.fdb'; 'fbt-repl.fb40.SS.fdb'; - for FB 4.x ('SS' = Super; 'CS' = Classic)
#    'fbt-main.fb50.SS.fdb'; 'fbt-repl.fb50.SS.fdb'; - for FB 5.x ('SS' = Super; 'CS' = Classic)
#    NB: fixed numeric value ('40' or '50') must be used for any minor FB version (4.0; 4.0.1; 4.1; 5.0; 5.1 etc)
#
#    These two databases must NOT be dropped in any of tests related to replication!
#    They are created and dropped in the batch scenario which prepares the FB instance to be checked for each ServerMode
#    and makes cleanup after it, i.e. when all tests have completed.
#
#    NB. Currently this task was implemented only in Windows batch, thus the test has attribute platform = 'Windows'.
#
#    Temporary comment. For debug purposes:
#    1) find out SUFFIX of the name of FB service which is to be tested (e.g. 'DefaultInstance', '40SS' etc);
#    2) copy file %fbt-repo%\tests\functional\tabloid\batches\setup-fb-for-replication.bat.txt
#       to some place and rename it "*.bat";
#    3) open this .bat in editor and adjust the value of the 'fbt_repo' variable;
#    4) run: setup-fb-for-replication.bat [SUFFIX_OF_FB_SERVICE]
#       where SUFFIX_OF_FB_SERVICE is the ending part of the name of the FB service which you want to check:
#       DefaultInstance ; 40ss ; 40cs ; 50ss ; 50cs etc
#    5) batch 'setup-fb-for-replication.bat' will:
#       * stop selected FB instance
#       * create test databases (in !fbt_repo!\tmp\);
#       * prepare journal/archive sub-folders for replication (also in !fbt_repo!\tmp\);
#       * replace %fb_home%\replication.conf with the appropriate one;
#       * start selected FB instance
#    6) run this test (FB instance will be already launched by setup-fb-for-replication.bat):
#       %fpt_repo%\fbt-run2.bat dblevel-triggers-must-not-fire-on-replica.fbt 50ss, etc
#
#    Confirmed bug on 4.0.0.2465: sequence with least ID ('g_1') is not replicated, its value on replica remains 0.
#    Checked on:
#    4.0.1.2519 SS; 5.0.0.82 SS/CS.
#
# tracker_id:
# min_versions: ['4.0']
# versions: 4.0
# qmid: None

import pytest
from firebird.qa import db_factory, python_act, Action

# version: 4.0
# resources: None

substitutions_1 = [('Start removing objects in:.*', 'Start removing objects'), ('Finish. Total objects removed: [1-9]\\d*', 'Finish. Total objects removed'), ('.* CREATE DATABASE .*', '')]
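# A minimal sketch (the sample string is an assumption) of how the substitutions above
# normalize test output before it is compared with expected_stdout_1:
#
#     import re
#     line = 'Finish. Total objects removed: 27'
#     for patt, repl in substitutions_1:
#         line = re.sub(patt, repl, line)
#     # line is now 'Finish. Total objects removed', so any positive count matches.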

init_script_1 = """"""

db_1 = db_factory(sql_dialect=3, init=init_script_1)

# test_script_1
#---
#
#  import os
#  import subprocess
#  import re
#  import difflib
#  import shutil
#  import time
#
#  os.environ["ISC_USER"] = user_name
#  os.environ["ISC_PASSWORD"] = user_password
#
#  #####################################
#  MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 65
#  #####################################
#
#  svc = fdb.services.connect(host='localhost', user=user_name, password=user_password)
#  FB_HOME = svc.get_home_directory()
#  svc.close()
#
#  engine = db_conn.engine_version # 4.0; 4.1; 5.0 etc -- type float
#  fb_major = 'fb' + str(engine)[:1] + '0' # 'fb40'; 'fb50'
#
#  cur = db_conn.cursor()
#  cur.execute("select rdb$config_value from rdb$config where upper(rdb$config_name) = upper('ServerMode')")
#  server_mode = 'XX'
#  for r in cur:
#      if r[0] == 'Super':
#          server_mode = 'SS'
#      elif r[0] == 'SuperClassic':
#          server_mode = 'SC'
#      elif r[0] == 'Classic':
#          server_mode = 'CS'
#  cur.close()
#
#  # 'fbt-main.fb50.ss.fdb' etc:
#  db_main = os.path.join( context['temp_directory'], 'fbt-main.' + fb_major + '.' + server_mode + '.fdb' )
#  db_repl = db_main.replace( 'fbt-main.', 'fbt-repl.')
#
#  # Folders for journalling and archiving segments.
#  repl_journal_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.journal' )
#  repl_archive_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.archive' )
#
#  db_conn.close()
#
#  #--------------------------------------------
#
#  def flush_and_close( file_handle ):
#      # https://docs.python.org/2/library/os.html#os.fsync
#      # If you're starting with a Python file object f,
#      # first do f.flush(), and
#      # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
#      global os
#
#      file_handle.flush()
#      if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
#          # otherwise: "OSError: [Errno 9] Bad file descriptor"!
#          os.fsync(file_handle.fileno())
#      file_handle.close()
#
#  #--------------------------------------------
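#  # A minimal standalone sketch of the flush+fsync idiom implemented by flush_and_close()
#  # above (the file name is hypothetical): flush() empties Python's userspace buffer,
#  # os.fsync() then forces the OS-level buffer down to the storage device:
#  #
#  #     import os
#  #     fh = open('example.tmp', 'w')
#  #     fh.write('some data')
#  #     fh.flush()                 # Python buffers -> OS
#  #     os.fsync(fh.fileno())      # OS buffers -> disk
#  #     fh.close()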
#
#  def cleanup( f_names_list ):
#      global os
#      for i in range(len( f_names_list )):
#          if type(f_names_list[i]) == file:
#              del_name = f_names_list[i].name
#          elif type(f_names_list[i]) == str:
#              del_name = f_names_list[i]
#          else:
#              print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
#              del_name = None
#
#          if del_name and os.path.isfile( del_name ):
#              os.remove( del_name )
#
#  #--------------------------------------------
#
#  def wait_for_data_in_replica( fb_home, max_allowed_time_for_wait, db_main, prefix_msg = '' ):
#
#      global re
#      global difflib
#      global time
#
#      replold_lines = []
#      with open( os.path.join(fb_home,'replication.log'), 'r') as f:
#          replold_lines = f.readlines()
#
#      con = fdb.connect( dsn = 'localhost:' + db_main, no_db_triggers = 1)
#      cur = con.cursor()
#      cur.execute("select rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') from rdb$database")
#      for r in cur:
#          last_generated_repl_segment = r[0]
#      cur.close()
#      con.close()
#
#      #print('last_generated_repl_segment:', last_generated_repl_segment)
#
#
#      # +IMAGE-PC1 (replica) Fri Jun 11 17:57:01 2021
#      # +   Database: C:\\FBTESTING\\QA\\FBT-REPO\\TMP\\FBT-REPL.FB50.FDB
#      # +   VERBOSE: Added 1 segment(s) to the processing queue
#      # +
#      # +IMAGE-PC1 (replica) Fri Jun 11 17:57:04 2021
#      # +   Database: C:\\FBTESTING\\QA\\FBT-REPO\\TMP\\FBT-REPL.FB50.FDB
#      # +   VERBOSE: Segment 1 (2582 bytes) is replicated in 1 second(s), deleting the file
#
#      p=re.compile( '\\+\\s+verbose:\\s+segment\\s+%(last_generated_repl_segment)s\\s+\\(\\d+\\s+bytes\\)\\s+is\\s+replicated.*deleting' % locals(), re.IGNORECASE)
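#
#      # A minimal check (sample line is an assumption; it assumes the last generated segment
#      # was No. 1) of what the pattern above matches; the leading '\\+' is required because
#      # the new log lines arrive here via difflib.unified_diff:
#      #
#      #     sample = '+   VERBOSE: Segment 1 (2582 bytes) is replicated in 1 second(s), deleting the file'
#      #     assert p.search(sample) is not None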
#
#      found_required_message = False
#      for i in range(0,max_allowed_time_for_wait):
#          time.sleep(1)
#
#          # Get content of fb_home replication.log _after_ isql finish:
#          f_repllog_new = open( os.path.join(fb_home,'replication.log'), 'r')
#          diff_data = difflib.unified_diff(
#              replold_lines,
#              f_repllog_new.readlines()
#            )
#          f_repllog_new.close()
#
#          for k,d in enumerate(diff_data):
#              if p.search(d):
#                  print( (prefix_msg + ' ' if prefix_msg else '') + 'FOUND message about replicated segment.' )
#                  found_required_message = True
#                  break
#
#          if found_required_message:
#              break
#
#      if not found_required_message:
#          print('UNEXPECTED RESULT: no message about replicated segment for %d seconds.' % max_allowed_time_for_wait)
#
#  #--------------------------------------------
#
#  sql_ddl = '''    set bail on;
#      set list on;
#
#      select mon$database_name from mon$database;
#      recreate sequence g_1;
#      recreate sequence g_2;
#      recreate table test(id int primary key);
#      commit;
#
#      select rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') as last_generated_repl_segment from rdb$database;
#      quit;
#  ''' % locals()
#
#
#  f_sql_chk = open( os.path.join(context['temp_directory'],'tmp_chk_skip_gen_repl.sql'), 'w')
#  f_sql_chk.write(sql_ddl)
#  flush_and_close( f_sql_chk )
#
#  f_sql_log = open( ''.join( (os.path.splitext(f_sql_chk.name)[0], '.log' ) ), 'w')
#  f_sql_err = open( ''.join( (os.path.splitext(f_sql_chk.name)[0], '.err' ) ), 'w')
#  subprocess.call( [ context['isql_path'], 'localhost:' + db_main, '-i', f_sql_chk.name ], stdout = f_sql_log, stderr = f_sql_err)
#  flush_and_close( f_sql_log )
#  flush_and_close( f_sql_err )
#
#  last_generated_repl_segment = 0
#
#  with open(f_sql_err.name,'r') as f:
#      for line in f:
#          print('UNEXPECTED STDERR in initial SQL: ' + line)
#          MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0
#
#  if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> initial SQL script finished w/o errors
#
#      # Test connect to master DB, just to fire DB-level triggers:
#      ###########################
#      con1 = fdb.connect( dsn = 'localhost:' + db_main)
#      con1.execute_immediate( 'insert into test(id) values( gen_id(g_2, 1) )' )
#      con1.execute_immediate( 'update test set id = id + gen_id(g_1, 1)' )
#      con1.commit()
#      con1.close()
#
#      ##############################################################################
#      ###  W A I T   U N T I L   R E P L I C A   B E C O M E S   A C T U A L    ###
#      ##############################################################################
#      wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-1' )
#
#      runProgram('isql', ['localhost:' + db_main, '-nod'], 'set list on; select count(*) test_rows_on_main, gen_id(g_1,0) as g1_curr_val_on_main, gen_id(g_2,0) as g2_curr_val_on_main from test;')
#      runProgram('isql', ['localhost:' + db_repl, '-nod'], 'set list on; select count(*) test_rows_on_repl, gen_id(g_1,0) as g1_curr_val_on_repl, gen_id(g_2,0) as g2_curr_val_on_repl from test;')
#
#
#  # return initial state of master DB:
#  # remove all DB objects (tables, views, ...):
#  # --------------------------------------------
#  sql_clean_ddl = os.path.join(context['files_location'],'drop-all-db-objects.sql')
#
#  f_clean_log=open( os.path.join(context['temp_directory'],'drop-all-db-objects.log'), 'w')
#  f_clean_err=open( ''.join( ( os.path.splitext(f_clean_log.name)[0], '.err') ), 'w')
#  subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-i', sql_clean_ddl], stdout=f_clean_log, stderr=f_clean_err )
#  flush_and_close(f_clean_log)
#  flush_and_close(f_clean_err)
#
#  with open(f_clean_err.name,'r') as f:
#      for line in f:
#          print('UNEXPECTED STDERR in cleanup SQL: ' + line)
#          MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0
#
#  with open(f_clean_log.name,'r') as f:
#      for line in f:
#          # show number of dropped objects
#          print(line)
#
#  if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> initial SQL script finished w/o errors
#
#      ##############################################################################
#      ###  W A I T   U N T I L   R E P L I C A   B E C O M E S   A C T U A L    ###
#      ##############################################################################
#      wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-2' )
#
#      f_main_meta_sql=open( os.path.join(context['temp_directory'],'db_main_meta_skip_gen_repl.sql'), 'w')
#      subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_main_meta_sql, stderr=subprocess.STDOUT )
#      flush_and_close( f_main_meta_sql )
#
#      f_repl_meta_sql=open( os.path.join(context['temp_directory'],'db_repl_meta_skip_gen_repl.sql'), 'w')
#      subprocess.call( [context['isql_path'], 'localhost:' + db_repl, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_repl_meta_sql, stderr=subprocess.STDOUT )
#      flush_and_close( f_repl_meta_sql )
#
#      db_main_meta=open(f_main_meta_sql.name, 'r')
#      db_repl_meta=open(f_repl_meta_sql.name, 'r')
#
#      diffmeta = ''.join(difflib.unified_diff(
#          db_main_meta.readlines(),
#          db_repl_meta.readlines()
#      ))
#      db_main_meta.close()
#      db_repl_meta.close()
#
#      f_meta_diff=open( os.path.join(context['temp_directory'],'db_meta_diff_skip_gen_repl.txt'), 'w', buffering = 0)
#      f_meta_diff.write(diffmeta)
#      flush_and_close( f_meta_diff )
#
#      # Following must issue only TWO rows:
#      # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_main]' ... */
#      # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_repl]' ... */
#      # Only these lines will be suppressed further (see subst. section):
#      with open(f_meta_diff.name, 'r') as f:
#          for line in f:
#              if line[:1] in ('-', '+') and line[:3] not in ('---','+++'):
#                  print('UNEXPECTED METADATA DIFF.: ' + line)
#
#      ######################
#      ### A C H T U N G  ###
#      ######################
#      # MANDATORY, OTHERWISE REPLICATION MAY GET STUCK ON TESTS THAT ARE EXECUTED AFTER THIS
#      # (e.g. on "updating-blob-with-empty-string-stops-replication")
#      # with 'ERROR: Record format with length <NN> is not found for table TEST':
#      runProgram('gfix', ['-sweep', 'localhost:' + db_repl])
#      runProgram('gfix', ['-sweep', 'localhost:' + db_main])
#      #######################
#
#  # cleanup:
#  ##########
#  cleanup( (f_sql_chk, f_sql_log, f_sql_err, f_clean_log, f_clean_err, f_main_meta_sql, f_repl_meta_sql, f_meta_diff) )
#
#
#---
act_1 = python_act('db_1', substitutions=substitutions_1)

expected_stdout_1 = """
POINT-1 FOUND message about replicated segment.

TEST_ROWS_ON_MAIN 1
G1_CURR_VAL_ON_MAIN 1
G2_CURR_VAL_ON_MAIN 1

TEST_ROWS_ON_REPL 1
G1_CURR_VAL_ON_REPL 1
G2_CURR_VAL_ON_REPL 1

Start removing objects
Finish. Total objects removed
POINT-2 FOUND message about replicated segment.
"""

@pytest.mark.version('>=4.0')
@pytest.mark.platform('Windows')
def test_1(act_1: Action):
    pytest.fail("Test not IMPLEMENTED")

@ -0,0 +1,445 @@
#coding:utf-8
#
# id: tests.functional.replication.invalid_msg_if_target_db_has_no_replica_flag
# title: Invalid message in replication.log (and possibly crash in the case of synchronous replication) when the target DB has no its "replica" flag set
# decription:
#    See: https://github.com/FirebirdSQL/firebird/issues/6989
#
#    Test changes replica DB attribute (removes 'replica' flag). Then we do some trivial DDL on master (create and drop table).
#    The replication log must soon contain *two* phrases:
#    1. VERBOSE: Added 1 segment(s) to the processing queue
#    2. ERROR: Database is not in the replica mode
#    If any of these phrases is absent then we have a bug.
#
#    Otherwise we continue and return attribute 'replica' to the target DB. After this the replication log must contain the phrase:
#    VERBOSE: Segment <N> (<M> bytes) is replicated in <K> ms, deleting the file.
#    We can assume that replication finished OK only when such a line is found (see 'POINT-1').
#
#    Further, we invoke ISQL, executing an auxiliary script that drops all DB objects on master (with the '-nod' command switch).
#    After all objects are dropped, we wait again until the replica catches up with master (see 'POINT-2').
#
#    Finally, we extract metadata for master and replica and compare them (see 'f_meta_diff').
#    The only difference in metadata must be the 'CREATE DATABASE' statement with different DB names - we suppress it,
#    thus no metadata difference must be issued.
#
#    ####################
#    ### CRUCIAL NOTE ###
#    ####################
#    Currently there is a bug in FB 4.x and 5.x which can be seen on SECOND run of this test: a message with the text
#    "ERROR: Record format with length 68 is not found for table TEST" will appear in it after inserting the 1st record in master.
#    The reason of that is "dirty" pages that remain in RDB$RELATION_FIELDS both on master and replica after dropping the table.
#    The following query shows different data that appear in the replica DB on the 1st and 2nd run (just after the table was created on master):
#    =======
#    set blobdisplay 6;
#    select rdb$descriptor as fmt_descr
#    from rdb$formats natural join rdb$relations where rdb$relation_name = 'TEST';
#    =======
#    This bug was explained by dimitr, see letters 25.06.2021 11:49 and 25.06.2021 16:56.
#    It will be fixed later.
#
#    The only workaround to solve this problem is to make SWEEP after all DB objects have been dropped.
#    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#    !NB! BOTH master and replica must be cleaned up by sweep!
#    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
#    ################
#    ###  N O T E  ###
#    ################
#    Test assumes that master and replica DB have been created beforehand.
#    Also, it assumes that %FB_HOME%\replication.conf has been prepared with appropriate parameters for replication.
#    Particularly, the names of directories and databases must carry info about the checked FB major version and ServerMode:
#    * verbose = true // in order to find the line with the message that the required segment was replicated
#    * section for master database with specified parameters:
#          journal_directory = "!fbt_repo!\tmp\fb-replication.!fb_major!.!server_mode!.journal"
#          journal_archive_directory = "!fbt_repo!\tmp\fb-replication.!fb_major!.!server_mode!.archive"
#          journal_archive_command = "copy $(pathname) $(archivepathname)"
#          journal_archive_timeout = 10
#    * section for replica database with specified parameter:
#          journal_source_directory = "!fbt_repo!\tmp\fb-replication.!fb_major!.!server_mode!.archive"
#
#    Master and replica databases must be created in the "!fbt_repo!\tmp" directory and have names like these:
#    'fbt-main.fb40.SS.fdb'; 'fbt-repl.fb40.SS.fdb'; - for FB 4.x ('SS' = Super; 'CS' = Classic)
#    'fbt-main.fb50.SS.fdb'; 'fbt-repl.fb50.SS.fdb'; - for FB 5.x ('SS' = Super; 'CS' = Classic)
#    NB: fixed numeric value ('40' or '50') must be used for any minor FB version (4.0; 4.0.1; 4.1; 5.0; 5.1 etc)
#
#    These two databases must NOT be dropped in any of tests related to replication!
#    They are created and dropped in the batch scenario which prepares the FB instance to be checked for each ServerMode
#    and makes cleanup after it, i.e. when all tests have completed.
#
#    NB. Currently this task was implemented only in Windows batch, thus the test has attribute platform = 'Windows'.
#
#    Temporary comment. For debug purposes:
#    1) find out SUFFIX of the name of FB service which is to be tested (e.g. 'DefaultInstance', '40SS' etc);
#    2) copy file %fbt-repo%\tests\functional\tabloid\batches\setup-fb-for-replication.bat.txt
#       to some place and rename it "*.bat";
#    3) open this .bat in editor and adjust the value of the 'fbt_repo' variable;
#    4) run: setup-fb-for-replication.bat [SUFFIX_OF_FB_SERVICE]
#       where SUFFIX_OF_FB_SERVICE is the ending part of the name of the FB service which you want to check:
#       DefaultInstance ; 40ss ; 40cs ; 50ss ; 50cs etc
#    5) batch 'setup-fb-for-replication.bat' will:
#       * stop selected FB instance
#       * create test databases (in !fbt_repo!\tmp\);
#       * prepare journal/archive sub-folders for replication (also in !fbt_repo!\tmp\);
#       * replace %fb_home%\replication.conf with the appropriate one;
#       * start selected FB instance
#    6) run this test (FB instance will be already launched by setup-fb-for-replication.bat):
#       %fpt_repo%\fbt-run2.bat dblevel-triggers-must-not-fire-on-replica.fbt 50ss, etc
#
#    Checked on: WI-T5.0.0.257; WI-V4.0.1.2631 (both SS/CS).
#
#
# tracker_id:
# min_versions: ['4.0.1']
# versions: 4.0.1
# qmid: None

import pytest
from firebird.qa import db_factory, python_act, Action

# version: 4.0.1
# resources: None

substitutions_1 = [('Start removing objects in:.*', 'Start removing objects'), ('Finish. Total objects removed: [1-9]\\d*', 'Finish. Total objects removed'), ('.* CREATE DATABASE .*', ''), ('FMT_DESCR .*', 'FMT_DESCR'), ('[ \t]+', ' ')]

init_script_1 = """"""

db_1 = db_factory(sql_dialect=3, init=init_script_1)

# test_script_1
#---
#
#  import os
#  import subprocess
#  import re
#  import difflib
#  import shutil
#  import time
#  from datetime import datetime
#  from datetime import timedelta
#
#
#  os.environ["ISC_USER"] = user_name
#  os.environ["ISC_PASSWORD"] = user_password
#
#  # NB: with default values of 'apply_idle_timeout' and 'apply_error_timeout' (10 and 60 s)
#  # total time of this test is about 130...132s.
#  #####################################
#  MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 65
#  MAX_TIME_FOR_WAIT_ERR_MSG_IN_LOG = 65
#  #####################################
#
#  svc = fdb.services.connect(host='localhost', user=user_name, password=user_password)
#  FB_HOME = svc.get_home_directory()
#  svc.close()
#
#  engine = db_conn.engine_version # 4.0; 4.1; 5.0 etc -- type float
#  fb_major = 'fb' + str(engine)[:1] + '0' # 'fb40'; 'fb50'
#
#  cur = db_conn.cursor()
#  cur.execute("select rdb$config_value from rdb$config where upper(rdb$config_name) = upper('ServerMode')")
#  server_mode = 'XX'
#  for r in cur:
#      if r[0] == 'Super':
#          server_mode = 'SS'
#      elif r[0] == 'SuperClassic':
#          server_mode = 'SC'
#      elif r[0] == 'Classic':
#          server_mode = 'CS'
#  cur.close()
#
#  # 'fbt-main.fb50.ss.fdb' etc:
#  db_main = os.path.join( context['temp_directory'], 'fbt-main.' + fb_major + '.' + server_mode + '.fdb' )
#  db_repl = db_main.replace( 'fbt-main.', 'fbt-repl.')
#
#  # Folders for journalling and archiving segments.
#  repl_journal_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.journal' )
#  repl_archive_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.archive' )
#
#  runProgram('gfix', ['-replica', 'none', 'localhost:' + db_repl])
#
#  db_conn.close()
#
#  #--------------------------------------------
#
#  def flush_and_close( file_handle ):
#      # https://docs.python.org/2/library/os.html#os.fsync
#      # If you're starting with a Python file object f,
#      # first do f.flush(), and
#      # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
#      global os
#
#      file_handle.flush()
#      if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
#          # otherwise: "OSError: [Errno 9] Bad file descriptor"!
#          os.fsync(file_handle.fileno())
#      file_handle.close()
#
#  #--------------------------------------------
#
#  def cleanup( f_names_list ):
#      global os
#      for i in range(len( f_names_list )):
#          if type(f_names_list[i]) == file:
#              del_name = f_names_list[i].name
#          elif type(f_names_list[i]) == str:
#              del_name = f_names_list[i]
#          else:
#              print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
#              del_name = None
#
#          if del_name and os.path.isfile( del_name ):
#              os.remove( del_name )
#
#  #--------------------------------------------
#
#  def wait_for_data_in_replica( fb_home, max_allowed_time_for_wait, db_main, prefix_msg = '' ):
#      global re
#      global difflib
#      global time
#
#      # -:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-
#      def check_pattern_in_log( log_lines, pattern, prefix_msg = '' ):
#          found_required_message = False
#          for d in log_lines:
#              if pattern.search(d):
#                  print( (prefix_msg + ' ' if prefix_msg else '') + 'FOUND message about replicated segment.' )
#                  found_required_message = True
#                  break
#          return found_required_message
#      # -:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-
#
#      replold_lines = []
#      with open( os.path.join(fb_home,'replication.log'), 'r') as f:
#          replold_lines = f.readlines()
#
#      con = fdb.connect( dsn = 'localhost:' + db_main, no_db_triggers = 1)
#      cur = con.cursor()
#      cur.execute("select rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') from rdb$database")
#      for r in cur:
#          last_generated_repl_segment = r[0]
#      cur.close()
#      con.close()
#
#      # VERBOSE: Segment 1 (2582 bytes) is replicated in 1 second(s), deleting the file
#      segment_replicated_pattern=re.compile( 'verbose:\\s+segment\\s+%(last_generated_repl_segment)s\\s+\\(\\d+\\s+bytes\\)\\s+is\\s+replicated.*deleting' % locals(), re.IGNORECASE)
#
#      # 08.09.2021: replication content can remain unchanged if there was no user-defined object in DB that must be dropped!
#      # Because of this, it is crucial to check OLD content of replication log before loop.
#      # Also, segment_replicated_pattern must NOT start from '\\+' because it can occur only for diff_data (within loop):
#      #
#      found_required_message = check_pattern_in_log( replold_lines, segment_replicated_pattern, prefix_msg )
#
#      if not found_required_message:
#
#          for i in range(0,max_allowed_time_for_wait):
#              time.sleep(1)
#
#              # Get content of fb_home replication.log _after_ isql finish:
#              f_repllog_new = open( os.path.join(fb_home,'replication.log'), 'r')
#              diff_data = difflib.unified_diff(
#                  replold_lines,
#                  f_repllog_new.readlines()
#                )
#              f_repllog_new.close()
#
#              found_required_message = check_pattern_in_log( diff_data, segment_replicated_pattern, prefix_msg )
#              if found_required_message:
#                  break
#
#          if not found_required_message:
#              print('UNEXPECTED RESULT: no message about replicated segment No. %d for %d seconds.' % (int(last_generated_repl_segment), max_allowed_time_for_wait) )
#
#
#  #--------------------------------------------
#
#  def wait_for_required_msg_in_log( fb_home, required_pattern, db_main, max_allowed_time_for_wait, prefix_msg = '' ):
#
#      global re
#      global difflib
#      global time
#      global datetime
#
#
#      # -:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-
#      def check_pattern_in_log( log_lines, pattern, min_timestamp, prefix_msg = '' ):
#          found_required_message = False
#          for i,r in enumerate(log_lines):
#              if pattern.search(r):
#                  if i>=2 and log_lines[i-2]:
#                      # a = r.replace('(',' ').split()
#                      a = log_lines[i-2].split()
#                      if len(a)>=4:
#                          # s='replica_host_name (slave) Sun May 30 17:46:43 2021'
#                          # s.split()[-5:] ==> ['Sun', 'May', '30', '17:46:43', '2021']
#                          # ' '.join( ...) ==> 'Sun May 30 17:46:43 2021'
#                          dts = ' '.join( log_lines[i-2].split()[-5:] )
#                          msg_timestamp = datetime.strptime( dts, '%a %b %d %H:%M:%S %Y')
#                          if msg_timestamp >= min_timestamp:
#                              print( (prefix_msg + ' ' if prefix_msg else '') + 'FOUND required message after given timestamp.') #, 'msg_timestamp=%s' % msg_timestamp, '; min_timestamp=%s' % min_timestamp )
#                              found_required_message = True
#                              break
#          return found_required_message
#
#      # -:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-
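#      # A minimal sketch (the log header line is an assumption) of the timestamp handling used
#      # by check_pattern_in_log() above: the header two lines above a matched message carries
#      # the log timestamp, which is parsed and compared to a seconds-precision "now":
#      #
#      #     from datetime import datetime
#      #     hdr = 'replica_host_name (replica) Sun May 30 17:46:43 2021'
#      #     dts = ' '.join( hdr.split()[-5:] )                       # 'Sun May 30 17:46:43 2021'
#      #     msg_timestamp = datetime.strptime( dts, '%a %b %d %H:%M:%S %Y')
#      #     min_timestamp = datetime.today().replace(microsecond=0)  # log has no fractional seconds
#      #     print( msg_timestamp >= min_timestamp )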
#
#      replold_lines = []
#      with open( os.path.join(fb_home,'replication.log'), 'r') as f:
#          replold_lines = f.readlines()
#
#
#      ##################################
#      ###        A.C.H.T.U.N.G       ###
#      ### do NOT use datetime.now()  ###
#      ### because of missed accuracy ###
#      ### of timestamps in repl.log  ###
#      ### (it is HH:MM:SS only)      ###
#      ##################################
#      current_date_with_hhmmss = datetime.today().replace(microsecond=0)
#
#      runProgram('isql', ['localhost:' + db_main], 'create table test(id int primary key); drop table test;')
#
#      for i in range(0,max_allowed_time_for_wait):
#          time.sleep(1)
#
#          # Get content of fb_home replication.log _after_ isql finish:
#          f_repllog_new = open( os.path.join(fb_home,'replication.log'), 'r')
#          diff_data = difflib.unified_diff(
#              replold_lines,
#              f_repllog_new.readlines()
#            )
#          f_repllog_new.close()
#
#          found_required_message = check_pattern_in_log( list(diff_data), required_pattern, current_date_with_hhmmss, prefix_msg )
#          if found_required_message:
#              break
#
#      if not found_required_message:
#          print('UNEXPECTED RESULT: required message NOT found after %s for %d seconds.' % (current_date_with_hhmmss, max_allowed_time_for_wait))
#
#      return found_required_message
#  #--------------------------------------------
#
#
#  #######################################################################
#  ### Make trivial changes in the master (CREATE / DROP table) and    ###
#  ### check that "ERROR: Database is not in the replica mode" appears ###
#  ### in replication log after this, within the limit that is set by  ###
#  ### MAX_TIME_FOR_WAIT_ERR_MSG_IN_LOG...                             ###
#  #######################################################################
#  not_in_replica_mode_pattern=re.compile( 'ERROR: Database is not in the replica mode', re.IGNORECASE)
#  found_expected_err_msg = wait_for_required_msg_in_log( FB_HOME, not_in_replica_mode_pattern, db_main, MAX_TIME_FOR_WAIT_ERR_MSG_IN_LOG, 'POINT-A' )
#
#  '''
#  # temp, 4debug only: try this if framework will not be able to drop database (Classic only):
#  fdb_tmp=os.path.join(context['temp_directory'],'tmp_gh_6989.tmp.fdb')
#  runProgram('gfix', ['-shut', 'full', '-force', '0', 'localhost:' + db_repl])
#  shutil.move(db_repl, fdb_tmp)
#  runProgram('gfix', ['-online', 'localhost:' + fdb_tmp])
#  runProgram('gfix', ['-replica', 'read_only', 'localhost:' + fdb_tmp])
#  shutil.move(fdb_tmp, db_repl)
#  '''
#
#  runProgram('gfix', ['-replica', 'read_only', 'localhost:' + db_repl])
#
#  if found_expected_err_msg:
#      ##############################################################################
#      ###  W A I T   U N T I L   R E P L I C A   B E C O M E S   A C T U A L    ###
#      ##############################################################################
#      wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-1' )
#
#
#  # return initial state of master DB:
#  # remove all DB objects (tables, views, ...):
#  # --------------------------------------------
#  sql_clean_ddl = os.path.join(context['files_location'],'drop-all-db-objects.sql')
#
#  f_clean_log=open( os.path.join(context['temp_directory'],'drop-all-db-objects-gh_6989.log'), 'w')
#  f_clean_err=open( ''.join( ( os.path.splitext(f_clean_log.name)[0], '.err') ), 'w')
#  subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-i', sql_clean_ddl], stdout=f_clean_log, stderr=f_clean_err )
#  flush_and_close(f_clean_log)
#  flush_and_close(f_clean_err)
#
#  with open(f_clean_err.name,'r') as f:
#      for line in f:
#          print('UNEXPECTED STDERR in cleanup SQL: ' + line)
#          MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0
#
#  with open(f_clean_log.name,'r') as f:
#      for line in f:
#          # show number of dropped objects
#          print(line)
#
#  if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> previous SQL script finished w/o errors
#
#      ##############################################################################
#      ###  W A I T   U N T I L   R E P L I C A   B E C O M E S   A C T U A L    ###
#      ##############################################################################
#      wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-2' )
#
#      f_main_meta_sql=open( os.path.join(context['temp_directory'],'db_main_meta_gh_6989.sql'), 'w')
#      subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_main_meta_sql, stderr=subprocess.STDOUT )
#      flush_and_close( f_main_meta_sql )
#
#      f_repl_meta_sql=open( os.path.join(context['temp_directory'],'db_repl_meta_gh_6989.sql'), 'w')
#      subprocess.call( [context['isql_path'], 'localhost:' + db_repl, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_repl_meta_sql, stderr=subprocess.STDOUT )
#      flush_and_close( f_repl_meta_sql )
#
#      db_main_meta=open(f_main_meta_sql.name, 'r')
#      db_repl_meta=open(f_repl_meta_sql.name, 'r')
#
#      diffmeta = ''.join(difflib.unified_diff(
#          db_main_meta.readlines(),
#          db_repl_meta.readlines()
#      ))
#      db_main_meta.close()
#      db_repl_meta.close()
#
#      f_meta_diff=open( os.path.join(context['temp_directory'],'db_meta_diff_gh_6989.txt'), 'w', buffering = 0)
#      f_meta_diff.write(diffmeta)
#      flush_and_close( f_meta_diff )
#
#      # Following must issue only TWO rows:
#      # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_main]' ... */
#      # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_repl]' ... */
#      # Only these lines will be suppressed further (see subst. section):
#      with open(f_meta_diff.name, 'r') as f:
#          for line in f:
#              if line[:1] in ('-', '+') and line[:3] not in ('---','+++'):
#                  print('UNEXPECTED METADATA DIFF.: ' + line)
#
#      ######################
#      ### A C H T U N G  ###
#      ######################
#      # MANDATORY, OTHERWISE REPLICATION GETS STUCK ON SECOND RUN OF THIS TEST
#      # WITH 'ERROR: Record format with length 68 is not found for table TEST':
#      runProgram('gfix', ['-sweep', 'localhost:' + db_repl])
#      runProgram('gfix', ['-sweep', 'localhost:' + db_main])
#      #######################
#
#  # cleanup:
#  ##########
#  cleanup( (f_clean_log, f_clean_err, f_main_meta_sql, f_repl_meta_sql, f_meta_diff) )
#
#---
act_1 = python_act('db_1', substitutions=substitutions_1)

expected_stdout_1 = """
POINT-A FOUND required message after given timestamp.
POINT-1 FOUND message about replicated segment.
Start removing objects
Finish. Total objects removed: 0
POINT-2 FOUND message about replicated segment.
"""

@pytest.mark.version('>=4.0.1')
@pytest.mark.platform('Windows')
def test_1(act_1: Action):
    pytest.fail("Test not IMPLEMENTED")

580
tests/functional/replication/test_oltp_emul_ddl.py
Normal file
@ -0,0 +1,580 @@
#coding:utf-8
#
# id: tests.functional.replication.oltp_emul_ddl
# title: Applying full DDL from OLTP-EMUL test on master with further check of replica
# decription:
#
#    ################
#    ###  N O T E  ###
#    ################
#    Test assumes that master and replica DB have been created beforehand.
#    Also, it assumes that %FB_HOME%\replication.conf has been prepared with appropriate parameters for replication.
#    Particularly, the names of directories and databases must carry info about the checked FB major version and ServerMode:
#    * verbose = true // in order to find the line with the message that the required segment was replicated
#    * section for master database with specified parameters:
#          journal_directory = "!fbt_repo!\tmp\fb-replication.!fb_major!.!server_mode!.journal"
#          journal_archive_directory = "!fbt_repo!\tmp\fb-replication.!fb_major!.!server_mode!.archive"
#          journal_archive_command = "copy $(pathname) $(archivepathname)"
#          journal_archive_timeout = 10
#    * section for replica database with specified parameter:
#          journal_source_directory = "!fbt_repo!\tmp\fb-replication.!fb_major!.!server_mode!.archive"
#
#    Master and replica databases must be created in the "!fbt_repo!\tmp" directory and have names like these:
#    'fbt-main.fb40.SS.fdb'; 'fbt-repl.fb40.SS.fdb'; - for FB 4.x ('SS' = Super; 'CS' = Classic)
#    'fbt-main.fb50.SS.fdb'; 'fbt-repl.fb50.SS.fdb'; - for FB 5.x ('SS' = Super; 'CS' = Classic)
#    NB: fixed numeric value ('40' or '50') must be used for any minor FB version (4.0; 4.0.1; 4.1; 5.0; 5.1 etc)
#
#    These two databases must NOT be dropped in any of tests related to replication!
#    They are created and dropped in the batch scenario which prepares the FB instance to be checked for each ServerMode
#    and makes cleanup after it, i.e. when all tests have completed.
#
#    NB. Currently this task was implemented only in Windows batch, thus the test has attribute platform = 'Windows'.
#
#    Temporary comment. For debug purposes:
#    1) find out SUFFIX of the name of FB service which is to be tested (e.g. 'DefaultInstance', '40SS' etc);
#    2) copy file %fbt-repo%\tests\functional\tabloid\batches\setup-fb-for-replication.bat.txt
#       to some place and rename it "*.bat";
#    3) open this .bat in editor and adjust the value of the 'fbt_repo' variable;
#    4) run: setup-fb-for-replication.bat [SUFFIX_OF_FB_SERVICE]
#       where SUFFIX_OF_FB_SERVICE is the ending part of the name of the FB service which you want to check:
#       DefaultInstance ; 40ss ; 40cs ; 50ss ; 50cs etc
#    5) batch 'setup-fb-for-replication.bat' will:
#       * stop selected FB instance
#       * create test databases (in !fbt_repo!\tmp\);
#       * prepare journal/archive sub-folders for replication (also in !fbt_repo!\tmp\);
#       * replace %fb_home%\replication.conf with the appropriate one;
#       * start selected FB instance
#    6) run this test (FB instance will be already launched by setup-fb-for-replication.bat):
#       %fpt_repo%\fbt-run2.bat dblevel-triggers-must-not-fire-on-replica.fbt 50ss, etc
#
#
# tracker_id:
# min_versions: ['4.0']
# versions: 4.0
# qmid: None

import pytest
from firebird.qa import db_factory, python_act, Action

# version: 4.0
# resources: None

substitutions_1 = [('Start removing objects in:.*', 'Start removing objects'), ('Finish. Total objects removed: [1-9]\\d*', 'Finish. Total objects removed'), ('.* CREATE DATABASE .*', '')]

init_script_1 = """"""

db_1 = db_factory(sql_dialect=3, init=init_script_1)

# test_script_1
#---
#
#  import os
#  import subprocess
#  import re
#  import zipfile
#  import difflib
#  import shutil
#  import time
#
#  os.environ["ISC_USER"] = user_name
#  os.environ["ISC_PASSWORD"] = user_password
#
#  #####################################
#  MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 65
#  #####################################
#
#  svc = fdb.services.connect(host='localhost', user=user_name, password=user_password)
#  FB_HOME = svc.get_home_directory()
#  svc.close()
#
#  engine = db_conn.engine_version # 4.0; 4.1; 5.0 etc -- type float
#  fb_major = 'fb' + str(engine)[:1] + '0' # 'fb40'; 'fb50'
#
#  cur = db_conn.cursor()
#  cur.execute("select rdb$config_value from rdb$config where upper(rdb$config_name) = upper('ServerMode')")
#  server_mode = 'XX'
#  for r in cur:
#      if r[0] == 'Super':
#          server_mode = 'SS'
#      elif r[0] == 'SuperClassic':
#          server_mode = 'SC'
#      elif r[0] == 'Classic':
#          server_mode = 'CS'
#  cur.close()
#
#  # 'fbt-main.fb50.ss.fdb' etc:
#  db_main = os.path.join( context['temp_directory'], 'fbt-main.' + fb_major + '.' + server_mode + '.fdb' )
#  db_repl = db_main.replace( 'fbt-main.', 'fbt-repl.')
#
#  # Folders for journalling and archiving segments.
#  repl_journal_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.journal' )
#  repl_archive_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.archive' )
#
#  fb_port = 0
#  cur = db_conn.cursor()
#  cur.execute("select rdb$config_value from rdb$config where rdb$config_name = 'RemoteServicePort'")
#  for r in cur:
#      fb_port = int(r[0])
#  cur.close()
#
#  db_conn.close()
#
#  runProgram('gfix', ['-w', 'async', 'localhost:' + db_main])
#
#  #--------------------------------------------
#
#  def flush_and_close( file_handle ):
#      # https://docs.python.org/2/library/os.html#os.fsync
#      # If you're starting with a Python file object f,
#      # first do f.flush(), and
#      # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
#      global os
#
#      file_handle.flush()
#      if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
#          # otherwise: "OSError: [Errno 9] Bad file descriptor"!
#          os.fsync(file_handle.fileno())
#      file_handle.close()
#
#  #--------------------------------------------
#
#  def cleanup( f_names_list ):
#      global os
#      for i in range(len( f_names_list )):
#          if type(f_names_list[i]) == file:
#              del_name = f_names_list[i].name
#          elif type(f_names_list[i]) == str:
#              del_name = f_names_list[i]
#          else:
#              print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
#              del_name = None
#
#          if del_name and os.path.isfile( del_name ):
#              os.remove( del_name )
#
#  #--------------------------------------------
#
#  def wait_for_data_in_replica( fb_home, max_allowed_time_for_wait, db_main, prefix_msg = '' ):
#
#      global re
#      global difflib
#      global time
#
#      replold_lines = []
#      with open( os.path.join(fb_home,'replication.log'), 'r') as f:
#          replold_lines = f.readlines()
#
#      con = fdb.connect( dsn = 'localhost:' + db_main, no_db_triggers = 1)
#      cur = con.cursor()
#      cur.execute("select rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') from rdb$database")
#      for r in cur:
#          last_generated_repl_segment = r[0]
#      cur.close()
#      con.close()
#
#      #print('last_generated_repl_segment:', last_generated_repl_segment)
#
#      # VERBOSE: Segment 1 (2582 bytes) is replicated in 1 second(s), deleting the file
#      p_successfully_replicated = re.compile( '\\+\\s+verbose:\\s+segment\\s+%(last_generated_repl_segment)s\\s+\\(\\d+\\s+bytes\\)\\s+is\\s+replicated.*deleting' % locals(), re.IGNORECASE)
#
#      # VERBOSE: Segment 16 replication failure at offset 33628
#      p_replication_failure = re.compile('segment\\s+\\d+\\s+replication\\s+failure', re.IGNORECASE)
#
#      # ERROR: Record format with length 56 is not found for table PERF_ESTIMATED
#      p_rec_format_not_found = re.compile('record\\s+format\\s+with\\s+length\\s+\\d+\\s+is\\s+not\\s+found', re.IGNORECASE)
#
#      found_required_message = False
#
#      found_replfail_message = False
#      found_recformat_message = False
#      found_common_error_msg = False
#
#      for i in range(0,max_allowed_time_for_wait):
#          time.sleep(1)
#
#          # Get content of fb_home replication.log _after_ isql finish:
#          f_repllog_new = open( os.path.join(fb_home,'replication.log'), 'r')
#          diff_data = difflib.unified_diff(
#              replold_lines,
#              f_repllog_new.readlines()
#            )
#          f_repllog_new.close()
#
#          for k,d in enumerate(diff_data):
#
#              if p_replication_failure.search(d):
#                  print( (prefix_msg + ' ' if prefix_msg else '') + '@@@ SEGMENT REPLICATION FAILURE @@@ ' + d )
#                  found_replfail_message = True
#                  break
#
#              if p_rec_format_not_found.search(d):
#                  print( (prefix_msg + ' ' if prefix_msg else '') + '@@@ RECORD FORMAT NOT FOUND @@@ ' + d )
#                  found_recformat_message = True
#                  break
#
#              if 'ERROR:' in d:
#                  print( (prefix_msg + ' ' if prefix_msg else '') + '@@@ REPLICATION ERROR @@@ ' + d )
#                  found_common_error_msg = True
#                  break
#
#              if p_successfully_replicated.search(d):
#                  print( (prefix_msg + ' ' if prefix_msg else '') + 'FOUND message about replicated segment.' )
#                  found_required_message = True
#                  break
#
#          if found_required_message or found_replfail_message or found_recformat_message or found_common_error_msg:
#              break
#
#      if not found_required_message:
#          print('UNEXPECTED RESULT: no message about replicated segment for %d seconds.' % max_allowed_time_for_wait)
#
#  #--------------------------------------------
#
|
||||
# def generate_sync_settings_sql(db_main, fb_port):
|
||||
#
|
||||
# def generate_inject_setting_sql(working_mode, mcode, new_value, allow_insert_if_eof = 0):
|
||||
# sql_inject_setting = ''
|
||||
# if allow_insert_if_eof == 0:
|
||||
# sql_inject_setting = '''
|
||||
# update settings set svalue = %(new_value)s
|
||||
# where working_mode = upper('%(working_mode)s') and mcode = upper('%(mcode)s');
|
||||
# if (row_count = 0) then
|
||||
# exception ex_record_not_found using('SETTINGS', q'{working_mode = upper('%(working_mode)s') and mcode = upper('%(mcode)s')}');
|
||||
# ''' % locals()
|
||||
# else:
|
||||
# sql_inject_setting = '''
|
||||
# update or insert into settings(working_mode, mcode, svalue)
|
||||
# values( upper('%(working_mode)s'), upper('%(mcode)s'), %(new_value)s )
|
||||
# matching (working_mode, mcode);
|
||||
# ''' % locals()
|
||||
#
|
||||
# return sql_inject_setting
|
||||
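#     # For illustration, generate_inject_setting_sql( 'common', 'workers_count', "'100'" )
#     # expands (via the first branch) to:
#     #     update settings set svalue = '100'
#     #     where working_mode = upper('common') and mcode = upper('workers_count');
#     #     if (row_count = 0) then
#     #         exception ex_record_not_found using('SETTINGS', q'{working_mode = upper('common') and mcode = upper('workers_count')}');
#     # while passing allow_insert_if_eof = 1 produces the UPDATE OR INSERT form instead.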
#
#
#     sql_adjust_settings_table = '''
#         set list on;
#         select 'Adjust settings: start at ' || cast('now' as timestamp) as msg from rdb$database;
#         set term ^;
#         execute block as
#         begin
#     '''
#
#     sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'init', 'working_mode', "upper('small_03')" ) ) )
#     sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'common', 'enable_mon_query', "'0'" ) ) )
#     sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'common', 'unit_selection_method', "'random'" ) ) )
#     sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'common', 'build_with_split_heavy_tabs', "'1'" ) ) )
#     sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'common', 'build_with_qd_compound_ordr', "lower('most_selective_first')" ) ) )
#
#     sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'common', 'build_with_separ_qdistr_idx', "'0'" ) ) )
#     sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'common', 'used_in_replication', "'1'" ) ) )
#     sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'common', 'separate_workers', "'1'" ) ) )
#     sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'common', 'workers_count', "'100'" ) ) )
#     sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'common', 'update_conflict_percent', "'0'" ) ) )
#     sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'init', 'connect_str', "'connect ''localhost:%(db_main)s'' user ''SYSDBA'' password ''masterkey'';'" % locals(), 1 ) ) )
#
#     sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'common', 'mon_unit_list', "'//'" ) ) )
#     sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'common', 'halt_test_on_errors', "'/CK/'" ) ) )
#     sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'common', 'qmism_verify_bitset', "'1'" ) ) )
#     sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'common', 'recalc_idx_min_interval', "'9999999'" ) ) )
#
#     sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'common', 'warm_time', "'0'", 1 ) ) )
#     sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'common', 'test_intervals', "'10'", 1 ) ) )
#
#     sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'init', 'tmp_worker_role_name', "upper('tmp$oemul$worker')", 1 ) ) )
#     sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'init', 'tmp_worker_user_prefix', "upper('tmp$oemul$user_')", 1 ) ) )
#
#
#     sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'common', 'use_es', "'2'", 1 ) ) )
#     sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'init', 'host', "'localhost'", 1 ) ) )
#     sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'init', 'port', "'%(fb_port)s'" % locals(), 1 ) ) )
#     sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'init', 'usr', "'SYSDBA'", 1 ) ) )
#     sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'init', 'pwd', "'masterkey'", 1 ) ) )
#     sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'init', 'tmp_worker_user_pswd', "'0Ltp-Emu1'", 1 ) ) )
#
#     sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'init', 'conn_pool_support', "'1'", 1 ) ) )
#     sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'init', 'resetting_support', "'1'", 1 ) ) )
#
#     sql_adjust_settings_table += '''
#         end ^
#         set term ;^
#         commit;
#         select 'Adjust settings: finish at ' || cast('now' as timestamp) as msg from rdb$database;
#         set list off;
#     '''
#
#     return sql_adjust_settings_table
#
# #--------------------------------------------
#
# # Extract .sql files with OLTP-EMUL DDL for applying
# # ZipFile.extractall(path=None, members=None, pwd=None)
# zf = zipfile.ZipFile( os.path.join(context['files_location'],'oltp-emul-ddl.zip') )
# src_files = zf.namelist()
# zf.extractall(path = context['temp_directory'])
# zf.close()
#
# #--------------------------------------------
#
# oltp_emul_initial_scripts = [
#     'oltp-emul-01_initial_DDL'
#     ,'oltp-emul-02_business_units'
#     ,'oltp-emul-03_common_units'
#     ,'oltp-emul-04_misc_debug_code'
#     ,'oltp-emul-05_main_tabs_filling'
# ]
#
#
# #src_dir = context['files_location']
# src_dir = context['temp_directory']
#
# sql_apply = '\\n'
# for f in oltp_emul_initial_scripts:
#     sql_apply += ' in ' + os.path.join(src_dir, f+'.sql;\\n')
#
# sql_ddl = ''' %(sql_apply)s
# ''' % locals()
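# # For illustration, after this loop sql_apply is just a sequence of isql IN-commands,
# # one per extracted script ('<tmp>' here stands for the actual temp_directory value):
# #      in <tmp>/oltp-emul-01_initial_DDL.sql;
# #      in <tmp>/oltp-emul-02_business_units.sql;
# #      ...
# #      in <tmp>/oltp-emul-05_main_tabs_filling.sql;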
#
# #--------------------------------------------
#
# f_run_initial_ddl = open( os.path.join(context['temp_directory'],'tmp-oltp-emul-ddl.sql'), 'w')
# f_run_initial_ddl.write(sql_ddl)
#
# # Add SQL code for adjusting the SETTINGS table with values which are commonly used in the OLTP-EMUL config:
# f_run_initial_ddl.write( generate_sync_settings_sql(db_main, fb_port) )
# flush_and_close( f_run_initial_ddl )
#
# f_run_initial_log = open( ''.join( (os.path.splitext(f_run_initial_ddl.name)[0], '.log' ) ), 'w')
# f_run_initial_err = open( ''.join( (os.path.splitext(f_run_initial_ddl.name)[0], '.err' ) ), 'w')
# subprocess.call( [ context['isql_path'], 'localhost:' + db_main, '-i', f_run_initial_ddl.name ], stdout = f_run_initial_log, stderr = f_run_initial_err)
#
# flush_and_close( f_run_initial_log )
# flush_and_close( f_run_initial_err )
#
# with open(f_run_initial_err.name,'r') as f:
#     for line in f:
#         print('UNEXPECTED STDERR in initial SQL: ' + line)
#         MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0
#
# if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> initial SQL script finished w/o errors
#
#     post_handling_out = os.path.join( context['temp_directory'],'oltp_split_heavy_tabs.tmp' )
#     post_adjust_sep_wrk_out = os.path.join( context['temp_directory'], 'oltp_adjust_sep_wrk.tmp' )
#     post_adjust_replication = os.path.join( context['temp_directory'], 'post_adjust_repl_pk.tmp' )
#     post_adjust_ext_pool = os.path.join( context['temp_directory'], 'post_adjust_ext_pool.tmp' )
#     post_adjust_eds_perf = os.path.join( context['temp_directory'], 'post_adjust_eds_perf.tmp' )
#
#     oltp_emul_post_handling_scripts = [
#         'oltp-emul-06_split_heavy_tabs'
#         ,'oltp-emul-07_adjust_perf_tabs'
#         ,'oltp-emul-08_adjust_repl_ddl'
#         ,'oltp-emul-09_adjust_eds_calls'
#         ,'oltp-emul-10_adjust_eds_perf'
#     ]
#
#
#     sql_post_handling = '\\n'
#     for f in oltp_emul_post_handling_scripts:
#         run_post_handling_sql = os.path.join( src_dir, f+'.sql' )
#         tmp_post_handling_sql = os.path.join( context['temp_directory'], f + '.tmp' )
#         cleanup( (tmp_post_handling_sql,) )
#         sql_post_handling += '''
#             out %(tmp_post_handling_sql)s;
#             in %(run_post_handling_sql)s;
#             out;
#             in %(tmp_post_handling_sql)s;
#             -----------------------------
#         ''' % locals()
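#         # NOTE on the 'out / in / out; / in' block above: each post-handling script is
#         # expected not to change the database directly but to PRINT another SQL script.
#         # 'out' captures that generated script into a temp file, 'out;' restores normal
#         # output, and the second 'in' executes what was just generated (two-pass DDL
#         # generation).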
#
#         if 'adjust_eds_calls' in f:
#             # We have to make RECONNECT here, otherwise get:
#             # Statement failed, SQLSTATE = 2F000
#             # Error while parsing procedure SP_PERF_EDS_LOGGING's BLR
#             # -attempted update of read-only column <unknown>
#             # After line 49 in file ... oltp-emul-10_adjust_eds_perf.sql
#
#             sql_post_handling += " commit; connect 'localhost:%(db_main)s' user 'SYSDBA' password 'masterkey';" % locals()
#
#     oltp_emul_final_actions_scripts = [
#         'oltp-emul-11_ref_data_filling'
#         ,'oltp-emul-12_activate_db_triggers'
#     ]
#     for f in oltp_emul_final_actions_scripts:
#         sql_post_handling += ' in ' + os.path.join(src_dir, f+'.sql;\\n')
#
#     f_post_handling_sql = open( os.path.join(context['temp_directory'],'tmp-oltp-emul-post.sql'), 'w')
#     f_post_handling_sql.write(sql_post_handling)
#
#     sql4debug_only = '''
#         set echo off;
#         set list on;
#         commit;
#         set stat off;
#         select
#             rdb$get_context('SYSTEM','DB_NAME') as db_name
#             ,rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') as last_generated_repl_segment
#         from rdb$database;
#         quit;
#     '''
#     f_post_handling_sql.write( sql4debug_only )
#
#     flush_and_close( f_post_handling_sql )
#
#     f_post_handling_log = open( ''.join( (os.path.splitext(f_post_handling_sql.name)[0], '.log' ) ), 'w')
#     f_post_handling_err = open( ''.join( (os.path.splitext(f_post_handling_sql.name)[0], '.err' ) ), 'w')
#     subprocess.call( [ context['isql_path'], 'localhost:' + db_main, '-i', f_post_handling_sql.name ], stdout = f_post_handling_log, stderr = f_post_handling_err)
#     flush_and_close( f_post_handling_log )
#     flush_and_close( f_post_handling_err )
#
#     with open(f_post_handling_err.name,'r') as f:
#         for line in f:
#             print('UNEXPECTED STDERR in post-handling SQL: ' + line)
#             MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0
#
#     cleanup( (f_post_handling_sql, f_post_handling_log, f_post_handling_err ) )
#     for f in oltp_emul_post_handling_scripts:
#         tmp_post_handling_sql = os.path.join( context['temp_directory'], f + '.tmp' )
#         cleanup( (tmp_post_handling_sql,) )
#
# if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> initial SQL script finished w/o errors
#
#     # Test connect to master DB, call initial business operation: create client order
#     ###########################
#     custom_tpb = fdb.TPB()
#     custom_tpb.lock_resolution = fdb.isc_tpb_nowait
#     custom_tpb.isolation_level = fdb.isc_tpb_concurrency
#
#     con1 = fdb.connect( dsn = 'localhost:' + db_main, isolation_level = custom_tpb)
#     cur1 = con1.cursor()
#     cur1.execute( 'select ware_id from sp_client_order order by ware_id' )
#
#     client_order_wares_main = []
#     for r in cur1:
#         client_order_wares_main.append(r[0])
#     cur1.close()
#     con1.commit()
#     con1.close()
#
#     ##############################################################################
#     ###  W A I T   U N T I L    R E P L I C A    B E C O M E S   A C T U A L  ###
#     ##############################################################################
#     wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-1' )
#
#     con2 = fdb.connect( dsn = 'localhost:' + db_repl, no_db_triggers = 1)
#     cur2 = con2.cursor()
#     cur2.execute( 'select d.ware_id from doc_data d order by d.ware_id' )
#     client_order_wares_repl = []
#     for r in cur2:
#         client_order_wares_repl.append(r[0])
#     cur2.close()
#     con2.commit()
#     con2.close()
#
#     print('Master and replica data: %s ' % ( 'THE SAME.' if client_order_wares_main and sorted(client_order_wares_main) == sorted(client_order_wares_repl) else '### FAIL: DIFFERS ###' ) )
#
#     #print('client_order_wares_main=',client_order_wares_main)
#     #print('client_order_wares_repl=',client_order_wares_repl)
#
#     # return initial state of master DB:
#     # remove all DB objects (tables, views, ...):
#     # --------------------------------------------
#     sql_clean_ddl = os.path.join(context['files_location'],'drop-all-db-objects.sql')
#
#     f_clean_log=open( os.path.join(context['temp_directory'],'drop-all-db-objects.log'), 'w')
#     f_clean_err=open( ''.join( ( os.path.splitext(f_clean_log.name)[0], '.err') ), 'w')
#     subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-i', sql_clean_ddl], stdout=f_clean_log, stderr=f_clean_err )
#     flush_and_close(f_clean_log)
#     flush_and_close(f_clean_err)
#
#     with open(f_clean_err.name,'r') as f:
#         for line in f:
#             print('UNEXPECTED STDERR in cleanup SQL: ' + line)
#             MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0
#
#     with open(f_clean_log.name,'r') as f:
#         for line in f:
#             # show number of dropped objects
#             print(line)
#
# if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> initial SQL script finished w/o errors
#
#     ##############################################################################
#     ###  W A I T   U N T I L    R E P L I C A    B E C O M E S   A C T U A L  ###
#     ##############################################################################
#     wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-2' )
#
#     f_main_meta_sql=open( os.path.join(context['temp_directory'],'db_main_meta_skip_gen_repl.sql'), 'w')
#     subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_main_meta_sql, stderr=subprocess.STDOUT )
#     flush_and_close( f_main_meta_sql )
#
#     f_repl_meta_sql=open( os.path.join(context['temp_directory'],'db_repl_meta_skip_gen_repl.sql'), 'w')
#     subprocess.call( [context['isql_path'], 'localhost:' + db_repl, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_repl_meta_sql, stderr=subprocess.STDOUT )
#     flush_and_close( f_repl_meta_sql )
#
#     db_main_meta=open(f_main_meta_sql.name, 'r')
#     db_repl_meta=open(f_repl_meta_sql.name, 'r')
#
#     diffmeta = ''.join(difflib.unified_diff(
#         db_main_meta.readlines(),
#         db_repl_meta.readlines()
#       ))
#     db_main_meta.close()
#     db_repl_meta.close()
#
#     f_meta_diff=open( os.path.join(context['temp_directory'],'db_meta_diff_skip_gen_repl.txt'), 'w', buffering = 0)
#     f_meta_diff.write(diffmeta)
#     flush_and_close( f_meta_diff )
#
#     # Following must issue only TWO rows:
#     # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_main]' ... */
#     # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_repl]' ... */
#     # Only these lines will be suppressed further (see subst. section):
#     with open(f_meta_diff.name, 'r') as f:
#         for line in f:
#             if line[:1] in ('-', '+') and line[:3] not in ('---','+++'):
#                 print('UNEXPECTED METADATA DIFF.: ' + line)
#
#
# cleanup( (f_main_meta_sql, f_repl_meta_sql, f_meta_diff) )
#
# ######################
# ### A C H T U N G ###
# ######################
# # MANDATORY, OTHERWISE REPLICATION GETS STUCK ON SECOND RUN OF THIS TEST
# # WITH 'ERROR: Record format with length 68 is not found for table TEST':
# runProgram('gfix', ['-sweep', 'localhost:' + db_repl])
# runProgram('gfix', ['-sweep', 'localhost:' + db_main])
# #######################
#
# # cleanup:
# ##########
# cleanup( (f_run_initial_ddl, f_run_initial_log, f_run_initial_err, f_clean_log, f_clean_err) )
#
# # src_files - list of .sql files which were applied; got from zf.namelist().
# # We have to remove all of them:
# cleanup( [ os.path.join(context['temp_directory'],f) for f in src_files ] )
#
#
#---
act_1 = python_act('db_1', substitutions=substitutions_1)

expected_stdout_1 = """
    POINT-1 FOUND message about replicated segment.
    Master and replica data: THE SAME.
    Start removing objects
    Finish. Total objects removed
    POINT-2 FOUND message about replicated segment.
"""

@pytest.mark.version('>=4.0')
@pytest.mark.platform('Windows')
def test_1(act_1: Action):
    pytest.fail("Test not IMPLEMENTED")

@ -0,0 +1,365 @@
#coding:utf-8
#
# id: tests.functional.replication.permission_error_on_ddl_issued_by_non_sysdba
# title: Permission error with replication
# decription:
# See: https://github.com/FirebirdSQL/firebird/issues/6856
#
# Test runs the actions which are specified in the ticket (create a user with granting admin access to it, etc).
# After this we wait until the replica becomes actual to the master; this delay lasts no more than the threshold
# that is defined by the MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG variable (measured in seconds).
# During this delay, we check the replication log every second and search there for a line with the number of the last
# generated segment (which was replicated and finally deleted).
# We can assume that replication finished OK only when such a line is found (see 'POINT-1').
#
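# A minimal sketch of that waiting logic (names simplified; the real code below also
# diffs the log against a copy saved before the action, and reads the segment number
# from rdb$get_context('SYSTEM','REPLICATION_SEQUENCE')):
#
#     import re, time
#     def wait_for_segment(log_path, segment_no, max_wait_sec):
#         p = re.compile(r'segment\s+%s\s+\(\d+\s+bytes\)\s+is\s+replicated.*deleting' % segment_no, re.IGNORECASE)
#         for _ in range(max_wait_sec):
#             with open(log_path) as log:
#                 if any(p.search(line) for line in log):
#                     return True
#             time.sleep(1)
#         return False
#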
# Further, we invoke ISQL and execute an auxiliary script that drops all DB objects on master (with the '-nod' command switch).
# After all objects have been dropped, we have to wait again until the replica becomes actual with the master (see 'POINT-2').
#
# Finally, we extract metadata for master and replica and compare them (see 'f_meta_diff').
# The only difference in metadata must be the 'CREATE DATABASE' statement with different DB names - we suppress it,
# thus no metadata difference must be issued.
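# The comparison itself is a plain unified diff of the two extracted metadata scripts,
# roughly (a minimal sketch; file names here are placeholders):
#
#     import difflib
#     with open('db_main_meta.sql') as f1, open('db_repl_meta.sql') as f2:
#         diff = [ d for d in difflib.unified_diff(f1.readlines(), f2.readlines())
#                  if d[:1] in ('-', '+') and d[:3] not in ('---', '+++') ]
#     # apart from the two CREATE DATABASE lines, 'diff' must come out empty
#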
#
# ################
# ### N O T E ###
# ################
# Test assumes that master and replica DB have been created beforehand.
# Also, it assumes that %FB_HOME%\replication.conf has been prepared with appropriate parameters for replication.
# Particularly, the names of directories and databases must contain info about the checked FB major version and ServerMode.
# * verbose = true // in order to find the line with the message that the required segment was replicated
# * section for master database with specified parameters:
#       journal_directory = "!fbt_repo!\tmp\fb-replication.!fb_major!.!server_mode!.journal"
#       journal_archive_directory = "!fbt_repo!\tmp\fb-replication.!fb_major!.!server_mode!.archive"
#       journal_archive_command = "copy $(pathname) $(archivepathname)"
#       journal_archive_timeout = 10
# * section for replica database with specified parameter:
#       journal_source_directory = "!fbt_repo!\tmp\fb-replication.!fb_major!.!server_mode!.archive"
#
# Master and replica databases must be created in the "!fbt_repo!\tmp" directory and have names like these:
# 'fbt-main.fb40.SS.fdb'; 'fbt-repl.fb40.SS.fdb'; - for FB 4.x ('SS' = Super; 'CS' = Classic)
# 'fbt-main.fb50.SS.fdb'; 'fbt-repl.fb50.SS.fdb'; - for FB 5.x ('SS' = Super; 'CS' = Classic)
# NB: fixed numeric value ('40' or '50') must be used for any minor FB version (4.0; 4.0.1; 4.1; 5.0; 5.1 etc)
#
# These two databases must NOT be dropped in any of the tests related to replication!
# They are created and dropped in the batch scenario which prepares the FB instance to be checked for each ServerMode
# and makes cleanup after it, i.e. when all tests have been completed.
#
# NB. Currently this task was implemented only in a Windows batch, thus the test has attribute platform = 'Windows'.
#
# Temporary comment. For debug purposes:
# 1) find out the SUFFIX of the name of the FB service which is to be tested (e.g. 'DefaultInstance', '40SS' etc);
# 2) copy file %fbt-repo%\tests\functional\tabloid\batches\setup-fb-for-replication.bat.txt
#    to some place and rename it "*.bat";
# 3) open this .bat in an editor and adjust the value of the 'fbt_repo' variable;
# 4) run: setup-fb-for-replication.bat [SUFFIX_OF_FB_SERVICE]
#    where SUFFIX_OF_FB_SERVICE is the ending part of the FB service which you want to check:
#    DefaultInstance ; 40ss ; 40cs ; 50ss ; 50cs etc
# 5) batch 'setup-fb-for-replication.bat' will:
#    * stop selected FB instance
#    * create test databases (in !fbt_repo!\tmp\);
#    * prepare journal/archive sub-folders for replication (also in !fbt_repo!\tmp\);
#    * replace %fb_home%\replication.conf with the appropriate one
#    * start selected FB instance
# 6) run this test (FB instance will be already launched by setup-fb-for-replication.bat):
#    %fpt_repo%\fbt-run2.bat dblevel-triggers-must-not-fire-on-replica.fbt 50ss, etc
#
# Confirmed bug on 4.0.1.2578 and 5.0.0.169: messages "ERROR: unsuccessful metadata update / CREATE OR ALTER VIEW V1 failed"
# will be added into the replication log and after this replication gets stuck.
#
# Checked on: 4.0.1.2585 (SS/CS); 5.0.0.192 (SS/CS)
#
#
#
# tracker_id:
# min_versions: ['4.0.1']
# versions: 4.0.1
# qmid: None

import pytest
from firebird.qa import db_factory, python_act, Action

# version: 4.0.1
# resources: None

substitutions_1 = [('Start removing objects in:.*', 'Start removing objects'), ('Finish. Total objects removed: \\d+', 'Finish. Total objects removed'), ('.* CREATE DATABASE .*', ''), ('FMT_DESCR .*', 'FMT_DESCR')]

init_script_1 = """"""

db_1 = db_factory(sql_dialect=3, init=init_script_1)

# test_script_1
#---
#
# import os
# import subprocess
# import re
# import difflib
# import shutil
# import time
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
#
# #####################################
# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 65
# #####################################
#
# svc = fdb.services.connect(host='localhost', user=user_name, password=user_password)
# FB_HOME = svc.get_home_directory()
# svc.close()
#
# engine = db_conn.engine_version # 4.0; 4.1; 5.0 etc -- type float
# fb_major = 'fb' + str(engine)[:1] + '0' # 'fb40'; 'fb50'
#
# cur = db_conn.cursor()
# cur.execute("select rdb$config_value from rdb$config where upper(rdb$config_name) = upper('ServerMode')")
# server_mode = 'XX'
# for r in cur:
#     if r[0] == 'Super':
#         server_mode = 'SS'
#     elif r[0] == 'SuperClassic':
#         server_mode = 'SC'
#     elif r[0] == 'Classic':
#         server_mode = 'CS'
# cur.close()
#
# # 'fbt-main.fb50.ss.fdb' etc:
# db_main = os.path.join( context['temp_directory'], 'fbt-main.' + fb_major + '.' + server_mode + '.fdb' )
# db_repl = db_main.replace( 'fbt-main.', 'fbt-repl.')
#
# # Folders for journalling and archiving segments.
# repl_journal_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.journal' )
# repl_archive_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.archive' )
#
# db_conn.close()
#
# #--------------------------------------------
#
# def flush_and_close( file_handle ):
#     # https://docs.python.org/2/library/os.html#os.fsync
#     # If you're starting with a Python file object f,
#     # first do f.flush(), and
#     # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
#     global os
#
#     file_handle.flush()
#     if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
#         # otherwise: "OSError: [Errno 9] Bad file descriptor"!
#         os.fsync(file_handle.fileno())
#     file_handle.close()
#
# #--------------------------------------------
#
# def cleanup( f_names_list ):
#     global os
#     for i in range(len( f_names_list )):
#         if type(f_names_list[i]) == file:
#             del_name = f_names_list[i].name
#         elif type(f_names_list[i]) == str:
#             del_name = f_names_list[i]
#         else:
#             print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
#             del_name = None
#
#         if del_name and os.path.isfile( del_name ):
#             os.remove( del_name )
#
# #--------------------------------------------
#
# def wait_for_data_in_replica( fb_home, max_allowed_time_for_wait, db_main, prefix_msg = '' ):
#
#     global re
#     global difflib
#     global time
#
#     # -:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-
#     def check_pattern_in_log( log_lines, pattern, prefix_msg = '' ):
#         found_required_message = False
#         for d in log_lines:
#             if pattern.search(d):
#                 print( (prefix_msg + ' ' if prefix_msg else '') + 'FOUND message about replicated segment.' )
#                 found_required_message = True
#                 break
#         return found_required_message
#     # -:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-
#
#     replold_lines = []
#     with open( os.path.join(fb_home,'replication.log'), 'r') as f:
#         replold_lines = f.readlines()
#
#     con = fdb.connect( dsn = 'localhost:' + db_main, no_db_triggers = 1)
#     cur = con.cursor()
#     cur.execute("select rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') from rdb$database")
#     for r in cur:
#         last_generated_repl_segment = r[0]
#     cur.close()
#     con.close()
#
#     # VERBOSE: Segment 1 (2582 bytes) is replicated in 1 second(s), deleting the file
#     segment_replicated_pattern=re.compile( 'verbose:\\s+segment\\s+%(last_generated_repl_segment)s\\s+\\(\\d+\\s+bytes\\)\\s+is\\s+replicated.*deleting' % locals(), re.IGNORECASE)
#
#     # 08.09.2021: replication content can remain unchanged if there was no user-defined object in DB that must be dropped!
#     # Because of this, it is crucial to check OLD content of replication log before loop.
#     # Also, segment_replicated_pattern must NOT start from '\\+' because it can occur only for diff_data (within loop):
#     #
#     found_required_message = check_pattern_in_log( replold_lines, segment_replicated_pattern, prefix_msg )
#
#     if not found_required_message:
#
#         for i in range(0,max_allowed_time_for_wait):
#             time.sleep(1)
#
#             # Get content of fb_home replication.log _after_ isql finish:
#             f_repllog_new = open( os.path.join(fb_home,'replication.log'), 'r')
#             diff_data = difflib.unified_diff(
#                 replold_lines,
#                 f_repllog_new.readlines()
#               )
#             f_repllog_new.close()
#
#             found_required_message = check_pattern_in_log( diff_data, segment_replicated_pattern, prefix_msg )
#             if found_required_message:
#                 break
#
#     if not found_required_message:
#         print('UNEXPECTED RESULT: no message about replicated segment No. %d for %d seconds.' % (int(last_generated_repl_segment), max_allowed_time_for_wait) )
#
# #--------------------------------------------
#
# sql_ddl = ''' create or alter user dba_helper password '123' using plugin Srp grant admin role;
#     grant rdb$admin to user dba_helper;
#     set term ^;
#     execute block as
#     begin
#         execute statement 'create or alter view v1 as select 1 x from rdb$database'
#         with autonomous transaction
#         as user 'dba_helper' password '123' role 'rdb$admin';
#     end
#     ^
#     set term ;^
#     commit;
#     drop user dba_helper using plugin Srp;
#     commit;
#     drop view v1;
#     commit;
# ''' % locals()
#
#
# f_sql_chk = open( os.path.join(context['temp_directory'],'tmp_gh_6856_init.sql'), 'w')
# f_sql_chk.write(sql_ddl)
# flush_and_close( f_sql_chk )
#
# f_sql_log = open( ''.join( (os.path.splitext(f_sql_chk.name)[0], '.log' ) ), 'w')
# f_sql_err = open( ''.join( (os.path.splitext(f_sql_chk.name)[0], '.err' ) ), 'w')
# subprocess.call( [ context['isql_path'], 'localhost:' + db_main, '-i', f_sql_chk.name ], stdout = f_sql_log, stderr = f_sql_err)
# flush_and_close( f_sql_log )
# flush_and_close( f_sql_err )
#
# last_generated_repl_segment = 0
#
# with open(f_sql_err.name,'r') as f:
#     for line in f:
#         print('UNEXPECTED STDERR in initial SQL: ' + line)
#         MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0
#
# if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> initial SQL script finished w/o errors
#
#     ##############################################################################
#     ###  W A I T   U N T I L    R E P L I C A    B E C O M E S   A C T U A L  ###
#     ##############################################################################
#     wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-1' )
#
#     # return initial state of master DB:
#     # remove all DB objects (tables, views, ...):
#     # --------------------------------------------
#     sql_clean_ddl = os.path.join(context['files_location'],'drop-all-db-objects.sql')
#
#     f_clean_log=open( os.path.join(context['temp_directory'],'drop-all-db-objects-gh_6856.log'), 'w')
#     f_clean_err=open( ''.join( ( os.path.splitext(f_clean_log.name)[0], '.err') ), 'w')
#     subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-i', sql_clean_ddl], stdout=f_clean_log, stderr=f_clean_err )
#     flush_and_close(f_clean_log)
#     flush_and_close(f_clean_err)
#
#     with open(f_clean_err.name,'r') as f:
#         for line in f:
#             print('UNEXPECTED STDERR in cleanup SQL: ' + line)
#             MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0
#
#     with open(f_clean_log.name,'r') as f:
#         for line in f:
#             # show number of dropped objects
#             print(line)
#
#     if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> previous SQL script finished w/o errors
#
#         ##############################################################################
#         ###  W A I T   U N T I L    R E P L I C A    B E C O M E S   A C T U A L  ###
#         ##############################################################################
#         wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-2' )
#
#         f_main_meta_sql=open( os.path.join(context['temp_directory'],'db_main_meta_gh_6856.sql'), 'w')
#         subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_main_meta_sql, stderr=subprocess.STDOUT )
#         flush_and_close( f_main_meta_sql )
#
#         f_repl_meta_sql=open( os.path.join(context['temp_directory'],'db_repl_meta_gh_6856.sql'), 'w')
#         subprocess.call( [context['isql_path'], 'localhost:' + db_repl, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_repl_meta_sql, stderr=subprocess.STDOUT )
#         flush_and_close( f_repl_meta_sql )
#
#         db_main_meta=open(f_main_meta_sql.name, 'r')
#         db_repl_meta=open(f_repl_meta_sql.name, 'r')
#
#         diffmeta = ''.join(difflib.unified_diff(
#             db_main_meta.readlines(),
#             db_repl_meta.readlines()
#           ))
#         db_main_meta.close()
#         db_repl_meta.close()
#
#         f_meta_diff=open( os.path.join(context['temp_directory'],'db_meta_diff_gh_6856.txt'), 'w', buffering = 0)
#         f_meta_diff.write(diffmeta)
#         flush_and_close( f_meta_diff )
#
#         # Following must issue only TWO rows:
#         # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_main]' ... */
#         # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_repl]' ... */
#         # Only these lines will be suppressed further (see subst. section):
#         with open(f_meta_diff.name, 'r') as f:
#             for line in f:
#                 if line[:1] in ('-', '+') and line[:3] not in ('---','+++'):
#                     print('UNEXPECTED METADATA DIFF.: ' + line)
#
# ######################
# ### A C H T U N G ###
# ######################
# # MANDATORY, OTHERWISE REPLICATION GETS STUCK ON SECOND RUN OF THIS TEST
# # WITH 'ERROR: Record format with length 68 is not found for table TEST':
# runProgram('gfix', ['-sweep', 'localhost:' + db_repl])
# runProgram('gfix', ['-sweep', 'localhost:' + db_main])
# #######################
#
# # cleanup:
# ##########
# cleanup( (f_sql_chk, f_sql_log, f_sql_err,f_clean_log,f_clean_err,f_main_meta_sql,f_repl_meta_sql,f_meta_diff) )
#
#---
act_1 = python_act('db_1', substitutions=substitutions_1)

expected_stdout_1 = """
    POINT-1 FOUND message about replicated segment.
    Start removing objects
    Finish. Total objects removed
    POINT-2 FOUND message about replicated segment.
"""

@pytest.mark.version('>=4.0.1')
@pytest.mark.platform('Windows')
def test_1(act_1: Action):
    pytest.fail("Test not IMPLEMENTED")

@ -0,0 +1,506 @@
#coding:utf-8
#
# id: tests.functional.replication.shutdown_during_applying_segments_leads_to_crash
# title: Crash or hang while shutting down the replica database if segments are being applied
# decription:
# See: https://github.com/FirebirdSQL/firebird/issues/6975
#
# Bug initially was found during a heavy replication test performed by OLTP-EMUL, for FB 4.x
# (see letters to dimitr 13.09.2021; reply from dimitr, 18.09.2021 08:42 - all in mailbox: pz at ibase.ru).
#
# It *can* be reproduced without heavy/concurrent workload, but we have to operate with data that are written
# into the database 'slowly'. Such data can be a wide INDEXED column which has GUID-based values.
#
# Test creates a table with a 'wide' indexed field and adds data to it.
# Then we save the current timestamp (with accuracy up to SECONDS, i.e. cut off milli- or microseconds) to a variable.
# After this we start checking replication.log for the appearance of the phrase 'Added <N> segment(s) to the processing queue'.
# After finding each such phrase we look two lines above and parse the timestamp when this occurred.
# If the timestamp in the log is less than the saved timestamp of our DML action then we go on to the next such phrase.
# Otherwise we can assume that replication BEGINS to apply the just generated segment.
# See function wait_for_add_queue_in_replica() which does this parsing of replication.log.
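# Parsing that timestamp boils down to a single strptime call over the line two rows
# above the matched phrase, e.g. (a minimal sketch; the host name is a placeholder):
#
#     from datetime import datetime
#     dts = ' '.join( 'replica_host_name (replica) Tue Sep 21 20:24:57 2021'.split()[-5:] )
#     ts = datetime.strptime( dts, '%a %b %d %H:%M:%S %Y' )   # -> 2021-09-21 20:24:57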
#
# Because we operate with a table which has a very 'wide' index and, moreover, data in this index are GUID-generated
# text strings, we can safely assume that applying of the segment will take at least 5...10 seconds (actually this
# can take 30...35 seconds).
#
# During this time we change replica mode to full shutdown and (immediately after that) return it to online.
# NO message like 'error reading / writing from/to connection' must appear at this step.
#
# After this, we have to wait for the replica to finish applying the segment, and when this occurs we drop the table.
#
# Finally, we extract metadata for master and replica and compare them (see 'f_meta_diff').
# The only difference in metadata must be the 'CREATE DATABASE' statement with different DB names - we suppress it,
# thus no metadata difference must be issued.
#
# ################
# ### N O T E ###
# ################
# Test assumes that master and replica DB have been created beforehand.
# Also, it assumes that %FB_HOME%\replication.conf has been prepared with appropriate parameters for replication.
# Particularly, the names of directories and databases must contain info about the checked FB major version and ServerMode.
# * verbose = true // in order to find the line with the message that the required segment was replicated
# * section for master database with specified parameters:
#       journal_directory = "!fbt_repo!\tmp\fb-replication.!fb_major!.!server_mode!.journal"
#       journal_archive_directory = "!fbt_repo!\tmp\fb-replication.!fb_major!.!server_mode!.archive"
#       journal_archive_command = "copy $(pathname) $(archivepathname)"
#       journal_archive_timeout = 10
# * section for replica database with specified parameter:
#       journal_source_directory = "!fbt_repo!\tmp\fb-replication.!fb_major!.!server_mode!.archive"
#
# Master and replica databases must be created in the "!fbt_repo!\tmp" directory and have names like these:
# 'fbt-main.fb40.SS.fdb'; 'fbt-repl.fb40.SS.fdb'; - for FB 4.x ('SS' = Super; 'CS' = Classic)
# 'fbt-main.fb50.SS.fdb'; 'fbt-repl.fb50.SS.fdb'; - for FB 5.x ('SS' = Super; 'CS' = Classic)
# NB: fixed numeric value ('40' or '50') must be used for any minor FB version (4.0; 4.0.1; 4.1; 5.0; 5.1 etc)
#
# These two databases must NOT be dropped in any of the tests related to replication!
# They are created and dropped in the batch scenario which prepares the FB instance to be checked for each ServerMode
# and makes cleanup after it, i.e. when all tests have been completed.
#
# NB. Currently this task was implemented only in a Windows batch, thus the test has attribute platform = 'Windows'.
#
# Temporary comment. For debug purposes:
# 1) find out the SUFFIX of the name of the FB service which is to be tested (e.g. 'DefaultInstance', '40SS' etc);
# 2) copy file %fbt-repo%\tests\functional\tabloid\batches\setup-fb-for-replication.bat.txt
#    to some place and rename it "*.bat";
# 3) open this .bat in an editor and adjust the value of the 'fbt_repo' variable;
# 4) run: setup-fb-for-replication.bat [SUFFIX_OF_FB_SERVICE]
#    where SUFFIX_OF_FB_SERVICE is the ending part of the FB service which you want to check:
#    DefaultInstance ; 40ss ; 40cs ; 50ss ; 50cs etc
# 5) batch 'setup-fb-for-replication.bat' will:
#    * stop selected FB instance
#    * create test databases (in !fbt_repo!\tmp\);
#    * prepare journal/archive sub-folders for replication (also in !fbt_repo!\tmp\);
#    * replace %fb_home%\replication.conf with the appropriate one
#    * start selected FB instance
# 6) run this test (FB instance will be already launched by setup-fb-for-replication.bat):
#    %fpt_repo%\fbt-run2.bat dblevel-triggers-must-not-fire-on-replica.fbt 50ss, etc
#
# Confirmed bug on 5.0.0.215: server crashed when segment was applied to replica and at the same time we issued
# 'gfix -shut full -force 0 ...'. Regardless of that command, replica DB remained in NORMAL mode, not in shutdown.
# If this command was issued after this again - FB process hanged (gfix could not return control to OS).
# This is the same bug as described in the ticket (discussed with dimitr, letters 22.09.2021).
#
# Checked on: 4.0.1.2613 (SS/CS); 5.0.0.219 (SS/CS)
#
# tracker_id:
# min_versions: ['4.0.1']
# versions: 4.0.1
# qmid: None

import pytest
from firebird.qa import db_factory, python_act, Action

# version: 4.0.1
# resources: None

substitutions_1 = [('Start removing objects in:.*', 'Start removing objects'), ('Finish. Total objects removed: [1-9]\\d*', 'Finish. Total objects removed'), ('.* CREATE DATABASE .*', ''), ('FMT_DESCR .*', 'FMT_DESCR'), ('[ \t]+', ' ')]

init_script_1 = """"""

db_1 = db_factory(sql_dialect=3, init=init_script_1)

# test_script_1
#---
#
# import os
# import subprocess
# import re
# import difflib
# import shutil
# import time
# from datetime import datetime
# from datetime import timedelta
#
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
#
# # NB: with default values of 'apply_idle_timeout' and 'apply_error_timeout' (10 and 60 s)
# # total time of this test is about 130...132s.
# #####################################
# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 135
# MAX_TIME_FOR_WAIT_ADDED_TO_QUEUE = 135
# #####################################
#
# svc = fdb.services.connect(host='localhost', user=user_name, password=user_password)
# FB_HOME = svc.get_home_directory()
# svc.close()
#
# engine = db_conn.engine_version # 4.0; 4.1; 5.0 etc -- type float
# fb_major = 'fb' + str(engine)[:1] + '0' # 'fb40'; 'fb50'
#
# cur = db_conn.cursor()
# cur.execute("select rdb$config_value from rdb$config where upper(rdb$config_name) = upper('ServerMode')")
# server_mode = 'XX'
# for r in cur:
#     if r[0] == 'Super':
#         server_mode = 'SS'
#     elif r[0] == 'SuperClassic':
#         server_mode = 'SC'
#     elif r[0] == 'Classic':
#         server_mode = 'CS'
# cur.close()
#
# # 'fbt-main.fb50.ss.fdb' etc:
# db_main = os.path.join( context['temp_directory'], 'fbt-main.' + fb_major + '.' + server_mode + '.fdb' )
# db_repl = db_main.replace( 'fbt-main.', 'fbt-repl.')
#
# # Folders for journalling and archiving segments.
# repl_journal_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.journal' )
# repl_archive_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.archive' )
#
# runProgram('gfix', ['-w', 'async', 'localhost:' + db_main])
#
# db_conn.close()
#
# #--------------------------------------------
#
# def flush_and_close( file_handle ):
#     # https://docs.python.org/2/library/os.html#os.fsync
#     # If you're starting with a Python file object f,
#     # first do f.flush(), and
#     # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
#     global os
#
#     file_handle.flush()
#     if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
#         # otherwise: "OSError: [Errno 9] Bad file descriptor"!
#         os.fsync(file_handle.fileno())
#     file_handle.close()
#
# #--------------------------------------------
#
# def cleanup( f_names_list ):
#     global os
#     for i in range(len( f_names_list )):
#         if type(f_names_list[i]) == file:
#             del_name = f_names_list[i].name
#         elif type(f_names_list[i]) == str:
#             del_name = f_names_list[i]
#         else:
#             print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
#             del_name = None
#
#         if del_name and os.path.isfile( del_name ):
#             os.remove( del_name )
#
# #--------------------------------------------
#
# def wait_for_data_in_replica( fb_home, max_allowed_time_for_wait, db_main, prefix_msg = '' ):
#     global re
#     global difflib
#     global time
#
#     # -:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-
#     def check_pattern_in_log( log_lines, pattern, prefix_msg = '' ):
#         found_required_message = False
#         for d in log_lines:
#             if pattern.search(d):
#                 print( (prefix_msg + ' ' if prefix_msg else '') + 'FOUND message about replicated segment.' )
#                 found_required_message = True
#                 break
#         return found_required_message
#     # -:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-
#
#     replold_lines = []
#     with open( os.path.join(fb_home,'replication.log'), 'r') as f:
#         replold_lines = f.readlines()
#
#     con = fdb.connect( dsn = 'localhost:' + db_main, no_db_triggers = 1)
#     cur = con.cursor()
#     cur.execute("select rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') from rdb$database")
#     for r in cur:
#         last_generated_repl_segment = r[0]
#     cur.close()
#     con.close()
#
#     # VERBOSE: Segment 1 (2582 bytes) is replicated in 1 second(s), deleting the file
#     segment_replicated_pattern=re.compile( 'verbose:\\s+segment\\s+%(last_generated_repl_segment)s\\s+\\(\\d+\\s+bytes\\)\\s+is\\s+replicated.*deleting' % locals(), re.IGNORECASE)
#
#     # 08.09.2021: replication content can remain unchanged if there was no user-defined object in DB that must be dropped!
#     # Because of this, it is crucial to check OLD content of replication log before loop.
#     # Also, segment_replicated_pattern must NOT start from '\\+' because it can occur only for diff_data (within loop):
#     #
#     found_required_message = check_pattern_in_log( replold_lines, segment_replicated_pattern, prefix_msg )
#
#     if not found_required_message:
#
#         for i in range(0,max_allowed_time_for_wait):
#             time.sleep(1)
#
#             # Get content of fb_home replication.log _after_ isql finish:
#             f_repllog_new = open( os.path.join(fb_home,'replication.log'), 'r')
#             diff_data = difflib.unified_diff(
#                 replold_lines,
#                 f_repllog_new.readlines()
#               )
#             f_repllog_new.close()
#
#             found_required_message = check_pattern_in_log( diff_data, segment_replicated_pattern, prefix_msg )
#             if found_required_message:
#                 break
#
#     if not found_required_message:
#         print('UNEXPECTED RESULT: no message about replicated segment No. %d for %d seconds.' % (int(last_generated_repl_segment), max_allowed_time_for_wait) )
#
#
# #--------------------------------------------
#
# def wait_for_add_queue_in_replica( fb_home, max_allowed_time_for_wait, min_timestamp, prefix_msg = '' ):
#
#     global re
#     global difflib
#     global time
#     global datetime
#
#     # <hostname> (replica) Tue Sep 21 20:24:57 2021
#     # Database: ...
#     # Added 3 segment(s) to the processing queue
#
#     # -:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-
#     def check_pattern_in_log( log_lines, pattern, min_timestamp, prefix_msg = '' ):
#         found_required_message = False
#         for i,r in enumerate(log_lines):
#             if pattern.search(r):
#                 if i>=2 and log_lines[i-2]:
#                     # a = r.replace('(',' ').split()
#                     a = log_lines[i-2].split()
#                     if len(a)>=4:
#                         # s='replica_host_name (slave) Sun May 30 17:46:43 2021'
#                         # s.split()[-5:] ==> ['Sun', 'May', '30', '17:46:43', '2021']
#                         # ' '.join( ...) ==> 'Sun May 30 17:46:43 2021'
#                         dts = ' '.join( log_lines[i-2].split()[-5:] )
#                         segment_timestamp = datetime.strptime( dts, '%a %b %d %H:%M:%S %Y')
#                         if segment_timestamp >= min_timestamp:
#                             print( (prefix_msg + ' ' if prefix_msg else '') + 'FOUND message about segments added to queue after given timestamp.') #, 'segment_timestamp=%s' % segment_timestamp, '; min_timestamp=%s' % min_timestamp )
#                             found_required_message = True
#                             break
#         return found_required_message
#
#     # -:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-
#
#     replold_lines = []
#     with open( os.path.join(fb_home,'replication.log'), 'r') as f:
#         replold_lines = f.readlines()
#
#
#     segments_to_queue_pattern=re.compile( 'verbose:\\s+added\\s+\\d+\\s+segment.*to.*queue', re.IGNORECASE)
#
#     # 08.09.2021: replication content can remain unchanged if there was no user-defined object in DB that must be dropped!
#     # Because of this, it is crucial to check OLD content of replication log before loop.
#     # Also, segments_to_queue_pattern must NOT start from '\\+' because it can occur only for diff_data (within loop):
#     #
#     found_required_message = check_pattern_in_log( replold_lines, segments_to_queue_pattern, min_timestamp, prefix_msg )
#
#     if not found_required_message:
#
#         for i in range(0,max_allowed_time_for_wait):
#             time.sleep(1)
#
#             # Get content of fb_home replication.log _after_ isql finish:
#             f_repllog_new = open( os.path.join(fb_home,'replication.log'), 'r')
#             diff_data = difflib.unified_diff(
#                 replold_lines,
#                 f_repllog_new.readlines()
#               )
#             f_repllog_new.close()
#
#             found_required_message = check_pattern_in_log( list(diff_data), segments_to_queue_pattern, min_timestamp, prefix_msg )
#             if found_required_message:
#                 break
#
#     if not found_required_message:
#         print('UNEXPECTED RESULT: no message about segments added to queue after %s.' % min_timestamp)
#
# #--------------------------------------------
#
# sql_ddl = ''' set bail on;
#     recreate table test(s varchar(700), constraint test_s_unq unique(s));
#     commit;
#
#     set term ^;
#     execute block as
#         declare fld_len int;
#         declare n int;
#     begin
#         select ff.rdb$field_length
#         from rdb$relation_fields rf
#         join rdb$fields ff on rf.rdb$field_source = ff.rdb$field_name
#         where upper(rf.rdb$relation_name) = upper('test') and upper(rf.rdb$field_name) = upper('s')
#         into fld_len;
#
#
#         n = 10000;
#         while (n > 0) do
#         begin
#             insert into test(s) values( lpad('', :fld_len, uuid_to_char(gen_uuid())) );
#             n = n - 1;
#         end
#
#     end
#     ^
#     set term ;^
#     commit;
# ''' % locals()
#
#
# f_sql_chk = open( os.path.join(context['temp_directory'],'tmp_gh_6975_init.sql'), 'w')
# f_sql_chk.write(sql_ddl)
# flush_and_close( f_sql_chk )
#
# f_sql_log = open( ''.join( (os.path.splitext(f_sql_chk.name)[0], '.log' ) ), 'w')
# f_sql_err = open( ''.join( (os.path.splitext(f_sql_chk.name)[0], '.err' ) ), 'w')
# subprocess.call( [ context['isql_path'], 'localhost:' + db_main, '-i', f_sql_chk.name ], stdout = f_sql_log, stderr = f_sql_err)
# flush_and_close( f_sql_log )
# flush_and_close( f_sql_err )
#
# last_generated_repl_segment = 0
#
# with open(f_sql_err.name,'r') as f:
#     for line in f:
#         print('UNEXPECTED STDERR in initial SQL: ' + line)
#         MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0
#
# if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> initial SQL script finished w/o errors
#
#
#     ##################################
#     ###       A.C.H.T.U.N.G        ###
#     ### do NOT use datetime.now()  ###
#     ### because of missed accuracy ###
#     ### of timestamps in repl.log  ###
#     ### (it is HH:MM:SS only)      ###
#     ##################################
#     current_date_with_hhmmss = datetime.today().replace(microsecond=0)
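#     # e.g. 2021-09-21 20:24:57; microseconds are dropped so that this value can be
#     # compared with timestamps parsed from replication.log, which carry only
#     # HH:MM:SS precision (see the ACHTUNG note above).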
#
|
||||
#
|
||||
# ##############################################################################
|
||||
# ### W A I T F O R S E G M E N T S A D D E D T O Q U E U E ###
|
||||
# ##############################################################################
|
||||
# wait_for_add_queue_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_ADDED_TO_QUEUE, current_date_with_hhmmss, 'POINT-A' )
|
||||
#
|
||||
# # This led to crash and appearance of message:
|
||||
# # "Fatal lock manager error: invalid lock id (0), errno: 0" in firebird.log:
|
||||
# #
|
||||
# runProgram('gfix', ['-shut', 'full', '-force', '0', 'localhost:' + db_repl])
|
||||
#
|
||||
# f_repl_hdr_log=open( os.path.join(context['temp_directory'],'db_repl_hdr.log'), 'w')
|
||||
# subprocess.call( [context['gstat_path'], db_repl, '-h'], stdout=f_repl_hdr_log, stderr=subprocess.STDOUT )
|
||||
# flush_and_close( f_repl_hdr_log )
|
||||
#
|
||||
# with open(f_repl_hdr_log.name,'r') as f:
|
||||
# for line in f:
|
||||
# if 'Attributes' in line:
|
||||
# print('POINT-B ' + line.strip())
|
||||
#
|
||||
#
|
||||
# # This (issuing 'gfix -shu ...' second time) led FB process to hang!
|
||||
# # runProgram('gfix', ['-shut', 'full', '-force', '0', 'localhost:' + db_repl])
|
||||
#
|
||||
# runProgram('gfix', ['-online', 'localhost:' + db_repl])
|
||||
#
|
||||
#
|
||||
# ##############################################################################
|
||||
# ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ###
|
||||
# ##############################################################################
|
||||
# wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-1' )
|
||||
#
|
||||
# # return initial state of master DB:
|
||||
# # remove all DB objects (tables, views, ...):
|
||||
# # --------------------------------------------
|
||||
# sql_clean_ddl = os.path.join(context['files_location'],'drop-all-db-objects.sql')
|
||||
#
|
||||
# f_clean_log=open( os.path.join(context['temp_directory'],'drop-all-db-objects-gh_6975.log'), 'w')
|
||||
# f_clean_err=open( ''.join( ( os.path.splitext(f_clean_log.name)[0], '.err') ), 'w')
|
||||
# subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-i', sql_clean_ddl], stdout=f_clean_log, stderr=f_clean_err )
|
||||
# flush_and_close(f_clean_log)
|
||||
# flush_and_close(f_clean_err)
|
||||
#
|
||||
# with open(f_clean_err.name,'r') as f:
|
||||
# for line in f:
|
||||
# print('UNEXPECTED STDERR in cleanup SQL: ' + line)
|
||||
# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0
|
||||
#
|
||||
# with open(f_clean_log.name,'r') as f:
|
||||
# for line in f:
|
||||
# # show number of dropped objects
|
||||
# print(line)
|
||||
#
|
||||
# if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> previous SQL script finished w/o errors
|
||||
#
|
||||
# ##############################################################################
|
||||
# ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ###
|
||||
# ##############################################################################
|
||||
# wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-2' )
|
||||
#
|
||||
# f_main_meta_sql=open( os.path.join(context['temp_directory'],'db_main_meta_gh_6975.sql'), 'w')
|
||||
# subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_main_meta_sql, stderr=subprocess.STDOUT )
|
||||
# flush_and_close( f_main_meta_sql )
|
||||
#
|
||||
# f_repl_meta_sql=open( os.path.join(context['temp_directory'],'db_repl_meta_gh_6975.sql'), 'w')
|
||||
# subprocess.call( [context['isql_path'], 'localhost:' + db_repl, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_repl_meta_sql, stderr=subprocess.STDOUT )
|
||||
# flush_and_close( f_repl_meta_sql )
|
||||
#
|
||||
# db_main_meta=open(f_main_meta_sql.name, 'r')
|
||||
# db_repl_meta=open(f_repl_meta_sql.name, 'r')
|
||||
#
|
||||
# diffmeta = ''.join(difflib.unified_diff(
|
||||
# db_main_meta.readlines(),
|
||||
# db_repl_meta.readlines()
|
||||
# ))
|
||||
# db_main_meta.close()
|
||||
# db_repl_meta.close()
|
||||
#
|
||||
# f_meta_diff=open( os.path.join(context['temp_directory'],'db_meta_diff_gh_6975.txt'), 'w', buffering = 0)
|
||||
# f_meta_diff.write(diffmeta)
|
||||
# flush_and_close( f_meta_diff )
|
||||
#
|
||||
# # Following must issue only TWO rows:
|
||||
# # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_main]' ... */
|
||||
# # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_repl]' ... */
|
||||
# # Only thes lines will be suppressed further (see subst. section):
|
||||
# with open(f_meta_diff.name, 'r') as f:
|
||||
# for line in f:
|
||||
# if line[:1] in ('-', '+') and line[:3] not in ('---','+++'):
|
||||
# print('UNEXPECTED METADATA DIFF.: ' + line)
|
||||
#
|
||||
# runProgram('gfix', ['-w', 'sync', 'localhost:' + db_main])
|
||||
#
|
||||
# ######################
|
||||
# ### A C H T U N G ###
|
||||
# ######################
|
||||
# # MANDATORY, OTHERWISE REPLICATION GETS STUCK ON SECOND RUN OF THIS TEST
|
||||
# # WITH 'ERROR: Record format with length 68 is not found for table TEST':
|
||||
# runProgram('gfix', ['-sweep', 'localhost:' + db_repl])
|
||||
# runProgram('gfix', ['-sweep', 'localhost:' + db_main])
|
||||
# #######################
|
||||
#
|
||||
# # cleanup:
|
||||
# ##########
|
||||
# #cleanup( (f_sql_chk, f_sql_log, f_sql_err,f_clean_log,f_clean_err,f_main_meta_sql,f_repl_meta_sql,f_meta_diff) )
|
||||
#
|
||||
#---
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)

expected_stdout_1 = """
POINT-A FOUND message about segments added to queue after given timestamp.
POINT-B Attributes force write, full shutdown, read-only replica
POINT-1 FOUND message about replicated segment.
Start removing objects
Finish. Total objects removed
POINT-2 FOUND message about replicated segment.
"""

@pytest.mark.version('>=4.0.1')
@pytest.mark.platform('Windows')
def test_1(act_1: Action):
    pytest.fail("Test not IMPLEMENTED")

@@ -0,0 +1,382 @@
#coding:utf-8
#
# id: tests.functional.replication.some_updates_crash_server_on_replica_side
# title: Some updates can crash Firebird server on replica side
# decription:
# See: https://github.com/FirebirdSQL/firebird/issues/6909
#
# We create a table and add data to it according to the ticket info.
#
# After this we wait until the replica becomes actual to master; this delay lasts no more than the threshold
# defined by the MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG variable (measured in seconds).
# During this delay, we check the replication log every second and search there for the line with the number
# of the last generated segment (which was replicated and finally deleted).
# We can assume that replication finished OK only when such line is found (see 'POINT-1').
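#
# In compressed form, this waiting logic is a one-second polling loop. The full
# implementation is wait_for_data_in_replica() in the commented script below;
# this is only a condensed sketch, and the helper name used here is hypothetical:
#
#     for _ in range(MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG):
#         time.sleep(1)
#         if replicated_segment_message_found():   # regex search over new replication.log lines
#             break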
#
# Further, we invoke ISQL, executing an auxiliary script that drops all DB objects on master (with the '-nod' command switch).
# After all objects have been dropped, we have to wait again until the replica becomes actual with master (see 'POINT-2').
#
# Finally, we extract metadata for master and replica and compare them (see 'f_meta_diff').
# The only difference in metadata must be the 'CREATE DATABASE' statement with different DB names - we suppress it,
# thus no metadata difference must be issued.
#
# ####################
# ### CRUCIAL NOTE ###
# ####################
# Currently, 25.06.2021, there is a bug in FB 4.x and 5.x which can be seen on the SECOND run of this test: a message with the text
# "ERROR: Record format with length 68 is not found for table TEST" will appear in it after inserting the 1st record in master.
# The reason for that is "dirty" pages that remain in RDB$RELATION_FIELDS both on master and replica after dropping the table.
# The following query shows different data that appear in the replica DB on the 1st and 2nd runs (just after the table was created on master):
# =======
# set blobdisplay 6;
# select rdb$descriptor as fmt_descr
# from rdb$formats natural join rdb$relations where rdb$relation_name = 'TEST';
# =======
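#
# A sketch of one way to run that query from Python (for illustration only; the
# isql binary being on PATH and the replica DB name are assumptions here):
#
#     import subprocess
#     q = ("set blobdisplay 6; select rdb$descriptor as fmt_descr "
#          "from rdb$formats natural join rdb$relations where rdb$relation_name = 'TEST';")
#     subprocess.run(['isql', 'localhost:fbt-repl.fb40.SS.fdb', '-q'], input=q, text=True)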
# This bug was explained by dimitr, see letters 25.06.2021 11:49 and 25.06.2021 16:56.
# It will be fixed later.
#
# The only workaround to solve this problem is to make SWEEP after all DB objects have been dropped.
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# !NB! BOTH master and replica must be cleaned up by sweep!
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
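#
# Those are the same two calls that appear near the end of the commented test script
# below, shown here for clarity (runProgram is the legacy fbtest helper used in it):
#
#     runProgram('gfix', ['-sweep', 'localhost:' + db_repl])
#     runProgram('gfix', ['-sweep', 'localhost:' + db_main])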
#
# ################
# ### N O T E ###
# ################
# Test assumes that master and replica DB have been created beforehand.
# Also, it assumes that %FB_HOME%\replication.conf has been prepared with appropriate parameters for replication.
# In particular, names of directories and databases must contain info about the checked FB major version and ServerMode:
# * verbose = true // in order to find the line with the message that the required segment was replicated
# * section for master database with specified parameters:
#       journal_directory = "!fbt_repo!\tmp\fb-replication.!fb_major!.!server_mode!.journal"
#       journal_archive_directory = "!fbt_repo!\tmp\fb-replication.!fb_major!.!server_mode!.archive"
#       journal_archive_command = "copy $(pathname) $(archivepathname)"
#       journal_archive_timeout = 10
# * section for replica database with specified parameter:
#       journal_source_directory = "!fbt_repo!\tmp\fb-replication.!fb_major!.!server_mode!.archive"
#
# Master and replica databases must be created in the "!fbt_repo!\tmp" directory and have names like these:
#     'fbt-main.fb40.SS.fdb'; 'fbt-repl.fb40.SS.fdb'; - for FB 4.x ('SS' = Super; 'CS' = Classic)
#     'fbt-main.fb50.SS.fdb'; 'fbt-repl.fb50.SS.fdb'; - for FB 5.x ('SS' = Super; 'CS' = Classic)
# NB: a fixed numeric value ('40' or '50') must be used for any minor FB version (4.0; 4.0.1; 4.1; 5.0; 5.1 etc)
#
# These two databases must NOT be dropped in any of the tests related to replication!
# They are created and dropped in the batch scenario which prepares the FB instance to be checked for each ServerMode
# and makes cleanup after it, i.e. when all tests have been completed.
#
# NB. Currently this task is implemented only as a Windows batch, thus the test has the attribute platform = 'Windows'.
#
# Temporary comment. For debug purposes:
# 1) find out the SUFFIX of the name of the FB service which is to be tested (e.g. 'DefaultInstance', '40SS' etc);
# 2) copy the file %fbt-repo%\tests\functional\tabloid\batches\setup-fb-for-replication.bat.txt
#    to some place and rename it "*.bat";
# 3) open this .bat in an editor and adjust the value of the 'fbt_repo' variable;
# 4) run: setup-fb-for-replication.bat [SUFFIX_OF_FB_SERVICE]
#    where SUFFIX_OF_FB_SERVICE is the ending part of the FB service name which you want to check:
#    DefaultInstance ; 40ss ; 40cs ; 50ss ; 50cs etc
# 5) the batch 'setup-fb-for-replication.bat' will:
#    * stop the selected FB instance
#    * create test databases (in !fbt_repo!\tmp\);
#    * prepare journal/archive sub-folders for replication (also in !fbt_repo!\tmp\);
#    * replace %fb_home%\replication.conf with the appropriate one
#    * start the selected FB instance
# 6) run this test (the FB instance will already be launched by setup-fb-for-replication.bat):
#    %fpt_repo%\fbt-run2.bat dblevel-triggers-must-not-fire-on-replica.fbt 50ss, etc
#
# Confirmed bug on 5.0.0.126 (31.07.2021), 4.0.1.2547 (30.07.2021):
# FB crashes, segment is not delivered on replica.
# Initial fix was for FB 4.x, 30-jul-2021 16:28 (44f48955c250193096c244bee9e5cd7ddf9a099b),
# frontported to FB 5.x, 04-aug-2021 12:48 (220ca99b85289fdd7a5257e576499a1b9c345cd9)
#
# Checked on:
#     5.0.0.126 SS: 27.290s // intermediate build, timestamp: 04-aug-2021 12:08.
#     5.0.0.126 CS: 26.239s.
#     4.0.1.2556 SS: 29.956s.
#     4.0.1.2556 CS: 26.108s.
#
# tracker_id:
# min_versions: ['4.0.1']
# versions: 4.0.1
# qmid: None

import pytest
from firebird.qa import db_factory, python_act, Action

# version: 4.0.1
# resources: None

substitutions_1 = [('Start removing objects in:.*', 'Start removing objects'), ('Finish. Total objects removed: [1-9]\\d*', 'Finish. Total objects removed'), ('.* CREATE DATABASE .*', ''), ('FMT_DESCR .*', 'FMT_DESCR')]
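# Each pair in substitutions_1 is a (pattern, replacement) regex that the QA framework
# applies to output lines before comparison, so that volatile details (object counts,
# DB paths) are normalized. A minimal sketch of the assumed semantics:
#
#     import re
#     line = 'Finish. Total objects removed: 37'
#     for pat, repl in substitutions_1:
#         line = re.sub(pat, repl, line)
#     # line is now 'Finish. Total objects removed'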

init_script_1 = """"""

db_1 = db_factory(sql_dialect=3, init=init_script_1)

# test_script_1
#---
#
# import os
# import subprocess
# import re
# import difflib
# import shutil
# import time
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
#
# #####################################
# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 65
# #####################################
#
# svc = fdb.services.connect(host='localhost', user=user_name, password=user_password)
# FB_HOME = svc.get_home_directory()
# svc.close()
#
# engine = db_conn.engine_version # 4.0; 4.1; 5.0 etc -- type float
# fb_major = 'fb' + str(engine)[:1] + '0' # 'fb40'; 'fb50'
#
# cur = db_conn.cursor()
# cur.execute("select rdb$config_value from rdb$config where upper(rdb$config_name) = upper('ServerMode')")
# server_mode = 'XX'
# for r in cur:
#     if r[0] == 'Super':
#         server_mode = 'SS'
#     elif r[0] == 'SuperClassic':
#         server_mode = 'SC'
#     elif r[0] == 'Classic':
#         server_mode = 'CS'
# cur.close()
#
# # 'fbt-main.fb50.ss.fdb' etc:
# db_main = os.path.join( context['temp_directory'], 'fbt-main.' + fb_major + '.' + server_mode + '.fdb' )
# db_repl = db_main.replace( 'fbt-main.', 'fbt-repl.')
#
# # Folders for journalling and archiving segments:
# repl_journal_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.journal' )
# repl_archive_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.archive' )
#
# db_conn.close()
#
# #--------------------------------------------
#
# def flush_and_close( file_handle ):
#     # https://docs.python.org/2/library/os.html#os.fsync
#     # If you're starting with a Python file object f,
#     # first do f.flush(), and
#     # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
#     global os
#
#     file_handle.flush()
#     if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
#         # otherwise: "OSError: [Errno 9] Bad file descriptor"!
#         os.fsync(file_handle.fileno())
#     file_handle.close()
#
# #--------------------------------------------
#
# def cleanup( f_names_list ):
#     global os
#     for i in range(len( f_names_list )):
#         if type(f_names_list[i]) == file:
#             del_name = f_names_list[i].name
#         elif type(f_names_list[i]) == str:
#             del_name = f_names_list[i]
#         else:
#             print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
#             del_name = None
#
#         if del_name and os.path.isfile( del_name ):
#             os.remove( del_name )
#
# #--------------------------------------------
#
# def wait_for_data_in_replica( fb_home, max_allowed_time_for_wait, db_main, prefix_msg = '' ):
#
#     global re
#     global difflib
#     global time
#
#     replold_lines = []
#     with open( os.path.join(fb_home,'replication.log'), 'r') as f:
#         replold_lines = f.readlines()
#
#     con = fdb.connect( dsn = 'localhost:' + db_main, no_db_triggers = 1)
#     cur = con.cursor()
#     cur.execute("select rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') from rdb$database")
#     for r in cur:
#         last_generated_repl_segment = r[0]
#     cur.close()
#     con.close()
#
#     #print('last_generated_repl_segment:', last_generated_repl_segment)
#
#     # VERBOSE: Segment 1 (2582 bytes) is replicated in 1 second(s), deleting the file
#     p=re.compile( '\\+\\s+verbose:\\s+segment\\s+%(last_generated_repl_segment)s\\s+\\(\\d+\\s+bytes\\)\\s+is\\s+replicated.*deleting' % locals(), re.IGNORECASE)
#
#     found_required_message = False
#     for i in range(0,max_allowed_time_for_wait):
#         time.sleep(1)
#
#         # Get content of fb_home replication.log _after_ isql finish:
#         f_repllog_new = open( os.path.join(fb_home,'replication.log'), 'r')
#         diff_data = difflib.unified_diff(
#             replold_lines,
#             f_repllog_new.readlines()
#         )
#         f_repllog_new.close()
#
#         for k,d in enumerate(diff_data):
#             if p.search(d):
#                 print( (prefix_msg + ' ' if prefix_msg else '') + 'FOUND message about replicated segment.' )
#                 found_required_message = True
#                 break
#
#         if found_required_message:
#             break
#
#     if not found_required_message:
#         print('UNEXPECTED RESULT: no message about replicated segment for %d seconds.' % max_allowed_time_for_wait)
#
# #--------------------------------------------
#
# sql_ddl = '''    set bail on;
#     create table rep (
#         id_report integer not null,
#         report_data blob sub_type binary not null,
#         zvyrazneni_radku char(1) default 'n' not null,
#         kriteria_dotazu char(1) default 'n' not null,
#         stav char(1) default 'a' not null,
#         constraint rep_pk primary key (id_report)
#     );
#
#     insert into rep (id_report, report_data, zvyrazneni_radku, kriteria_dotazu, stav)
#     values (0, 'report_data', 'n', 'n', 'a');
#
#     commit;
#
#     update rep set zvyrazneni_radku='a' where id_report =0;
#     update rep set kriteria_dotazu='a' where id_report =0;
#
#     commit;
# ''' % locals()
#
#
# f_sql_chk = open( os.path.join(context['temp_directory'],'tmp_gh_6909_init.sql'), 'w')
# f_sql_chk.write(sql_ddl)
# flush_and_close( f_sql_chk )
#
# f_sql_log = open( ''.join( (os.path.splitext(f_sql_chk.name)[0], '.log' ) ), 'w')
# f_sql_err = open( ''.join( (os.path.splitext(f_sql_chk.name)[0], '.err' ) ), 'w')
# subprocess.call( [ context['isql_path'], 'localhost:' + db_main, '-i', f_sql_chk.name ], stdout = f_sql_log, stderr = f_sql_err)
# flush_and_close( f_sql_log )
# flush_and_close( f_sql_err )
#
# last_generated_repl_segment = 0
#
# with open(f_sql_err.name,'r') as f:
#     for line in f:
#         print('UNEXPECTED STDERR in initial SQL: ' + line)
#         MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0
#
# if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> initial SQL script finished w/o errors
#
#     ##############################################################################
#     ### W A I T   U N T I L   R E P L I C A   B E C O M E S   A C T U A L ###
#     ##############################################################################
#     wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-1' )
#
#     # return initial state of master DB:
#     # remove all DB objects (tables, views, ...):
#     # --------------------------------------------
#     sql_clean_ddl = os.path.join(context['files_location'],'drop-all-db-objects.sql')
#
#     f_clean_log=open( os.path.join(context['temp_directory'],'drop-all-db-objects-gh_6909.log'), 'w')
#     f_clean_err=open( ''.join( ( os.path.splitext(f_clean_log.name)[0], '.err') ), 'w')
#     subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-i', sql_clean_ddl], stdout=f_clean_log, stderr=f_clean_err )
#     flush_and_close(f_clean_log)
#     flush_and_close(f_clean_err)
#
#     with open(f_clean_err.name,'r') as f:
#         for line in f:
#             print('UNEXPECTED STDERR in cleanup SQL: ' + line)
#             MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0
#
#     with open(f_clean_log.name,'r') as f:
#         for line in f:
#             # show number of dropped objects
#             print(line)
#
# if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> previous SQL script finished w/o errors
#
#     ##############################################################################
#     ### W A I T   U N T I L   R E P L I C A   B E C O M E S   A C T U A L ###
#     ##############################################################################
#     wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-2' )
#
#     f_main_meta_sql=open( os.path.join(context['temp_directory'],'db_main_meta_gh_6909.sql'), 'w')
#     subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_main_meta_sql, stderr=subprocess.STDOUT )
#     flush_and_close( f_main_meta_sql )
#
#     f_repl_meta_sql=open( os.path.join(context['temp_directory'],'db_repl_meta_gh_6909.sql'), 'w')
#     subprocess.call( [context['isql_path'], 'localhost:' + db_repl, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_repl_meta_sql, stderr=subprocess.STDOUT )
#     flush_and_close( f_repl_meta_sql )
#
#     db_main_meta=open(f_main_meta_sql.name, 'r')
#     db_repl_meta=open(f_repl_meta_sql.name, 'r')
#
#     diffmeta = ''.join(difflib.unified_diff(
#         db_main_meta.readlines(),
#         db_repl_meta.readlines()
#     ))
#     db_main_meta.close()
#     db_repl_meta.close()
#
#     f_meta_diff=open( os.path.join(context['temp_directory'],'db_meta_diff_gh_6909.txt'), 'w', buffering = 0)
#     f_meta_diff.write(diffmeta)
#     flush_and_close( f_meta_diff )
#
#     # Following must issue only TWO rows:
#     # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_main]' ... */
#     # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_repl]' ... */
#     # Only these lines will be suppressed further (see subst. section):
#     with open(f_meta_diff.name, 'r') as f:
#         for line in f:
#             if line[:1] in ('-', '+') and line[:3] not in ('---','+++'):
#                 print('UNEXPECTED METADATA DIFF.: ' + line)
#
# ######################
# ### A C H T U N G ###
# ######################
# # MANDATORY, OTHERWISE REPLICATION GETS STUCK ON SECOND RUN OF THIS TEST
# # WITH 'ERROR: Record format with length 68 is not found for table TEST':
# runProgram('gfix', ['-sweep', 'localhost:' + db_repl])
# runProgram('gfix', ['-sweep', 'localhost:' + db_main])
# #######################
#
# # cleanup:
# ##########
# cleanup( (f_sql_chk, f_sql_log, f_sql_err,f_clean_log,f_clean_err,f_main_meta_sql,f_repl_meta_sql,f_meta_diff) )
#
#---
act_1 = python_act('db_1', substitutions=substitutions_1)

expected_stdout_1 = """
POINT-1 FOUND message about replicated segment.
Start removing objects
Finish. Total objects removed
POINT-2 FOUND message about replicated segment.
"""

@pytest.mark.version('>=4.0.1')
@pytest.mark.platform('Windows')
def test_1(act_1: Action):
    pytest.fail("Test not IMPLEMENTED")