mirror of https://github.com/FirebirdSQL/firebird-qa.git synced 2025-01-22 13:33:07 +01:00

More python tests

This commit is contained in:
Pavel Císař 2021-11-19 20:16:48 +01:00
parent 1ae09a95b9
commit dd67d1fcba
6 changed files with 972 additions and 405 deletions

View File

@ -2,42 +2,50 @@
#
# id: bugs.core_4236
# title: Database shutdown is reported as successfully completed before all active connections are in fact interrupted
# description:
# Test restores a database with a single table of the following DDL:
# create table test(s varchar(1000));
# create index test_s on test(s);
# Then we asynchronously start several ISQL attachments which do a 'heavy DML' job: insert a lot of rows into this table.
# After some delay (IMO, it should be at least 15..20 seconds) we start the SHUTDOWN process, but with target mode = 'single'
# instead of 'full'.
# After control returns from the shutdown command, we can ensure that the database accepts no access at all and its file is closed
# - this is done by calling the FBSVCMGR utility with arguments: action_repair rpr_validate_db rpr_full. This launches the process
# of database validation, and it requires exclusive access, like 'gfix -v -full'.
# If validation is able to open the database in exclusive mode and do its job, then NO output at all will be produced.
# Any problem with exclusive DB access will lead to an error with text like: "database <db_file> shutdown".
# Finally, we check that:
# 1) the output of validation is really EMPTY - no rows should be present between the two intentionally added lines
# ("Validation start" and "validation finish" - they are added only to improve visual perception of the log);
# 2) every launched ISQL was really able to perform its task: at least to insert 100 rows into the table; this result should
# be reflected in its log by the message 'INS_PROGRESS ...' - it is suspended from the EB every 100 rows;
# 3) every launched ISQL really received an exception with SQLSTATE = HY000 - it also should be present at the end of the ISQL log.
#
# Checked on WI-V3.0.0.32253, SS/SC/CS (OS=Win XP), with 30 attachments that worked for 30 seconds.
# NB: an issue about hanging CS was found during this test's implementation; fixed here:
# http://sourceforge.net/p/firebird/code/62737
#
# Refactored 13-aug-2020:
# the validation result is verified by inspecting the difflib.unified_diff() result between the firebird.log content taken
# before and after validation: it must contain the phrase "Validation finished: 0 errors"
# (we check both that validation *did* complete and the absence of errors in the DB).
# Checked on 4.0.0.2144 CS, 4.0.0.2151 SS.
#
# tracker_id: CORE-4236
# min_versions: ['3.0']
# versions: 3.0
# qmid: None
from __future__ import annotations
from typing import List
import pytest
from subprocess import run, CompletedProcess, STDOUT, PIPE
import re
import time
from threading import Thread, Barrier, Lock
from difflib import unified_diff
from firebird.qa import db_factory, python_act, Action
from firebird.driver import ShutdownMode, ShutdownMethod, SrvRepairFlag
# version: 3.0
# resources: None
@ -58,32 +66,32 @@ db_1 = db_factory(from_backup='core4236.fbk', init=init_script_1)
# import re
# import difflib
# from fdb import services
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
#
# db_file=db_conn.database_name
# db_conn.close()
#
# PLANNED_DML_ATTACHMENTS = 20
# WAIT_FOR_ALL_CONNECTIONS_START_JOB = 20
#
# #-----------------------------------
#
# def flush_and_close(file_handle):
# # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os
#
# file_handle.flush()
# os.fsync(file_handle.fileno())
#
# file_handle.close()
#
# #--------------------------------------------
#
# def cleanup( f_names_list ):
# global os
# for i in range(len( f_names_list )):
@ -91,11 +99,11 @@ db_1 = db_factory(from_backup='core4236.fbk', init=init_script_1)
# os.remove( f_names_list[i] )
# if os.path.isfile( f_names_list[i]):
# print('ERROR: can not remove file ' + f_names_list[i])
#
# #--------------------------------------------
#
# def svc_get_fb_log( f_fb_log ):
#
# global subprocess
# subprocess.call( [ context['fbsvcmgr_path'],
# "localhost:service_mgr",
@ -104,9 +112,9 @@ db_1 = db_factory(from_backup='core4236.fbk', init=init_script_1)
# stdout=f_fb_log, stderr=subprocess.STDOUT
# )
# return
#
# #--------------------------------------------
#
# dmltxt='''
# set bail on;
# set list on;
@ -118,44 +126,44 @@ db_1 = db_factory(from_backup='core4236.fbk', init=init_script_1)
# while ( n > 0 ) do
# begin
# insert into test(s) values( rpad('', 500, uuid_to_char(gen_uuid())) );
#
# ins_progress = ins_progress + 1;
# if ( mod(ins_progress, 100) = 0 ) then suspend;
#
# n = n - 1;
# end
# end
# ^ set term ;^
# quit;
# '''
#
# f_sql_cmd=open( os.path.join(context['temp_directory'],'tmp_dml_4236.sql'), 'w')
# f_sql_cmd.write(dmltxt)
# flush_and_close(f_sql_cmd)
#
# isql_logs_list = []
# isql_pids_list = []
#
# ########################################################################################
# # S T A R T I N G S E V E R A L I S Q L s W I T H H E A V Y D M L J O B
# ########################################################################################
#
# for i in range(0, PLANNED_DML_ATTACHMENTS):
# sqllog=open( os.path.join(context['temp_directory'],'tmp_dml_4236_'+str(i)+'.log'), 'w')
# isql_logs_list.append(sqllog)
#
# for i in range(len(isql_logs_list)):
# p_isql=Popen( [ context['isql_path'] , "localhost:"+db_file, "-i", f_sql_cmd.name ],
# stdout=isql_logs_list[i], stderr=subprocess.STDOUT
# )
# isql_pids_list.append(p_isql)
#
# # Delay: let ISQL sessions do their job:
# ########
# time.sleep( WAIT_FOR_ALL_CONNECTIONS_START_JOB )
#
# # Move database to shutdown with the ability to run validation after it (prp_sm_single):
#
# ###########################################################################################
# # S H U T D O W N D A T A B A S E W I T H T A R G E T A C C E S S = 'S I N G L E'
# ###########################################################################################
@ -166,26 +174,26 @@ db_1 = db_factory(from_backup='core4236.fbk', init=init_script_1)
# "prp_force_shutdown", "0"],
# stdout=f_shutdown_log, stderr=subprocess.STDOUT)
# flush_and_close( f_shutdown_log )
#
# # At this point no further I/O should be inside database, including internal engine actions
# # that relate to backouts. This means that we *must* be able to run DB validation in
# # _exclusive_ mode, like gfix -v -full does.
#
# # Only for DEBUG: when this line is uncommented, validation should FAIL with message: database <db_file> shutdown.
# # Normally this line should be commented.
# # conx = kdb.connect(dsn='localhost:'+db_file, user='SYSDBA', password='masterkey')
#
# # ........................ get firebird.log _before_ validation ..............................
# f_fblog_before=open( os.path.join(context['temp_directory'],'tmp_4236_fblog_before.txt'), 'w')
# svc_get_fb_log( f_fblog_before )
# flush_and_close( f_fblog_before )
#
# # Run validation that requires exclusive database access.
# # This process normally should produce NO output at all, it is "silent".
# # If the database is currently in use by the engine or some attachments then it should fail
# # with message "database <db_file> shutdown."
#
# ######################################################################
# # V A L I D A T I O N W I T H E X C L U S I V E A C C E S S
# ######################################################################
@ -195,78 +203,79 @@ db_1 = db_factory(from_backup='core4236.fbk', init=init_script_1)
# "rpr_validate_db", "rpr_full"],
# stdout=f_validate_log, stderr=subprocess.STDOUT)
# flush_and_close( f_validate_log )
#
# # ........................ get firebird.log _after_ validation ..............................
# f_fblog_after=open( os.path.join(context['temp_directory'],'tmp_4236_fblog_after.txt'), 'w')
# svc_get_fb_log( f_fblog_after )
# flush_and_close( f_fblog_after )
#
#
# # Compare firebird.log versions BEFORE and AFTER validation:
# ######################
#
# oldfb=open(f_fblog_before.name, 'r')
# newfb=open(f_fblog_after.name, 'r')
#
# difftext = ''.join(difflib.unified_diff(
# # oldfb.readlines(),
# newfb.readlines()
# ))
# oldfb.close()
# newfb.close()
#
# f_diff_txt=open( os.path.join(context['temp_directory'],'tmp_4236_diff.txt'), 'w')
# f_diff_txt.write(difftext)
# flush_and_close( f_diff_txt )
#
# # We are interested only in the line that contains the result of validation:
# p=re.compile('validation[ ]+finished(:){0,1}[ ]+\\d+[ ]errors', re.IGNORECASE)
# with open( f_diff_txt.name,'r') as f:
# for line in f:
# if line.startswith('+') and p.search(line):
# print( 'DIFF IN FIREBIRD.LOG: ' + (' '.join(line.split()).upper()) )
#
# #///////////////////////////////////////////////////////////////////////////////////
#
# for i in range(len(isql_pids_list)):
# isql_pids_list[i].terminate()
#
# for i in range(len(isql_logs_list)):
# flush_and_close( isql_logs_list[i] )
#
# actual_dml_attachments = 0
# logged_shutdown_count = 0
#
# for i in range(len(isql_logs_list)):
# f = open(isql_logs_list[i].name, 'r')
# sqllog=f.read()
# f.close()
#
# if 'INS_PROGRESS' in sqllog:
# actual_dml_attachments += 1
# if 'SQLSTATE = HY000' in sqllog:
# logged_shutdown_count += 1
#
# print("Check-1: how many DML attachments really could do their job ?")
# if PLANNED_DML_ATTACHMENTS == actual_dml_attachments:
# print("Result: OK, launched = actual")
# else:
# print("Result: BAD, launched<>actual")
#
# print("Check-2: how many sessions got SQLSTATE = HY000 on shutdown ?")
# if PLANNED_DML_ATTACHMENTS == logged_shutdown_count:
# print("Result: OK, launched = actual")
# else:
# print("Result: BAD, launched<>actual")
#
# # CLEANUP:
# ##########
# f_list = [x.name for x in [ f_sql_cmd,f_shutdown_log,f_validate_log,f_fblog_before,f_fblog_after,f_diff_txt ] + isql_logs_list ] #### + [ isql_logs_list ] ]
# cleanup( f_list )
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
expected_stdout_1 = """
DIFF IN FIREBIRD.LOG: + VALIDATION FINISHED: 0 ERRORS, 0 WARNINGS, 0 FIXED
@ -274,11 +283,114 @@ expected_stdout_1 = """
Result: OK, launched = actual
Check-2: how many sessions got SQLSTATE = HY000 on shutdown ?
Result: OK, launched = actual
"""
"""
def isql_job(act: Action, b: Barrier, lock: Lock, result_list: List[str]):
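# Worker body (runs in a thread): wait on the barrier so all sessions start together, then run
# one ISQL attachment that keeps inserting rows until it is interrupted by the shutdown;
# its combined stdout/stderr is appended to the shared result_list under the lock.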
dml_script = """
set bail on;
set list on;
set term ^;
execute block returns(ins_progress int) as
declare n int = 100000;
begin
ins_progress=0;
while (n > 0) do
begin
insert into test(s) values(rpad('', 500, uuid_to_char(gen_uuid())));
ins_progress = ins_progress + 1;
if (mod(ins_progress, 100) = 0) then suspend;
n = n - 1;
end
end
^ set term ;^
quit;
"""
b.wait()
result: CompletedProcess = run([act.vars['isql'], '-user', act.db.user,
'-password', act.db.password, act.db.dsn],
input=dml_script, encoding='utf8',
stdout=PIPE, stderr=STDOUT)
with lock:
result_list.append(result.stdout)
@pytest.mark.version('>=3.0')
def test_1(act_1: Action, capsys):
PLANNED_DML_ATTACHMENTS = 20
WAIT_FOR_ALL_CONNECTIONS_START_JOB = 10
lock = Lock()
threads = []
result_list = []
# Start multiple ISQL instances with heavy DML job
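# The barrier is sized to attachments + 1: the main thread waits on it too (b.wait() below),
# so no worker launches its ISQL before all threads are ready.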
b = Barrier(PLANNED_DML_ATTACHMENTS + 1)
for i in range(PLANNED_DML_ATTACHMENTS):
isql_thread = Thread(target=isql_job, args=[act_1, b, lock, result_list])
threads.append(isql_thread)
isql_thread.start()
b.wait()
time.sleep(WAIT_FOR_ALL_CONNECTIONS_START_JOB)
with act_1.connect_server() as srv:
# Move database to shutdown while retaining the ability to run validation afterwards (prp_sm_single)
srv.database.shutdown(database=str(act_1.db.db_path), mode=ShutdownMode.SINGLE,
method=ShutdownMethod.FORCED, timeout=0)
# get firebird.log _before_ validation
srv.info.get_log()
log_before = srv.readlines()
# At this point no further I/O should be inside database, including internal engine actions
# that relate to backouts. This means that we *must* be able to run DB validation in
# _exclusive_ mode, like gfix -v -full does.
#
# Run validation that requires exclusive database access.
# This process normally should produce NO output at all, it is "silent".
# If the database is currently in use by the engine or some attachments then it should fail
# with message "database <db_file> shutdown."
try:
srv.database.repair(database=str(act_1.db.db_path),
flags=SrvRepairFlag.FULL | SrvRepairFlag.VALIDATE_DB)
except Exception as exc:
print(f'Database repair failed with: {exc}')
#
# get firebird.log _after_ validation
srv.info.get_log()
log_after = srv.readlines()
# bring database online
srv.database.bring_online(database=str(act_1.db.db_path))
# At this point, threads should be dead
for thread in threads:
thread.join(1)
if thread.is_alive():
print(f'Thread {thread.ident} is still alive')
# Compare logs
log_diff = list(unified_diff(log_before, log_after))
# We are interested only in lines that contain the result of validation:
p = re.compile('validation[ ]+finished(:){0,1}[ ]+\\d+[ ]errors', re.IGNORECASE)
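# In a unified diff, lines added to firebird.log start with '+'; the expected match here is
# '+ Validation finished: 0 errors, 0 warnings, 0 fixed' (see expected_stdout_1 above).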
for line in log_diff:
if line.startswith('+') and p.search(line):
print('DIFF IN FIREBIRD.LOG: ' + (' '.join(line.split()).upper()))
#
actual_dml_attachments = 0
logged_shutdown_count = 0
for sqllog in result_list:
if 'INS_PROGRESS' in sqllog:
actual_dml_attachments += 1
if 'SQLSTATE = HY000' in sqllog:
logged_shutdown_count += 1
#
print("Check-1: how many DML attachments really could do their job ?")
if PLANNED_DML_ATTACHMENTS == actual_dml_attachments:
print("Result: OK, launched = actual")
else:
print("Result: BAD, launched<>actual")
#
print("Check-2: how many sessions got SQLSTATE = HY000 on shutdown ?")
if PLANNED_DML_ATTACHMENTS == logged_shutdown_count:
print("Result: OK, launched = actual")
else:
print("Result: BAD, launched<>actual")
# Check
act_1.expected_stdout = expected_stdout_1
act_1.stdout = capsys.readouterr().out
assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@ -2,32 +2,33 @@
#
# id: bugs.core_4276
# title: Error on create table with "CHARACTER SET DOS775" field
# description:
# Confirmed problem on 2.5.3.26780:
# Statement failed, SQLSTATE = 2C000
# bad parameters on attach or create database
# -CHARACTER SET DOS775 is not defined
# Checked on 3.0.0.32136 RC1 - all OK.
#
# 02-mar-2021. Re-implemented in order to be able to run this test on Linux.
# Test encodes to UTF8 all needed statements (SET NAMES; CONNECT; DDL and DML) and stores this text in a .sql file.
# NOTE: 'SET NAMES' contains the character set that must be used for reproducing the problem (DOS775 in this test).
# Then ISQL is launched in a separate (child) process which performs all necessary actions (using the required charset).
# The result is redirected to log(s) which are opened further via codecs.open(...encoding='cp775').
# Finally, their content is converted to UTF8 for showing in expected_stdout.
#
# Checked on:
# * Windows: 4.0.0.2377, 3.0.8.33420, 2.5.9.27152
# * Linux: 4.0.0.2377, 3.0.8.33415
#
# tracker_id: CORE-4276
# min_versions: ['2.5.5']
# versions: 2.5.5
# qmid: None
import pytest
from pathlib import Path
from firebird.qa import db_factory, python_act, Action, temp_file
# version: 2.5.5
# resources: None
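# The essence of the re-implementation is a charset round-trip: the script is written to disk as
# cp775 bytes and ISQL then reads it with charset DOS775. A minimal sketch of the idea (the helper
# below is illustrative only and is not used by the test itself):
def _cp775_roundtrip_sketch(path: Path) -> None:
    text = "select 'ąčęėįšųūž' from rdb$database;"
    path.write_text(text, encoding='cp775')  # bytes on disk are now DOS775-encoded
    # Decoding with the same codec restores the original text
    assert path.read_text(encoding='cp775') == text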
@ -40,32 +41,32 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1
#---
#
#
# import os
# import codecs
# import subprocess
# import time
#
# db_conn.close()
#
# #--------------------------------------------
#
# def flush_and_close( file_handle ):
# # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os
#
# file_handle.flush()
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno())
# file_handle.close()
#
# #--------------------------------------------
#
# def cleanup( f_names_list ):
# global os
# for i in range(len( f_names_list )):
@ -76,19 +77,19 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# else:
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
# del_name = None
#
# if del_name and os.path.isfile( del_name ):
# os.remove( del_name )
#
# #--------------------------------------------
#
# # Code to be executed further in separate ISQL process:
# #############################
# sql_txt='''
# set bail on;
# set names dos775;
# connect '%(dsn)s' user '%(user_name)s' password '%(user_password)s';
#
# recreate table "ĄČĘĢÆĖŠŚÖÜØ£"(
# "ąčęėįšųūž" varchar(50) character set dos775
# ,"Õisu ja kariste järved" blob sub_type 1 character set dos775
@ -96,72 +97,99 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# commit;
# show table;
# show table "ĄČĘĢÆĖŠŚÖÜØ£";
# insert into "ĄČĘĢÆĖŠŚÖÜØ£"("ąčęėįšųūž", "Õisu ja kariste järved")
# values(
# 'ÓßŌŃõÕµńĶķĻļņĒŅ',
# 'Green - viens no trim primārās krāsas, zaļā tiek uzskatīts diapazontsvetov spektrs ar viļņa
# garumu aptuveni 500-565 nanometri. Sistēma CMYK druka zaļā iegūst, sajaucot dzelteno un
# zilganzaļi (cyan).Dabā, Catalpa - zaļa augs.
# Krāsu zaļie augi ir dabiski, ka cilvēks etalonomzeleni.
# Zaļā koku varde.
# Ir plaši izplatīti dabā. Lielākā daļa augu ir zaļā krāsā, jo tie satur pigmentu fotosintēzes -
# hlorofilu (hlorofils absorbē lielu daļu no sarkano stariem saules spektra, atstājot uztveri
# atstarotās un filtrē zaļā krāsā). Dzīvnieki ar zaļo krāsu tā izmantošanu maskēties fona augiem.'
# );
# set list on;
# set blob all;
# select "ąčęėįšųūž", "Õisu ja kariste järved" as blob_content
# from "ĄČĘĢÆĖŠŚÖÜØ£";
#
# ''' % dict(globals(), **locals())
#
# f_run_sql = open( os.path.join(context['temp_directory'], 'tmp_4276_dos775.sql'), 'w' )
# f_run_sql.write( sql_txt.decode('utf8').encode('cp775') )
# flush_and_close( f_run_sql )
#
# # result: file tmp_4276_dos775.sql is encoded in dos775
#
# f_run_log = open( os.path.splitext(f_run_sql.name)[0]+'.log', 'w')
# subprocess.call( [ context['isql_path'], '-q', '-i', f_run_sql.name ],
# stdout = f_run_log,
# stderr = subprocess.STDOUT
# )
# flush_and_close( f_run_log ) # result: output will be encoded in dos775
#
# with codecs.open(f_run_log.name, 'r', encoding='cp775' ) as f:
# result_in_dos775 = f.readlines()
#
# for i in result_in_dos775:
# print( i.encode('utf8') )
#
# # cleanup:
# ###########
# cleanup( (f_run_sql, f_run_log) )
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
expected_stdout_1 = """
ĄČĘĢÆĖŠŚÖÜØ£
ąčęėįšųūž VARCHAR(50) CHARACTER SET DOS775 Nullable
Õisu ja kariste järved BLOB segment 80, subtype TEXT CHARACTER SET DOS775 Nullable
ąčęėįšųūž ÓßŌŃõÕµńĶķĻļņĒŅ
BLOB_CONTENT 80:0
Green - viens no trim primārās krāsas, zaļā tiek uzskatīts diapazontsvetov spektrs ar viļņa
garumu aptuveni 500-565 nanometri. Sistēma CMYK druka zaļā iegūst, sajaucot dzelteno un
zilganzaļi (cyan).Dabā, Catalpa - zaļa augs.
Krāsu zaļie augi ir dabiski, ka cilvēks etalonomzeleni.
Zaļā koku varde.
Ir plaši izplatīti dabā. Lielākā daļa augu ir zaļā krāsā, jo tie satur pigmentu fotosintēzes -
hlorofilu (hlorofils absorbē lielu daļu no sarkano stariem saules spektra, atstājot uztveri
atstarotās un filtrē zaļā krāsā). Dzīvnieki ar zaļo krāsu izmantošanu maskēties fona augiem.
"""
test_script_1 = temp_file('test-script.sql')
@pytest.mark.version('>=2.5.5')
def test_1(act_1: Action, test_script_1: Path):
test_script_1.write_text("""
recreate table "ĄČĘĢÆĖŠŚÖÜØ£"(
"ąčęėįšųūž" varchar(50) character set dos775
,"Õisu ja kariste järved" blob sub_type 1 character set dos775
);
commit;
show table;
show table "ĄČĘĢÆĖŠŚÖÜØ£";
insert into "ĄČĘĢÆĖŠŚÖÜØ£"("ąčęėįšųūž", "Õisu ja kariste järved")
values(
'ÓßŌŃõÕµńĶķĻļņĒŅ',
'Green - viens no trim primārās krāsas, zaļā tiek uzskatīts diapazontsvetov spektrs ar viļņa
garumu aptuveni 500-565 nanometri. Sistēma CMYK druka zaļā iegūst, sajaucot dzelteno un
zilganzaļi (cyan).Dabā, Catalpa - zaļa augs.
Krāsu zaļie augi ir dabiski, ka cilvēks etalonomzeleni.
Zaļā koku varde.
Ir plaši izplatīti dabā. Lielākā daļa augu ir zaļā krāsā, jo tie satur pigmentu fotosintēzes -
hlorofilu (hlorofils absorbē lielu daļu no sarkano stariem saules spektra, atstājot uztveri
atstarotās un filtrē zaļā krāsā). Dzīvnieki ar zaļo krāsu izmantošanu maskēties fona augiem.'
);
set list on;
set blob all;
select "ąčęėįšųūž", "Õisu ja kariste järved" as blob_content
from "ĄČĘĢÆĖŠŚÖÜØ£";
""", encoding='cp775')
act_1.expected_stdout = expected_stdout_1
act_1.isql(switches=['-q', '-b'], input_file=test_script_1, charset='DOS775')
assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@ -2,23 +2,26 @@
#
# id: bugs.core_4292
# title: Server ignores asynchronous (monitoring or cancellation) requests while preparing a query with lot of windowed functions
# description:
# The preparing stage of the test query lasts a long time even on a powerful host.
# We launch a separate (child) process with ISQL and allow it to start preparing this query for several
# seconds. Then we launch a second child process with ISQL and try to kill the first one.
# Before this ticket was fixed it was possible neither to cancel it by using MON$ATTACHMENTS nor even
# to query MON$ tables at all (until the preparing of this 'complex query' finished).
#
# Checked on WI-V3.0.0.32081, SS / SC / CS. Result: all fine.
# 11.05.2017: checked on WI-T4.0.0.638 - added filtering to messages about shutdown state, see comments below.
#
# tracker_id: CORE-4292
# min_versions: ['3.0']
# versions: 3.0
# qmid: None
import pytest
import subprocess
import time
from pathlib import Path
from firebird.qa import db_factory, python_act, Action, temp_file
# version: 3.0
# resources: None
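# The whole 'killer' logic boils down to one DML statement against the monitoring tables; a minimal
# sketch using the firebird-qa Action API (the helper name is illustrative, not part of the test):
def _kill_other_attachments(act: Action) -> None:
    with act.db.connect() as con:
        cur = con.cursor()
        # Deleting a MON$ATTACHMENTS row asks the engine to forcibly close that connection
        cur.execute("delete from mon$attachments"
                    " where mon$attachment_id <> current_connection"
                    " and mon$system_flag is distinct from 1")
        con.commit()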
@ -35,28 +38,28 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# import subprocess
# from subprocess import Popen
# import time
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
#
# db_conn.close()
#
# #-----------------------------------
#
# def flush_and_close(file_handle):
# # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os
#
# file_handle.flush()
# os.fsync(file_handle.fileno())
#
# file_handle.close()
#
# #--------------------------------------------
#
# def cleanup( f_names_list ):
# global os
# for i in range(len( f_names_list )):
@ -64,9 +67,9 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# os.remove( f_names_list[i] )
# if os.path.isfile( f_names_list[i]):
# print('ERROR: can not remove file ' + f_names_list[i])
#
# #--------------------------------------------
#
# heavy_sql = '''
# set bail on;
# set list on;
@ -113,7 +116,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# ) f
# )
# -- select max(r) r from b group by fid having max(r) < 6; -- ok
#
# ,c
# as (
# select fid, rnm, rid, iif(rid is null, 1, r) r
@ -152,7 +155,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# ) f
# )
# -- select * from c -- ok
#
# ,d
# as (
# select fid, rnm, rid, iif(rid is null, 1, r) r
@ -193,19 +196,19 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# select * from d rows 0;
# set count off;
# select 'WORKER FINISHED TOO FAST! DELAY IN TEST MUST BE REDUCED!' msg from rdb$database;
#
# '''
# f_worker_sql=open(os.path.join(context['temp_directory'],'tmp_worker_4292.sql'),'w')
# f_worker_sql.write(heavy_sql)
# flush_and_close( f_worker_sql )
#
# f_worker_log=open( os.path.join(context['temp_directory'],'tmp_worker_4292.log'), 'w')
#
# # Execute a child program in a new process, redirecting STDERR to the same target as of STDOUT:
# p_worker = Popen([ context['isql_path'], dsn, "-i", f_worker_sql.name], stdout=f_worker_log, stderr=subprocess.STDOUT )
#
# time.sleep(4)
#
# killer_sql='''
# set list on;
# set count on;
@ -224,7 +227,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# a.mon$attachment_id <> current_connection
# and a.mon$system_flag is distinct from 1
# ;
#
# delete from mon$attachments
# where
# mon$attachment_id <> current_connection
@ -237,7 +240,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# mon$system_flag as deleted_mon_sys_flag
# ;
# '''
#
# # 11.05.2017, FB 4.0 only!
# # Following messages can appear after 'connection shutdown'
# # (letter from dimitr, 08-may-2017 20:41):
@ -245,66 +248,245 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# # isc_att_shut_idle: Idle timeout expired
# # isc_att_shut_db_down: Database is shutdown
# # isc_att_shut_engine: Engine is shutdown
#
#
# f_killer_sql=open( os.path.join(context['temp_directory'],'tmp_killer_4292.sql'), 'w')
# f_killer_sql.write(killer_sql)
# flush_and_close( f_killer_sql )
#
# # Execute a child program in a new process, redirecting STDERR to the same target as of STDOUT:
#
# f_killer_log=open( os.path.join(context['temp_directory'],'tmp_killer_4292.log'), 'w')
# p_killer = Popen([context['isql_path'] , dsn, "-i" , f_killer_sql.name], stdout=f_killer_log, stderr=subprocess.STDOUT )
#
# # Make delay at least on 6 seconds after that point.
# # Otherwise temp database will not be removed and we get:
# # Exception AttributeError: "'Connection' object has no attribute '_Connection__ic'"
# # in <bound method Connection.__del__ of <fdb.fbcore.Connection object at 0x023E4850>> ignored
# # Test cleanup: Exception raised while dropping database.
#
# time.sleep(6)
#
# p_worker.terminate()
# flush_and_close( f_worker_log )
#
# p_killer.terminate()
# flush_and_close( f_killer_log )
#
# with open( f_killer_log.name,'r') as f:
# for line in f:
# if line.split():
# print('KILLER LOG: ' + line)
#
# with open( f_worker_log.name,'r') as f:
# for line in f:
# if line.split():
# print('WORKER LOG: ' + line)
#
#
# # CLEANUP
# #########
# time.sleep(1)
# cleanup( [i.name for i in (f_worker_sql,f_worker_log,f_killer_sql,f_killer_log)] )
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
KILLER LOG: Found worker ? 1
KILLER LOG: Records affected: 1
KILLER LOG: DELETED_MON_ATT_ID 1
KILLER LOG: DELETED_MON_USER SYSDBA
KILLER LOG: DELETED_MON_PROTOCOL tcp
KILLER LOG: DELETED_MON_PROCESS isql
KILLER LOG: DELETED_MON_SYS_FLAG 0
WORKER LOG: Statement failed, SQLSTATE = 08003
WORKER LOG: connection shutdown
"""
act_1 = python_act('db_1', substitutions=substitutions_1)
expected_stdout_1_killer = """
Found worker ? 1
Records affected: 1
DELETED_MON_ATT_ID 1
DELETED_MON_USER SYSDBA
DELETED_MON_PROTOCOL tcp
DELETED_MON_PROCESS isql
DELETED_MON_SYS_FLAG 0
"""
expected_stdout_1_worker = """
Statement failed, SQLSTATE = 08003
connection shutdown
"""
heavy_script_1 = temp_file('heavy_script.sql')
heavy_output_1 = temp_file('heavy_script.out')
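# Both temp_file() fixtures resolve to per-test scratch Paths inside the QA temp directory.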
@pytest.mark.version('>=3.0')
def test_1(act_1: Action, heavy_script_1: Path, heavy_output_1: Path):
killer_sql = """
set list on;
set count on;
/*
select a.*, s.*
from mon$attachments a
left join mon$statements s on a.mon$attachment_id = s.mon$attachment_id
where
a.mon$attachment_id <> current_connection
and a.mon$system_flag is distinct from 1
;
*/
select count(*) as "Found worker ?"
from mon$attachments a
where
a.mon$attachment_id <> current_connection
and a.mon$system_flag is distinct from 1
;
delete from mon$attachments
where
mon$attachment_id <> current_connection
and mon$system_flag is distinct from 1
returning
sign(mon$attachment_id) as deleted_mon_att_id,
mon$user as deleted_mon_user,
iif(mon$remote_protocol containing 'tcp', 'tcp', null) as deleted_mon_protocol,
iif(mon$remote_process containing 'isql', 'isql', null) as deleted_mon_process,
mon$system_flag as deleted_mon_sys_flag
;
"""
heavy_script_1.write_text("""
set bail on;
set list on;
set count on;
with a as (
select rf.rdb$field_id fid, rr.rdb$relation_id rid, rr.rdb$relation_name rnm
from rdb$relation_fields rf join rdb$relations rr on rf.rdb$relation_name=rr.rdb$relation_name
)
,b as (
select fid, rnm, rid, iif(rid is null, 1, r) r
from (--f
select fid, rnm, rid,
iif(lag(fid) over(partition by rid order by fid) is not null
and lag(r) over(partition by rid order by fid) >= r
, r + 1, r) r
from (--e
select fid, rnm, rid,
iif(lag(fid) over(partition by rid order by fid) is not null
and lag(r) over(partition by rid order by fid) >= r
, r + 1, r) r
from (--d
select fid, rnm, rid,
iif(lag(fid) over(partition by rid order by fid) is not null
and lag(r) over(partition by rid order by fid) >= r
, r + 1, r) r
from (--c
select fid, rnm, rid,
iif(lag(fid) over(partition by rid order by fid) is not null
and lag(r) over(partition by rid order by fid) >= r
,r + 1, r) r
from (--b
select fid, rnm, rid,
iif( lag(fid) over(partition by rid order by fid) is not null
and lag(r) over(partition by rid order by fid) >= r
,r + 1, r) r
from (
select a.*, 1 r
from a
) a
) b
) c
) d
) e
) f
)
-- select max(r) r from b group by fid having max(r) < 6; -- ok
,c
as (
select fid, rnm, rid, iif(rid is null, 1, r) r
from (--f
select fid, rnm, rid,
iif(lag(fid) over(partition by rid order by fid) is not null
and lag(r) over(partition by rid order by fid) >= r
, r + 1, r) r
from (--e
select fid, rnm, rid,
iif(lag(fid) over(partition by rid order by fid) is not null
and lag(r) over(partition by rid order by fid) >= r
, r + 1, r) r
from (--d
select fid, rnm, rid,
iif(lag(fid) over(partition by rid order by fid) is not null
and lag(r) over(partition by rid order by fid) >= r
, r + 1, r) r
from (--c
select fid, rnm, rid,
iif(lag(fid) over(partition by rid order by fid) is not null
and lag(r) over(partition by rid order by fid) >= r
,r + 1, r) r
from (--b
select fid, rnm, rid,
iif( lag(fid) over(partition by rid order by fid) is not null
and lag(r) over(partition by rid order by fid) >= r
,r + 1, r) r
from (
select fid, rnm, rid, max(r) over(partition by fid) r from b
) a
) b
) c
) d
) e
) f
)
-- select * from c -- ok
,d
as (
select fid, rnm, rid, iif(rid is null, 1, r) r
from (--f
select fid, rnm, rid,
iif( lag(fid) over(partition by rid order by fid) is not null
and lag(r) over(partition by rid order by fid) >= r
, r + 1, r) r
from (--e
select fid, rnm, rid,
iif( lag(fid) over(partition by rid order by fid) is not null
and lag(r) over(partition by rid order by fid) >= r
, r + 1, r) r
from (--d
select fid, rnm, rid,
iif( lag(fid) over(partition by rid order by fid) is not null
and lag(r) over(partition by rid order by fid) >= r
, r + 1, r) r
from (--c
select fid, rnm, rid,
iif( lag(fid) over(partition by rid order by fid) is not null
and lag(r) over(partition by rid order by fid) >= r
,r + 1, r) r
from (--b
select fid, rnm, rid,
iif( lag(fid) over(partition by rid order by fid) is not null
and lag(r) over(partition by rid order by fid) >= r
,r + 1, r) r
from (
select fid, rnm, rid, max(r) over(partition by fid) r from c
) a
) b
) c
) d
) e
) f
)
select * from d rows 0;
set count off;
select 'WORKER FINISHED TOO FAST! DELAY IN TEST MUST BE REDUCED!' msg from rdb$database;
""")
with open(heavy_output_1, mode='w') as heavy_out:
p_heavy_sql = subprocess.Popen([act_1.vars['isql'], '-i', str(heavy_script_1),
'-user', act_1.db.user,
'-password', act_1.db.password, act_1.db.dsn],
stdout=heavy_out, stderr=subprocess.STDOUT)
try:
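# Give the worker a few seconds to get deep into the prepare phase before launching the killer script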
time.sleep(4)
act_1.expected_stdout = expected_stdout_1_killer
act_1.isql(switches=[], input=killer_sql)
finally:
p_heavy_sql.terminate()
#
assert act_1.clean_stdout == act_1.clean_expected_stdout
# And worker...
act_1.reset()
act_1.expected_stdout = expected_stdout_1_worker
act_1.stdout = heavy_output_1.read_text()
assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@ -2,9 +2,9 @@
#
# id: bugs.core_4298
# title: fbsvcmgr doesn't recognise sts_record_versions and other sts switches
# description:
# Test creates a table and adds 5 rows to it. Then we run, in a child async process, ISQL with an EB which has ES/EDS
# and will stay paused due to an update conflict in a lock-timeout transaction (Python currently can not operate
# with several attachments which open cursors with DML - this will crash).
# When the EB is paused, we start another ISQL which adds one row to the same table and finishes.
# At this point the table will have 5 record versions and this should be shown in the output of fbsvcmgr when it is run
@ -18,7 +18,7 @@
# Classic keeps the database file opened when the hung ISQL is killed by terminate(), thus leading to an access error
# when fbtest tries to remove the database by dropping it (we get "Windows error (32)"). For that reason we have
# to allow the ISQL that stays paused to finish by Tx timeout expiration and close its own process by itself.
#
# Checked on (28.10.2019):
# 4.0.0.1635 SS: 7.720s.
# 4.0.0.1633 CS: 7.378s.
@ -26,24 +26,26 @@
# 3.0.5.33178 CS: 6.720s.
# 2.5.9.27119 SS: 6.506s.
# 2.5.9.27146 SC: 5.388s.
#
# 13.04.2021. Adapted for run both on Windows and Linux. Checked on:
# Windows: 3.0.8.33445, 4.0.0.2416
# Linux: 3.0.8.33426, 4.0.0.2416
#
#
# tracker_id: CORE-4298
# min_versions: ['2.5.2']
# versions: 2.5.2
# qmid: None
import pytest
from firebird.qa import db_factory, python_act, Action
from firebird.driver import SrvStatFlag
# version: 2.5.2
# resources: None
substitutions_1 = [('Average version length: [\\d]+.[\\d]+, total versions: 5, max versions: 1',
'total versions: 5, max versions: 1')]
init_script_1 = """
recreate table test(id int, x int);
@ -54,7 +56,7 @@ init_script_1 = """
insert into test values(4, 400);
insert into test values(5, 500);
commit;
"""
"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
@ -65,30 +67,30 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# import subprocess
# from subprocess import Popen
# from fdb import services
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
#
# # Obtain engine version:
# engine = str(db_conn.engine_version) # convert to text because 'float' object has no attribute 'startswith'
#
# #--------------------------------------------
#
# def flush_and_close( file_handle ):
# # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os
#
# file_handle.flush()
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno())
# file_handle.close()
#
# #--------------------------------------------
#
# def cleanup( f_names_list ):
# global os
# for i in range(len( f_names_list )):
@ -100,12 +102,12 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
# print('type(f_names_list[i])=',type(f_names_list[i]))
# del_name = None
#
# if del_name and os.path.isfile( del_name ):
# os.remove( del_name )
#
# #--------------------------------------------
#
# sqltxt='''
# set term ^;
# execute block as
@ -115,8 +117,8 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# end ^
# set term ;^
# commit;
#
#
# commit;
# set transaction lock timeout 15;
# update test set x = -x;
@ -126,69 +128,69 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# execute statement 'update test set x = -x'
# on external 'localhost:' || rdb$get_context('SYSTEM','DB_NAME')
# as user '%s' password '%s' role 'TMP$R4298';
# when any do
# begin
# end
# end
# ^
# set term ;^
# rollback;
#
# ''' % (user_name, user_password)
#
#
# f_hanged_sql=open( os.path.join(context['temp_directory'],'tmp_4298_hang.sql'), 'w')
# f_hanged_sql.write(sqltxt)
# flush_and_close( f_hanged_sql )
#
# f_hanged_log=open( os.path.join(context['temp_directory'],'tmp_4298_hang.log'), 'w')
#
# # Execute a child program in a new process, redirecting STDERR to the same target as of STDOUT:
# p_hang = Popen([context['isql_path'], dsn, "-i" , f_hanged_sql.name], stdout=f_hanged_log, stderr=subprocess.STDOUT)
#
# time.sleep(1)
#
# runProgram('isql',[dsn],'insert into test(id, x) values(-1, -100); commit;')
#
# this_fdb='$(DATABASE_LOCATION)bugs.core_4298.fdb'
#
# f_stat_log=open( os.path.join(context['temp_directory'],'tmp_4298_dbstat.log'), 'w')
# f_stat_err=open( os.path.join(context['temp_directory'],'tmp_4298_dbstat.err'), 'w')
#
# subprocess.call( [ context['fbsvcmgr_path'],"localhost:service_mgr",
# "action_db_stats", "dbname", this_fdb,
# "action_db_stats", "dbname", this_fdb,
# "sts_record_versions"
# ],
# stdout=f_stat_log,
# stderr=f_stat_err
# )
#
# flush_and_close( f_stat_log )
# flush_and_close( f_stat_err )
#
# # do NOT remove this pause: Classic 3.0 keeps database opened even after we kill hanged ISQL by p_hang.terminate().
# # So we have to wait long enough for the 1st ISQL process (which currently hangs for about 4 seconds) to be CORRECTLY closed
# # by itself:
#
# time.sleep(3)
#
# # These kill and close commands are also needed here, despite that corresponding ISQL has been already closed itself.
# # It is so at least for Cs 3.0:
# p_hang.terminate()
# flush_and_close( f_hanged_log )
#
# # ERRORS log of obtaining DB statistics should be EMPTY:
# with open( f_stat_err.name,'r') as f:
# for line in f:
# if line.split():
# print("UNEXPECTED STDERR for 'action_db_stats': " + line)
#
# # STDOUT of obtaining DB statistics should NOT be EMPTY:
# with open(f_stat_log.name,'r') as f:
# for line in f:
# if 'versions:' in line.lower():
# print(line)
#
# #####################################################################
# # 28.10.2019: add full shutdown to forcedly drop all attachments.
# ## ||||||||||||||||||||||||||||
@ -203,9 +205,9 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# ## This means that one need to kill all connections to prevent from exception on cleanup phase:
# ## SQLCODE: -901 / lock time-out on wait transaction / object <this_test_DB> is in use
# ## #############################################################################################
#
# f_shutdown_log=open( os.path.join(context['temp_directory'],'tmp_4298_shutdown.log'), 'w')
#
# subprocess.call( [context['fbsvcmgr_path'], "localhost:service_mgr",
# "action_properties", "prp_shutdown_mode", "prp_sm_full", "prp_shutdown_db", "0", "dbname", this_fdb,
# ],
@ -218,34 +220,41 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# stdout = f_shutdown_log,
# stderr = subprocess.STDOUT
# )
#
# flush_and_close( f_shutdown_log )
#
# with open( f_shutdown_log.name,'r') as f:
# for line in f:
# if line.split():
# print( 'UNEXPECTED OUTPUT IN DB SHUTDOWN LOG: ' + (' '.join(line.split()).upper()) )
#
#
# # Cleanup:
# ##########
# # do NOT remove this pause, otherwise some of the logs will not be available for deletion and the test will finish with
# # Exception raised while executing Python test script. exception: WindowsError: 32
# time.sleep(1)
#
# cleanup( (f_hanged_sql, f_hanged_log, f_stat_log, f_stat_err, f_shutdown_log) )
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
expected_stdout_1 = """
Average version length: 9.00, total versions: 5, max versions: 1
"""
"""
@pytest.mark.version('>=2.5.2')
def test_1(act_1: Action):
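# One update per row leaves a back-version behind, so the statistics run below
# must report 'total versions: 5, max versions: 1' for the 5 rows added by init_script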
with act_1.db.connect() as con:
c = con.cursor()
c.execute('update test set x = -x')
con.commit()
act_1.svcmgr(switches=['localhost:service_mgr', 'user', act_1.db.user,
'password', act_1.db.password, 'action_db_stats', 'dbname',
str(act_1.db.db_path), 'sts_record_versions'])
act_1.stdout = '\n'.join([line for line in act_1.stdout.splitlines() if 'versions:' in line.lower()])
act_1.expected_stdout = expected_stdout_1
assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@ -2,32 +2,34 @@
#
# id: bugs.core_4319
# title: Engine crashes when trace config contains line "connection_id=NN" and we attempt to connect to non-existent database/alias
# description:
# Test receives the version of FB (2.5 or 3.0) and prepares a text file that will serve as the TRACE config.
# This text file will contain trivial content appropriate to the FB version, with a 'connection_id=NNN' key.
#
# Then we make an async start of a trace session using the FBTRACEMGR utility and, after a small pause (~1 sec),
# run ISQL with an attempt to connect to a non-existent alias.
#
# Confirmed crash on WI-T3.0.0.30566 (Alpha 1). Note: the fbsvcmgr utility can not start when the trace config
# contains connection_id: a standard Windows crash report appears with such data:
# ...
# AppName: fbsvcmgr.exe AppVer: 3.0.0.30566 ModName: msvcr100.dll
# ModVer: 10.0.30319.1 Offset: 0008ae6e
# ...
#
# tracker_id: CORE-4319
# min_versions: ['2.5.3']
# versions: 2.5.3
# qmid: None
import pytest
import time
from threading import Thread, Barrier
from firebird.qa import db_factory, python_act, Action
# version: 2.5.3
# resources: None
substitutions_1 = []
init_script_1 = """"""
@ -39,30 +41,30 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# import time
# import subprocess
# from subprocess import Popen
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
#
# # Obtain engine version, 2.5 or 3.0, for make trace config in appropriate format:
# engine=str(db_conn.engine_version)
# db_conn.close()
#
# #-----------------------------------
#
# def flush_and_close(file_handle):
# # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os
#
# file_handle.flush()
# os.fsync(file_handle.fileno())
#
# file_handle.close()
#
# #--------------------------------------------
#
# def cleanup( f_names_list ):
# global os
# for i in range(len( f_names_list )):
@ -70,25 +72,25 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# os.remove( f_names_list[i] )
# if os.path.isfile( f_names_list[i]):
# print('ERROR: can not remove file ' + f_names_list[i])
#
# #--------------------------------------------
#
#
# txt25 = '''# Trace config, format for 2.5. Generated auto, do not edit!
# <database %[\\\\\\\\/]bugs.core_4319.fdb>
# enabled true
# log_errors true
# time_threshold 0
# connection_id 1234
# log_connections true
# </database>
# '''
#
# # NOTES ABOUT TRACE CONFIG FOR 3.0:
# # 1) Header contains `database` clause in different format vs FB 2.5: its data must be enclosed with '{' '}'
# # 2) Name and value must be separated by EQUALITY sign ('=') in FB-3 trace.conf, otherwise we get runtime error:
# # element "<. . .>" have no attribute value set
#
# txt30 = '''# Trace config, format for 3.0. Generated auto, do not edit!
# database=%[\\\\\\\\/]bugs.core_4319.fdb
# {
@ -99,70 +101,112 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# log_connections = true
# }
# '''
#
# f_trccfg=open( os.path.join(context['temp_directory'],'tmp_trace_4319.cfg'), 'w')
# if engine.startswith('2.5'):
# f_trccfg.write(txt25)
# else:
# f_trccfg.write(txt30)
# flush_and_close( f_trccfg )
#
# #####################################################
# # Starting trace session in new child process (async.):
# # Execute a child program in a new process, redirecting STDERR to the same target as of STDOUT:
#
# f_trclog=open( os.path.join(context['temp_directory'],'tmp_trace_4319.log'), 'w')
# p_trace=Popen([ context['fbtracemgr_path'],
# "-se", "localhost:service_mgr",
# "-sta", "-c", f_trccfg.name],
# stdout=f_trclog, stderr=subprocess.STDOUT)
#
# # Wait! Trace session is initialized not instantly!
# time.sleep(1)
#
#
# f_sqltxt=open( os.path.join(context['temp_directory'],'tmp_connect_4319.sql'), 'w')
# f_sqltxt.write("connect 'localhost:some_non_existent' user 'SYSDBA' password 'masterkey'; show database; show version;")
# flush_and_close( f_sqltxt )
#
# f_sqllog=open( os.path.join(context['temp_directory'],'tmp_connect_4319.log'), 'w')
# p_isql = subprocess.call([ context['isql_path'], dsn, "-i", f_sqltxt.name ],
# stdout=f_sqllog, stderr=subprocess.STDOUT)
# flush_and_close( f_sqllog )
#
# # Terminate child process of launched trace session:
#
# time.sleep(1)
#
# p_trace.terminate()
# flush_and_close( f_trclog )
#
# with open( f_trclog.name,'r') as f:
# for line in f:
# print(line.upper())
#
# with open( f_sqllog.name,'r') as f:
# for line in f:
# if 'SQLSTATE = 08001' in line:
# print(line.upper())
#
# # CLEANUP
# ##########
# time.sleep(1)
# cleanup( [i.name for i in (f_sqltxt, f_sqllog, f_trccfg, f_trclog)] )
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
TRACE SESSION ID STARTED
STATEMENT FAILED, SQLSTATE = 08001
"""
act_1 = python_act('db_1', substitutions=substitutions_1)
#expected_stdout_1 = """
#TRACE SESSION ID STARTED
#STATEMENT FAILED, SQLSTATE = 08001
#"""
expected_stderr_1 = """
Rolling back work.
Statement failed, SQLSTATE = 08001
I/O error during "open" operation for file "some_non_existent"
-Error while trying to open file
-No such file or directory
Use CONNECT or CREATE DATABASE to specify a database
Command error: show database
Cannot get server version without database connection
"""
def trace_session(act: Action, b: Barrier):
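# Runs in a separate thread: builds a trace config with the 'connection_id' key (the trigger of the
# original crash), starts a trace session via the services API, then echoes the session output,
# uppercased, until the session is stopped from the main thread.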
cfg30 = ['# Trace config, format for 3.0. Generated auto, do not edit!',
f'database=%[\\\\/]{act.db.db_path.name}',
'{',
' enabled = true',
' time_threshold = 0',
' log_errors = true',
' connection_id = 1234',
' log_connections = true',
'}']
with act.connect_server() as srv:
srv.trace.start(config='\n'.join(cfg30))
b.wait()
for line in srv:
print(line.upper())
@pytest.mark.version('>=3.0')
def test_1(act_1: Action, capsys):
b = Barrier(2)
trace_thread = Thread(target=trace_session, args=[act_1, b])
trace_thread.start()
b.wait()
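# The trace session is now running; trigger the connect to a non-existent alias that used to crash the engine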
try:
act_1.expected_stderr = expected_stderr_1
act_1.isql(switches=['-n'],
input="connect 'localhost:some_non_existent' user 'SYSDBA' password 'masterkey'; show database; show version;")
time.sleep(2)
finally:
with act_1.connect_server() as srv:
for session in list(srv.trace.sessions.keys()):
srv.trace.stop(session_id=session)
trace_thread.join(1.0)
if trace_thread.is_alive():
pytest.fail('Trace thread still alive')
# check that we are still kicking and got expected result from isql
assert act_1.clean_stderr == act_1.clean_expected_stderr

View File

@ -2,29 +2,29 @@
#
# id: bugs.core_4337
# title: gfix -sweep makes "reconnect" when it is removed from mon$attachments by delete command (issued in another window)
# description:
# We create a table with a very long char field and fill it with uuid data.
# Then we create an index for this field and finally - delete all rows.
# Such a table will require considerable time to be swept, about 4 seconds.
#
# After this, we asynchronously launch ISQL, which makes a delay of ~2 seconds inside itself.
# Also, we launch trace for logging.
#
# After this we run 'gfix -sweep' in another process (synchronously).
# When the delay in ISQL expires, the ISQL connection executes a 'DELETE FROM MON$ATTACHMENTS' command
# which should kill the gfix connection. This command is issued with 'set count on' as an additional
# check that isql really could find this record in the mon$attachments table.
#
# The GFIX process should raise the error 'connection shutdown'.
# The trace log should contain a line with the text 'SWEEP_FAILED', and after this line we should NOT
# discover any lines with an 'ATTACH_DATABASE' event - this is especially verified.
#
# Finally, we compare the content of firebird.log: new lines in it should contain messages about
# an interrupted sweep ('error during sweep' and 'connection shutdown').
#
# Checked on WI-V3.0.2.32620 (SS/SC/CS), 4.0.0.420 (SS/SC).
# Total time of test run is ~25 seconds (data filling occupies about 3 seconds).
#
# 11.05.2017: checked on WI-T4.0.0.638 - added filtering to messages about shutdown state, see comments below.
# 26.09.2017: adjusted expected_stdout section to current FB 3.0+ versions: firebird.log now does contain info
# about DB name which was swept (see CORE-5610, "Provide info about database (or alias) which was in use...")
@ -33,19 +33,41 @@
# 30SS, build 3.0.3.32805: OK, 21.063s.
# 40CS, build 4.0.0.748: OK, 31.313s.
# 40SS, build 4.0.0.748: OK, 22.578s.
#
# [pcisar] 19.11.2021
# Small difference in the reimplementation of the sweep killer script:
# on v4.0.0.2496, COMMIT after delete from mon$attachments fails with:
# STATEMENT FAILED, SQLSTATE = HY008, OPERATION WAS CANCELLED
# Without the commit this test PASSES, i.e. sweep is terminated with all outputs as expected
#
# tracker_id: CORE-4337
# min_versions: ['3.0']
# versions: 3.0
# qmid: None
import pytest
import re
import time
import subprocess
from difflib import unified_diff
from threading import Thread, Barrier
from pathlib import Path
from firebird.qa import db_factory, python_act, Action, temp_file
from firebird.driver import DbWriteMode
# version: 3.0
# resources: None
substitutions_1 = [('[ ]+', ' '), ('TRACE_LOG:.* SWEEP_START', 'TRACE_LOG: SWEEP_START'),
('TRACE_LOG:.* SWEEP_FAILED', 'TRACE_LOG: SWEEP_FAILED'),
('TRACE_LOG:.* ERROR AT JPROVIDER::ATTACHDATABASE',
'TRACE_LOG: ERROR AT JPROVIDER::ATTACHDATABASE'),
('.*KILLED BY DATABASE ADMINISTRATOR.*', ''),
('TRACE_LOG:.*GFIX.EXE.*', 'TRACE_LOG: GFIX.EXE'),
('OIT [0-9]+', 'OIT'), ('OAT [0-9]+', 'OAT'), ('OST [0-9]+', 'OST'),
('NEXT [0-9]+', 'NEXT'),
('FIREBIRD.LOG:.* ERROR DURING SWEEP OF .*TEST.FDB.*',
'FIREBIRD.LOG: + ERROR DURING SWEEP OF TEST.FDB')]
init_script_1 = """"""
@ -59,29 +81,29 @@ db_1 = db_factory(page_size=16384, sql_dialect=3, init=init_script_1)
# from subprocess import Popen
# import difflib
# import re
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
# db_file=db_conn.database_name
# engine=str(db_conn.engine_version)
# db_conn.close()
#
# #-----------------------------------
#
# def flush_and_close(file_handle):
# # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os
#
# file_handle.flush()
# os.fsync(file_handle.fileno())
#
# file_handle.close()
#
# #--------------------------------------------
#
# def cleanup( f_names_list ):
# global os
# for i in range(len( f_names_list )):
@ -89,18 +111,18 @@ db_1 = db_factory(page_size=16384, sql_dialect=3, init=init_script_1)
# os.remove( f_names_list[i] )
# if os.path.isfile( f_names_list[i]):
# print('ERROR: can not remove file ' + f_names_list[i])
#
# #--------------------------------------------
#
# def svc_get_fb_log( engine, f_fb_log ):
#
# global subprocess
#
# if engine.startswith('2.5'):
# get_firebird_log_key='action_get_ib_log'
# else:
# get_firebird_log_key='action_get_fb_log'
#
# subprocess.call([ context['fbsvcmgr_path'],
# "localhost:service_mgr",
# get_firebird_log_key
@ -108,12 +130,12 @@ db_1 = db_factory(page_size=16384, sql_dialect=3, init=init_script_1)
# stdout=f_fb_log, stderr=subprocess.STDOUT
# )
# return
#
# #--------------------------------------------
#
# # Change FW to OFF in order to speed up initial data filling:
# ##################
#
# fn_nul = open(os.devnull, 'w')
# subprocess.call([ context['fbsvcmgr_path'], "localhost:service_mgr",
# "action_properties", "prp_write_mode", "prp_wm_async",
@ -122,9 +144,9 @@ db_1 = db_factory(page_size=16384, sql_dialect=3, init=init_script_1)
# stderr = subprocess.STDOUT
# )
# fn_nul.close()
#
# f_work_sql=open( os.path.join(context['temp_directory'],'tmp_work_4337.sql'), 'w')
#
# sql_dml='''
# set list on;
# select current_time from rdb$database;
@ -140,17 +162,17 @@ db_1 = db_factory(page_size=16384, sql_dialect=3, init=init_script_1)
# join rdb$fields f on rf.rdb$field_source=f.rdb$field_name
# where rf.rdb$relation_name=upper('t')
# into w;
#
# while (n>0) do
# insert into t(s01) values( rpad('', :w, uuid_to_char(gen_uuid())) ) returning :n-1 into n;
# end^
# set term ;^
# commit;
# select count(*) check_total_cnt, min(char_length(s01)) check_min_length from t;
#
#
# create index t_s01 on t(s01);
# commit;
#
#
# delete from t;
# commit;
# -- overall time for data filling, index creation and deletion of all rows: ~3 seconds.
# --show database;
# quit;
# '''
#
#
# f_work_sql.write(sql_dml)
# flush_and_close( f_work_sql )
#
#
# f_work_log=open( os.path.join(context['temp_directory'],'tmp_work_4337.log'), 'w')
# f_work_err=open( os.path.join(context['temp_directory'],'tmp_work_4337.err'), 'w')
# subprocess.call( [context['isql_path'], dsn, "-i", f_work_sql.name],
# stdout = f_work_log,
# stderr = f_work_err
# )
#
#
# flush_and_close( f_work_log )
# flush_and_close( f_work_err )
#
#
# # REDUCE the number of cache buffers in the DB header so that the sweep will work as long as possible
# ################################
# '''
#
#
# temporarily disabled, see CORE-5385:
# "Attempt to change number of buffers in DB header leads to crash (either using gfix -b ... or fbsvcmgr prp_page_buffers ... ). Only 4.0 SuperServer is affected."
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
#
# fn_nul = open(os.devnull, 'w')
# subprocess.call([ context['fbsvcmgr_path'], "localhost:service_mgr",
# "action_properties", "prp_page_buffers", "100",
# )
# fn_nul.close()
# '''
#
#
#
#
# # Change FW to ON (in order to make sweep life harder :))
# ##################
#
#
# fn_nul = open(os.devnull, 'w')
# subprocess.call([ context['fbsvcmgr_path'], "localhost:service_mgr",
# "action_properties", "prp_write_mode", "prp_wm_sync",
# stderr = subprocess.STDOUT
# )
# fn_nul.close()
#
#
# f_fblog_before=open( os.path.join(context['temp_directory'],'tmp_4337_fblog_before.txt'), 'w')
# svc_get_fb_log( engine, f_fblog_before )
# flush_and_close( f_fblog_before )
#
#
#
#
# trc30_cfg = '''# Trace config, format for 3.0. Generated auto, do not edit!
# database=%[\\\\\\\\/]bugs.core_4337.fdb
# {
# log_service_query = true
# }
# '''
#
#
# f_trccfg=open( os.path.join(context['temp_directory'],'tmp_trace_4337.cfg'), 'w')
# f_trccfg.write(trc30_cfg)
# flush_and_close( f_trccfg )
#
#
# # Starting trace session in new child process (async.):
# #######################################################
#
#
# f_trclog=open( os.path.join(context['temp_directory'],'tmp_trace_4337.log'), 'w')
# f_trcerr=open( os.path.join(context['temp_directory'],'tmp_trace_4337.err'), 'w')
#
#
# p_trace=Popen([context['fbsvcmgr_path'], "localhost:service_mgr",
# "action_trace_start",
# "trc_cfg", f_trccfg.name],
# stdout=f_trclog,
# stderr=f_trcerr
# )
#
#
# # Wait! The trace session is not initialized instantly!
# time.sleep(1)
#
#
#
#
# # Launch (async.) ISQL which will make small delay and then kill GFIX attachment:
# ######################
#
#
# sql_mon='''
# set list on;
#
#
# recreate table tmp4wait(id int);
# commit;
# insert into tmp4wait(id) values(1);
# commit;
#
#
# set transaction lock timeout 2; ------------------ D E L A Y
# update tmp4wait set id=id;
# select 'Waiting for GFIX start SWEEP' as " " from rdb$database;
# in autonomous transaction do
# begin
# update tmp4wait set id=id;
# when any do
# begin
# -- NOP --
# end
# end
# set count off;
# select 'Finished deleting GFIX process from mon$attachments' as " " from rdb$database;
# '''
#
#
# f_wait_sql=open( os.path.join(context['temp_directory'],'tmp_wait_4337.sql'), 'w')
# f_wait_sql.write(sql_mon)
# flush_and_close( f_wait_sql )
#
#
# f_wait_log=open( os.path.join(context['temp_directory'],'tmp_wait_4337.log'), 'w')
# f_wait_err=open( os.path.join(context['temp_directory'],'tmp_wait_4337.err'), 'w')
# p_isql = subprocess.Popen( [ context['isql_path'], dsn, "-i", f_wait_sql.name],
# stdout = f_wait_log,
# stderr = f_wait_err
# )
#
#
#
#
# # Launch GFIX -SWEEP (sync.). It should be killed by ISQL which we have launched previously
# # after delay in its script will expire:
# ########################################
#
#
# f_gfix_log=open( os.path.join(context['temp_directory'],'tmp_gfix_4337.log'), 'w')
# f_gfix_log.write('Point before GFIX -SWEEP.'+os.linesep)
# f_gfix_log.seek(0,2)
# stdout = f_gfix_log,
# stderr = subprocess.STDOUT
# )
#
#
#
#
# f_gfix_log.seek(0,2)
# f_gfix_log.write('Point after GFIX -SWEEP.')
#
#
# # Small delay to be sure that ISQL was successfully completed.
# ##############
#
#
# time.sleep(2)
#
#
# p_isql.terminate()
#
#
# flush_and_close( f_wait_log )
# flush_and_close( f_wait_err )
# flush_and_close( f_gfix_log )
#
#
#
#
# #####################################################
# # Getting ID of launched trace session and STOP it:
#
#
# # Save active trace session info into file for further parsing it and obtain session_id back (for stop):
# f_trclst=open( os.path.join(context['temp_directory'],'tmp_trace_4337.lst'), 'w')
#
#
# subprocess.call([context['fbsvcmgr_path'], "localhost:service_mgr",
# "action_trace_list"],
# stdout=f_trclst, stderr=subprocess.STDOUT
# )
# f_trclst.close()
#
#
# trcssn=0
# with open( f_trclst.name,'r') as f:
# for line in f:
# trcssn=word
# i=i+1
# break
#
#
# # Result: `trcssn` is ID of active trace session. Now we have to terminate it:
# f_trclst=open(f_trclst.name,'a')
# f_trclst.seek(0,2)
# stdout=f_trclst, stderr=subprocess.STDOUT
# )
# flush_and_close( f_trclst )
#
#
# p_trace.terminate()
# flush_and_close( f_trclog )
# flush_and_close( f_trcerr )
#
#
# # Shut down the DB and bring it back online because some internal server process can still be active!
# # If we skip this step, a runtime error related to dropping the test DB can occur!
# #########################################
#
#
# f_db_reset_log=open( os.path.join(context['temp_directory'],'tmp_reset_4337.log'), 'w')
#
#
# f_db_reset_log.write('Point before DB shutdown.'+os.linesep)
# f_db_reset_log.seek(0,2)
# subprocess.call( [context['fbsvcmgr_path'], "localhost:service_mgr",
# stderr = subprocess.STDOUT
# )
# f_db_reset_log.write(os.linesep+'Point after DB shutdown.'+os.linesep)
#
#
# subprocess.call( [context['fbsvcmgr_path'], "localhost:service_mgr",
# "action_properties", "prp_db_online",
# "dbname", db_file,
# stdout = f_db_reset_log,
# stderr = subprocess.STDOUT
# )
#
#
# f_db_reset_log.write(os.linesep+'Point after DB online.'+os.linesep)
# flush_and_close( f_db_reset_log )
#
#
#
#
# # Get content of firebird.log AFTER test finish.
# #############################
#
#
# f_fblog_after=open( os.path.join(context['temp_directory'],'tmp_4337_fblog_after.txt'), 'w')
# svc_get_fb_log( engine, f_fblog_after )
# flush_and_close( f_fblog_after )
#
#
# # _!_!_!_!_!_!_!_!_!_! do NOT reduce this delay: firebird.log gets new messages NOT instantly !_!_!_!_!_!_!_!_
# # firebird.log can still hold the OLD content if a heavy concurrent workload exists on the same host!
# # ??? DISABLED 18.02.2021, BUT STILL IN DOUBT... time.sleep(5)
#
#
# # Compare firebird.log versions BEFORE and AFTER this test:
# ######################
#
#
# oldfb=open(f_fblog_before.name, 'r')
# newfb=open(f_fblog_after.name, 'r')
#
#
# difftext = ''.join(difflib.unified_diff(
# oldfb.readlines(),
# newfb.readlines()
# ))
# oldfb.close()
# newfb.close()
#
#
# f_diff_txt=open( os.path.join(context['temp_directory'],'tmp_4337_diff.txt'), 'w')
# f_diff_txt.write(difftext)
# flush_and_close( f_diff_txt )
#
#
# # Check logs:
# #############
#
#
# # 1. Log of GFIX -SWEEP should contain:
# '''
# Point before GFIX -SWEEP.
# connection shutdown
# Point after GFIX -SWEEP.
# '''
#
#
# # 11.05.2017, FB 4.0 only!
# # Following messages can appear after 'connection shutdown'
# # (letter from dimitr, 08-may-2017 20:41):
# # isc_att_shut_idle: Idle timeout expired
# # isc_att_shut_db_down: Database is shutdown
# # isc_att_shut_engine: Engine is shutdown
#
#
# with open( f_gfix_log.name,'r') as f:
# for line in f:
# if line.strip():
# print( line.upper() )
#
#
#
#
# # 2. Log of ISQL that was launched to kill 'GFIX -SWEEP' attachment should contain:
# '''
# MSG Waiting for GFIX start SWEEP
# for line in f:
# if line.strip():
# print( 'ISQL LOG: ' + line.upper() )
#
#
#
#
# # 3. Trace log should contain 'SWEEP_FAILED' followed only by a 'DETACH_DATABASE' event after this line:
# '''
# 2016-10-26T21:49:06.4040 (2184:00D33200) SWEEP_FAILED
# C:\\MIX\\Firebird\\fb40\\gfix.exe:4700
# 2061 ms, 17273 read(s), 781 write(s), 538838 fetch(es), 146541 mark(s)
#
#
# 2016-10-26T21:49:06.4040 (2184:00D33200) ERROR AT JProvider::attachDatabase
# C:\\MIX\\FIREBIRD\\QA\\FBT-REPO\\TMP\\BUGS.CORE_4337.FDB (ATT_16, SYSDBA:NONE, NONE, TCPv4:127.0.0.1/1406)
# C:\\MIX\\Firebird\\fb40\\gfix.exe:4700
# 335544856 : connection shutdown
#
#
# 2016-10-26T21:49:06.4040 (2184:00D33200) DETACH_DATABASE
# C:\\MIX\\FIREBIRD\\QA\\FBT-REPO\\TMP\\BUGS.CORE_4337.FDB (ATT_16, SYSDBA:NONE, NONE, TCPv4:127.0.0.1/1406)
# C:\\MIX\\Firebird\\fb40\\gfix.exe:4700
# '''
#
#
# found_sweep_failed=0
# with open( f_trclog.name,'r') as f:
# for line in f:
# if 'SWEEP_FAILED' in line:
# print( 'TRACE_LOG:' + (' '.join(line.split()).upper()) )
# found_sweep_failed = 1
#
#
# if found_sweep_failed == 1 and ('ATTACH_DATABASE' in line):
# print( 'TRACE: ATTACH DETECTED AFTER SWEEP FAILED! ' )
# print( 'TRACE_LOG:' + (' '.join(line.split()).upper()) )
#
#
#
#
# # Output should contain only ONE message with 'SWEEP_FAILED' and NO rows related to ATTACH_DATABASE.
#
#
# # 4. Difference between old and current firebird.log should be like this:
# '''
# +
# +CSPROG Wed Oct 26 21:49:04 2016
# + Sweep is started by SYSDBA
# + Database "C:\\MIX\\FIREBIRD\\QA\\FBT-REPO\\TMP\\BUGS.CORE_4337.FDB"
# + Database "C:\\MIX\\FIREBIRD\\QA\\FBT-REPO\\TMP\\BUGS.CORE_4337.FDB"
# + OIT 21, OAT 22, OST 19, Next 25
# +
# +
# + Error during sweep:
# + connection shutdown
# '''
#
#
# # 5. Log of fbsvcmgr when it shut down the DB and brought it back online should be EMPTY.
# # NB: difflib.unified_diff() can show line(s) that are present in both files without marking them with "+".
# # Usually these are 1-2 lines placed just BEFORE the difference starts.
# # So we have to check the output before displaying diff content: lines that really differ must start with "+".
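# # A minimal illustration of that behaviour (hypothetical data):
# #   difflib.unified_diff(['a\n','b\n'], ['a\n','b\n','c\n']) emits, besides the
# #   '---'/'+++'/'@@' headers, the context lines ' a' and ' b' without any '+' mark,
# #   and only '+c' for the really new line - hence the startswith('+') filter below.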
#
#
# pattern = re.compile("\\+[\\s]+OIT[ ]+[0-9]+,[\\s]*OAT[\\s]+[0-9]+,[\\s]*OST[\\s]+[0-9]+,[\\s]*NEXT[\\s]+[0-9]+")
# # OIT 160, OAT 159, OST 159, Next 161
#
#
# with open( f_diff_txt.name,'r') as f:
# for line in f:
# if line.startswith('+'):
# if 'sweep'.upper() in line.upper() or 'connection'.upper() in line.upper() or pattern.match(line.upper()):
# print( 'FIREBIRD.LOG: ' + (' '.join(line.split()).upper()) )
#
#
# # 6. ERROR logs of ISQL for initial data filling should be EMPTY:
# with open( f_work_err.name,'r') as f:
# for line in f:
# print('UNEXPECTED ERROR IN ISQL WHEN DID INITIAL DATA FILLING: ' + line)
#
#
# # 7. ERROR log of ISQL that made the delay and killed the sweep should be EMPTY:
# with open( f_wait_err.name,'r') as f:
# for line in f:
# print('UNEXPECTED ERROR IN ISQL WHEN MADE DELAY AND WAITING: ' + line)
#
#
#
#
# # Cleanup.
# ###############################
# time.sleep(1)
# cleanup( [i.name for i in (f_work_sql,f_work_log,f_work_err,f_fblog_before,f_trccfg,f_trclog,f_trcerr,f_wait_sql,f_wait_log,f_wait_err,f_gfix_log,f_trclst,f_db_reset_log,f_fblog_after,f_diff_txt) ] )
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
make_garbage_1 = """
set list on;
select current_time from rdb$database;
recreate table t(s01 varchar(4000));
commit;
set term ^;
execute block as
declare n int = 20000;
declare w int;
begin
select f.rdb$field_length
from rdb$relation_fields rf
join rdb$fields f on rf.rdb$field_source=f.rdb$field_name
where rf.rdb$relation_name=upper('t')
into w;
while (n>0) do
insert into t(s01) values( rpad('', :w, uuid_to_char(gen_uuid())) ) returning :n-1 into n;
end^
set term ;^
commit;
select count(*) check_total_cnt, min(char_length(s01)) check_min_length from t;
create index t_s01 on t(s01);
commit;
delete from t;
commit;
-- overall time for data filling, index creation and deletion of all rows: ~3 seconds.
-- This database requires about 4 seconds to be swept (checked on P-IV 3.0 GHz).
select current_time from rdb$database;
--show database;
quit;
"""
expected_stdout_1 = """
POINT BEFORE GFIX -SWEEP.
CONNECTION SHUTDOWN
POINT AFTER GFIX -SWEEP.
ISQL LOG: WAITING FOR GFIX START SWEEP
ISQL LOG: STARTING TO DELETE GFIX PROCESS FROM MON$ATTACHMENTS
ISQL LOG: RECORDS AFFECTED: 1
TRACE_LOG: SWEEP_FAILED
FIREBIRD.LOG: + SWEEP IS STARTED BY SYSDBA
FIREBIRD.LOG: + OIT, OAT, OST, NEXT
FIREBIRD.LOG: + ERROR DURING SWEEP OF TEST.FDB
FIREBIRD.LOG: + CONNECTION SHUTDOWN
"""
"""
sweep_killer_script_1 = temp_file('killer.sql')
sweep_killer_out_1 = temp_file('killer.out')
sweep_killer_err_1 = temp_file('killer.err')
sweep_out_1 = temp_file('sweep.out')
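# NB: temp_file() is the firebird-qa fixture factory; each call yields a Path
# inside the per-test temporary directory (the file names themselves are arbitrary).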
def trace_session(act: Action, b: Barrier):
cfg30 = ['# Trace config, format for 3.0. Generated auto, do not edit!',
f'database=%[\\\\/]{act.db.db_path.name}',
'{',
' enabled = true',
' time_threshold = 0',
' log_errors = true',
' log_sweep = true',
' log_connections = true',
'}']
with act.connect_server() as srv:
srv.trace.start(config='\n'.join(cfg30))
b.wait()
for line in srv:
print(line.upper())
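# The Barrier(2) handshake lets the main test thread continue only after
# srv.trace.start() has returned, i.e. the trace session is already active when
# the workload starts. Iterating over srv reads the trace output line by line,
# and print() routes it into capsys for later inspection.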
@pytest.mark.version('>=3.0')
def test_1(act_1: Action, capsys, sweep_killer_script_1: Path, sweep_killer_out_1: Path,
sweep_killer_err_1: Path, sweep_out_1: Path):
sweep_killer_script_1.write_text("""
set list on;
recreate table tmp4wait(id int);
commit;
insert into tmp4wait(id) values(1);
commit;
set transaction lock timeout 2; ------------------ D E L A Y
update tmp4wait set id=id;
select 'Waiting for GFIX start SWEEP' as " " from rdb$database;
set term ^;
execute block as
begin
in autonomous transaction do
begin
update tmp4wait set id=id;
when any do
begin
-- NOP --
end
end
end
^
set term ;^
commit;
--select MON$PAGE_BUFFERS from mon$database;
select 'Starting to delete GFIX process from mon$attachments' as " " from rdb$database;
set count on;
delete from mon$attachments where mon$remote_process containing 'gfix';
-- On v4.0.0.2496 COMMIT fails with: STATEMENT FAILED, SQLSTATE = HY008, OPERATION WAS CANCELLED
-- Without commit this test PASSES, i.e. sweep is terminated with all outputs as expected
-- commit;
set count off;
select 'Finished deleting GFIX process from mon$attachments' as " " from rdb$database;
""")
with act_1.connect_server() as srv:
# get firebird log before action
srv.info.get_log()
log_before = srv.readlines()
# Change FW to OFF in order to speed up initial data filling
srv.database.set_write_mode(database=str(act_1.db.db_path), mode=DbWriteMode.ASYNC)
# make garbage
act_1.isql(switches=[], input=make_garbage_1)
# REDUCE the number of cache buffers in the DB header so that the sweep will work as long as possible
srv.database.set_default_cache_size(database=str(act_1.db.db_path), size=100)
# Change FW to ON (in order to make sweep life harder :))
srv.database.set_write_mode(database=str(act_1.db.db_path), mode=DbWriteMode.SYNC)
# Start trace
b = Barrier(2)
trace_thread = Thread(target=trace_session, args=[act_1, b])
trace_thread.start()
b.wait()
try:
# Launch (async.) ISQL which will make small delay and then kill GFIX attachment
with open(sweep_killer_out_1, 'w') as killer_out, \
open(sweep_killer_err_1, 'w') as killer_err:
p_killer = subprocess.Popen([act_1.vars['isql'],
'-i', str(sweep_killer_script_1),
'-user', act_1.db.user,
'-password', act_1.db.password, act_1.db.dsn],
stdout=killer_out, stderr=killer_err)
try:
time.sleep(2)
# Launch GFIX -SWEEP (sync.). It should be killed by ISQL which we have
# launched previously after delay in its script will expire:
act_1.expected_stderr = 'We expect errors'
act_1.gfix(switches=['-sweep', act_1.db.dsn])
gfix_out = act_1.stdout
gfix_err = act_1.stderr
finally:
p_killer.terminate()
time.sleep(2)
finally:
# stop trace
with act_1.connect_server() as srv:
for session in list(srv.trace.sessions.keys()):
srv.trace.stop(session_id=session)
trace_thread.join(1.0)
if trace_thread.is_alive():
pytest.fail('Trace thread still alive')
#
trace_log = capsys.readouterr().out
# get firebird log after action
with act_1.connect_server() as srv:
srv.info.get_log()
log_after = srv.readlines()
# construct final stdout for checks
print(gfix_out.upper())
print(gfix_err.upper())
# sweep killer output
for line in sweep_killer_out_1.read_text().splitlines():
if line:
print('ISQL LOG:', line.upper())
for line in sweep_killer_err_1.read_text().splitlines():
if line:
print('ISQL ERR:', line.upper())
# Trace log
found_sweep_failed = 0
for line in trace_log.splitlines():
if 'SWEEP_FAILED' in line:
print('TRACE_LOG:' + (' '.join(line.split()).upper()))
found_sweep_failed = 1
if found_sweep_failed == 1 and ('ATTACH_DATABASE' in line):
print('TRACE: ATTACH DETECTED AFTER SWEEP FAILED! ')
print('TRACE_LOG:' + (' '.join(line.split()).upper()))
#
pattern = re.compile("\\+[\\s]+OIT[ ]+[0-9]+,[\\s]*OAT[\\s]+[0-9]+,[\\s]*OST[\\s]+[0-9]+,[\\s]*NEXT[\\s]+[0-9]+")
for line in unified_diff(log_before, log_after):
if line.startswith('+'):
line = line.upper()
if 'SWEEP' in line or 'CONNECTION' in line or pattern.match(line):
print('FIREBIRD.LOG: ' + ' '.join(line.split()))
#
# check
act_1.expected_stdout = expected_stdout_1
act_1.stdout = capsys.readouterr().out
assert act_1.clean_stdout == act_1.clean_expected_stdout