6
0
mirror of https://github.com/FirebirdSQL/firebird-qa.git synced 2025-01-22 13:33:07 +01:00

More python tests

This commit is contained in:
Pavel Císař 2021-11-17 19:43:06 +01:00
parent 7822a79624
commit ea95f54d07
21 changed files with 1198 additions and 733 deletions

View File

@ -40,7 +40,7 @@
# qmid: None
import pytest
from threading import Thread
from threading import Thread, Barrier
from firebird.qa import db_factory, python_act, Action
# version: 2.5
@ -264,7 +264,7 @@ test_script_1 = """
set term ;^
"""
def trace_session(act: Action):
def trace_session(act: Action, b: Barrier):
cfg30 = ['# Trace config, format for 3.0. Generated auto, do not edit!',
f'database=%[\\\\/]{act.db.db_path.name}',
'{',
@ -280,13 +280,16 @@ def trace_session(act: Action):
'}']
with act.connect_server() as srv:
srv.trace.start(config='\n'.join(cfg30))
b.wait()
for line in srv:
print(line)
@pytest.mark.version('>=3.0')
def test_1(act_1: Action, capsys):
trace_thread = Thread(target=trace_session, args=[act_1])
b = Barrier(2)
trace_thread = Thread(target=trace_session, args=[act_1, b])
trace_thread.start()
b.wait()
act_1.isql(switches=['-n'], input=test_script_1)
with act_1.connect_server() as srv:
for session in list(srv.trace.sessions.keys()):

View File

@ -16,7 +16,7 @@
# qmid: None
import pytest
from threading import Thread
from threading import Thread, Barrier
from difflib import unified_diff
from firebird.qa import db_factory, python_act, Action
@ -482,7 +482,7 @@ select '*Лев Николаевич Толстой *
' from rdb$database;
"""
def trace_session(act: Action):
def trace_session(act: Action, b: Barrier):
cfg30 = ['# Trace config, format for 3.0. Generated auto, do not edit!',
f'database=%[\\\\/]{act.db.db_path.name}',
'{',
@ -515,17 +515,20 @@ def trace_session(act: Action):
with act.connect_server() as srv:
srv.encoding = 'utf8'
srv.trace.start(config='\n'.join(cfg30))
b.wait()
for line in srv:
pass # we are not interested in trace output
@pytest.mark.version('>=2.5.1')
def test_1(act_1: Action):
b = Barrier(2)
# Get content of firebird.log BEFORE test
with act_1.connect_server() as srv:
srv.info.get_log()
log_before = srv.readlines()
trace_thread = Thread(target=trace_session, args=[act_1])
trace_thread = Thread(target=trace_session, args=[act_1, b])
trace_thread.start()
b.wait()
# RUN QUERY WITH NON-ASCII CHARACTERS
act_1.isql(switches=['-n', '-q'], input=test_script_1)
with act_1.connect_server() as srv:

View File

@ -20,7 +20,7 @@
# qmid: None
import pytest
from threading import Thread
from threading import Thread, Barrier
from firebird.qa import db_factory, python_act, Action
# version: 2.5
@ -188,7 +188,7 @@ expected_stdout_1 = """
SYSDBA:NONE, ISO88591, TCP
"""
def trace_session(act: Action):
def trace_session(act: Action, b: Barrier):
cfg30 = ['# Trace config, format for 3.0. Generated auto, do not edit!',
f'database=%[\\\\/]{act.db.db_path.name}',
'{',
@ -198,13 +198,16 @@ def trace_session(act: Action):
'}']
with act.connect_server() as srv:
srv.trace.start(config='\n'.join(cfg30))
b.wait()
for line in srv:
print(line.upper())
@pytest.mark.version('>=3.0')
def test_1(act_1: Action, capsys):
trace_thread = Thread(target=trace_session, args=[act_1])
b = Barrier(2)
trace_thread = Thread(target=trace_session, args=[act_1, b])
trace_thread.start()
b.wait()
# make two connections with different charset
with act_1.db.connect(charset='utf8'):
pass

View File

@ -2,14 +2,14 @@
#
# id: bugs.core_3231
# title: OVERLAY() fails when used with text BLOBs containing multi-byte chars
# decription:
# decription:
# tracker_id: CORE-3231
# min_versions: ['2.1.5']
# versions: 2.1.5
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
from firebird.qa import db_factory, python_act, Action
# version: 2.1.5
# resources: None
@ -48,12 +48,16 @@ db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)
# else:
# pass
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
@pytest.mark.version('>=2.1.5')
@pytest.mark.xfail
def test_1(db_1):
pytest.fail("Test not IMPLEMENTED")
act_1 = python_act('db_1', substitutions=substitutions_1)
@pytest.mark.version('>=3.0')
def test_1(act_1: Action):
with act_1.db.connect() as con:
c = con.cursor()
# Test non multi-bytes
c.execute("with q(s) as (select cast('abcdefghijklmno' as blob sub_type 1 character set utf8) from rdb$database) select overlay (s placing cast('0123456789' as blob sub_type 1 character set utf8) from 5) from q")
# Test UTF8
c.execute("with q(s) as (select cast('abcdefghijklmno' as blob sub_type 1 character set utf8) from rdb$database) select overlay (s placing cast(_iso8859_1 'áé' as blob sub_type 1 character set utf8) from 5) from q")
# Test ISO8859_1
c.execute("with q(s) as (select cast('abcdefghijklmno' as blob sub_type 1 character set utf8) from rdb$database) select overlay (s placing cast(_iso8859_1 'áé' as blob sub_type 1 character set iso8859_1) from 5) from q")

View File

@ -2,26 +2,26 @@
#
# id: bugs.core_3323
# title: Ability to cancel waiting in lock manager
# decription:
# decription:
# Fully reimplemented 10.01.2020. Reason: ISQL-"killer" could not find record in mon$attachments that should be deleted.
#
#
# Test asynchronously launches ISQL with script that will hang because of two concurrent attachments trying to update
# the same record (second attachment is created using ES/EDS).
# After this we have to launch second instance of ISQL which will attempt to kill both connections created in the 1st ISQL.
#
#
# The *most* problem here is properly determine time that we have to wait until 1st ISQL will really establish its connect!
# If this time is too short then second ISQL ("killer") will NOT able to see 1st ISQL in mon$attachments and will not be able
# to delete (because there will be NOT YET attachment to delete!). This mean that 2nd ISQL will finish without really check
# that it could kill hanged attachments. Test in this case will not finish if 1st ISQL uses tx with infinite WAIT!
#
#
# To be sure that 2nd ISQL ("killer") will be able to see 1st one ("hanged") we have to make pretty long PSQL-loop which tries
# to find any record in mon$attachment that is from concurrent connection (which user name we know for advance: 'tmp$c3323').
# This PSQL loop must finish as fast as we find record that will be then deleted.
#
#
# Lot of runs show that there is a problem in 4.0.0 Classic: it requires too long time in PSQL loop to find such attachment.
# Time in 4.0 CS can be about 1-2 seconds and number of iterations will be greater than 100.
# No such problem in all 3.0 and in 4.0 for other modes.
#
#
# 24.12.2020
# Waiting for completion of child ISQL async process is done by call <isql_PID>.wait() instead of old (and "fragile")
# assumption about maximal time that it could last before forcedly terminate it.
@ -34,9 +34,12 @@
# 3.0.8.33401 CS: 4.543s.
# 2.5.9.27152 SC: 1.006s.
# 2.5.9.27152 CS: 1.333s.
#
#
#
#
#
# [pcisar] 17.11.2021
# This test is too complicated and fragile (it can screw up the test environment)
# It should be reimplemented in a more robust way, or removed from the suite
#
# tracker_id: CORE-3323
# min_versions: ['2.5.1']
# versions: 2.5.1
@ -60,24 +63,24 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# import subprocess
# from subprocess import Popen
# import time
#
#
# #--------------------------------------------
#
#
# def flush_and_close(file_handle):
# # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os
#
#
# file_handle.flush()
# if file_handle.mode not in ('r', 'rb'):
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno())
# file_handle.close()
#
#
# #--------------------------------------------
#
#
# def cleanup( f_names_list ):
# global os
# for i in range(len( f_names_list )):
@ -85,25 +88,25 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# os.remove( f_names_list[i] )
# if os.path.isfile( f_names_list[i]):
# print('ERROR: can not remove file ' + f_names_list[i])
#
#
# #--------------------------------------------
#
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
#
#
# clear_ext_pool_statement=''
# usr_plugin_clause=''
#
#
# if db_conn.engine_version >= 4.0:
# clear_ext_pool_statement = 'ALTER EXTERNAL CONNECTIONS POOL CLEAR ALL;'
#
#
# if db_conn.engine_version >= 3.0:
# # usr_plugin_clause='using plugin Legacy_userManager'
# usr_plugin_clause='using plugin Srp'
#
#
# db_conn.close()
#
#
#
#
# init_ddl='''
# set term ^;
# execute block as
@ -115,39 +118,39 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# end^
# set term ;^
# commit;
#
#
# create user tmp$c3323 password '456' %(usr_plugin_clause)s;
# commit;
#
#
# recreate table test(id int);
# commit;
# insert into test values(1);
# commit;
#
#
# grant select,update on test to tmp$c3323;
# commit;
# ''' % locals()
#
#
# runProgram('isql', [dsn], init_ddl)
#
#
#
#
# lock_sql='''
# set list on;
# commit;
# set transaction wait;
#
#
# update test set id = -id;
# select 'starting EB with lock-conflict' as "Point_A:" -------------- p o i n t [ A ]
# ,id as "id_at_point_A:"
# from test;
#
# from test;
#
# set term ^;
# execute block as
# begin
# -- This statement will for sure finish with exception, but
# -- in 2.5.0 it will be 'lock-conflict' (and this was WRONG),
# -- while on 2.5.1 and above it should be 'connection shutdown'.
#
#
# -- 11.05.2017, FB 4.0 only!
# -- Following messages can appear after 'connection shutdown'
# -- (letter from dimitr, 08-may-2017 20:41):
@ -155,45 +158,45 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# -- isc_att_shut_idle: Idle timeout expired
# -- isc_att_shut_db_down: Database is shutdown
# -- isc_att_shut_engine: Engine is shutdown
#
#
# execute statement 'update test set id = - (1000 + id)'
# on external 'localhost:' || rdb$get_context('SYSTEM','DB_NAME')
# as user 'TMP$C3323' password '456';
# end
# ^
# set term ;^
#
#
# select 'finished EB with lock-conflict' as "Point_B" -------------- p o i n t [ B ]
# ,id as "id_at_point_B:"
# from test;
# from test;
# rollback;
# '''
#
#
# f_hanged_sql=open( os.path.join(context['temp_directory'],'tmp_3323_hang.sql'), 'w')
# f_hanged_sql.write(lock_sql)
# f_hanged_sql.close()
#
#
# f_hanged_log=open( os.path.join(context['temp_directory'],'tmp_3323_hang.log'), "w", buffering = 0)
#
#
# # Execute a child program in a new process, redirecting STDERR to the same target as of STDOUT:
# p_hanged_isql = Popen([context['isql_path'], dsn, "-ch", 'utf8', "-i" , f_hanged_sql.name],stdout=f_hanged_log, stderr=subprocess.STDOUT)
#
#
# usr=user_name
# pwd=user_password
#
#
# # Limit: how long "killer" must wait until "victim" will establish its connect, milliseconds.
# # It was encountered on Classic that this time can be valueable if heavy concurrent workload
# # presents on host where tests run:
# ###############################################
# MAX_WAIT_FOR_VICTIM_ESTABLISH_ITS_CONNECT=15000
# ###############################################
#
#
# killer_script='''
# set list on;
#
#
# select 'Intro script that must kill other attachment' as "point_C:" ------------------ p o i n t [ C ]
# from test;
#
# from test;
#
# set term ^;
# execute block returns( found_other_attach_for_attempts int, found_other_attach_for_ms int ) as
# declare c smallint = 0;
@ -205,9 +208,9 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# begin
# found_other_attach_for_attempts = found_other_attach_for_attempts + 1;
# in autonomous transaction do
# select count(*)
# select count(*)
# from (
# select mon$attachment_id
# select mon$attachment_id
# from mon$attachments a
# where mon$user = upper('TMP$C3323' )
# rows 1
@ -226,69 +229,69 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# ^
# set term ;^
# rollback;
#
#
#
#
# select 'starting kill hanged connection' as "point_D:" ------------------ p o i n t [ D ]
# ,id as "id_at_point_D:"
# from test;
#
# from test;
#
# select iif( a.mon$attachment_id is not null, '<EXPECTED: NOT NULL>', '### UNEXPECTED: NULL ###' ) as attachment_to_be_killed
# from rdb$database
# left join mon$attachments a on mon$user = upper('TMP$C3323' )
# left join mon$attachments a on mon$user = upper('TMP$C3323' )
# ;
#
#
# select 'Running delete from mon$attachments statement' as "point_E:" ------------------ p o i n t [ E ]
# from rdb$database;
#
#
# set count on;
# delete from mon$attachments
# where mon$user = upper('TMP$C3323' )
# where mon$user = upper('TMP$C3323' )
# ;
# set count off;
# commit;
#
#
# connect '$(DSN)' user '%(usr)s' password '%(pwd)s';
#
#
# %(clear_ext_pool_statement)s
#
#
# select 'Reconnect and look for attachment of other user' as "point_F:" ------------------ p o i n t [ F ]
# ,id as "id_at_point_F:"
# from test;
#
# from test;
#
# select iif( a.mon$attachment_id is null, '<EXPECTED: NULL>', '### UNEXPECTED NOT NULL: attach_id=' || a.mon$attachment_id || '; state=' || coalesce(a.mon$state, '<null>') || ' ###' ) as still_alive_attachment_id
# from rdb$database r
# left join mon$attachments a on a.mon$user = upper('TMP$C3323');
# commit;
#
#
# set blob all;
#
#
# select a.*, s.*
# from mon$attachments a left join mon$statements s using(mon$attachment_id)
# where a.mon$user = upper('TMP$C3323')
# ;
#
#
# select 'finished kill hanged connection' as "pointG:" ----------------- p o i n t [ G ]
# from rdb$database;
# from rdb$database;
# commit;
#
#
# drop user tmp$c3323 %(usr_plugin_clause)s;
# commit;
#
#
# ''' % locals()
#
#
#
#
# f_killer_sql=open( os.path.join(context['temp_directory'],'tmp_3323_kill.sql'), 'w')
# f_killer_sql.write(killer_script)
# flush_and_close(f_killer_sql)
#
#
#
#
# # starting ISQL-KILLER:
# #######################
#
#
# f_killer_log=open( os.path.join(context['temp_directory'],'tmp_3323_kill.log'), "w", buffering = 0)
# subprocess.call( [context['isql_path'], dsn, "-ch", "utf8", "-i" , f_killer_sql.name],stdout=f_killer_log, stderr=subprocess.STDOUT )
# flush_and_close(f_killer_log)
#
#
# # :::::::::::::::::::::::::::::::::::::::::::::
# # ::: A.C.H.T.U.N.G :::
# # ::: DO NOT call p_hanged_isql.terminate() :::
@ -296,31 +299,31 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# # Here we must W.A.I.T until ISQL-victim (which just has been killed) finished
# # with raising exception ("SQLSTATE = 42000 / connection shutdown") and fully
# # flushes its log on disk.
#
#
# # Wait until ISQL complete its mission:
# p_hanged_isql.wait()
#
#
# flush_and_close(f_hanged_log)
#
#
# with open( f_hanged_log.name,'r') as f:
# print(f.read())
#
#
# with open( f_killer_log.name,'r') as f:
# print(f.read())
#
#
#
#
# # We have to change DB state to full shutdown in order to prevent "Object in use"
# # while fbtest will try to drop these databases (set linger = 0 does not help!)
# ###############################################################################
# runProgram('gfix',[dsn,'-shut','full','-force','0'])
# runProgram('gfix',[dsn,'-online'])
#
#
# # CLEANUP
# #########
# f_list=(f_hanged_sql,f_killer_sql,f_killer_log,f_hanged_log)
# cleanup( [f.name for f in f_list] )
#
#
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)

View File

@ -2,7 +2,7 @@
#
# id: bugs.core_3328
# title: Client writes error messages into firebird.log when database is shutted down
# decription:
# decription:
# Test retrieves FB engine version in order to issue proper command option for getting firebird.log content
# (in 2.5 this is 'action_get_ib_log' rather then expected 'action_get_fb_log' - note on letter 'i' instead 'f').
# Initial content of firebird.log is saved into file, see var. 'f_fblog_before'.
@ -13,55 +13,40 @@
# Comparison is done by using standard Python package 'difflib'.
# Difference between old and new firebird.log should _NOT_ contain lines with words 'gds__detach' or 'lost'.
# If these words absent - all fine, actual and expected output both have to be empty.
#
# 04-dec-2016.
#
# 04-dec-2016.
# Checked on: WI-V2.5.7.27028, WI-V3.0.2.32642, WI-T4.0.0.460 (all on SS/SC/CS).
# Reduced time of ISQL working from 5 to 2 seconds.
#
#
# Samples of call with '-c <path_to_fbclient_dll>':
#
# fbt_run -b C:\\MIX
# irebird
# b25in bugs.core_3328 -o localhost/3255 -c C:\\MIX
# irebird
# b25in
# bclient.dll
# fbt_run -b C:\\MIX
# irebird
# b25Csin bugs.core_3328 -o localhost/3249 -c C:\\MIX
# irebird
# b25csin
# bclient.dll
# fbt_run -b C:\\MIX
# irebird
# b40Cs bugs.core_3328 -o localhost/3439 -c C:\\MIX
# irebird
# b40cs
# bclient.dll
# fbt_run -b C:\\MIX
# irebird
# b40sc bugs.core_3328 -o localhost/3430 -c C:\\MIX
# irebird
# b40sc
# bclient.dll & fbt_view -d results.trf
#
#
# fbt_run -b C:\\MIX\\firebird\\fb25\\bin bugs.core_3328 -o localhost/3255 -c C:\\MIX\\firebird\\fb25\\fbinbclient.dll
# fbt_run -b C:\\MIX\\firebird\\fb25Cs\\bin bugs.core_3328 -o localhost/3249 -c C:\\MIX\\firebird\\fb25cs\\bin\\fbclient.dll
# fbt_run -b C:\\MIX\\firebird\\fb40Cs bugs.core_3328 -o localhost/3439 -c C:\\MIX\\firebird\\fb40cs\\fbclient.dll
# fbt_run -b C:\\MIX\\firebird\\fb40sc bugs.core_3328 -o localhost/3430 -c C:\\MIX\\firebird\\fb40sc\\fbclient.dll & fbt_view -d results.trf
#
# tracker_id: CORE-3328
# min_versions: ['2.5.1']
# versions: 2.5.1
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
import time
from difflib import unified_diff
from threading import Thread
from firebird.qa import db_factory, python_act, Action
from firebird.driver import ShutdownMethod, ShutdownMode
# version: 2.5.1
# resources: None
substitutions_1 = [('attachments: [1-9]+', 'attachments: 1'), ('[\\s]+', ' ')]
# substitutions_1 = [('attachments: [1-9]+', 'attachments: 1'), ('[\\s]+', ' ')]
substitutions_1 = [('database.*shutdown', 'database shutdown')]
init_script_1 = """
create table test(s varchar(36) unique);
commit;
"""
"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
@ -73,51 +58,51 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# import time
# import difflib
# from fdb import services
#
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
#
#
# db_file=db_conn.database_name
# engine = str(db_conn.engine_version) # convert to text because 'float' object has no attribute 'startswith'
# db_conn.close()
#
#
# #---------------------------------------------------
# def svc_get_fb_log( engine, f_fb_log ):
#
#
# import subprocess
#
#
# if engine.startswith('2.5'):
# get_firebird_log_key='action_get_ib_log'
# else:
# get_firebird_log_key='action_get_fb_log'
#
#
# subprocess.call( [ context['fbsvcmgr_path'],
# "localhost:service_mgr",
# get_firebird_log_key
# ],
# stdout=f_fb_log,
# stdout=f_fb_log,
# stderr=subprocess.STDOUT
# )
# return
#
#
#
#
# #---------------------------------------------
#
#
# def flush_and_close(file_handle):
# # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os
#
#
# file_handle.flush()
# if file_handle.mode not in ('r', 'rb'):
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno())
# file_handle.close()
#
#
# #--------------------------------------------
#
#
# def cleanup( f_names_list ):
# global os
# for i in range(len( f_names_list )):
@ -125,24 +110,24 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# os.remove( f_names_list[i] )
# if os.path.isfile( f_names_list[i]):
# print('ERROR: can not remove file ' + f_names_list[i])
#
#
# #--------------------------------------------
#
#
#
#
# # Get HOME dir of FB instance that is now checked.
# # We will concatenate this string with 'fbsvcmgr' command in order to choose
# # We will concatenate this string with 'fbsvcmgr' command in order to choose
# # PROPER binary file when querying services API to shutdown/online DB
# # NOTE: this is NECESSARY if several instances are in work and we did not change
# # PATH variable before call this test!
#
# # NB, 06.12.2016: as of fdb 1.6.1 one need to EXPLICITLY specify user+password pair when doing connect
#
# # NB, 06.12.2016: as of fdb 1.6.1 one need to EXPLICITLY specify user+password pair when doing connect
# # via to FB services API by services.connect() - see FB tracker, PYFB-69
# # ("Can not connect to FB services if set ISC_USER & ISC_PASSWORD by os.environ[ ... ]")
#
#
# f_fblog_before=open( os.path.join(context['temp_directory'],'tmp_3328_fblog_before.txt'), 'w')
# svc_get_fb_log( engine, f_fblog_before )
# flush_and_close( f_fblog_before )
#
#
# sql_dml='''
# show version;
# set term ^;
@ -158,20 +143,20 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# ^
# set term ;^
# '''
#
#
# f_client_sql = open( os.path.join(context['temp_directory'],'tmp_client_3328.sql'), 'w')
# f_client_sql.write(sql_dml)
# flush_and_close( f_client_sql )
#
#
# f_client_log = open( os.path.join(context['temp_directory'],'tmp_client_3328.log'), 'w')
# p_client_dml=subprocess.Popen( [context['isql_path'], dsn, "-n", "-i", f_client_sql.name ],
# stdout = f_client_log,
# stderr = subprocess.STDOUT
# )
# time.sleep(2)
#
#
# f_shutdown_log = open( os.path.join(context['temp_directory'],'tmp_shutdown_and_online_3328.log'), 'w')
#
#
# # Databases:
# # Number of attachments: 1
# # Number of databases: 1
@ -182,7 +167,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# stdout = f_shutdown_log,
# stderr=subprocess.STDOUT
# )
#
#
# subprocess.call( [context['fbsvcmgr_path'], "localhost:service_mgr",
# "action_properties", "prp_shutdown_mode", "prp_sm_full", "prp_shutdown_db", "0",
# "dbname", db_file,
@ -190,7 +175,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# stdout = f_shutdown_log,
# stderr = subprocess.STDOUT
# )
#
#
# # Databases:
# # Number of attachments: 0
# # Number of databases: 0
@ -200,7 +185,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# stdout = f_shutdown_log,
# stderr=subprocess.STDOUT
# )
#
#
# subprocess.call( [context['fbsvcmgr_path'],"localhost:service_mgr",
# "action_db_stats",
# "dbname", db_file, "sts_hdr_pages"
@ -208,7 +193,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# stdout = f_shutdown_log,
# stderr=subprocess.STDOUT
# )
#
#
# subprocess.call( [context['fbsvcmgr_path'], "localhost:service_mgr",
# "action_properties", "prp_db_online",
# "dbname", db_file,
@ -216,7 +201,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# stdout = f_shutdown_log,
# stderr = subprocess.STDOUT
# )
#
#
# subprocess.call( [context['fbsvcmgr_path'],"localhost:service_mgr",
# "action_db_stats",
# "dbname", db_file, "sts_hdr_pages"
@ -224,37 +209,37 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# stdout = f_shutdown_log,
# stderr=subprocess.STDOUT
# )
#
#
# flush_and_close( f_shutdown_log )
#
#
#
#
# f_fblog_after=open( os.path.join(context['temp_directory'],'tmp_3328_fblog_after.txt'), 'w')
# svc_get_fb_log( engine, f_fblog_after )
# flush_and_close( f_fblog_after )
#
#
# p_client_dml.terminate()
# flush_and_close( f_client_log )
#
#
# # Now we can compare two versions of firebird.log and check their difference.
#
#
# oldfb=open(f_fblog_before.name, 'r')
# newfb=open(f_fblog_after.name, 'r')
#
#
# difftext = ''.join(difflib.unified_diff(
# oldfb.readlines(),
# oldfb.readlines(),
# newfb.readlines()
# ))
# oldfb.close()
# newfb.close()
#
#
# f_diff_txt=open( os.path.join(context['temp_directory'],'tmp_3328_diff.txt'), 'w')
# f_diff_txt.write(difftext)
# flush_and_close( f_diff_txt )
#
#
# # New lines in firebird.log must |NOT| contain these:
# # ===
# # REMOTE INTERFACE/gds__detach: Unsuccesful detach from database.
# # Uncommitted work may have been lost
# # Uncommitted work may have been lost
# # ===
# # If such lines present - this is regression and we output them.
# # When all fine, final output is empty.
@ -262,39 +247,78 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# # BTW: for 3.0, firebird.log will contain such text:
# # INET/INET_ERROR: READ ERRNO = 10054, SERVER HOST = LOCALHOST, ADDRESS = 127.0.0.1/3333
# # -- but this is checked by another .fbt
#
#
# with open( f_shutdown_log.name,'r') as f:
# for line in f:
# if ( 'unknown' in line.lower() or 'attributes' in line.lower() ):
# print(line)
# #or 'attachments' in line.lower()
#
# #or 'attachments' in line.lower()
#
# with open( f_diff_txt.name,'r') as f:
# for line in f:
# if line.startswith('+') and ('gds__detach' in line or 'lost' in line):
# print(line.upper())
#
#
# ###############################
# # Cleanup.
# cleanup( [i.name for i in (f_shutdown_log,f_client_sql,f_client_log,f_fblog_before,f_fblog_after,f_diff_txt) ] )
#
#
# # NB: temply removed following lines from expected_stdout,
# # see core-5413, "fbsvcmgr info_svr_db_info does not see active attachments and databases in use (CLASSIC only)"
# # Number of attachments: 1
# # Number of attachments: 0
#
#
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
Attributes force write, full shutdown
Attributes force write
"""
act_1 = python_act('db_1', substitutions=substitutions_1)
@pytest.mark.version('>=2.5.1')
@pytest.mark.xfail
def test_1(db_1):
pytest.fail("Test not IMPLEMENTED")
expected_stderr_1 = """
Statement failed, SQLSTATE = HY000
database /tmp/pytest-of-pcisar/pytest-528/test_10/test.fdb shutdown
Statement failed, SQLSTATE = HY000
database /tmp/pytest-of-pcisar/pytest-528/test_10/test.fdb shutdown
"""
def run_work(act: Action):
test_script = """
show version;
set term ^;
execute block as
declare v_role varchar(31);
begin
v_role = left(replace( uuid_to_char(gen_uuid()), '-', ''), 31);
while (1=1) do
begin
insert into test(s) values( uuid_to_char( gen_uuid() ) );
end
end
^
set term ;^
"""
act.expected_stderr = expected_stderr_1
act.isql(switches=['-n'], input=test_script)
@pytest.mark.version('>=3.0')
def test_1(act_1: Action):
with act_1.connect_server() as srv:
srv.info.get_log()
log_before = srv.readlines()
#
work_thread = Thread(target=run_work, args=[act_1])
work_thread.start()
time.sleep(2)
#
srv.database.shutdown(database=str(act_1.db.db_path), mode=ShutdownMode.FULL,
method=ShutdownMethod.FORCED, timeout=0)
srv.database.bring_online(database=str(act_1.db.db_path))
#
srv.info.get_log()
log_after = srv.readlines()
#
work_thread.join(2)
if work_thread.is_alive():
pytest.fail('Work thread is still alive')
#
assert list(unified_diff(log_before, log_after)) == []
assert act_1.clean_stderr == act_1.clean_expected_stderr

View File

@ -2,23 +2,25 @@
#
# id: bugs.core_3357
# title: Generators are set to 0 after restore
# decription:
# decription:
# NOTE: FB 4.x has incompatible behaviour with all previous versions since build 4.0.0.2131 (06-aug-2020):
# statement 'alter sequence <seq_name> restart with 0' changes rdb$generators.rdb$initial_value to -1 thus
# next call of gen_id(<seq_name>,1) will return 0 (ZERO!) rather than 1.
# next call of gen_id(<seq_name>,1) will return 0 (ZERO!) rather than 1.
# See also CORE-6084 and its fix: https://github.com/FirebirdSQL/firebird/commit/23dc0c6297825b2e9006f4d5a2c488702091033d
# This is considered as *expected* and is noted in doc/README.incompatibilities.3to4.txt
#
#
# Because of this it was decided to create separate section for check FB 4.x results.
# Checked on 4.0.0.2164
#
#
# tracker_id: CORE-3357
# min_versions: ['2.5.0']
# versions: 3.0, 4.0
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
from io import BytesIO
from firebird.qa import db_factory, python_act, Action
from firebird.driver import SrvRestoreFlag
# version: 3.0
# resources: None
@ -29,7 +31,7 @@ init_script_1 = """
recreate sequence g1 start with 9223372036854775807 increment by -2147483647;
recreate sequence g2 start with -9223372036854775808 increment by 2147483647;
commit;
"""
"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
@ -44,21 +46,29 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# show sequ g2;
# '''
# runProgram('isql',[dsn,'-user',user_name,'-pas',user_password],sql)
#
#
# if os.path.isfile(fbk):
# os.remove(fbk)
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
expected_stdout_1 = """
Generator G1, current value: 9223372036854775807, initial value: 9223372036854775807, increment: -2147483647
Generator G2, current value: -9223372036854775808, initial value: -9223372036854775808, increment: 2147483647
"""
"""
@pytest.mark.version('>=3.0')
@pytest.mark.xfail
def test_1(db_1):
pytest.fail("Test not IMPLEMENTED")
@pytest.mark.version('>=3.0,<4')
def test_1(act_1: Action):
with act_1.connect_server() as srv:
backup = BytesIO()
srv.database.local_backup(database=str(act_1.db.db_path), backup_stream=backup)
backup.seek(0)
srv.database.local_restore(backup_stream=backup, database=str(act_1.db.db_path),
flags=SrvRestoreFlag.REPLACE)
act_1.expected_stdout = expected_stdout_1
act_1.isql(switches=[], input="show sequ g1; show sequ g2;")
assert act_1.clean_stdout == act_1.clean_expected_stdout
# version: 4.0
@ -70,7 +80,7 @@ init_script_2 = """
recreate sequence g1 start with 9223372036854775807 increment by -2147483647;
recreate sequence g2 start with -9223372036854775808 increment by 2147483647;
commit;
"""
"""
db_2 = db_factory(sql_dialect=3, init=init_script_2)
@ -85,11 +95,12 @@ db_2 = db_factory(sql_dialect=3, init=init_script_2)
# show sequ g2;
# '''
# runProgram('isql',[dsn,'-user',user_name,'-pas',user_password],sql)
#
#
# if os.path.isfile(fbk):
# os.remove(fbk)
#---
#act_2 = python_act('db_2', test_script_2, substitutions=substitutions_2)
act_2 = python_act('db_2', substitutions=substitutions_2)
expected_stdout_2 = """
Generator G1, current value: -9223372034707292162, initial value: 9223372036854775807, increment: -2147483647
@ -97,8 +108,15 @@ expected_stdout_2 = """
"""
@pytest.mark.version('>=4.0')
@pytest.mark.xfail
def test_2(db_2):
pytest.fail("Test not IMPLEMENTED")
def test_2(act_2: Action):
with act_2.connect_server() as srv:
backup = BytesIO()
srv.database.local_backup(database=str(act_2.db.db_path), backup_stream=backup)
backup.seek(0)
srv.database.local_restore(backup_stream=backup, database=str(act_2.db.db_path),
flags=SrvRestoreFlag.REPLACE)
act_2.expected_stdout = expected_stdout_2
act_2.isql(switches=[], input="show sequ g1; show sequ g2;")
assert act_2.clean_stdout == act_2.clean_expected_stdout

View File

@ -2,30 +2,33 @@
#
# id: bugs.core_3413
# title: Improve diagnostics of internal trace errors
# decription:
# decription:
# 1. Obtain engine_version from built-in context variable.
# 2. Make config for trace in proper format according to FB engine version,
# with adding invalid element 'foo' instead on boolean ('true' or 'false')
# 3. Launch trace session in separate child process using 'FBSVCMGR action_trace_start'
# 4. Run ISQL with trivial command in order trace session will register error in its log.
# 5. Stop trace session. Output its log with filtering only messages related to error.
#
#
# Checked on: WI-V2.5.5.26916 (SS, SC, CS); WI-V3.0.0.32008 (SS, SC, CS). Result: OK.
# ::: NB :::
# Several delays (time.sleep) added in main thread because of OS buffering. Couldn't switch this buffering off.
#
#
# tracker_id: CORE-3413
# min_versions: ['2.5.1']
# versions: 2.5.1
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
from threading import Thread, Barrier
from firebird.qa import db_factory, python_act, Action
# version: 2.5.1
# resources: None
substitutions_1 = [('^((?!ERROR|ELEMENT).)*$', ''), ('ERROR CREATING TRACE SESSION.*', 'ERROR CREATING TRACE SESSION'), ('.*"FOO" IS NOT A VALID.*', '"FOO" IS NOT A VALID')]
substitutions_1 = [('^((?!ERROR|ELEMENT).)*$', ''),
('ERROR CREATING TRACE SESSION.*', 'ERROR CREATING TRACE SESSION'),
('.*"FOO" IS NOT A VALID.*', '"FOO" IS NOT A VALID')]
init_script_1 = """"""
@ -37,35 +40,35 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# import subprocess
# from subprocess import Popen
# import time
#
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
#
#
# db_file=db_conn.database_name
#
#
# # Obtain engine version, 2.5 or 3.0, for make trace config in appropriate format:
# engine = str(db_conn.engine_version)
#
#
# db_conn.close()
#
#
#
#
# #---------------------------------------------
#
#
# def flush_and_close(file_handle):
# # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os
#
#
# file_handle.flush()
# if file_handle.mode not in ('r', 'rb'):
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno())
# file_handle.close()
#
#
# #--------------------------------------------
#
#
# def cleanup( f_names_list ):
# global os
# for i in range(len( f_names_list )):
@ -73,70 +76,70 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# os.remove( f_names_list[i] )
# if os.path.isfile( f_names_list[i]):
# print('ERROR: can not remove file ' + f_names_list[i])
#
#
# #--------------------------------------------
#
#
#
#
# txt25 = '''# Trace config, format for 2.5. Generated auto, do not edit!
# <database %[\\\\\\\\/]bugs.core_3413.fdb>
# enabled true
# time_threshold 0
#
# time_threshold 0
#
# # Value for this parameter was intentionally choosen *** INVALID ***
# log_statement_finish foo
# </database>
# '''
#
#
# # NOTES ABOUT TRACE CONFIG FOR 3.0:
# # 1) Header contains `database` clause in different format vs FB 2.5: its data must be enclosed with '{' '}'
# # 2) Name and value must be separated by EQUALITY sign ('=') in FB-3 trace.conf, otherwise we get runtime error:
# # element "<. . .>" have no attribute value set
#
#
# txt30 = '''# Trace config, format for 3.0. Generated auto, do not edit!
# database=%[\\\\\\\\/]bugs.core_3413.fdb
# {
# enabled = true
# time_threshold = 0
#
#
# # Value for this parameter was intentionally choosen *** INVALID ***
# log_statement_finish = foo
# }
# '''
#
#
# f_trccfg=open( os.path.join(context['temp_directory'],'tmp_trace_3413.cfg'), 'w')
# if engine.startswith('2.5'):
# f_trccfg.write(txt25)
# else:
# f_trccfg.write(txt30)
# flush_and_close( f_trccfg )
#
#
# #####################################################
# # Starting trace session in new child process (async.):
#
#
# f_trclog=open( os.path.join(context['temp_directory'],'tmp_trace_3413.log'), 'w')
#
#
# # Execute a child program in a new process, redirecting STDERR to the same target as of STDOUT:
# p_trace=Popen([context['fbsvcmgr_path'], "localhost:service_mgr",
# "action_trace_start",
# "trc_cfg", f_trccfg.name],
# stdout=f_trclog, stderr=subprocess.STDOUT)
#
#
# # Wait! Trace session is initialized not instantly!
# time.sleep(1)
#
#
# sqltxt='''
# set list on;
# select 1 as c from rdb$database;
# '''
#
#
# runProgram('isql',[dsn,'-user',user_name,'-pas',user_password],sqltxt)
#
#
# # do NOT remove this otherwise trace log can contain only message about its start before being closed!
# time.sleep(3)
#
#
# #####################################################
# # Getting ID of launched trace session and STOP it:
#
#
# # Save active trace session info into file for further parsing it and obtain session_id back (for stop):
# f_trclst=open( os.path.join(context['temp_directory'],'tmp_trace_3413.lst'), 'w')
# subprocess.call([context['fbsvcmgr_path'], "localhost:service_mgr",
@ -144,7 +147,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# stdout=f_trclst, stderr=subprocess.STDOUT
# )
# flush_and_close( f_trclst )
#
#
# trcssn=0
# with open( f_trclst.name,'r') as f:
# for line in f:
@ -155,7 +158,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# trcssn=word
# i=i+1
# break
#
#
# # Result: `trcssn` is ID of active trace session. Now we have to terminate it:
# f_trclst=open(f_trclst.name,'a')
# f_trclst.seek(0,2)
@ -165,36 +168,62 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# stdout=f_trclst, stderr=subprocess.STDOUT
# )
# flush_and_close( f_trclst )
#
#
# # Terminate child process of launched trace session (though it should already be killed):
# p_trace.terminate()
# flush_and_close( f_trclog )
#
#
# with open( f_trclog.name,'r') as f:
# for line in f:
# print(line.upper())
#
#
# # do NOT remove this delay otherwise get access error 'Windows 32'
# # (The process cannot access the file because it is being used by another process):
# time.sleep(1)
#
#
# # Cleanup
# #############
# cleanup( [i.name for i in (f_trccfg, f_trclst, f_trclog)] )
#
#
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
expected_stdout_1 = """
ERROR CREATING TRACE SESSION FOR DATABASE
ERROR WHILE PARSING TRACE CONFIGURATION
ELEMENT "LOG_STATEMENT_FINISH": "FOO" IS NOT A VALID
"""
"""
@pytest.mark.version('>=2.5.1')
@pytest.mark.xfail
def test_1(db_1):
pytest.fail("Test not IMPLEMENTED")
def trace_session(act: Action, b: Barrier):
    """Run a Firebird trace session in a worker thread (CORE-3413 helper).

    Builds an FB 3.0-format trace configuration that matches only the test
    database, starts the session via the service manager, then echoes every
    trace line to stdout in UPPER case (the test's substitutions match
    upper-cased text).

    Args:
        act: Test action whose database is to be traced.
        b: Barrier shared with the main thread; passed once the trace
           session has been started, so the main thread does not run ISQL
           before tracing is active.
    """
    cfg30 = ['# Trace config, format for 3.0. Generated auto, do not edit!',
             f'database=%[\\\\/]{act.db.db_path.name}',
             '{',
             '  enabled = true',
             '  time_threshold = 0',
             # 'foo' is intentionally invalid: the test verifies the engine's
             # diagnostics for a broken trace configuration.
             '  log_statement_finish = foo',
             '}']
    with act.connect_server() as srv:
        srv.trace.start(config='\n'.join(cfg30))
        b.wait()  # signal the main thread that the trace session is live
        for line in srv:  # blocks until the session is stopped externally
            print(line.upper())
@pytest.mark.version('>=3.0')
def test_1(act_1: Action, capsys):
    """Check diagnostics of an invalid trace configuration (CORE-3413).

    Starts a trace session in a helper thread whose config contains the
    invalid value 'foo', runs a trivial ISQL command so the session logs the
    configuration-parsing error, stops all trace sessions, and compares the
    captured trace output against the expected error messages.
    """
    b = Barrier(2)
    trace_thread = Thread(target=trace_session, args=[act_1, b])
    trace_thread.start()
    b.wait()  # do not run ISQL until the trace session is active
    act_1.isql(switches=['-n'], input='select 1 as c from rdb$database;')
    with act_1.connect_server() as srv:
        # Stop every active trace session; this unblocks the reader loop in
        # trace_session so the helper thread can terminate.
        for session in list(srv.trace.sessions.keys()):
            srv.trace.stop(session_id=session)
    trace_thread.join(1.0)
    if trace_thread.is_alive():
        pytest.fail('Trace thread still alive')
    act_1.expected_stdout = expected_stdout_1
    # Trace lines were print()-ed by the helper thread; harvest them here.
    act_1.stdout = capsys.readouterr().out
    assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@ -2,25 +2,26 @@
#
# id: bugs.core_3416
# title: Inserting Käse into a CHARACTER SET ASCII column succeeds
# decription:
# decription:
# 02-mar-2021. Re-implemented in order to have ability to run this test on Linux.
# Ttest creates table and fills it with non-ascii characters in init_script, using charset = UTF8.
# Test creates table and fills it with non-ascii characters in init_script, using charset = UTF8.
# Then it generates a .sql script for running it in a separate ISQL process.
# This script makes connection to test DB using charset = WIN1252 and perform needed DML.
# Result will be redirected to .log which will be opened via codecs.open(...encoding='cp1252').
# Its content will be converted to UTF8 for showing in expected_stdout.
#
#
# Checked on:
# * Windows: 4.0.0.2377, 3.0.8.33420, 2.5.9.27152
# * Windows: 4.0.0.2377, 3.0.8.33420, 2.5.9.27152
# * Linux: 4.0.0.2377, 3.0.8.33415
#
#
# tracker_id: CORE-3416
# min_versions: ['2.5.0']
# versions: 2.5
# qmid:
# qmid:
import pytest
from firebird.qa import db_factory, isql_act, Action
from pathlib import Path
from firebird.qa import db_factory, python_act, Action, temp_file
# version: 2.5
# resources: None
@ -28,40 +29,40 @@ from firebird.qa import db_factory, isql_act, Action
substitutions_1 = [('After line .*', ''), ('[\t ]+', ' ')]
init_script_1 = """
create table tascii(s_ascii varchar(10) character set ascii);
create table tlatin(s_latin varchar(10) character set latin1);
commit;
"""
create table tascii(s_ascii varchar(10) character set ascii);
create table tlatin(s_latin varchar(10) character set latin1);
commit;
"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1
#---
#
#
# import os
# import codecs
# import subprocess
# import time
#
#
# db_conn.close()
#
#
# #--------------------------------------------
#
#
# def flush_and_close( file_handle ):
# # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os
#
#
# file_handle.flush()
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno())
# file_handle.close()
#
#
# #--------------------------------------------
#
#
# def cleanup( f_names_list ):
# global os
# for i in range(len( f_names_list )):
@ -72,12 +73,12 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# else:
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
# del_name = None
#
#
# if del_name and os.path.isfile( del_name ):
# os.remove( del_name )
#
#
# #--------------------------------------------
#
#
# sql_txt=''' set names WIN1252;
# connect '%(dsn)s' user '%(user_name)s' password '%(user_password)s';
# set list on;
@ -85,60 +86,79 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# set echo on;
# insert into tascii values ('Käse');
# select s_ascii from tascii;
#
#
# insert into tlatin values ('Käse');
# select s_latin from tlatin;
# select s_latin from tlatin;
# ''' % dict(globals(), **locals())
#
#
# f_run_sql = open( os.path.join(context['temp_directory'], 'tmp_3416_win1252.sql'), 'w' )
# f_run_sql.write( sql_txt.decode('utf8').encode('cp1252') )
# flush_and_close( f_run_sql )
# # result: file tmp_3416_win1252.sql is encoded in win1252
#
#
# f_run_log = open( os.path.splitext(f_run_sql.name)[0]+'.log', 'w')
# subprocess.call( [ context['isql_path'], '-q', '-i', f_run_sql.name ],
# stdout = f_run_log,
# stderr = subprocess.STDOUT
# )
# flush_and_close( f_run_log ) # result: output will be encoded in win1252
#
#
# with codecs.open(f_run_log.name, 'r', encoding='cp1252' ) as f:
# result_in_cp1252 = f.readlines()
#
#
# for i in result_in_cp1252:
# print( i.encode('utf8') )
#
#
# # cleanup:
# ###########
# ###########
# cleanup( (f_run_sql, f_run_log) )
#
#
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
expected_stdout_1 = """
insert into tascii values ('Käse');
Statement failed, SQLSTATE = 22018
arithmetic exception, numeric overflow, or string truncation
-Cannot transliterate character between character sets
Records affected: 0
select s_ascii from tascii;
Records affected: 0
insert into tlatin values ('Käse');
Records affected: 1
select s_latin from tlatin;
S_LATIN Käse
Records affected: 1
"""
expected_stderr_1 = """
Statement failed, SQLSTATE = 22018
arithmetic exception, numeric overflow, or string truncation
-Cannot transliterate character between character sets
After line 4 in file /tmp/pytest-of-pcisar/pytest-559/test_10/test_script.sql
"""
test_script_1 = temp_file('test_script.sql')
@pytest.mark.version('>=2.5')
@pytest.mark.xfail
def test_1(db_1):
pytest.fail("Test not IMPLEMENTED")
def test_1(act_1: Action, test_script_1: Path):
    """Verify charset transliteration on INSERT (CORE-3416).

    Writes a cp1252-encoded script that inserts 'Käse' into an ASCII column
    (expected to fail with SQLSTATE 22018 / transliteration error, leaving
    0 records) and into a LATIN1 column (expected to succeed), then runs it
    through ISQL with connection charset WIN1252 and checks both stdout and
    stderr against the expected blocks.
    """
    test_script_1.write_text("""
    set list on;
    set count on;
    set echo on;
    insert into tascii values ('Käse');
    select s_ascii from tascii;
    insert into tlatin values ('Käse');
    select s_latin from tlatin;
    """, encoding='cp1252')
    act_1.expected_stdout = expected_stdout_1
    act_1.expected_stderr = expected_stderr_1
    act_1.isql(switches=[], input_file=test_script_1, charset='WIN1252')
    assert act_1.clean_stderr == act_1.clean_expected_stderr
    assert act_1.clean_stdout == act_1.clean_expected_stdout
assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@ -2,24 +2,25 @@
#
# id: bugs.core_3489
# title: Blob transliteration may not happen inside the union
# decription:
# decription:
# 02-mar-2021. Re-implemented in order to have ability to run this test on Linux.
# Ttest creates table and fills it with non-ascii characters in init_script, using charset = UTF8.
# Test creates table and fills it with non-ascii characters in init_script, using charset = UTF8.
# Then it generates a .sql script for running it in a separate ISQL process.
# This script makes connection to test DB using charset = WIN1251 and perform needed DML.
# Result will be redirected to .log which will be opened via codecs.open(...encoding='cp1251').
# Its content will be converted to UTF8 for showing in expected_stdout.
#
#
# Confirmed bug on 2.5.0.26074: two lines with "raw" (not transliterated) data were displayed.
# Works OK on 2.5.1.26351: two readable lines will be issued.
#
#
# tracker_id: CORE-3489
# min_versions: ['2.5.1']
# versions: 2.5.1
# qmid:
# qmid:
import pytest
from firebird.qa import db_factory, isql_act, Action
from pathlib import Path
from firebird.qa import db_factory, python_act, Action, temp_file
# version: 2.5.1
# resources: None
@ -45,31 +46,31 @@ db_1 = db_factory(charset='WIN1251', sql_dialect=3, init=init_script_1)
# test_script_1
#---
#
#
# import os
# import codecs
# import subprocess
# import time
#
#
# db_conn.close()
#
#
# #--------------------------------------------
#
#
# def flush_and_close( file_handle ):
# # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os
#
#
# file_handle.flush()
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno())
# file_handle.close()
#
#
# #--------------------------------------------
#
#
# def cleanup( f_names_list ):
# global os
# for i in range(len( f_names_list )):
@ -80,62 +81,78 @@ db_1 = db_factory(charset='WIN1251', sql_dialect=3, init=init_script_1)
# else:
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
# del_name = None
#
#
# if del_name and os.path.isfile( del_name ):
# os.remove( del_name )
#
#
# #--------------------------------------------
#
#
#
#
# sql_txt=''' set names WIN1251;
# connect '%(dsn)s' user '%(user_name)s' password '%(user_password)s';
# set list on;
# set blob all;
# set count on;
# set list on;
#
#
# select msg_blob_id
# from sp_test
# union
# select msg_blob_id
# from sp_test;
# ''' % dict(globals(), **locals())
#
#
# f_run_sql = open( os.path.join(context['temp_directory'], 'tmp_3489_win1251.sql'), 'w' )
# f_run_sql.write( sql_txt.decode('utf8').encode('cp1251') )
# flush_and_close( f_run_sql )
# # result: file tmp_3489_win1251.sql is encoded in win1251
#
#
# f_run_log = open( os.path.splitext(f_run_sql.name)[0]+'.log', 'w')
# subprocess.call( [ context['isql_path'], '-q', '-i', f_run_sql.name ],
# stdout = f_run_log,
# stderr = subprocess.STDOUT
# )
# flush_and_close( f_run_log ) # result: output will be encoded in win1251
#
#
# with codecs.open(f_run_log.name, 'r', encoding='cp1251' ) as f:
# result_in_cp1251 = f.readlines()
#
#
# for i in result_in_cp1251:
# print( i.encode('utf8') )
#
#
# # cleanup:
# ###########
# ###########
# cleanup( (f_run_sql, f_run_log) )
#
#
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
expected_stdout_1 = """
Это проверка на вывод строки "Йцукёнг"
Это проверка на вывод строки "Йцукёнг"
Records affected: 2
"""
Это проверка на вывод строки "Йцукёнг"
Это проверка на вывод строки "Йцукёнг"
Records affected: 2
"""
test_script_1 = temp_file('test_script.sql')
@pytest.mark.version('>=2.5.1')
@pytest.mark.xfail
def test_1(db_1):
pytest.fail("Test not IMPLEMENTED")
def test_1(act_1: Action, test_script_1: Path):
    """Check blob transliteration inside UNION (CORE-3489).

    Writes a cp1251-encoded script that selects the same blob column twice
    through a UNION, runs it through ISQL with connection charset WIN1251,
    and verifies that both rows come back correctly transliterated (the
    original bug produced raw, non-transliterated bytes).
    """
    test_script_1.write_text("""
    set list on;
    set blob all;
    set count on;
    set list on;
    select msg_blob_id
    from sp_test
    union
    select msg_blob_id
    from sp_test;
    """, encoding='cp1251')
    act_1.expected_stdout = expected_stdout_1
    act_1.isql(switches=[], input_file=test_script_1, charset='WIN1251')
    assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@ -2,30 +2,30 @@
#
# id: bugs.core_3537
# title: There is no need to undo changes made in GTT created with ON COMMIT DELETE ROWS option when transaction is rolled back
# decription:
# 19.12.2016.
# After discuss with hvlad it was decided to use fetches & marks values that are issued in trace
# decription:
# 19.12.2016.
# After discuss with hvlad it was decided to use fetches & marks values that are issued in trace
# ROLLBACK_TRANSACTION statistics and evaluate ratio of these values with:
# 1) number of inserted rows(see 'NUM_ROWS_TO_BE_ADDED' constant);
# 2) number of data pages that table occupies (it's retrieved via 'gstat -t T_FIX_TAB').
#
#
# We use three tables with the same DDL: permanent ('t_fix_tab'), GTT PRESERVE and GTT DELETE rows.
# All these tables are subject to DML which does insert rows.
# Permanent table is used for retrieving statistics of data pages that are in use after this DML.
# Number of rows that we add into tables should not be very high, otherwise rollback will be done via TIP,
# i.e. without real undone actions ==> we will not see proper ratios.
# i.e. without real undone actions ==> we will not see proper ratios.
# After several runs it was decided to use value = 45000 (rows).
#
#
# All ratios should belong to some range with +/-5% of possible difference from one run to another.
# Concrete values of ratio were found after several runs on 2.5.7, 3.0.2 & 4.0.0
#
#
# Checked on 2.5.7.27030 (SS/SC), WI-V3.0.2.32644 (SS/SC/CS) and WI-T4.0.0.468 (SS/SC); 4.0.0.633 (CS/SS)
#
#
# Notes.
# 1. We can estimate volume of UNDO changes in trace statistics for ROLLBACK event.
# This statistics was added since 2.5.2 (see CORE-3598).
# 2. We have to use 'gstat -t <table>'instead of 'fbsvcmgr sts_table <...>'in 2.5.x - see CORE-5426.
#
#
# 19.08.2020. Fixed wrong expression for difference evaluation in percents. Checked on:
# 4.0.0.2164 SS: 8.674s.
# 4.0.0.2119 SS: 9.736s.
@ -33,15 +33,18 @@
# 3.0.7.33356 SS: 7.333s.
# 3.0.7.33356 CS: 9.700s.
# 2.5.9.27150 SC: 5.884s.
#
#
#
#
# tracker_id: CORE-3537
# min_versions: ['2.5.2']
# versions: 2.5.2
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
import time
from threading import Thread, Barrier
from firebird.qa import db_factory, python_act, Action
from firebird.driver import DbWriteMode
# version: 2.5.2
# resources: None
@ -49,8 +52,8 @@ from firebird.qa import db_factory, isql_act, Action
substitutions_1 = []
init_script_1 = """
set bail on;
set echo on;
set bail on;
set echo on;
create or alter procedure sp_fill_fix_tab as begin end;
create or alter procedure sp_fill_gtt_del_rows as begin end;
create or alter procedure sp_fill_gtt_sav_rows as begin end;
@ -70,12 +73,12 @@ set echo on;
s1 varchar(50)
-- unique using index t_gtt_del_rows_s1
) on commit DELETE rows;
recreate global temporary table t_gtt_sav_rows(
s1 varchar(50)
-- unique using index t_gtt_sav_rows_s1
) on commit PRESERVE rows;
commit;
set term ^;
@ -85,7 +88,7 @@ set echo on;
begin
k=a_rows;
select v.fld_len from v_field_len v where v.rel_name=upper('t_fix_tab') into w;
while(k>0) do
while(k>0) do
begin
insert into t_fix_tab(s1) values( rpad('', :w, uuid_to_char(gen_uuid()) ) );
if (mod(k-1, 5000) = 0) then
@ -100,7 +103,7 @@ set echo on;
begin
k=a_rows;
select v.fld_len from v_field_len v where v.rel_name=upper('t_gtt_del_rows') into w;
while(k>0) do
while(k>0) do
begin
insert into t_gtt_del_rows(s1) values( rpad('', :w, uuid_to_char(gen_uuid()) ) );
if (mod(k-1, 5000) = 0) then
@ -116,7 +119,7 @@ set echo on;
begin
k=a_rows;
select v.fld_len from v_field_len v where v.rel_name=upper('t_gtt_sav_rows') into w;
while(k>0) do
while(k>0) do
begin
insert into t_gtt_sav_rows(s1) values( rpad('', :w, uuid_to_char(gen_uuid()) ) );
if (mod(k-1, 5000) = 0) then
@ -128,44 +131,44 @@ set echo on;
^
set term ;^
commit;
"""
"""
db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)
# test_script_1
#---
#
#
# import os
# import subprocess
# import time
# from fdb import services
# from subprocess import Popen
#
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
#
#
# # Obtain engine version:
# engine = str(db_conn.engine_version) # convert to text because 'float' object has no attribute 'startswith'
# db_file = db_conn.database_name
# db_conn.close()
#
#
# #---------------------------------------------
#
#
# def flush_and_close(file_handle):
# # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os
#
#
# file_handle.flush()
# if file_handle.mode not in ('r', 'rb'):
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno())
# file_handle.close()
#
#
# #--------------------------------------------
#
#
# def cleanup( f_names_list ):
# global os
# for i in range(len( f_names_list )):
@ -173,25 +176,25 @@ db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)
# os.remove( f_names_list[i] )
# if os.path.isfile( f_names_list[i]):
# print('ERROR: can not remove file ' + f_names_list[i])
#
#
# #--------------------------------------------
#
#
# # Change FW to OFF in order to speed up initial data filling:
# ##################
#
#
# fn_nul = open(os.devnull, 'w')
#
#
# subprocess.call([ context['fbsvcmgr_path'], "localhost:service_mgr",
# "action_properties", "prp_write_mode", "prp_wm_async",
# "dbname", db_file ],
# stdout = fn_nul,
# stderr = subprocess.STDOUT
# )
#
#
# fn_nul.close()
#
#
#
#
#
#
# # ::: NB ::: Trace config file format in 3.0 differs from 2.5 one:
# # 1) header section must be enclosed in "[" and "]",
# # 2) parameter-value pairs must be separated with '=' sign:
@ -199,13 +202,13 @@ db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)
# # {
# # parameter = value
# # }
#
#
# if engine.startswith('2.5'):
# txt = '''# Generated auto, do not edit!
# <database %[\\\\\\\\/]security?.fdb>
# enabled false
# </database>
#
#
# <database %[\\\\\\\\/]bugs.core_3537.fdb>
# enabled true
# time_threshold 0
@ -226,66 +229,66 @@ db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)
# time_threshold = 0
# log_transactions = true
# print_perf = true
#
#
# #log_connections = true
# #log_context = true
# log_initfini = false
# }
# '''
#
#
# f_trc_cfg=open( os.path.join(context['temp_directory'],'tmp_trace_3537.cfg'), 'w')
# f_trc_cfg.write(txt)
# flush_and_close( f_trc_cfg )
#
#
# ############################
# NUM_ROWS_TO_BE_ADDED = 45000
# ############################
#
#
# con1 = fdb.connect(dsn=dsn)
# cur1=con1.cursor()
#
#
# # Make initial data filling into PERMANENT table for retrieving later number of data pages
# # (it should be the same for any kind of tables, including GTTs):
# cur1.callproc('sp_fill_fix_tab', (NUM_ROWS_TO_BE_ADDED,))
# con1.commit()
# con1.close()
#
#
# # ##############################################################
# # S T A R T T R A C E i n S E P A R A T E P R O C E S S
# # ##############################################################
#
#
# f_trc_log=open( os.path.join(context['temp_directory'],'tmp_trace_3537.log'), "w")
# f_trc_err=open( os.path.join(context['temp_directory'],'tmp_trace_3537.err'), "w")
#
#
# p_trace = Popen( [ context['fbsvcmgr_path'], 'localhost:service_mgr', 'action_trace_start' , 'trc_cfg', f_trc_cfg.name],stdout=f_trc_log,stderr=f_trc_err)
#
#
# time.sleep(1)
#
#
# con1 = fdb.connect(dsn=dsn)
# cur1=con1.cursor()
# cur1.callproc('sp_fill_gtt_sav_rows', (NUM_ROWS_TO_BE_ADDED,))
# con1.rollback()
# con1.close()
#
#
# con1 = fdb.connect(dsn=dsn)
# cur1=con1.cursor()
# cur1.callproc('sp_fill_gtt_del_rows', (NUM_ROWS_TO_BE_ADDED,))
# con1.rollback()
# con1.close()
#
#
#
#
# # ####################################################
# # G E T A C T I V E T R A C E S E S S I O N I D
# # ####################################################
# # Save active trace session info into file for further parsing it and obtain session_id back (for stop):
#
#
# f_trc_lst = open( os.path.join(context['temp_directory'],'tmp_trace_3537.lst'), 'w')
# subprocess.call([context['fbsvcmgr_path'], 'localhost:service_mgr', 'action_trace_list'], stdout=f_trc_lst)
# flush_and_close( f_trc_lst )
#
#
# # !!! DO NOT REMOVE THIS LINE !!!
# time.sleep(1)
#
#
# trcssn=0
# with open( f_trc_lst.name,'r') as f:
# for line in f:
@ -298,7 +301,7 @@ db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)
# break
# f.close()
# # Result: `trcssn` is ID of active trace session. Now we have to terminate it:
#
#
# # ####################################################
# # S E N D R E Q U E S T T R A C E T O S T O P
# # ####################################################
@ -308,29 +311,29 @@ db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)
# fn_nul.close()
# # DO NOT REMOVE THIS LINE:
# time.sleep(2)
#
#
#
#
# p_trace.terminate()
# flush_and_close( f_trc_log )
# flush_and_close( f_trc_err )
#
#
# ###################
# # Obtain statistics for table T_FIX_TAB in order to estimate numberof data pages
# ###################
#
#
# f_stat_log = open( os.path.join(context['temp_directory'],'tmp_stat_3537.log'), 'w')
# f_stat_err = open( os.path.join(context['temp_directory'],'tmp_stat_3537.err'), 'w')
#
#
# subprocess.call( [ context['gstat_path'], dsn, "-t", 't_fix_tab'.upper() ],
# stdout = f_stat_log,
# stderr = f_stat_err
# )
# flush_and_close( f_stat_log )
# flush_and_close( f_stat_err )
#
#
# # Following files should be EMPTY:
# #################
#
#
# f_list=[f_stat_err, f_trc_err]
# for i in range(len(f_list)):
# f_name=f_list[i].name
@ -338,22 +341,22 @@ db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)
# with open( f_name,'r') as f:
# for line in f:
# print("Unexpected STDERR, file "+f_name+": "+line)
#
#
#
#
# dp_cnt = -1
# with open( f_stat_log.name,'r') as f:
# for line in f:
# if 'data pages' in line.lower():
# # Data pages: 1098, data page slots: 1098, average fill: 74% ==> 1098
# dp_cnt = int(line.replace(',',' ').split()[2])
#
#
# gtt_sav_fetches=-1
# gtt_sav_marks = -1
# gtt_del_fetches=-1
# gtt_del_marks = -1
# gtt_del_trace = ''
# gtt_sav_trace = ''
#
#
# with open( f_trc_log.name,'r') as f:
# for line in f:
# if 'fetch' in line:
@ -372,13 +375,13 @@ db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)
# else:
# gtt_del_fetches = int( words[k-1] )
# gtt_del_trace = line.strip()
#
#
# if words[k].startswith('mark'):
# if gtt_sav_marks==-1:
# gtt_sav_marks = int( words[k-1] )
# else:
# gtt_del_marks = int( words[k-1] )
#
#
# # 2.5.7 3.0.2, 4.0.0
# # ---------------------
# '''
@ -386,7 +389,7 @@ db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)
# ratio_fetches_to_row_count_for_GTT_DELETE_ROWS = (1.00 * gtt_del_fetches / NUM_ROWS_TO_BE_ADDED, 0.0245, 0.00015 )
# ratio_marks_to_row_count_for_GTT_PRESERVE_ROWS = (1.00 * gtt_sav_marks / NUM_ROWS_TO_BE_ADDED, 2.0732, 2.05186 )
# ratio_marks_to_row_count_for_GTT_DELETE_ROWS = (1.00 * gtt_del_marks / NUM_ROWS_TO_BE_ADDED, 0.0245, 0.000089 )
#
#
# ratio_fetches_to_datapages_for_GTT_PRESERVE_ROWS = (1.00 * gtt_sav_fetches / dp_cnt, 373.85, 209.776 )
# ratio_fetches_to_datapages_for_GTT_DELETE_ROWS = (1.00 * gtt_del_fetches / dp_cnt, 1.0063, 0.00634 )
# ratio_marks_to_datapages_for_GTT_PRESERVE_ROWS = (1.00 * gtt_sav_marks / dp_cnt, 84.9672, 83.6358 )
@ -404,37 +407,38 @@ db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)
# ,'ratio_marks_to_datapages_for_GTT_PRESERVE_ROWS' : (1.00 * gtt_sav_marks / dp_cnt, 84.9672, 83.6358 )
# ,'ratio_marks_to_datapages_for_GTT_DELETE_ROWS' : (1.00 * gtt_del_marks / dp_cnt, 1.0036, 0.00362 )
# }
#
#
#
#
# i = 1 if engine.startswith('2.5') else 2
#
#
# MAX_DIFF_PERCENT=5.00
# # ^
# #############################
# ### T H R E S H O L D ###
# #############################
#
#
# fail = False
# for k, v in sorted(check_data.iteritems()):
# msg = ( 'Check ' + k + ': ' +
# msg = ( 'Check ' + k + ': ' +
# ( 'OK' if v[i] * ((100 - MAX_DIFF_PERCENT)/100) <= v[0] <= v[i] * (100+MAX_DIFF_PERCENT)/100
# else 'value '+str(v[0])+' not in range '+str( v[i] ) + ' +/-'+str(MAX_DIFF_PERCENT)+'%'
# )
# )
# print(msg)
# failed_flag = ('not in range' in msg)
#
#
# if failed_flag:
# print('Trace for GTT PRESERVE rows: ' + gtt_sav_trace)
# print('Trace for GTT DELETE rows: ' + gtt_del_trace)
#
#
# # CLEANUP
# #########
# cleanup( [i.name for i in (f_trc_cfg, f_trc_log, f_trc_err, f_stat_log, f_stat_err, f_trc_lst) ] )
#
#
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
expected_stdout_1 = """
Check ratio_fetches_to_datapages_for_GTT_DELETE_ROWS: OK
@ -445,11 +449,126 @@ expected_stdout_1 = """
Check ratio_marks_to_datapages_for_GTT_PRESERVE_ROWS: OK
Check ratio_marks_to_row_count_for_GTT_DELETE_ROWS: OK
Check ratio_marks_to_row_count_for_GTT_PRESERVE_ROWS: OK
"""
"""
def trace_session(act: Action, b: Barrier):
    """Run a Firebird trace session in a worker thread (CORE-3537 helper).

    Builds an FB 3.0-format trace configuration that logs transaction events
    with performance statistics (fetches/marks are later parsed to compare
    rollback undo cost for GTT PRESERVE vs DELETE rows), starts the session,
    then prints every trace line to stdout for the main thread to capture.

    Args:
        act: Test action whose database is to be traced.
        b: Barrier shared with the main thread; passed once the trace
           session has been started, so DML is not run before tracing is
           active.
    """
    cfg30 = ['# Trace config, format for 3.0. Generated auto, do not edit!',
             f'database=%[\\\\/]{act.db.db_path.name}',
             '{',
             '  enabled = true',
             '  log_transactions = true',
             '  print_perf = true',
             #'  log_connections = true',
             #'  log_procedure_start = true',
             #'  log_procedure_finish = true',
             '  log_initfini = false',
             '}']
    with act.connect_server() as srv:
        srv.trace.start(config='\n'.join(cfg30))
        b.wait()  # signal the main thread that the trace session is live
        for line in srv:  # blocks until the session is stopped externally
            print(line)
@pytest.mark.version('>=2.5.2')
@pytest.mark.xfail
def test_1(db_1):
pytest.fail("Test not IMPLEMENTED")
def test_1(act_1: Action, capsys):
    """Verify I/O cost ratios of GTT ON COMMIT PRESERVE vs DELETE ROWS.

    Fills a permanent table to measure its data-page count, fills both kinds
    of GTT under an active trace session, then checks that the fetch/mark
    counters reported by the trace, normalized by row count and by data
    pages, stay within MAX_DIFF_PERCENT of the reference values.
    """
    NUM_ROWS_TO_BE_ADDED = 45000
    # Change FW to OFF in order to speed up initial data filling
    with act_1.connect_server() as srv:
        srv.database.set_write_mode(database=str(act_1.db.db_path), mode=DbWriteMode.ASYNC)
    # Make initial data filling into PERMANENT table for retrieving later number of data pages
    # (it should be the same for any kind of tables, including GTTs):
    with act_1.db.connect() as con:
        c = con.cursor()
        c.call_procedure('sp_fill_fix_tab', [NUM_ROWS_TO_BE_ADDED])
        con.commit()
    # Start the trace session in a separate thread; the barrier guarantees
    # the session is up before the traced workload begins.
    b = Barrier(2)
    trace_thread = Thread(target=trace_session, args=[act_1, b])
    trace_thread.start()
    b.wait()
    #
    with act_1.db.connect() as con1:
        c = con1.cursor()
        c.call_procedure('sp_fill_gtt_sav_rows', [NUM_ROWS_TO_BE_ADDED])
        con1.rollback()
    with act_1.db.connect() as con2:
        c = con2.cursor()
        c.call_procedure('sp_fill_gtt_del_rows', [NUM_ROWS_TO_BE_ADDED])
        con2.rollback()
    # Somehow sleep is necessary otherwise "sp_fill_gtt_del_rows" will not show up in trace log
    time.sleep(3)
    with act_1.connect_server() as srv:
        for session in list(srv.trace.sessions.keys()):
            srv.trace.stop(session_id=session)
    trace_thread.join(3.0)
    if trace_thread.is_alive():
        pytest.fail('Trace thread still alive')
    trace_output = capsys.readouterr().out
    # Obtain statistics for table T_FIX_TAB in order to estimate number of data pages
    dp_cnt = 0
    act_1.gstat(switches=['-a', '-t', 'T_FIX_TAB', '-u', act_1.db.user, '-p', act_1.db.password])
    for line in act_1.stdout.splitlines():
        if 'data pages' in line.lower():
            # Data pages: 1098, data page slots: 1098, average fill: 74% ==> 1098
            dp_cnt = int(line.replace(',', ' ').split()[2])
    #
    gtt_sav_fetches = -1
    gtt_sav_marks = -1
    gtt_del_fetches = -1
    gtt_del_marks = -1
    gtt_del_trace = ''
    gtt_sav_trace = ''
    # The first statistics line in the trace belongs to the PRESERVE ROWS run,
    # the second to the DELETE ROWS run (they were executed in that order).
    for line in trace_output.splitlines():
        if 'fetch' in line:
            # 2.5.7:
            # ['370', 'ms,', '1100', 'read(s),', '1358', 'write(s),', '410489', 'fetch(es),', '93294', 'mark(s)']
            # ['2', 'ms,', '1', 'read(s),', '257', 'write(s),', '1105', 'fetch(es),', '1102', 'mark(s)']
            # 3.0.2:
            # 618 ms, 1 read(s), 2210 write(s), 231593 fetch(es), 92334 mark(s)
            # 14 ms, 1109 write(s), 7 fetch(es), 4 mark(s)
            words = line.split()
            for k in range(len(words)):
                if words[k].startswith('fetch'):
                    if gtt_sav_fetches == -1:
                        gtt_sav_fetches = int(words[k-1])
                        gtt_sav_trace = line.strip()
                    else:
                        gtt_del_fetches = int(words[k-1])
                        gtt_del_trace = line.strip()
                if words[k].startswith('mark'):
                    if gtt_sav_marks == -1:
                        gtt_sav_marks = int(words[k-1])
                    else:
                        gtt_del_marks = int(words[k-1])
    #
    # Each tuple: (measured value, FB 2.5 reference, FB 3+ reference)
    check_data = {
        'ratio_fetches_to_row_count_for_GTT_PRESERVE_ROWS' : (1.00 * gtt_sav_fetches / NUM_ROWS_TO_BE_ADDED, 9.1219, 5.1465),
        'ratio_fetches_to_row_count_for_GTT_DELETE_ROWS' : (1.00 * gtt_del_fetches / NUM_ROWS_TO_BE_ADDED, 0.0245, 0.00015),
        'ratio_marks_to_row_count_for_GTT_PRESERVE_ROWS' : (1.00 * gtt_sav_marks / NUM_ROWS_TO_BE_ADDED, 2.0732, 2.05186),
        'ratio_marks_to_row_count_for_GTT_DELETE_ROWS' : (1.00 * gtt_del_marks / NUM_ROWS_TO_BE_ADDED, 0.0245, 0.000089),
        'ratio_fetches_to_datapages_for_GTT_PRESERVE_ROWS' : (1.00 * gtt_sav_fetches / dp_cnt, 373.85, 209.776),
        'ratio_fetches_to_datapages_for_GTT_DELETE_ROWS' : (1.00 * gtt_del_fetches / dp_cnt, 1.0063, 0.00634),
        'ratio_marks_to_datapages_for_GTT_PRESERVE_ROWS' : (1.00 * gtt_sav_marks / dp_cnt, 84.9672, 83.6358),
        'ratio_marks_to_datapages_for_GTT_DELETE_ROWS' : (1.00 * gtt_del_marks / dp_cnt, 1.0036, 0.00362),
    }
    i = 2  # index of the FB 3+ reference value in each check_data tuple
    MAX_DIFF_PERCENT = 5.0
    # THRESHOLD
    failed_flag = False
    for k, v in sorted(check_data.items()):
        in_range = v[i] * ((100 - MAX_DIFF_PERCENT)/100) <= v[0] <= v[i] * (100+MAX_DIFF_PERCENT) / 100
        msg = ('Check ' + k + ': ' +
               ('OK' if in_range
                else 'value '+str(v[0])+' not in range '+str( v[i] ) + ' +/-' + str(MAX_DIFF_PERCENT) + '%')
               )
        print(msg)
        # BUGFIX: accumulate instead of overwrite, so that ANY failing check
        # (not only the last one) triggers the diagnostic trace output below.
        failed_flag = failed_flag or not in_range
    if failed_flag:
        print('Trace for GTT PRESERVE rows: ' + gtt_sav_trace)
        print('Trace for GTT DELETE rows: ' + gtt_del_trace)
    #
    act_1.expected_stdout = expected_stdout_1
    act_1.stdout = capsys.readouterr().out
    assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@ -9,7 +9,8 @@
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
from firebird.qa import db_factory, python_act, Action
from firebird.driver import SrvStatFlag
# version: 2.5.5
# resources: None
@ -27,15 +28,23 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# runProgram('gstat',['$(DATABASE_LOCATION)bugs.core_3548.fdb','-h','-user',user_name,'-password',user_password])
# runProgram('gfix',['$(DATABASE_LOCATION)bugs.core_3548.fdb','-online','-user',user_name,'-password',user_password])
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
expected_stdout_1 = """
Attributes force write, full shutdown
"""
"""
@pytest.mark.version('>=2.5.5')
@pytest.mark.xfail
def test_1(db_1):
pytest.fail("Test not IMPLEMENTED")
def test_1(act_1: Action):
act_1.gfix(switches=['-user', act_1.db.user, '-password', act_1.db.password,
'-shut', 'full', '-force', '0', str(act_1.db.db_path)])
with act_1.connect_server() as srv:
srv.database.get_statistics(database=str(act_1.db.db_path), flags=SrvStatFlag.HDR_PAGES)
stats = srv.readlines()
act_1.gfix(switches=['-user', act_1.db.user, '-password', act_1.db.password,
'-online', str(act_1.db.db_path)])
act_1.stdout = '\n'.join(stats)
act_1.expected_stdout = expected_stdout_1
assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@ -2,18 +2,18 @@
#
# id: bugs.core_3598
# title: TRACE: add statistics of actions that were after transaction finished
# decription:
# decription:
# Test verifies only FB 3.0 and above.
# Three tables are created: permanent, GTT with on commit PRESERVE rows and on commit DELETE rows.
#
#
# Trace config is created with *prohibition* of any activity related to security<N>.fdb
# but allow to log transactions-related events (commits and rollbacks) for working database.
# Trace is started before furthe actions.
#
#
# Then we launch ISQL and apply two DML for each of these tables:
# 1) insert row + commit;
# 2) insert row + rollback.
#
#
# Finally (after ISQL will finish), we stop trace and parse its log.
# For *each* table TWO lines with performance statristics must exist: both for COMMIT and ROLLBACK events.
# Checked on Windows and Linux, builds:
@ -21,14 +21,16 @@
# 4.0.0.2377 CS: 8.746s.
# 3.0.8.33420 SS: 6.784s.
# 3.0.8.33420 CS: 8.262s.
#
#
# tracker_id: CORE-3598
# min_versions: ['3.0']
# versions: 3.0
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
import time
from threading import Thread, Barrier
from firebird.qa import db_factory, python_act, Action
# version: 3.0
# resources: None
@ -39,7 +41,7 @@ init_script_1 = """
recreate table tfix(id int);
recreate global temporary table gtt_ssn(id int) on commit preserve rows;
recreate global temporary table gtt_tra(id int) on commit delete rows;
"""
"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
@ -49,32 +51,32 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# import subprocess
# from subprocess import Popen
# import time
#
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
#
#
# # Obtain engine version:
# engine = str(db_conn.engine_version)
# db_file = db_conn.database_name
# db_conn.close()
#
#
# #---------------------------------------------
#
#
# def flush_and_close(file_handle):
# # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os
#
#
# file_handle.flush()
# if file_handle.mode not in ('r', 'rb'):
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno())
# file_handle.close()
#
#
# #--------------------------------------------
#
#
# def cleanup( f_names_list ):
# global os
# for i in range(len( f_names_list )):
@ -85,54 +87,54 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# else:
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
# del_name = None
#
#
# if del_name and os.path.isfile( del_name ):
# os.remove( del_name )
#
#
# #--------------------------------------------
#
#
# # NOTES ABOUT TRACE CONFIG FOR 3.0:
# # 1) Header contains `database` clause in different format vs FB 2.5: its data must be enclosed with '{' '}'
# # 2) Name and value must be separated by EQUALITY sign ('=') in FB-3 trace.conf, otherwise we get runtime error:
# # element "<. . .>" have no attribute value set
#
#
# txt30 = '''# Trace config, format for 3.0. Generated auto, do not edit!
# database='%[\\\\\\\\/](security[[:digit:]].fdb)|(security.db)
# {
# enabled = false
# }
#
#
# database=%[\\\\\\\\/]bugs.core_3598.fdb
# {
# enabled = true
# time_threshold = 0
# time_threshold = 0
# log_initfini = false
# log_transactions = true
# # log_statement_finish = true
# print_perf = true
# }
# '''
#
#
# f_trccfg=open( os.path.join(context['temp_directory'],'tmp_trace_3598.cfg'), 'w')
# if engine.startswith('2.5'):
# f_trccfg.write(txt25)
# else:
# f_trccfg.write(txt30)
# flush_and_close( f_trccfg )
#
#
# #####################################################
# # Starting trace session in new child process (async.):
#
#
# f_trclog = open( os.path.join(context['temp_directory'],'tmp_trace_3598.log'), 'w')
# # Execute a child program in a new process, redirecting STDERR to the same target as of STDOUT:
# p_trace=Popen([context['fbsvcmgr_path'], "localhost:service_mgr", "action_trace_start", "trc_cfg", f_trccfg.name], stdout=f_trclog, stderr=subprocess.STDOUT)
#
#
# # Wait! Trace session is initialized not instantly!
# time.sleep(1)
#
#
# #####################################################
# # Running ISQL with test commands:
#
#
# sqltxt='''
# set autoddl off;
# set echo on;
@ -152,31 +154,31 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# insert into gtt_tra(id) values(2);
# rollback;
# ''' % dict(globals(), **locals())
#
#
# f_run_sql = open( os.path.join(context['temp_directory'],'tmp_run_5685.sql'), 'w')
# f_run_sql.write( sqltxt )
# flush_and_close( f_run_sql )
#
#
# f_run_log = open( os.path.join(context['temp_directory'],'tmp_run_5685.log'), 'w')
#
#
# subprocess.call( [ context['isql_path'],'-q', '-i', f_run_sql.name ],
# stdout = f_run_log,
# stderr = subprocess.STDOUT
# )
#
#
# flush_and_close( f_run_log )
#
#
# # do NOT remove this otherwise trace log can contain only message about its start before being closed!
# time.sleep(2)
#
#
# #####################################################
# # Getting ID of launched trace session and STOP it:
#
#
# # Save active trace session info into file for further parsing it and obtain session_id back (for stop):
# f_trclst=open( os.path.join(context['temp_directory'],'tmp_trace_3598.lst'), 'w')
# subprocess.call([context['fbsvcmgr_path'], "localhost:service_mgr", "action_trace_list"],stdout=f_trclst, stderr=subprocess.STDOUT)
# flush_and_close( f_trclst )
#
#
# trcssn=0
# with open( f_trclst.name,'r') as f:
# for line in f:
@ -187,35 +189,35 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# trcssn=word
# i=i+1
# break
#
#
# # Result: `trcssn` is ID of active trace session. Now we have to terminate it:
# f_trclst=open(f_trclst.name,'a')
# f_trclst.seek(0,2)
# subprocess.call([context['fbsvcmgr_path'], "localhost:service_mgr", "action_trace_stop","trc_id",trcssn],stdout=f_trclst, stderr=subprocess.STDOUT)
# flush_and_close( f_trclst )
#
#
# # 23.02.2021. DELAY FOR AT LEAST 1 SECOND REQUIRED HERE!
# # Otherwise trace log can remain empty.
# time.sleep(1)
#
#
# # Terminate child process of launched trace session (though it should already be killed):
# p_trace.terminate()
# flush_and_close( f_trclog )
#
#
# ###################################################################
#
#
# # Output log of trace session, with filtering only interested info:
#
# # Pwerformance header text (all excessive spaces will be removed before comparison - see below):
#
# # Performance header text (all excessive spaces will be removed before comparison - see below):
# perf_header='Table Natural Index Update Insert Delete Backout Purge Expunge'
#
#
# checked_events= {
# ') COMMIT_TRANSACTION' : 'commit'
# ,') ROLLBACK_TRANSACTION' : 'rollback'
# ,') EXECUTE_STATEMENT' : 'execute_statement'
# ,') START_TRANSACTION' : 'start_transaction'
# }
#
#
# i,k = 0,0
# watched_event = ''
# with open( f_trclog.name,'r') as f:
@ -223,7 +225,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# k += 1
# e = ''.join( [v.upper() for x,v in checked_events.items() if x in line] )
# watched_event = e if e else watched_event
#
#
# if ' ms,' in line and ('fetch' in line or 'mark' in line): # One of these *always* must be in trace statistics.
# print('Statement statistics detected for %s' % watched_event)
# i = i +1
@ -231,17 +233,38 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# print('Found performance block header')
# if line.startswith('TFIX') or line.startswith('GTT_SSN') or line.startswith('GTT_TRA'):
# print('Found table statistics for %s' % line.split()[0] )
#
#
# # Cleanup:
# ##########
# time.sleep(1)
# cleanup( (f_trccfg, f_trclst,f_trclog,f_run_log,f_run_sql) )
#
#
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
test_script_1 = """
set autoddl off;
set echo on;
set count on;
set bail on;
insert into tfix(id) values(1);
commit;
insert into tfix(id) values(2);
rollback;
insert into gtt_ssn(id) values(1);
commit;
insert into gtt_ssn(id) values(2);
rollback;
insert into gtt_tra(id) values(1);
commit;
insert into gtt_tra(id) values(2);
rollback;
"""
expected_stdout_1 = """
Statement statistics detected for COMMIT
Statement statistics detected for COMMIT
Statement statistics detected for ROLLBACK
Found performance block header
@ -252,11 +275,67 @@ expected_stdout_1 = """
Found table statistics for GTT_SSN
Statement statistics detected for COMMIT
Statement statistics detected for ROLLBACK
"""
"""
def trace_session(act: Action, b: Barrier):
    """Thread target that runs a trace session for the test database.

    The session is configured to log transaction events with performance
    statistics; *b* is released right after the session starts so the main
    thread can begin the traced workload. Trace lines go to stdout.
    """
    # FB 3.0 config format; path separator in the database clause may be \ or /.
    config = f"""# Trace config, format for 3.0. Generated auto, do not edit!
database=%[\\\\/]{act.db.db_path.name}
{{
 enabled = true
 log_transactions = true
 print_perf = true
 log_initfini = false
}}"""
    with act.connect_server() as srv:
        srv.trace.start(config=config)
        b.wait()  # main thread may proceed: the session is up
        for line in srv:  # ends when the session is stopped
            print(line)
@pytest.mark.version('>=3.0')
@pytest.mark.xfail
def test_1(db_1):
pytest.fail("Test not IMPLEMENTED")
def test_1(act_1: Action, capsys):
    """Check that the trace reports per-transaction statistics for COMMIT and
    ROLLBACK on a permanent table and on both kinds of GTT (CORE-3598).
    """
    b = Barrier(2)
    trace_thread = Thread(target=trace_session, args=[act_1, b])
    trace_thread.start()
    b.wait()  # proceed only after the trace session is running
    #
    act_1.isql(switches=[], input=test_script_1)
    # do NOT remove this otherwise trace log can contain only message about its start before being closed!
    time.sleep(3)
    with act_1.connect_server() as srv:
        for session in list(srv.trace.sessions.keys()):
            srv.trace.stop(session_id=session)
    trace_thread.join(3.0)
    if trace_thread.is_alive():
        pytest.fail('Trace thread still alive')
    trace_output = capsys.readouterr().out
    # Output log of trace session, with filtering only interested info:
    # Performance header text (all excessive spaces will be removed before comparison - see below):
    perf_header = 'Table Natural Index Update Insert Delete Backout Purge Expunge'
    checked_events = {') COMMIT_TRANSACTION': 'commit',
                      ') ROLLBACK_TRANSACTION': 'rollback',
                      ') EXECUTE_STATEMENT': 'execute_statement',
                      ') START_TRANSACTION': 'start_transaction'
                      }
    # Most recently seen transaction event; the statistics lines that follow
    # in the trace output belong to it.
    # NOTE: the former `i, k` counters were incremented but never read - removed.
    watched_event = ''
    for line in trace_output.splitlines():
        e = ''.join([v.upper() for x, v in checked_events.items() if x in line])
        watched_event = e if e else watched_event
        if ' ms,' in line and ('fetch' in line or 'mark' in line): # One of these *always* must be in trace statistics.
            print(f'Statement statistics detected for {watched_event}')
        if ' '.join(line.split()).upper() == ' '.join(perf_header.split()).upper():
            print('Found performance block header')
        if line.startswith('TFIX') or line.startswith('GTT_SSN') or line.startswith('GTT_TRA'):
            print(f'Found table statistics for {line.split()[0]}')
    act_1.expected_stdout = expected_stdout_1
    act_1.stdout = capsys.readouterr().out
    assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@ -2,8 +2,8 @@
#
# id: bugs.core_3614
# title: Plan returned for query with recursive CTE return wrong count of parenthesis
# decription:
#
# decription:
#
# tracker_id: CORE-3614
# min_versions: ['3.0.0']
# versions: 3.0
@ -34,37 +34,37 @@ init_script_1 = """
insert into test_tree values ('5', '4');
insert into test_tree values ('6', '2');
commit;
"""
"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1
#---
#
#
# import os
# import subprocess
# import time
#
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
#
#
# #---------------------------------------------
#
#
# def flush_and_close(file_handle):
# # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os
#
#
# file_handle.flush()
# if file_handle.mode not in ('r', 'rb'):
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno())
# file_handle.close()
#
#
# #--------------------------------------------
#
#
# def cleanup( f_names_list ):
# global os
# for i in range(len( f_names_list )):
@ -72,9 +72,9 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# os.remove( f_names_list[i] )
# if os.path.isfile( f_names_list[i]):
# print('ERROR: can not remove file ' + f_names_list[i])
#
#
# #--------------------------------------------
#
#
# sql='''
# set planonly;
# with recursive
@ -83,9 +83,9 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# select tt.id as a, cast(tt.id as varchar(100)) as asum
# from test_tree tt
# where tt.id_header is null
#
#
# union all
#
#
# select tt.id as a, rt.asum || '_' || tt.id
# from test_tree tt join r_tree rt on rt.a = tt.id_header
# )
@ -96,32 +96,51 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# f_isql_cmd=open( os.path.join(context['temp_directory'],'tmp_isql_3614.sql'), 'w')
# f_isql_cmd.write(sql)
# flush_and_close( f_isql_cmd )
#
#
# f_isql_log=open( os.path.join(context['temp_directory'],'tmp_isql_3614.log'), 'w')
#
#
# subprocess.call( [ context['isql_path'], dsn, '-i', f_isql_cmd.name], stdout=f_isql_log, stderr=subprocess.STDOUT)
#
#
# flush_and_close( f_isql_log )
#
#
# # Let buffer be flushed on disk before we open log and parse it:
# time.sleep(1)
#
#
# # For every line which contains word 'PLAN' we count number of '(' and ')' occurences: they must be equal.
# # We display difference only when it is not so, thus 'expected_stdout' section must be EMPTY.
# with open( f_isql_log.name,'r') as f:
# for line in f:
# if 'PLAN' in line and line.count( '(' ) - line.count( ')' ) != 0:
# print( 'Difference in opening vs close parenthesis: ' + str( line.count( '(' ) - line.count( ')' ) ) )
#
#
# cleanup( [i.name for i in (f_isql_cmd, f_isql_log)] )
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
test_script_1 = """
set planonly;
with recursive
r_tree as
(
select tt.id as a, cast(tt.id as varchar(100)) as asum
from test_tree tt
where tt.id_header is null
union all
select tt.id as a, rt.asum || '_' || tt.id
from test_tree tt join r_tree rt on rt.a = tt.id_header
)
select * from r_tree rt2 join test_tree tt2 on tt2.id=rt2.a ;
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
@pytest.mark.version('>=3.0')
@pytest.mark.xfail
def test_1(db_1):
pytest.fail("Test not IMPLEMENTED")
def test_1(act_1: Action):
    """Every PLAN line of the isql output must have balanced parentheses;
    fail on the first mismatch (CORE-3614)."""
    act_1.execute()
    for ln in act_1.stdout.splitlines():
        if 'PLAN' not in ln:
            continue
        delta = ln.count('(') - ln.count(')')
        if delta != 0:
            pytest.fail(f"Difference in opening vs close parenthesis: {delta}")

View File

@ -2,24 +2,24 @@
#
# id: bugs.core_3625
# title: MON$IO_STATS doesn't report page writes performed asynchronously (at the AST level)
# decription:
# decription:
# Thanks to dimitr for suggestions about how this test can be implemented.
# We have to read some part of data from table by "att_watcher", make small change in this table
# by "att_worker" and finaly do commit + once again read this table in "att_watcher".
# Counter mon$page_writes that will be obtained twise by "att_watcher" (before its two changes)
# must differ.
#
#
# ::: NOTE-1 :::
# We have to analyze counter mon$page_writes for attachment that is called "att_watcher" here,
# despite that it does NOT change any in queried table!
#
#
# ::: NOTE-2 :::
# Superserver should *not* be processed by this test because page cache is shared between all
# Superserver should *not* be processed by this test because page cache is shared between all
# attachments thus counter mon$page_writes is NOT changed in this scenario.
# For this reason in SS we can only "simulate" proper outcome.
# We define server mode (SS/SC/CS) by queries to mon$ tables ana analyzing results, result will
# be stored in variable 'fba'.
#
#
# Checked on:
# 4.0.0.1740 SC: 1.228s.
# 4.0.0.1714 CS: 8.047s.
@ -27,78 +27,92 @@
# 3.0.6.33236 CS: 1.372s.
# 2.5.9.27149 SC: 0.218s.
# 2.5.9.27143 CS: 0.645s.
#
#
#
#
# tracker_id: CORE-3625
# min_versions: ['2.5.2']
# versions: 2.5.2
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
from firebird.qa import db_factory, python_act, Action
from firebird.driver import DbInfoCode
# version: 2.5.2
# resources: None
substitutions_1 = [('[ ]+', ' ')]
init_script_1 = """"""
init_script_1 = """
recreate view v_check as
select i.mon$page_writes as iostat_pg_writes
from mon$attachments a
left join mon$io_stats i on a.mon$stat_id = i.mon$stat_id
where
a.mon$attachment_id <> current_connection
and a.mon$remote_protocol is not null
and i.mon$stat_group = 1 -- <<< ATTACHMENTS level
;
recreate table test(x int) ;
insert into test(x) values(1) ;
"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1
#---
#
#
# import os
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
# db_conn.close()
#
#
# v_check_ddl='''
# recreate view v_check as
# select i.mon$page_writes as iostat_pg_writes
# from mon$attachments a
# left join mon$io_stats i on a.mon$stat_id = i.mon$stat_id
# where
# where
# a.mon$attachment_id <> current_connection
# and a.mon$remote_protocol is not null
# and i.mon$stat_group = 1 -- <<< ATTACHMENTS level
# ;
# '''
#
#
# # Connection-"worker":
# con1=fdb.connect( dsn = dsn )
# #print(con1.firebird_version)
#
#
# con1.execute_immediate( v_check_ddl )
# con1.commit()
# con1.execute_immediate('recreate table test(x int)')
# con1.commit()
# con1.execute_immediate('insert into test(x) values(1)')
# con1.commit()
#
#
# #-------------------------------------------------------
#
#
# # Connection-"watcher":
# con2=fdb.connect( dsn = dsn )
#
#
# ###############################################################
# ### G E T S E R V E R M O D E: S S / S C / C S ? ###
# ###############################################################
# cur1 = con1.cursor()
#
#
# sql_mon_query='''
# select count(distinct a.mon$server_pid), min(a.mon$remote_protocol), max(iif(a.mon$remote_protocol is null,1,0))
# from mon$attachments a
# where a.mon$attachment_id in (%s, %s) or upper(a.mon$user) = upper('%s')
# ''' % (con1.attachment_id, con2.attachment_id, 'cache writer')
#
#
# cur1.execute( sql_mon_query )
# for r in cur1.fetchall():
# server_cnt=r[0]
# server_pro=r[1]
# cache_wrtr=r[2]
#
#
# if server_pro == None:
# fba='Embedded'
# elif cache_wrtr == 1:
@ -106,23 +120,23 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# elif server_cnt == 2:
# fba='CS'
# else:
#
#
# f1=con1.db_info(fdb.isc_info_fetches)
#
#
# cur2=con2.cursor()
# cur2.execute('select 1 from rdb$database')
# for r in cur2.fetchall():
# pass
#
#
# f2=con1.db_info(fdb.isc_info_fetches)
#
#
# fba = 'SC' if f1 ==f2 else 'SS'
#
#
# #print('Server mode: ', fba)
# cur1.close()
#
#
# #########################################################
#
#
# if fba == 'SS':
# ### !!!!!!!!!!!!! ###
# ################## A C H T U N G ########################
@ -133,47 +147,89 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# #############################################################
# page_writes_at_point_2, page_writes_at_point_1 = 0,1
# else:
#
#
# # Do following in connection-WATCHER:
# # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# cur2=con2.cursor()
# con1.execute_immediate('update test set x = 2 rows 1')
#
#
# cur2.execute('select * from v_check') # get FIRST value of mon$page_writes
# for r in cur2:
# page_writes_at_point_1 = r[0]
#
#
# # Again do in connection-worker: add small change to the data,
# # otherwise watcher will not get any difference in mon$page_writes:
# con1.execute_immediate('update test set x = 3 rows 1')
#
#
# cur2.execute('select * from test')
# for r in cur2:
# pass # print('query data:', r[0])
#
#
# con2.commit()
# cur2.execute('select * from v_check') # get SECOND value of mon$page_writes
# for r in cur2:
# page_writes_at_point_2 = r[0]
# cur2.close()
#
#
#
#
# con2.close()
# con1.close()
#
#
# print('PAGE_WRITES DIFFERENCE SIGN: ', abs(page_writes_at_point_2 - page_writes_at_point_1) )
#
#
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
PAGE_WRITES DIFFERENCE SIGN: 1
"""
act_1 = python_act('db_1', substitutions=substitutions_1)
@pytest.mark.version('>=2.5.2')
@pytest.mark.xfail
def test_1(db_1):
pytest.fail("Test not IMPLEMENTED")
def test_1(act_1: Action):
    """MON$IO_STATS must report page writes performed asynchronously at the
    AST level (CORE-3625).

    The check is meaningful only for Classic/SuperClassic: in SuperServer the
    page cache is shared between attachments, so mon$page_writes would not
    change and the proper outcome is only simulated.
    """
    with act_1.db.connect() as worker_con, act_1.db.connect() as watcher_con:
        worker = worker_con.cursor()
        watcher = watcher_con.cursor()
        # Determine the server mode (Embedded/SS/SC/CS) from mon$attachments.
        sql_mon_query = f'''
        select count(distinct a.mon$server_pid), min(a.mon$remote_protocol), max(iif(a.mon$remote_protocol is null,1,0))
        from mon$attachments a
        where a.mon$attachment_id in ({worker_con.info.id}, {watcher_con.info.id}) or upper(a.mon$user) = upper('cache writer')
        '''
        worker.execute(sql_mon_query)
        server_cnt, server_pro, cache_wrtr = worker.fetchone()
        if server_pro is None:
            fba = 'Embedded'
        elif cache_wrtr == 1:
            fba = 'SS'
        elif server_cnt == 2:
            fba = 'CS'
        else:
            # Distinguish SC from SS: in SuperClassic, activity on the watcher
            # connection does not change the worker connection's fetch counter.
            f1 = worker_con.info.get_info(DbInfoCode.FETCHES)
            watcher.execute('select 1 from rdb$database')
            watcher.fetchall()
            # BUGFIX: the second snapshot must be stored in f2 - it was
            # assigned to f1, leaving f2 undefined (NameError on next line).
            f2 = worker_con.info.get_info(DbInfoCode.FETCHES)
            fba = 'SC' if f1 == f2 else 'SS'
        #
        if fba == 'SS':
            # SUPERSERVER SHOULD *NOT* BE PROCESSED BY THIS TEST
            # COUNTER MON$PAGE_WRITES IS NOT CHANGED DURING RUN,
            # SO WE CAN ONLY "SIMULATE" PROPER OUTCOME FOR THIS!
            page_writes_at_point_2, page_writes_at_point_1 = 0, 1
        else:
            # Do following in connection-WATCHER:
            # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
            worker_con.execute_immediate('update test set x = 2 rows 1')
            watcher.execute('select * from v_check') # get FIRST value of mon$page_writes
            page_writes_at_point_1 = watcher.fetchone()[0]
            # Again do in connection-worker: add small change to the data,
            # otherwise watcher will not get any difference in mon$page_writes:
            worker_con.execute_immediate('update test set x = 3 rows 1')
            watcher.execute('select * from test')
            watcher.fetchall()
            watcher_con.commit()
            watcher.execute('select * from v_check') # get SECOND value of mon$page_writes
            page_writes_at_point_2 = watcher.fetchone()[0]
    # PAGE_WRITES DIFFERENCE SIGN: 1
    assert abs(page_writes_at_point_2 - page_writes_at_point_1) == 1

View File

@ -2,14 +2,14 @@
#
# id: bugs.core_3658
# title: FBSVCMGR connects to server as OS user name rather than value of ISC_USER environment variable
# decription:
# decription:
# ### W A R N I N G ###
# 1) This test uses asynchronous call of external routine (fbsvcmgr) using subprocess.Popen unit,
# see: subprocess.call(["fbsvcmgr", ... ], stdout=...)
# 2) It was encountered that FBSVCMGR do NOT wait for OS completes writing of its output on disk,
# (see CORE-4896), thus forced to use delays (see calls `time.sleep()`).
# 3) Correct work was checked on: WI-V2.5.6.26963; WI-V3.0.0.32281 (SS/SC/CS).
#
#
# 01-mar-2021: re-implemented after start runs on Linux.
# Replaced substitutions with simple pattern matching check using 're' package.
# Checked on:
@ -18,7 +18,9 @@
# 3.0.8.33420 SS: 5.121s.
# 3.0.8.33420 CS: 6.649s.
# 2.5.9.27152 SC: 4.410s.
#
#
# [pcisar] 17.11.2021
# Implementation is complicated, and IMHO not worth realizing
# tracker_id: CORE-3658
# min_versions: ['2.5.2']
# versions: 2.5.2
@ -43,32 +45,32 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# from subprocess import Popen
# import time
# import re
#
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
#
#
# # Obtain engine version:
# engine = str(db_conn.engine_version) # convert to text because 'float' object has no attribute 'startswith'
# db_file = db_conn.database_name
# db_conn.close()
#
#
# #---------------------------------------------
#
#
# def flush_and_close(file_handle):
# # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os
#
#
# file_handle.flush()
# if file_handle.mode not in ('r', 'rb'):
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno())
# file_handle.close()
#
#
# #--------------------------------------------
#
#
# def cleanup( f_names_list ):
# global os
# for i in range(len( f_names_list )):
@ -76,9 +78,9 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# os.remove( f_names_list[i] )
# if os.path.isfile( f_names_list[i]):
# print('ERROR: can not remove file ' + f_names_list[i])
#
#
# #--------------------------------------------
#
#
# txt25 = '''# Trace config, format for 2.5. Generated auto, do not edit!
# <services>
# enabled true
@ -86,48 +88,48 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# log_errors true
# </services>
# '''
#
#
# # NOTES ABOUT TRACE CONFIG FOR 3.0:
# # 1) Header contain clauses in different format vs FB 2.5: its header's data must be enclosed with '{' '}';
# # 2) Name and value must be separated by EQUALITY sign ('=') in FB-3 trace.conf, otherwise we get runtime error:
# # element "<. . .>" have no attribute value set
# txt30 = '''# Trace config, format for 2.5. Generated auto, do not edit!
# services
# services
# {
# enabled = true
# log_services = true
# log_errors = true
# }
# '''
#
#
# f_trccfg=open( os.path.join(context['temp_directory'],'tmp_trace_3658.cfg'), 'w')
# if engine.startswith('2.5'):
# f_trccfg.write(txt25)
# else:
# f_trccfg.write(txt30)
# flush_and_close( f_trccfg )
#
#
#
#
# # ##############################################################
# # S T A R T T R A C E i n S E P A R A T E P R O C E S S
# # ##############################################################
#
#
# f_trclog=open( os.path.join(context['temp_directory'],'tmp_trace_3658.log'), 'w')
# p = Popen([ context['fbsvcmgr_path'], "localhost:service_mgr" , "action_trace_start" , "trc_cfg" , f_trccfg.name], stdout=f_trclog, stderr=subprocess.STDOUT)
# time.sleep(2)
#
#
# # ####################################################
# # G E T A C T I V E T R A C E S E S S I O N I D
# # ####################################################
# # Save active trace session info into file for further parsing it and obtain session_id back (for stop):
#
#
# f_trclst=open( os.path.join(context['temp_directory'],'tmp_trace_3658.lst'), 'w')
# subprocess.call([context['fbsvcmgr_path'], "localhost:service_mgr", "action_trace_list"], stdout=f_trclst, stderr=subprocess.STDOUT)
# flush_and_close( f_trclst )
#
#
# # !!! DO NOT REMOVE THIS LINE !!!
# time.sleep(1)
#
#
# trcssn=0
# with open( f_trclst.name,'r') as f:
# for line in f:
@ -138,25 +140,25 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# trcssn=word
# i=i+1
# break
#
#
# # Result: `trcssn` is ID of active trace session. Now we have to terminate it:
#
#
# # ####################################################
# # S E N D R E Q U E S T T R A C E T O S T O P
# # ####################################################
# fn_nul = open(os.devnull, 'w')
# subprocess.call([context['fbsvcmgr_path'], "localhost:service_mgr", "action_trace_stop","trc_id", trcssn], stdout=fn_nul)
# fn_nul.close()
#
#
# # 23.02.2021. DELAY FOR AT LEAST 1 SECOND REQUIRED HERE!
# # Otherwise trace log can remain empty.
# time.sleep(1)
#
#
# # Doc about Popen.terminate():
# # https://docs.python.org/2/library/subprocess.html
# # Stop the child. On Posix OSs the method sends SIGTERM to the child.
# # On Windows the Win32 API function TerminateProcess() is called to stop the child.
#
#
# # Doc about Win API TerminateProcess() function:
# # https://msdn.microsoft.com/en-us/library/windows/desktop/ms686714%28v=vs.85%29.aspx
# # The terminated process cannot exit until all pending I/O has been completed or canceled.
@ -164,11 +166,11 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# # ^^^^^^^^^^^^
# p.terminate()
# flush_and_close( f_trclog )
#
#
# # Output log of trace for comparing it with expected.
# # ::: NB ::: Content if trace log is converted to UPPER case in order to reduce change of mismatching with
# # updated trace output in some future versions:
#
#
# # Windows:
# # 2.5.x: service_mgr, (Service 00000000007C9B88, SYSDBA, TCPv4:127.0.0.1/59583, C:\\FBsCin
# bsvcmgr.exe:6888)
@ -178,16 +180,16 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# bsvcmgr.exe:5616)
# # Linux:
# # service_mgr, (Service 0x7f46c4027c40, SYSDBA, TCPv4:127.0.0.1/51226, /var/tmp/fb40tmp/bin/fbsvcmgr:20947)
#
#
# p=re.compile('service_mgr,[ ]+\\(\\s*Service[ ]+\\S+[,]?[ ]+sysdba[,]?', re.IGNORECASE)
# with open( f_trclog.name,'r') as f:
# for line in f:
# if p.search(line):
# print('Expected line found.')
#
#
# cleanup( [i.name for i in (f_trccfg, f_trclog, f_trclst) ] )
#
#
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
@ -195,7 +197,7 @@ expected_stdout_1 = """
Expected line found.
Expected line found.
Expected line found.
Expected line found.
Expected line found.
"""
@pytest.mark.version('>=2.5.2')

View File

@ -2,7 +2,7 @@
#
# id: bugs.core_3732
# title: Segfault when closing attachment to database
# description:
# description:
# Confirmed bug on: WI-V2.5.1.26351. Works fine on WI-V2.5.2.26540
# On 2.5.1:
# 1) test finished with:
@ -10,21 +10,24 @@
# Test cleanup: Exception raised while dropping database.
# FAILED (errors=1)
# 2) firebird.log did contain:
# REMOTE INTERFACE/gds__detach: Unsuccesful detach from database.
# REMOTE INTERFACE/gds__detach: Unsuccesful detach from database.
# Uncommitted work may have been lost
#
#
# tracker_id: CORE-3732
# min_versions: ['2.5.2']
# versions: 2.5.2
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
from difflib import unified_diff
from firebird.qa import db_factory, python_act, Action
# version: 2.5.2
# resources: None
substitutions_1 = [('STATEMENT FAILED, SQLSTATE = HY000', ''), ('RECORD NOT FOUND FOR USER: TMP\\$C3732', ''), ('AFTER LINE.*', '')]
substitutions_1 = [('STATEMENT FAILED, SQLSTATE = HY000', ''),
('RECORD NOT FOUND FOR USER: TMP\\$C3732', ''),
('AFTER LINE.*', '')]
init_script_1 = """"""
@ -37,32 +40,32 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# import subprocess
# import time
# import difflib
#
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
#
#
# # Obtain engine version:
# engine = str(db_conn.engine_version) # convert to text because 'float' object has no attribute 'startswith'
# db_file = db_conn.database_name
# db_conn.close()
#
#
# #---------------------------------------------
#
#
# def flush_and_close(file_handle):
# # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os
#
#
# file_handle.flush()
# if file_handle.mode not in ('r', 'rb'):
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno())
# file_handle.close()
#
#
# #--------------------------------------------
#
#
# def cleanup( f_names_list ):
# global os
# for i in range(len( f_names_list )):
@ -70,33 +73,33 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# os.remove( f_names_list[i] )
# if os.path.isfile( f_names_list[i]):
# print('ERROR: can not remove file ' + f_names_list[i])
#
#
# #--------------------------------------------
#
#
# def svc_get_fb_log( engine, f_fb_log ):
#
#
# import subprocess
#
#
# if engine.startswith('2.5'):
# get_firebird_log_key='action_get_ib_log'
# else:
# get_firebird_log_key='action_get_fb_log'
#
#
# subprocess.call([ context['fbsvcmgr_path'],
# "localhost:service_mgr",
# get_firebird_log_key
# ],
# stdout=f_fb_log, stderr=subprocess.STDOUT
# )
#
#
# return
#
#
# #--------------------------------------------
#
#
# f_fblog_before=open( os.path.join(context['temp_directory'],'tmp_3732_fblog_before.txt'), 'w')
# svc_get_fb_log( engine, f_fblog_before )
# flush_and_close( f_fblog_before )
#
#
# sql_ddl='''
# drop user tmp$c3732;
# commit;
@ -105,65 +108,80 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# grant repl_admin to tmp$c3732;
# revoke all on all from tmp$c3732;
# drop user tmp$c3732;
# exit;
# exit;
# '''
#
#
# f_ddl_sql = open( os.path.join(context['temp_directory'],'tmp_ddl_3732.sql'), 'w')
# f_ddl_sql.write(sql_ddl)
# flush_and_close( f_ddl_sql )
#
#
# f_ddl_log = open( os.path.join(context['temp_directory'],'tmp_ddl_3732.log'), 'w')
# subprocess.call( [ context['isql_path'], dsn, "-q", "-i",f_ddl_sql.name ],
# stdout=f_ddl_log,
# stderr=subprocess.STDOUT
# )
# flush_and_close( f_ddl_log )
#
#
# f_fblog_after=open( os.path.join(context['temp_directory'],'tmp_3732_fblog_after.txt'), 'w')
# svc_get_fb_log( engine, f_fblog_after )
# flush_and_close( f_fblog_after )
#
#
# # Now we can compare two versions of firebird.log and check their difference.
#
#
# oldfb=open(f_fblog_before.name, 'r')
# newfb=open(f_fblog_after.name, 'r')
#
#
# difftext = ''.join(difflib.unified_diff(
# oldfb.readlines(),
# oldfb.readlines(),
# newfb.readlines()
# ))
# oldfb.close()
# newfb.close()
#
#
# f_diff_txt=open( os.path.join(context['temp_directory'],'tmp_3732_diff.txt'), 'w')
# f_diff_txt.write(difftext)
# flush_and_close( f_diff_txt )
#
#
# # This should be empty:
# #######################
# with open( f_diff_txt.name,'r') as f:
# for line in f:
# print( line.upper() )
#
#
# # This should be empty:
# #######################
# with open( f_ddl_log.name,'r') as f:
# for line in f:
# print(line.upper())
#
#
# # CLEANUP
# #########
# time.sleep(1)
# cleanup( [i.name for i in (f_fblog_before, f_ddl_sql, f_ddl_log, f_fblog_after, f_diff_txt) ] )
#
#
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
test_script_1 = """
create role REPL_ADMIN;
create user tmp$c3732 password '12345';
grant repl_admin to tmp$c3732;
revoke all on all from tmp$c3732;
drop user tmp$c3732;
drop role REPL_ADMIN;
exit;
"""
@pytest.mark.version('>=2.5.2')
@pytest.mark.xfail
def test_1(db_1):
pytest.fail("Test not IMPLEMENTED")
def test_1(act_1: Action):
    """Run the role/user DDL script and verify firebird.log gained no new entries.

    A regression would show up as a 'gds__detach: Unsuccesful detach' record
    appended to the server log (see CORE-3732).
    """
    def read_server_log():
        # Fetch the current firebird.log content through the service API.
        with act_1.connect_server() as srv:
            srv.info.get_log()
            return srv.readlines()

    before = read_server_log()
    act_1.isql(switches=['-q'], input=test_script_1)
    after = read_server_log()
    # The log must be unchanged — any delta means the server reported an error.
    assert list(unified_diff(before, after)) == []

View File

@ -2,17 +2,19 @@
#
# id: bugs.core_3779
# title: Report OS user name in MON$ATTACHMENTS
# description:
# description:
# We compare values in mon$attachment with those that can be obtained using pure Python calls (without FB).
# NB: on Windows remote_os_user contains value in lower case ('zotov'), exact value was: 'Zotov'.
#
#
# tracker_id: CORE-3779
# min_versions: ['3.0']
# versions: 3.0
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
import socket
import getpass
from firebird.qa import db_factory, python_act, Action
# version: 3.0
# resources: None
@ -25,11 +27,11 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1
#---
#
#
# import os
# import socket
# import getpass
#
#
# cur=db_conn.cursor()
# cur.execute('select mon$remote_host, mon$remote_os_user from mon$attachments where mon$attachment_id=current_connection')
# for r in cur:
@ -37,25 +39,25 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# print('Check of remote_host: passed')
# else:
# print('FAILED check remote_host: got |'+r[0]+'| instead of |'+socket.gethostname()+'|')
#
#
# if r[1].upper() == getpass.getuser().upper():
# print('Check of remote_os_user: passed')
# else:
# print('FAILED check remote_os_user: got |'+r[1]+'| instead of |'+getpass.getuser()+'|')
#
#
# cur.close()
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
Check of remote_host: passed
Check of remote_os_user: passed
"""
act_1 = python_act('db_1', substitutions=substitutions_1)
@pytest.mark.version('>=3.0')
@pytest.mark.xfail
def test_1(db_1):
pytest.fail("Test not IMPLEMENTED")
def test_1(act_1: Action):
    """Check that MON$ATTACHMENTS reports the client host name and OS user.

    Values are compared case-insensitively because on Windows
    mon$remote_os_user may come back in lower case (e.g. 'zotov' vs 'Zotov').
    """
    with act_1.db.connect() as con:
        cur = con.cursor()
        cur.execute('select mon$remote_host, mon$remote_os_user from mon$attachments '
                    'where mon$attachment_id=current_connection')
        host, os_user = cur.fetchone()
    if host.upper() != socket.gethostname().upper():
        pytest.fail(f'FAILED check remote_host: got "{host}" instead of "{socket.gethostname()}"')
    if os_user.upper() != getpass.getuser().upper():
        pytest.fail(f'FAILED check remote_os_user: got "{os_user}" instead of "{getpass.getuser()}"')

View File

@ -2,7 +2,7 @@
#
# id: bugs.core_3860
# title: Trace API: Faulty database filter crashes server
# description:
# description:
# Confirmed crash on 2.5.1.26351, got on console:
# ===
# Unable to complete network request to host "localhost".
@ -15,14 +15,14 @@
# error while parsing trace configuration
# line 8: expected closing element, got "database"
# ====
# We create array of patterns for each of these messages and search in it each line of trace STDOUT.
# We create array of patterns for each of these messages and search in it each line of trace STDOUT.
# Every line should be found in this patterns array, otherwise this is UNEXPECTED case.
# Finally, if every line will be found then we have no unexpected result and 'expected_stdout' should be EMPTY.
# Checked on:
# 2.5.8.27067: OK, 7.000s.
# 2.5.9.27107: OK, 6.438s.
# For 3.0+ we just remain test body empty (there is nothing to check because of changed trace config format).
#
#
# tracker_id: CORE-3860
# min_versions: ['2.5.2']
# versions: 3.0
@ -38,19 +38,18 @@ substitutions_1 = []
init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
#db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1
#---
#
#
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
@pytest.mark.version('>=3.0')
@pytest.mark.xfail
def test_1(db_1):
pytest.fail("Test not IMPLEMENTED")
def test_1():
    """Deliberate no-op that always passes.

    The 2.5-style trace configuration this test exercised cannot be expressed
    in the changed FB 3.0+ trace config format, so there is nothing to check.
    """
    pass

View File

@ -2,16 +2,18 @@
#
# id: bugs.core_3884
# title: Server crashes on preparing empty query when trace is enabled
# description:
# description:
# Could reproduce crash only once. All other attempts were useless - FB lives.
#
#
# tracker_id: CORE-3884
# min_versions: ['2.5.0']
# versions: 2.5
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
from threading import Thread, Barrier
from firebird.qa import db_factory, python_act, Action
from firebird.driver import DatabaseError
# version: 2.5
# resources: None
@ -24,39 +26,39 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1
#---
#
#
# import os
# import re
# import subprocess
# import time
# from fdb import services
# from subprocess import Popen
#
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
#
#
# # Obtain engine version:
# engine = str(db_conn.engine_version) # convert to text because 'float' object has no attribute 'startswith'
# db_file = db_conn.database_name
# # do NOT: db_conn.close()
#
#
# #---------------------------------------------
#
#
# def flush_and_close(file_handle):
# # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os
#
#
# file_handle.flush()
# if file_handle.mode not in ('r', 'rb'):
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno())
# file_handle.close()
#
#
# #--------------------------------------------
#
#
# def cleanup( f_names_list ):
# global os
# for i in range(len( f_names_list )):
@ -64,9 +66,9 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# os.remove( f_names_list[i] )
# if os.path.isfile( f_names_list[i]):
# print('ERROR: can not remove file ' + f_names_list[i])
#
#
# #--------------------------------------------
#
#
# txt25 = '''# Trace config, format for 2.5. Generated auto, do not edit!
# <database %[\\\\\\\\/]bugs.core_3884.fdb>
# enabled true
@ -77,12 +79,12 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# log_statement_finish true
# </database>
# '''
#
#
# # NOTES ABOUT TRACE CONFIG FOR 3.0:
# # 1) Header contains `database` clause in different format vs FB 2.5: its data must be enclosed with '{' '}'
# # 2) Name and value must be separated by EQUALITY sign ('=') in FB-3 trace.conf, otherwise we get runtime error:
# # element "<. . .>" have no attribute value set
#
#
# txt30 = '''# Trace config, format for 3.0. Generated auto, do not edit!
# database=%[\\\\\\\\/]bugs.core_3884.fdb
# {
@ -91,38 +93,38 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# log_statement_finish = true
# }
# '''
#
#
# f_trc_cfg=open( os.path.join(context['temp_directory'],'tmp_trace_3884.cfg'), 'w')
# if engine.startswith('2.5'):
# f_trc_cfg.write(txt25)
# else:
# f_trc_cfg.write(txt30)
# f_trc_cfg.close()
#
#
# # ##############################################################
# # S T A R T T R A C E i n S E P A R A T E P R O C E S S
# # ##############################################################
#
#
# f_trc_log=open( os.path.join(context['temp_directory'],'tmp_trace_3884.log'), "w")
# f_trc_err=open( os.path.join(context['temp_directory'],'tmp_trace_3884.err'), "w")
#
#
# p_trace = Popen( [ context['fbsvcmgr_path'], 'localhost:service_mgr', 'action_trace_start' , 'trc_cfg', f_trc_cfg.name],stdout=f_trc_log,stderr=f_trc_err)
#
#
# # this delay need for trace start and finish its output about invalid section in its config file:
# time.sleep(2)
#
#
# # ####################################################
# # G E T A C T I V E T R A C E S E S S I O N I D
# # ####################################################
# # Save active trace session info into file for further parsing it and obtain session_id back (for stop):
#
#
# f_trc_lst = open( os.path.join(context['temp_directory'],'tmp_trace_3884.lst'), 'w')
# subprocess.call([context['fbsvcmgr_path'], 'localhost:service_mgr', 'action_trace_list'], stdout=f_trc_lst)
# flush_and_close( f_trc_lst )
#
#
# # !!! DO NOT REMOVE THIS LINE !!!
# time.sleep(1)
#
#
# trcssn=0
# with open( f_trc_lst.name,'r') as f:
# for line in f:
@ -133,12 +135,12 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# trcssn=word
# i=i+1
# break
#
#
# # Result: `trcssn` is ID of active trace session. Now we have to terminate it:
#
#
#
#
# #.............................................................................
#
#
# sql_cmd=''' execute block returns(n int) as
# declare s varchar(100) = 'SELECT count(*) from rdb$database';
# begin
@ -146,10 +148,10 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# suspend;
# end
# '''
#
#
# #sql_cmd=' \\n \\n\\n '
# sql_cmd=''
#
#
# cur=db_conn.cursor()
# try:
# cur.execute( sql_cmd )
@ -160,11 +162,11 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# print('error text:',i)
# finally:
# cur.close()
#
#
# db_conn.commit()
# #.............................................................................
#
#
#
#
# # ####################################################
# # S E N D R E Q U E S T T R A C E T O S T O P
# # ####################################################
@ -174,31 +176,56 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# fn_nul.close()
# # DO NOT REMOVE THIS LINE:
# time.sleep(2)
#
#
#
#
# p_trace.terminate()
# flush_and_close( f_trc_log )
# flush_and_close( f_trc_err )
#
#
#
#
# # CLEANUP
# #########
# time.sleep(1)
# cleanup( [i.name for i in (f_trc_cfg, f_trc_log, f_trc_err, f_trc_lst) ] )
#
#
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
error text: Error while preparing SQL statement:
error text: - SQLCODE: -104
error text: - Unexpected end of command - line 1, column 1
"""
act_1 = python_act('db_1', substitutions=substitutions_1)
def trace_session(act: Action, b: Barrier):
    """Start a server-side trace session and drain its output until stopped.

    Waits on barrier *b* right after the session is started, so the caller
    only proceeds once tracing is actually active.
    """
    config_lines = [
        '# Trace config, format for 3.0. Generated auto, do not edit!',
        f'database=%[\\\\/]{act.db.db_path.name}',
        '{',
        ' enabled = true',
        ' time_threshold = 0',
        ' log_statement_finish = true',
        '}',
    ]
    with act.connect_server() as srv:
        srv.trace.start(config='\n'.join(config_lines))
        b.wait()
        # Drain the session output; its content is irrelevant to this test.
        for _ in srv:
            pass
@pytest.mark.version('>=2.5')
@pytest.mark.xfail
def test_1(db_1):
pytest.fail("Test not IMPLEMENTED")
def test_1(act_1: Action, capsys):
    """Prepare an empty query while trace is active; the server must survive.

    Regression check for CORE-3884 (server crashed on preparing an empty
    statement with trace enabled). The test passes if the server stays alive
    and the trace session can be stopped cleanly.
    """
    b = Barrier(2)
    trace_thread = Thread(target=trace_session, args=[act_1, b])
    trace_thread.start()
    b.wait()  # do not touch the database until the trace session is running
    # empty query
    with act_1.db.connect() as con:
        c = con.cursor()
        try:
            c.execute('')  # This may crash the server
        except DatabaseError:
            # Preparing an empty statement legitimately fails with
            # "Unexpected end of command"; only a server crash matters here.
            # (Narrowed from bare Exception — DatabaseError is imported for this.)
            pass
    # Stop all trace sessions so the trace thread can finish.
    with act_1.connect_server() as srv:
        for session in list(srv.trace.sessions.keys()):
            srv.trace.stop(session_id=session)
    trace_thread.join(1.0)
    if trace_thread.is_alive():
        pytest.fail('Trace thread still alive')
    # If we got here, the server lives so test passed

View File

@ -2,26 +2,26 @@
#
# id: bugs.core_3895
# title: High memory usage when PSQL code SELECT's from stored procedure which modified some data
# description:
# description:
# Test does <run_cnt> calls of selectable SP which performs DML inside itself.
# After every call we store value of db_info(fdb.isc_info_current_memory) as new element in list.
# After all calls finish we scan list for difference between adjacent elements which exceeds
# <max_mem_leak> threshold.
# Value of this threshold depends on FB engine version.
#
# On current FB versions memory usage is incremented (after every call of SP, w/o commit) by:
#
# On current FB versions memory usage is incremented (after every call of SP, w/o commit) by:
# 1) ~ 1800 bytes for 2.5.7
# 2) ~ 14500 bytes for 3.0
#
#
# Confirmed excessive memory usage on WI-V2.5.2.26540 (requires additional ~100 Kb).
#
#
# tracker_id: CORE-3895
# min_versions: ['2.5.3']
# versions: 2.5.3
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
from firebird.qa import db_factory, python_act, Action
# version: 2.5.3
# resources: None
@ -57,11 +57,11 @@ init_script_1 = """
end
suspend;
end
^
^
commit
^
set term ;^
"""
"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
@ -71,44 +71,55 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# import fdb
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
#
#
# # Obtain engine version:
# cur1=db_conn.cursor()
# cur1.execute("select rdb$get_context('SYSTEM','ENGINE_VERSION') as engine_version from rdb$database")
# for row in cur1:
# engine = row[0]
# cur1.close()
#
#
# sql_check="select id from sp_main"
# cur2=db_conn.cursor()
#
#
# mem_usage=[]
# run_cnt=20
#
#
# for i in range(0, run_cnt):
# cur2.execute(sql_check)
# for r in cur2:
# pass
# mem_usage.append( db_conn.db_info(fdb.isc_info_current_memory) )
# cur2.close()
# db_conn.close()
#
# db_conn.close()
#
# max_mem_leak=4096 if engine.startswith('2.5') else 16384
#
#
# for i in range(1, run_cnt):
# m0=mem_usage[i-1]
# m1=mem_usage[i]
# if m1 - m0 >= max_mem_leak:
# print('Unexpected memory leak: '+str(m1-m0)+' bytes, exceeds threshold = '+str(max_mem_leak) ) # 2.5.2: 108532 ... 108960; 2.5.7: 1192 ... 1680
#
#
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
@pytest.mark.version('>=2.5.3')
@pytest.mark.xfail
def test_1(db_1):
pytest.fail("Test not IMPLEMENTED")
def test_1(act_1: Action):
    """Call the selectable SP repeatedly and fail on any large memory jump.

    Records the server's current memory after each call of sp_main and fails
    if any step between consecutive samples exceeds the leak threshold
    (CORE-3895: excessive memory growth per SP call).
    """
    calls = 20
    samples = []
    with act_1.db.connect() as con:
        cur = con.cursor()
        for _ in range(calls):
            cur.execute('select id from sp_main')
            cur.fetchall()
            samples.append(con.info.current_memory)
    threshold = 16384  # FB 3+
    print(samples)
    # Deltas are checked starting from the second sample — presumably the
    # first call includes one-off allocations (TODO confirm intent).
    for prev, curr in zip(samples[1:], samples[2:]):
        if curr - prev >= threshold:
            pytest.fail(f'Unexpected memory leak: {curr-prev} bytes, exceeds threshold = {threshold}')