Mirror of https://github.com/FirebirdSQL/firebird-qa.git, synced 2025-01-22 13:33:07 +01:00

Commit c66b267c73 ("More python tests"), parent 93a898b9ee

BIN  files/core_5078.zip  (new binary file, not shown)
@ -352,11 +352,9 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)

expected_stdout_1 = """
RESULT_OF_REQ_COMPARE_TO_ACTUAL EXPECTED: actual values were equal to required.
"""

@pytest.mark.version('>=3.0')
@pytest.mark.xfail
def test_1(db_1):
    pytest.fail("Test not IMPLEMENTED")


    pytest.skip("Test requires manipulation with firebird.conf")
    #pytest.fail("Test not IMPLEMENTED")
@ -136,9 +136,8 @@ expected_stdout_1 = """

user_1 = user_factory(name='TMP$C1972', password='123')

@pytest.mark.version('>=2.1.1')
@pytest.mark.xfail
def test_1(act_1: Action, user_1: User):
    pytest.fail("Test not IMPLEMENTED")
    pytest.skip("Test can't be implement with new Python driver")
    # This test is not possible to implement with new Python driver as it does not
    # allow to specify `forced_writes` or `reserve_space` options on connect() as tested
    # configuration options are passed to DPB only for create_database()!
@ -201,8 +201,6 @@ Expected line found.
"""

@pytest.mark.version('>=2.5.2')
@pytest.mark.xfail
def test_1(db_1):
    pytest.fail("Test not IMPLEMENTED")


    pytest.skip("Implementation is complicated, and IMHO not worth of realization")
    #pytest.fail("Test not IMPLEMENTED")
@ -252,8 +252,7 @@ def test_1(act_1: Action):
        c = con.cursor()
        c.execute('update test set x = -x')
        con.commit()
    act_1.svcmgr(switches=['localhost:service_mgr', 'user', act_1.db.user,
                           'password', act_1.db.password, 'action_db_stats', 'dbname',
    act_1.svcmgr(switches=['action_db_stats', 'dbname',
                           str(act_1.db.db_path), 'sts_record_versions'])
    act_1.stdout = '\n'.join([line for line in act_1.stdout.splitlines() if 'versions:' in line.lower()])
    act_1.expected_stdout = expected_stdout_1
@ -2,33 +2,38 @@
#
# id: bugs.core_4345
# title: Ability to trace stored functions execution
# decription:
# Test checks two cases: 1) when execution of function is ENABLED and 2) DISABLED.
# In 1st case we search in trace log rows which prove that function execution was actually logged,
# and in 2nd case we have to ensure that trace log does NOT contain text about this event.
# Both standalone and packaged functions are checked.
#
# Checked on WI-V3.0.0.32328 (SS/SC/CS); WI-T4.0.0.633
#
# 08-may-2017.
# Refactored: additional filtering using regexp (pattern.search(line)) in order to avoid take in account
# start transaction events in trace (number of starting Tx became differ since 29-mar-2017 when some changes
# in PIPE mechanism were done, see:
# https://github.com/FirebirdSQL/firebird/commit/e1232d8015b199e33391dd2550e7c5f7e3f08493 )
#
# tracker_id: CORE-4345
# min_versions: ['3.0']
# versions: 3.0
# qmid: None

import pytest
from firebird.qa import db_factory, isql_act, Action
import time
import re
from threading import Thread, Barrier
from firebird.qa import db_factory, python_act, Action

# version: 3.0
# resources: None

substitutions_1 = [('^((?!PARAM0|EXECUTE_FUNCTION_START|EXECUTE_FUNCTION_FINISH|SA_FUNC|PG_FUNC).)*$', ''), ('LOG_FUNC_ENABLED.*EXECUTE_FUNCTION_START', 'LOG_FUNC_ENABLED EXECUTE_FUNCTION_START'), ('LOG_FUNC_ENABLED.*EXECUTE_FUNCTION_FINISH', 'LOG_FUNC_ENABLED EXECUTE_FUNCTION_FINISH')]
substitutions_1 = [('^((?!PARAM0|EXECUTE_FUNCTION_START|EXECUTE_FUNCTION_FINISH|SA_FUNC|PG_FUNC).)*$', ''),
                   ('LOG_FUNC_ENABLED.*EXECUTE_FUNCTION_START', 'LOG_FUNC_ENABLED EXECUTE_FUNCTION_START'),
                   ('LOG_FUNC_ENABLED.*EXECUTE_FUNCTION_FINISH', 'LOG_FUNC_ENABLED EXECUTE_FUNCTION_FINISH')]

init_script_1 = """
set term ^;
@ -64,41 +69,41 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# from subprocess import Popen
|
||||
# import shutil
|
||||
# import re
|
||||
#
|
||||
#
|
||||
# trace_timestamp_prefix='[.*\\s+]*20[0-9]{2}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{3,4}\\s+\\(.+\\)'
|
||||
# func_start_ptn=re.compile( trace_timestamp_prefix + '\\s+(FAILED){0,1}\\s*EXECUTE_FUNCTION_START$', re.IGNORECASE)
|
||||
# func_finish_ptn=re.compile( trace_timestamp_prefix + '\\s+(FAILED){0,1}\\s*EXECUTE_FUNCTION_FINISH$', re.IGNORECASE)
|
||||
# func_name_ptn=re.compile('Function\\s+(SA_FUNC|PG_TEST.PG_FUNC):$')
|
||||
# func_param_prn=re.compile('param[0-9]+\\s+=\\s+', re.IGNORECASE)
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
# # Minimal delay after we issue command fbsvcmgr action_trace_start
|
||||
# # and before we launch execution of checked code
|
||||
# ###########################################
|
||||
# min_delay_after_trace_start = 1
|
||||
#
|
||||
#
|
||||
# # Minimal delay after we finish connection to database
|
||||
# # and before issuing command to stop trace
|
||||
# ##########################################
|
||||
# min_delay_before_trace_stop = 1
|
||||
#
|
||||
#
|
||||
# # Minimal delay for trace log be flushed on disk after
|
||||
# # we issue command 'fbsvcmgr action_trace_stop':
|
||||
# ###############################
|
||||
# min_delay_after_trace_stop = 1
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
#
|
||||
#
|
||||
# def make_trace_config( is_func_logged, trccfg_name ):
|
||||
#
|
||||
#
|
||||
# # NOTES ABOUT TRACE CONFIG FOR 3.0:
|
||||
# # 1) Header contains `database` clause in different format vs FB 2.5: its data must be enclosed with '{' '}'
|
||||
# # 2) Name and value must be separated by EQUALITY sign ('=') in FB-3 trace.conf, otherwise we get runtime error:
|
||||
# # element "<. . .>" have no attribute value set
|
||||
#
|
||||
#
|
||||
# if is_func_logged.upper() == 'TRUE':
|
||||
# txt30 = '''
|
||||
# database=%[\\\\\\\\/]bugs.core_4345.fdb
|
||||
@ -122,26 +127,26 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# log_function_start = false
|
||||
# }
|
||||
# '''
|
||||
#
|
||||
#
|
||||
# trccfg=open( trccfg_name, 'w')
|
||||
# trccfg.write(txt30)
|
||||
# trccfg.close()
|
||||
#
|
||||
#
|
||||
# return
|
||||
#
|
||||
#
|
||||
# def stop_trace_session():
|
||||
#
|
||||
#
|
||||
# # Save active trace session info into file for further parsing it and obtain session_id back (for stop):
|
||||
# import os
|
||||
# import subprocess
|
||||
#
|
||||
#
|
||||
# f_trclst=open( os.path.join(context['temp_directory'],'tmp_trace_4345.lst'), 'w')
|
||||
# subprocess.call([context['fbsvcmgr_path'], "localhost:service_mgr", "action_trace_list"],
|
||||
# stdout=f_trclst,
|
||||
# stderr=subprocess.STDOUT
|
||||
# )
|
||||
# f_trclst.close()
|
||||
#
|
||||
#
|
||||
# trcssn=0
|
||||
# with open( f_trclst.name,'r') as f:
|
||||
# for line in f:
|
||||
@ -149,7 +154,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# trcssn=line.split()[2]
|
||||
# break
|
||||
# f.close()
|
||||
#
|
||||
#
|
||||
# # Result: `trcssn` is ID of active trace session. Now we have to terminate it:
|
||||
# f_trclst=open(f_trclst.name,'a')
|
||||
# f_trclst.seek(0,2)
|
||||
@ -157,12 +162,12 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# stdout=f_trclst, stderr=subprocess.STDOUT
|
||||
# )
|
||||
# f_trclst.close()
|
||||
#
|
||||
#
|
||||
# os.remove(f_trclst.name)
|
||||
#
|
||||
#
|
||||
# return
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# sql_fnc='''
|
||||
# set list on;
|
||||
# set term ^;
|
||||
@ -179,25 +184,25 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# commit;
|
||||
# '''
|
||||
# # % (123, 456)
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# ##############################################################################
|
||||
# ### ###
|
||||
# ### C A S E - 1: l o g _ f u n c = t r u e ###
|
||||
# ### ###
|
||||
# ##############################################################################
|
||||
#
|
||||
#
|
||||
# # Make trace config with ENABLING logging of func. execution:
|
||||
#
|
||||
#
|
||||
# trccfg_log_enable=os.path.join(context['temp_directory'],'tmp_trace_4345_log_enable.cfg')
|
||||
#
|
||||
#
|
||||
# make_trace_config( 'true', trccfg_log_enable )
|
||||
#
|
||||
#
|
||||
# f_trclog_log_enable=open( os.path.join(context['temp_directory'],'tmp_trace_4345_log_enable.log'), 'w')
|
||||
#
|
||||
#
|
||||
# #####################################################
|
||||
# # Starting trace session in new child process (async.):
|
||||
#
|
||||
#
|
||||
# # Execute a child program in a new process, redirecting STDERR to the same target as of STDOUT:
|
||||
# p_trace=Popen( [ context['fbsvcmgr_path'], "localhost:service_mgr",
|
||||
# "action_trace_start",
|
||||
@ -206,63 +211,63 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# stdout=f_trclog_log_enable,
|
||||
# stderr=subprocess.STDOUT
|
||||
# )
|
||||
#
|
||||
#
|
||||
# # Wait _AT_LEAST_ 4..5 seconds in 2.5 because trace session is initialized not instantly.
|
||||
# # If this delay is less then 2 second then trace log will be EMPTY (got on 2.5 SS and Cs).
|
||||
# time.sleep( min_delay_after_trace_start )
|
||||
#
|
||||
#
|
||||
# ####################################################
|
||||
# # Make connection to database and perform script that
|
||||
# # Make connection to database and perform script that
|
||||
# # calls two functions: standalone and packaged:
|
||||
# ####################################################
|
||||
#
|
||||
#
|
||||
# runProgram('isql',[dsn, '-n', '-q'], sql_fnc % (123, 456) )
|
||||
#
|
||||
#
|
||||
# # do NOT remove this otherwise trace log can contain only message about its start before being closed!
|
||||
# time.sleep(min_delay_before_trace_stop)
|
||||
#
|
||||
#
|
||||
# #####################################################
|
||||
# # Getting ID of launched trace session and STOP it:
|
||||
#
|
||||
#
|
||||
# stop_trace_session()
|
||||
# time.sleep(min_delay_after_trace_stop)
|
||||
#
|
||||
#
|
||||
# # Terminate child process of launched trace session (though it should already be killed):
|
||||
# p_trace.terminate()
|
||||
# f_trclog_log_enable.close()
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# #############
|
||||
# # O U T P U T
|
||||
# #############
|
||||
#
|
||||
#
|
||||
# with open( f_trclog_log_enable.name,'r') as f:
|
||||
# for line in f:
|
||||
# if ( func_start_ptn.search(line)
|
||||
# or func_finish_ptn.search(line)
|
||||
# or func_name_ptn.search(line)
|
||||
# if ( func_start_ptn.search(line)
|
||||
# or func_finish_ptn.search(line)
|
||||
# or func_name_ptn.search(line)
|
||||
# or func_param_prn.search(line) ):
|
||||
# print('LOG_FUNC_ENABLED '+line.upper())
|
||||
# f.close()
|
||||
#
|
||||
#
|
||||
# #############################################################################
|
||||
# ### ###
|
||||
# ### C A S E - 2: l o g _ f u n c = f al s e ###
|
||||
# ### ###
|
||||
# #############################################################################
|
||||
#
|
||||
#
|
||||
# # Make trace config with DISABLING logging of func. execution:
|
||||
#
|
||||
#
|
||||
# trccfg_log_disable=os.path.join(context['temp_directory'],'tmp_trace_4345_log_disable.cfg')
|
||||
# make_trace_config( 'false', trccfg_log_disable )
|
||||
#
|
||||
#
|
||||
# #####################################################
|
||||
# # Starting trace session in new child process (async.):
|
||||
#
|
||||
#
|
||||
# # Execute a child program in a new process, redirecting STDERR to the same target as of STDOUT:
|
||||
#
|
||||
#
|
||||
# f_trclog_log_disable=open( os.path.join(context['temp_directory'],'tmp_trace_4345_log_disable.log'), 'w')
|
||||
#
|
||||
#
|
||||
# p_trace=Popen( [context['fbsvcmgr_path'], "localhost:service_mgr",
|
||||
# "action_trace_start",
|
||||
# "trc_cfg", trccfg_log_disable
|
||||
@ -270,62 +275,63 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# stdout=f_trclog_log_disable,
|
||||
# stderr=subprocess.STDOUT
|
||||
# )
|
||||
#
|
||||
#
|
||||
# # Wait _AT_LEAST_ 4..5 seconds in 2.5 because trace session is initialized not instantly.
|
||||
# # If this delay is less then 2 second then trace log will be EMPTY (got on 2.5 SS and Cs).
|
||||
#
|
||||
#
|
||||
# time.sleep( min_delay_after_trace_start )
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# ####################################################
|
||||
# # Make connection to database and perform script that
|
||||
# # Make connection to database and perform script that
|
||||
# # calls two functions: standalone and packaged:
|
||||
# ####################################################
|
||||
#
|
||||
#
|
||||
# runProgram('isql',[dsn, '-n', '-q'], sql_fnc % (789, 987) )
|
||||
#
|
||||
#
|
||||
# # do NOT remove this otherwise trace log can contain only message about its start before being closed!
|
||||
# time.sleep(min_delay_before_trace_stop)
|
||||
#
|
||||
#
|
||||
# #####################################################
|
||||
# # Getting ID of launched trace session and STOP it:
|
||||
# stop_trace_session()
|
||||
# time.sleep(min_delay_after_trace_stop)
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# # Terminate child process of launched trace session (though it should already be killed):
|
||||
# p_trace.terminate()
|
||||
# f_trclog_log_disable.close()
|
||||
#
|
||||
#
|
||||
# #############
|
||||
# # O U T P U T
|
||||
# #############
|
||||
#
|
||||
#
|
||||
# with open( f_trclog_log_disable.name,'r') as f:
|
||||
# for line in f:
|
||||
# if ( func_start_ptn.search(line)
|
||||
# or func_finish_ptn.search(line)
|
||||
# or func_name_ptn.search(line)
|
||||
# if ( func_start_ptn.search(line)
|
||||
# or func_finish_ptn.search(line)
|
||||
# or func_name_ptn.search(line)
|
||||
# or func_param_prn.search(line) ):
|
||||
# print('LOG_FUNC_DISABLED '+line.upper())
|
||||
# f.close()
|
||||
#
|
||||
#
|
||||
# time.sleep(1)
|
||||
#
|
||||
#
|
||||
# ###############################
|
||||
# # Cleanup.
|
||||
#
|
||||
#
|
||||
# f_list = (f_trclog_log_enable, f_trclog_log_disable)
|
||||
# for i in range(len(f_list)):
|
||||
# if os.path.isfile(f_list[i].name):
|
||||
# os.remove(f_list[i].name)
|
||||
#
|
||||
#
|
||||
# os.remove(trccfg_log_enable)
|
||||
# os.remove(trccfg_log_disable)
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)

act_1 = python_act('db_1', substitutions=substitutions_1)

expected_stdout_1 = """
LOG_FUNC_ENABLED 2016-02-10T15:10:43.5940 (1700:00C52280) EXECUTE_FUNCTION_START
@ -342,11 +348,100 @@ expected_stdout_1 = """
LOG_FUNC_ENABLED FUNCTION PG_TEST.PG_FUNC:
LOG_FUNC_ENABLED PARAM0 = INTEGER, "456"
LOG_FUNC_ENABLED PARAM0 = BIGINT, "207936"
"""

def trace_session(act: Action, b: Barrier, logfunc: bool):
    cfg30 = ['# Trace config, format for 3.0. Generated auto, do not edit!',
             f'database=%[\\\\/]{act.db.db_path.name}',
             '{',
             ' enabled = true',
             ' time_threshold = 0',
             ' log_errors = true',
             ' log_connections = true',
             ' log_transactions = true',
             ]
    if logfunc:
        cfg30.append(' log_function_start = true')
        cfg30.append(' log_function_finish = true')
    cfg30.append('}')
    with act.connect_server() as srv:
        srv.trace.start(config='\n'.join(cfg30))
        b.wait()
        for line in srv:
            print(line.upper())

@pytest.mark.version('>=3.0')
@pytest.mark.xfail
def test_1(db_1):
    pytest.fail("Test not IMPLEMENTED")


def test_1(act_1: Action, capsys):
    output = []
    trace_timestamp_prefix = '[.*\\s+]*20[0-9]{2}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{3,4}\\s+\\(.+\\)'
    func_start_ptn = re.compile(trace_timestamp_prefix + '\\s+(FAILED){0,1}\\s*EXECUTE_FUNCTION_START$', re.IGNORECASE)
    func_finish_ptn = re.compile(trace_timestamp_prefix + '\\s+(FAILED){0,1}\\s*EXECUTE_FUNCTION_FINISH$', re.IGNORECASE)
    func_name_ptn = re.compile('Function\\s+(SA_FUNC|PG_TEST.PG_FUNC):$')
    func_param_prn = re.compile('param[0-9]+\\s+=\\s+', re.IGNORECASE)
    #
    func_script = """
    set list on;
    set term ^;
    execute block as -- returns( sa_func_result bigint, pg_func_result bigint ) as
        declare sa_func_result bigint;
        declare pg_func_result bigint;
    begin
        sa_func_result = sa_func(%s);
        pg_func_result = pg_test.pg_func(%s);
        --suspend;
    end
    ^
    set term ;^
    commit;
    """
    # Case 1: Trace functions enabled
    b = Barrier(2)
    trace_thread = Thread(target=trace_session, args=[act_1, b, True])
    trace_thread.start()
    b.wait()
    try:
        act_1.isql(switches=['-n', '-q'], input=func_script % (123, 456))
        time.sleep(2)
    finally:
        with act_1.connect_server() as srv:
            for session in list(srv.trace.sessions.keys()):
                srv.trace.stop(session_id=session)
        trace_thread.join(1.0)
        if trace_thread.is_alive():
            pytest.fail('Trace thread still alive')
    #
    trace_log = capsys.readouterr().out
    for line in trace_log.splitlines():
        if (func_start_ptn.search(line)
            or func_finish_ptn.search(line)
            or func_name_ptn.search(line)
            or func_param_prn.search(line) ):
            output.append('LOG_FUNC_ENABLED ' + line.upper())
    # Case 2: Trace functions disabled
    b = Barrier(2)
    trace_thread = Thread(target=trace_session, args=[act_1, b, False])
    trace_thread.start()
    b.wait()
    try:
        act_1.isql(switches=['-n', '-q'], input=func_script % (789, 987))
        time.sleep(2)
    finally:
        with act_1.connect_server() as srv:
            for session in list(srv.trace.sessions.keys()):
                srv.trace.stop(session_id=session)
        trace_thread.join(1.0)
        if trace_thread.is_alive():
            pytest.fail('Trace thread still alive')
    #
    trace_log += capsys.readouterr().out
    for line in trace_log.splitlines():
        if (func_start_ptn.search(line)
            or func_finish_ptn.search(line)
            or func_name_ptn.search(line)
            or func_param_prn.search(line) ):
            print('LOG_FUNC_DISABLED ' + line.upper())
    # Test
    act_1.reset()
    act_1.expected_stdout = expected_stdout_1
    act_1.stdout = '\n'.join(output)
    assert act_1.clean_stdout == act_1.clean_expected_stdout
@ -2,16 +2,17 @@
#
# id: bugs.core_4380
# title: ISQL truncates blob when reading an empty segment
# decription:
# Checked on: 4.0.0.138 (both Windows and POSIX); 3.0.0.32484.
#
# tracker_id: CORE-4380
# min_versions: ['3.0']
# versions: 3.0
# qmid: None

import pytest
from firebird.qa import db_factory, isql_act, Action
import re
from firebird.qa import db_factory, python_act, Action

# version: 3.0
# resources: None
@ -35,32 +36,32 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# import os
|
||||
# import subprocess
|
||||
# import re
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
#
|
||||
#
|
||||
# # -- NB: i'm not sure that this test properly reflects the trouble described in the ticket.
|
||||
# # -- At least on 3.0 Alpha 1, Alpha 2 and Beta 2 (31807) output is identical.
|
||||
# # -- Note that value in "BLR to Source mapping" under 'Column' was changed to reflect
|
||||
# # -- real offset from the beginning of line in THIS .fbt file (right shifted on 4 character).
|
||||
#
|
||||
#
|
||||
# sql_script=''' set blob all;
|
||||
# set list on;
|
||||
# select rdb$debug_info from rdb$procedures;
|
||||
# '''
|
||||
#
|
||||
#
|
||||
# f_blob_sql = open( os.path.join(context['temp_directory'],'tmp_blob_4380.sql'), 'w')
|
||||
# f_blob_sql.write(sql_script)
|
||||
# f_blob_sql.close()
|
||||
#
|
||||
#
|
||||
# f_blob_log = open( os.path.join(context['temp_directory'],'tmp_blob_4380.log'), 'w')
|
||||
#
|
||||
#
|
||||
# subprocess.call( [ context['isql_path'], dsn, "-i", f_blob_sql.name],
|
||||
# stdout = f_blob_log,
|
||||
# stderr = subprocess.STDOUT
|
||||
# )
|
||||
# f_blob_log.close()
|
||||
#
|
||||
#
|
||||
# # RDB$DEBUG_INFO 1a:1e1
|
||||
# # Parameters:
|
||||
# # Number Name Type
|
||||
@ -80,20 +81,20 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# # ^ ^ ^
|
||||
# # | | |
|
||||
# # +-----------+----------+---- all of them can vary!
|
||||
#
|
||||
#
|
||||
# # Print content of log with filtering lines:we are interesting only for rows
|
||||
# # which contain words: {'Parameters', 'Number', 'Variables', 'BLR'}.
|
||||
# # For last line (with three numbers for offset, line and col) we just check
|
||||
# # matching of row to appropriate pattern.
|
||||
#
|
||||
#
|
||||
# # NB: we remove all exsessive spaces from printed lines.
|
||||
#
|
||||
#
|
||||
# pattern = re.compile("[\\s]+[0-9]+[\\s]+[0-9]+[\\s]+[0-9]+")
|
||||
#
|
||||
#
|
||||
# with open( f_blob_log.name,'r') as f:
|
||||
# for line in f:
|
||||
# line = line.upper()
|
||||
#
|
||||
#
|
||||
# if ('PARAMETER' in line or
|
||||
# 'NUMBER' in line or
|
||||
# 'INPUT' in line or
|
||||
@ -101,24 +102,25 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# 'VARIABLE' in line or
|
||||
# 'BLR' in line):
|
||||
# print(' '.join(line.split()).upper())
|
||||
#
|
||||
#
|
||||
# if pattern.match(line):
|
||||
# print('VALUES: <OFFSET> <LINE> <COLUMN>')
|
||||
#
|
||||
#
|
||||
# ################################################
|
||||
# # Cleanup
|
||||
#
|
||||
#
|
||||
# f_list=[]
|
||||
# f_list.append(f_blob_sql)
|
||||
# f_list.append(f_blob_log)
|
||||
#
|
||||
#
|
||||
# for i in range(len(f_list)):
|
||||
# if os.path.isfile(f_list[i].name):
|
||||
# os.remove(f_list[i].name)
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)

act_1 = python_act('db_1', substitutions=substitutions_1)

expected_stdout_1 = """
PARAMETERS:
@ -130,11 +132,63 @@ expected_stdout_1 = """
BLR TO SOURCE MAPPING:
BLR OFFSET LINE COLUMN
VALUES: <OFFSET> <LINE> <COLUMN>
"""

@pytest.mark.version('>=3.0')
@pytest.mark.xfail
def test_1(db_1):
    pytest.fail("Test not IMPLEMENTED")
def test_1(act_1: Action, capsys):
    # -- NB: i'm not sure that this test properly reflects the trouble described in the ticket.
    # -- At least on 3.0 Alpha 1, Alpha 2 and Beta 2 (31807) output is identical.
    # -- Note that value in "BLR to Source mapping" under 'Column' was changed to reflect
    # -- real offset from the beginning of line in THIS .fbt file (right shifted on 4 character).
    sql_script = """
    set blob all;
    set list on;
    select rdb$debug_info from rdb$procedures;
    """
    act_1.isql(switches=[], input=sql_script)
    # RDB$DEBUG_INFO 1a:1e1
    # Parameters:
    # Number Name Type
    # --------------------------------------------------
    # 0 A_ID INPUT
    # 0 O_TXT OUTPUT
    #
    # Variables:
    # Number Name
    # -------------------------------------------
    # 0 O_TXT
    #
    # BLR to Source mapping:
    # BLR offset Line Column
    # --------------------------------
    # 42 2 79
    # ^ ^ ^
    # | | |
    # +-----------+----------+---- all of them can vary!

    # Print content of log with filtering lines:we are interesting only for rows
    # which contain words: {'Parameters', 'Number', 'Variables', 'BLR'}.
    # For last line (with three numbers for offset, line and col) we just check
    # matching of row to appropriate pattern.

    # NB: we remove all exsessive spaces from printed lines.

    pattern = re.compile("[\\s]+[0-9]+[\\s]+[0-9]+[\\s]+[0-9]+")
    for line in act_1.stdout.splitlines():
        line = line.upper()

        if ('PARAMETER' in line or
            'NUMBER' in line or
            'INPUT' in line or
            'OUTPUT' in line or
            'VARIABLE' in line or
            'BLR' in line):
            print(' '.join(line.split()).upper())

        if pattern.match(line):
            print('VALUES: <OFFSET> <LINE> <COLUMN>')
    # Test
    act_1.reset()
    act_1.expected_stdout = expected_stdout_1
    act_1.stdout = capsys.readouterr().out
    assert act_1.clean_stdout == act_1.clean_expected_stdout
@ -2,7 +2,7 @@
#
# id: bugs.core_4382
# title: User savepoints are not released on commit
# decription:
# Added separate code for 4.0: one need to be sure that all changes have been flushed on disk before we launch gstat.
# See letter from hvlad, 02.02.2019 22:30.
# ::: NOTE :::
@ -12,19 +12,20 @@
# 4.0.0.1421: OK, 3.340s. // SS, SC, CS
# 3.0.5.33097: OK, 1.113s.
# 2.5.9.27127: OK, 0.650s.
#
# tracker_id: CORE-4382
# min_versions: ['2.5.4']
# versions: 4.0
# qmid:

import pytest
from firebird.qa import db_factory, isql_act, Action
from firebird.qa import db_factory, python_act, Action

# version: 4.0
# resources: None

substitutions_1 = [('^((?!nodes).)*$', ''), ('Root page: [0-9]+,', ''), ('Depth', 'depth')]
substitutions_1 = [('^((?!nodes).)*$', ''),
                   ('Root page: [0-9]+,', ''), ('Depth', 'depth')]

init_script_1 = """"""

@ -32,12 +33,12 @@ db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)

# test_script_1
#---
#
# import os
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
# db_conn.close()
#
# sql_prep='''
# create table g_test (f integer);
# create index g_ind on g_test (f);
@ -55,21 +56,43 @@ db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)
# savepoint e;
# update g_test set f=7;
# commit;
# select * from g_test;
# '''
# runProgram( 'isql',[ '-q', dsn], sql_prep ),
# runProgram( 'gstat',['-i', dsn] )
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)

act_1 = python_act('db_1', substitutions=substitutions_1)

expected_stdout_1 = """
Root page: 203, depth: 1, leaf buckets: 1, nodes: 1
"""

@pytest.mark.version('>=4.0')
@pytest.mark.xfail
def test_1(db_1):
    pytest.fail("Test not IMPLEMENTED")

def test_1(act_1: Action):
    sql_scipt = """
    create table g_test (f integer);
    create index g_ind on g_test (f);
    insert into g_test values (1);
    commit;
    update g_test set f=2;
    savepoint a;
    update g_test set f=3;
    savepoint b;
    update g_test set f=4;
    savepoint c;
    update g_test set f=5;
    savepoint d;
    update g_test set f=6;
    savepoint e;
    update g_test set f=7;
    commit;
    select * from g_test;
    """
    act_1.isql(switches=['-q'], input=sql_scipt)
    act_1.reset()
    act_1.expected_stdout = expected_stdout_1
    act_1.gstat(switches=['-i'])
    assert act_1.clean_stdout == act_1.clean_expected_stdout
@ -2,16 +2,17 @@
#
# id: bugs.core_4386
# title: Report more details for "object in use" errors
# decription:
# Checked on 3.0.6.33242 (intermediate build) after discuss with dimitr.
#
# tracker_id: CORE-4386
# min_versions: ['3.0.6']
# versions: 3.0.6
# qmid: None

import pytest
from firebird.qa import db_factory, isql_act, Action
from firebird.qa import db_factory, python_act, Action
from firebird.driver import TPB, Isolation

# version: 3.0.6
# resources: None
@ -26,72 +27,72 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
#---
|
||||
# import os
|
||||
# import subprocess
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
# CUSTOM_TX_PARAMS = ( [ fdb.isc_tpb_read_committed, fdb.isc_tpb_no_rec_version, fdb.isc_tpb_nowait ] )
|
||||
#
|
||||
#
|
||||
# sql_ddl='''
|
||||
# set bail on;
|
||||
# create or alter procedure sp_worker as begin end;
|
||||
# create or alter procedure sp_test as begin end;
|
||||
# create or alter view v_test as select 1 x from rdb$database;
|
||||
# commit;
|
||||
#
|
||||
#
|
||||
# recreate table test1(id int,x int);
|
||||
# recreate table test2(id int,x int);
|
||||
# commit;
|
||||
#
|
||||
#
|
||||
# create index test1_id on test1(id);
|
||||
# commit;
|
||||
# create descending index test2_id_x_desc on test2(id,x);
|
||||
# commit;
|
||||
#
|
||||
#
|
||||
# create or alter view v_test as select id,x from test1 where id between 15 and 30;
|
||||
# commit;
|
||||
#
|
||||
#
|
||||
# set term ^;
|
||||
# create or alter procedure sp_worker(a_id int) returns(x int) as
|
||||
# begin
|
||||
# for
|
||||
# for
|
||||
# execute statement ('select v.x from v_test v where v.id = ? and exists(select * from test2 b where b.id = v.id)') (:a_id)
|
||||
# into x
|
||||
# do
|
||||
# into x
|
||||
# do
|
||||
# suspend;
|
||||
# end
|
||||
# ^
|
||||
# create or alter procedure sp_test(a_id int) returns(x int) as
|
||||
# begin
|
||||
# for
|
||||
# for
|
||||
# execute statement ('select x from sp_worker(?)') (:a_id)
|
||||
# into x
|
||||
# do
|
||||
# into x
|
||||
# do
|
||||
# suspend;
|
||||
# end
|
||||
# ^
|
||||
# set term ;^
|
||||
# commit;
|
||||
#
|
||||
#
|
||||
# insert into test1 values(11,111);
|
||||
# insert into test1 values(21,222);
|
||||
# insert into test1 values(31,333);
|
||||
# insert into test1 values(41,444);
|
||||
# commit;
|
||||
#
|
||||
#
|
||||
# insert into test2 select * from test1;
|
||||
# commit;
|
||||
# '''
|
||||
#
|
||||
#
|
||||
# runProgram('isql', [ dsn ], sql_ddl)
|
||||
#
|
||||
#
|
||||
# con1=fdb.connect(dsn = dsn)
|
||||
# cur1=con1.cursor()
|
||||
# cur1.execute('select x from sp_test(21)')
|
||||
# for r in cur1:
|
||||
# pass
|
||||
#
|
||||
#
|
||||
# drop_commands = [
|
||||
# 'drop procedure sp_test',
|
||||
# 'drop procedure sp_worker',
|
||||
@ -100,7 +101,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# 'drop index test1_id',
|
||||
# 'drop index test2_id_x_desc'
|
||||
# ]
|
||||
#
|
||||
#
|
||||
# for i,c in enumerate(drop_commands):
|
||||
# con2=fdb.connect(dsn = dsn)
|
||||
# tx = con2.trans( default_tpb = CUSTOM_TX_PARAMS )
|
||||
@ -109,7 +110,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# #############################################
|
||||
# tx.begin()
|
||||
# cur2=tx.cursor()
|
||||
#
|
||||
#
|
||||
# try:
|
||||
# cur2.execute( c )
|
||||
# tx.commit()
|
||||
@ -119,13 +120,14 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# finally:
|
||||
# cur2.close()
|
||||
# con2.close()
|
||||
#
|
||||
#
|
||||
# cur1.close()
|
||||
# con1.close()
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
expected_stdout_1 = """
|
||||
Error while commiting transaction:
|
||||
@ -169,11 +171,89 @@ expected_stdout_1 = """
|
||||
- unsuccessful metadata update
|
||||
- object INDEX "TEST2_ID_X_DESC" is in use
|
||||
335544345
|
||||
"""
|
||||
"""
|
||||
|
||||
@pytest.mark.version('>=3.0.6')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
def test_1(act_1: Action, capsys):
|
||||
ddl_script = """
|
||||
set bail on;
|
||||
create or alter procedure sp_worker as begin end;
|
||||
create or alter procedure sp_test as begin end;
|
||||
create or alter view v_test as select 1 x from rdb$database;
|
||||
commit;
|
||||
|
||||
recreate table test1(id int,x int);
|
||||
recreate table test2(id int,x int);
|
||||
commit;
|
||||
|
||||
create index test1_id on test1(id);
|
||||
commit;
|
||||
create descending index test2_id_x_desc on test2(id,x);
|
||||
commit;
|
||||
|
||||
create or alter view v_test as select id,x from test1 where id between 15 and 30;
|
||||
commit;
|
||||
|
||||
set term ^;
|
||||
create or alter procedure sp_worker(a_id int) returns(x int) as
|
||||
begin
|
||||
for
|
||||
execute statement ('select v.x from v_test v where v.id = ? and exists(select * from test2 b where b.id = v.id)') (:a_id)
|
||||
into x
|
||||
do
|
||||
suspend;
|
||||
end
|
||||
^
|
||||
create or alter procedure sp_test(a_id int) returns(x int) as
|
||||
begin
|
||||
for
|
||||
execute statement ('select x from sp_worker(?)') (:a_id)
|
||||
into x
|
||||
do
|
||||
suspend;
|
||||
end
|
||||
^
|
||||
set term ;^
|
||||
commit;
|
||||
|
||||
insert into test1 values(11,111);
|
||||
insert into test1 values(21,222);
|
||||
insert into test1 values(31,333);
|
||||
insert into test1 values(41,444);
|
||||
commit;
|
||||
|
||||
insert into test2 select * from test1;
|
||||
commit;
|
||||
"""
|
||||
act_1.isql(switches=[], input=ddl_script)
|
||||
#
|
||||
tpb = TPB(isolation=Isolation.READ_COMMITTED_NO_RECORD_VERSION, lock_timeout=0).get_buffer()
|
||||
with act_1.db.connect() as con:
|
||||
cur1 = con.cursor()
|
||||
cur1.execute('select x from sp_test(21)').fetchall()
|
||||
drop_commands = ['drop procedure sp_test',
|
||||
'drop procedure sp_worker',
|
||||
'drop view v_test',
|
||||
'drop table test2',
|
||||
'drop index test1_id',
|
||||
'drop index test2_id_x_desc']
|
||||
for cmd in drop_commands:
|
||||
with act_1.db.connect() as con2:
|
||||
tx = con2.transaction_manager(default_tpb=tpb)
|
||||
tx.begin()
|
||||
cur2 = tx.cursor()
|
||||
try:
|
||||
cur2.execute(cmd)
|
||||
except Exception as exc:
|
||||
print(exc)
|
||||
#
|
||||
act_1.reset()
|
||||
act_1.expected_stdout = expected_stdout_1
|
||||
act_1.stdout = capsys.readouterr().out
|
||||
# [pcisar] 22.11.2021
|
||||
# This test requires READ_COMMITTED_NO_RECORD_VERSION transaction to work, which
|
||||
# requires ReadConsistency disabled in FB 4. However, it does not work as expected
|
||||
# because all drop commands pass without exception even with ReadConsistency disabled.
|
||||
# Not yet tested with FB3. I also expect it FAIL due to exception differences in FDB
|
||||
# and new driver (this will be fixed once we make it to raise "object in use" exception)
|
||||
assert act_1.clean_stdout == act_1.clean_expected_stdout
|
||||
|
@ -2,51 +2,55 @@
|
||||
#
|
||||
# id: bugs.core_4388
|
||||
# title: SELECT WITH LOCK may enter an infinite loop for a single record
|
||||
# decription:
|
||||
# decription:
|
||||
# Caution: could not reproduce on neither WI-T3.0.0.30566 Firebird 3.0 Alpha 1 nor WI-T3.0.0.30809 Firebird 3.0 Alpha 2.
|
||||
# Any advice about how this test should be properly written will be appreciated.
|
||||
# Added separate code for 4.0 because isc_update_conflict now can be primary code of exception reason
|
||||
# (after consulting with Vlad, letter 06-aug-2018 16:27).
|
||||
#
|
||||
#
|
||||
# 01-apr-2020. Expected STDERR section for 4.0.x was changed BACK TO PREVIOUS set of messages, i.e.:
|
||||
# 1. Statement failed, SQLSTATE = 40001
|
||||
# 2. deadlock <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< THIS LINE APPEARED SINCE 4.0.0.1848
|
||||
# 3. update conflicts with concurrent update
|
||||
# 4. concurrent transaction number is ...
|
||||
# Confirmed by Alex, letter 31.03.2020 12:01.
|
||||
#
|
||||
#
|
||||
# Checked on:
|
||||
# 3.0.4.33022: OK, 5.453s.
|
||||
# 4.0.0.1158: OK, 5.313s.
|
||||
#
|
||||
#
|
||||
# 21.09.2020: removed separate section for 4.0 because error messages equal to FB 3.x. Changed 'substitution' section.
|
||||
#
|
||||
#
|
||||
# Waiting for completion of child ISQL async process is done by call <isql_PID>.wait() instead of old (and "fragile")
|
||||
# assumption about maximal time that it could last before forcedly terminate it.
|
||||
#
|
||||
#
|
||||
# Replaced direct specification of executable 'isql' with context['isql_path'] in order to remove dependence on PATH
|
||||
# (suggested by dimitr, letter 28.08.2020, 13:42; otherwise directory with isql must be added into PATH list).
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# tracker_id: CORE-4388
|
||||
# min_versions: ['3.0']
|
||||
# versions: 3.0
|
||||
# qmid: None
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
import subprocess
|
||||
import time
|
||||
from pathlib import Path
|
||||
from firebird.qa import db_factory, python_act, Action, temp_file
|
||||
|
||||
# version: 3.0
|
||||
# resources: None
|
||||
|
||||
substitutions_1 = [('(-)?concurrent\\s+transaction\\s+number(\\s+is)?\\s+\\d+', 'concurrent transaction'), ('After\\s+line\\s+\\d+.*', '')]
|
||||
substitutions_1 = [('(-)?concurrent\\s+transaction\\s+number(\\s+is)?\\s+\\d+', 'concurrent transaction'),
|
||||
('After\\s+line\\s+\\d+.*', '')]
|
||||
|
||||
init_script_1 = """
|
||||
create table test(id int primary key, x int);
|
||||
commit;
|
||||
insert into test values(1, 100);
|
||||
commit;
|
||||
"""
|
||||
"""
|
||||
|
||||
db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
|
||||
@ -57,28 +61,28 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# from subprocess import Popen
|
||||
# import time
|
||||
# import fdb
|
||||
#
|
||||
#
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
#
|
||||
#
|
||||
# #-----------------------------------
|
||||
#
|
||||
#
|
||||
# def flush_and_close(file_handle):
|
||||
# # https://docs.python.org/2/library/os.html#os.fsync
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
|
||||
# global os
|
||||
#
|
||||
#
|
||||
# file_handle.flush()
|
||||
# os.fsync(file_handle.fileno())
|
||||
#
|
||||
#
|
||||
# file_handle.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def cleanup( f_names_list ):
|
||||
# global os
|
||||
# for i in range(len( f_names_list )):
|
||||
@ -86,16 +90,16 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# os.remove( f_names_list[i] )
|
||||
# if os.path.isfile( f_names_list[i]):
|
||||
# print('ERROR: can not remove file ' + f_names_list[i])
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# att1 = fdb.connect( dsn = dsn)
|
||||
#
|
||||
# # Delete record but not yet commit - it's a time
|
||||
#
|
||||
# # Delete record but not yet commit - it's a time
|
||||
# # to make another connection:
|
||||
# att1.execute_immediate("delete from test where id = 1")
|
||||
#
|
||||
#
|
||||
# sql_cmd='''
|
||||
# set list on;
|
||||
# -- set echo on;
|
||||
@ -103,58 +107,84 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# set transaction lock timeout 20;
|
||||
# select x from test where id = 1 with lock;
|
||||
# '''
|
||||
#
|
||||
#
|
||||
# f_select_with_lock_sql = open( os.path.join(context['temp_directory'],'tmp_4388_select_with_lock.sql'), 'w')
|
||||
# f_select_with_lock_sql.write(sql_cmd)
|
||||
# f_select_with_lock_sql.close()
|
||||
#
|
||||
#
|
||||
# #context['isql_path']
|
||||
#
|
||||
#
|
||||
# f_select_with_lock_log = open( os.path.join(context['temp_directory'],'tmp_4388_select_with_lock.log'), 'w')
|
||||
#
|
||||
#
|
||||
# p_hanged_isql=subprocess.Popen( [ context['isql_path'], dsn, "-n", "-i", f_select_with_lock_sql.name ],
|
||||
# stdout = f_select_with_lock_log,
|
||||
# stderr = subprocess.STDOUT
|
||||
# )
|
||||
#
|
||||
#
|
||||
# # Let ISQL to be loaded and establish its attachment:
|
||||
# time.sleep(2)
|
||||
#
|
||||
#
|
||||
# # Return to att1 and make COMMIT of deleted record:
|
||||
# #############
|
||||
# att1.commit()
|
||||
# att1.close()
|
||||
# #############
|
||||
#
|
||||
#
|
||||
# # Wait until ISQL complete its mission:
|
||||
# p_hanged_isql.wait()
|
||||
#
|
||||
#
|
||||
# flush_and_close(f_select_with_lock_log)
|
||||
#
|
||||
#
|
||||
# with open(f_select_with_lock_log.name,'r') as f:
|
||||
# print(f.read())
|
||||
# f.close()
|
||||
#
|
||||
#
|
||||
# ###############################
|
||||
# # Cleanup.
|
||||
# time.sleep(1)
|
||||
# f_list = [ i.name for i in (f_select_with_lock_sql, f_select_with_lock_log) ]
|
||||
# cleanup( f_list )
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
expected_stdout_1 = """
|
||||
Statement failed, SQLSTATE = 40001
|
||||
deadlock
|
||||
-update conflicts with concurrent update
|
||||
-concurrent transaction number is 13
|
||||
"""
|
||||
"""
|
||||
|
||||
test_script_1 = temp_file('test-script.sql')
|
||||
script_out = temp_file('test-script.out')
|
||||
|
||||
@pytest.mark.version('>=3.0')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
|
||||
|
||||
def test_1(act_1: Action, test_script_1: Path, script_out: Path):
|
||||
with act_1.db.connect() as att1:
|
||||
# Delete record but not yet commit - it's a time
|
||||
# to make another connection:
|
||||
att1.execute_immediate("delete from test where id = 1")
|
||||
test_script_1.write_text("""
|
||||
set list on;
|
||||
-- set echo on;
|
||||
commit;
|
||||
set transaction lock timeout 20;
|
||||
select x from test where id = 1 with lock;
|
||||
""")
|
||||
try:
|
||||
with open(script_out, mode='w') as output:
|
||||
p_test_sql = subprocess.Popen([act_1.vars['isql'], '-n', '-i', str(test_script_1),
|
||||
'-user', act_1.db.user,
|
||||
'-password', act_1.db.password, act_1.db.dsn],
|
||||
stdout=output, stderr=subprocess.STDOUT)
|
||||
#
|
||||
time.sleep(2)
|
||||
finally:
|
||||
att1.commit()
|
||||
p_test_sql.wait()
|
||||
# Check
|
||||
act_1.expected_stdout = expected_stdout_1
|
||||
act_1.stdout = script_out.read_text()
|
||||
assert act_1.clean_stdout == act_1.clean_expected_stdout
|
||||
|
@ -2,14 +2,15 @@
#
# id: bugs.core_4398
# title: Provide ability to specify extra-long name of log when doing gbak to avoid "attempt to store 256 bytes in a clumplet" message
# decription:
# tracker_id: CORE-4398
# min_versions: ['3.0']
# versions: 3.0
# qmid:

import pytest
from firebird.qa import db_factory, isql_act, Action
from pathlib import Path
from firebird.qa import db_factory, python_act, Action, temp_file

# version: 3.0
# resources: None
@ -23,7 +24,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)

# test_script_1
#---
# import os
#
# print ('Starting backup...')
# fbk = os.path.join(context['temp_directory'],'backup.fbk')
# lbk = os.path.join(context['temp_directory'],'A012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890.log')
@ -38,9 +39,10 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# print ('Delete log file...')
# os.remove(lbk)
# print ('Log file deleted.')
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)

act_1 = python_act('db_1', substitutions=substitutions_1)

expected_stdout_1 = """
Starting backup...
@ -49,11 +51,23 @@ expected_stdout_1 = """
Backup file deleted.
Delete log file...
Log file deleted.
"""

backup_file_1 = temp_file('backup.fbk')
log_file_1 = temp_file('A012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890.log')

@pytest.mark.version('>=3.0')
@pytest.mark.xfail
def test_1(db_1):
    pytest.fail("Test not IMPLEMENTED")


def test_1(act_1: Action, capsys, log_file_1: Path, backup_file_1: Path):
    print ('Starting backup...')
    act_1.gbak(switches=['-b', '-v', '-y', str(log_file_1), str(act_1.db.db_path), str(backup_file_1)])
    print ('Backup finished.')
    if backup_file_1.is_file():
        print ('Delete backup file...')
        backup_file_1.unlink()
        print ('Backup file deleted.')
    print ('Delete log file...')
    log_file_1.unlink()
    print ('Log file deleted.')
    act_1.expected_stdout = expected_stdout_1
    act_1.stdout = capsys.readouterr().out
    assert act_1.clean_stdout == act_1.clean_expected_stdout
@ -2,20 +2,22 @@
|
||||
#
|
||||
# id: bugs.core_4451
|
||||
# title: Allow output to trace explain plan form.
|
||||
# decription:
|
||||
# decription:
|
||||
# Checked on
|
||||
# 4.0.0.1685 SS: 7.985s.
|
||||
# 4.0.0.1685 CS: 8.711s.
|
||||
# 3.0.5.33206 SS: 7.281s.
|
||||
# 3.0.5.33206 CS: 8.278s.
|
||||
#
|
||||
#
|
||||
# tracker_id: CORE-4451
|
||||
# min_versions: ['3.0.0']
|
||||
# versions: 3.0
|
||||
# qmid: None
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
import time
|
||||
from threading import Thread, Barrier
|
||||
from firebird.qa import db_factory, python_act, Action
|
||||
|
||||
# version: 3.0
|
||||
# resources: None
|
||||
@ -24,41 +26,41 @@ substitutions_1 = [('[ \t]+', ' '), ('[ \t]+[\\d]+[ \t]+ms', '')]
|
||||
|
||||
init_script_1 = """
|
||||
recreate table test(x int);
|
||||
"""
|
||||
"""
|
||||
|
||||
db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
|
||||
# test_script_1
|
||||
#---
|
||||
#
|
||||
#
|
||||
# import os
|
||||
# import subprocess
|
||||
# from subprocess import Popen
|
||||
# import time
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
#
|
||||
#
|
||||
# fdb_file=db_conn.database_name
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# #-----------------------------------
|
||||
#
|
||||
#
|
||||
# def flush_and_close(file_handle):
|
||||
# # https://docs.python.org/2/library/os.html#os.fsync
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
|
||||
# global os
|
||||
#
|
||||
#
|
||||
# file_handle.flush()
|
||||
# os.fsync(file_handle.fileno())
|
||||
#
|
||||
#
|
||||
# file_handle.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def cleanup( f_names_list ):
|
||||
# global os
|
||||
# for i in range(len( f_names_list )):
|
||||
@ -66,18 +68,18 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# os.remove( f_names_list[i] )
|
||||
# if os.path.isfile( f_names_list[i]):
|
||||
# print('ERROR: can not remove file ' + f_names_list[i])
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# #####################################################################
|
||||
# # Prepare config for trace session that will be launched by call of FBSVCMGR:
|
||||
#
|
||||
#
|
||||
# txt = ''' database= # %[\\\\\\\\/]bugs.core_4451.fdb
|
||||
# {
|
||||
# enabled = true
|
||||
# time_threshold = 0
|
||||
# time_threshold = 0
|
||||
# log_initfini = false
|
||||
# print_plan = true
|
||||
# explain_plan = true
|
||||
@ -88,40 +90,40 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# f_trccfg=open( os.path.join(context['temp_directory'],'tmp_trace_4451.cfg'), 'w')
|
||||
# f_trccfg.write(txt)
|
||||
# flush_and_close( f_trccfg )
|
||||
#
|
||||
#
|
||||
# #####################################################################
|
||||
# # Async. launch of trace session using FBSVCMGR action_trace_start:
|
||||
#
|
||||
#
|
||||
# f_trclog=open( os.path.join(context['temp_directory'],'tmp_trace_4451.log'), 'w')
|
||||
#
|
||||
#
|
||||
# # Execute a child program in a new process, redirecting STDERR to the same target as of STDOUT:
|
||||
# p_svcmgr = Popen( [ context['fbsvcmgr_path'], "localhost:service_mgr",
|
||||
# "action_trace_start",
|
||||
# "trc_cfg", f_trccfg.name
|
||||
# ],
|
||||
# stdout=f_trclog,
|
||||
# stdout=f_trclog,
|
||||
# stderr=subprocess.STDOUT
|
||||
# )
|
||||
#
|
||||
#
|
||||
# # Wait! Trace session is initialized not instantly!
|
||||
# time.sleep(2)
|
||||
#
|
||||
#
|
||||
# #####################################################################
|
||||
#
|
||||
#
|
||||
# # Determine active trace session ID (for further stop):
|
||||
#
|
||||
#
|
||||
# f_trclst=open( os.path.join(context['temp_directory'],'tmp_trace_4451.lst'), 'w')
|
||||
# subprocess.call([context['fbsvcmgr_path'], "localhost:service_mgr",
|
||||
# "action_trace_list"],
|
||||
# stdout=f_trclst, stderr=subprocess.STDOUT
|
||||
# )
|
||||
# flush_and_close( f_trclst )
|
||||
#
|
||||
# # Session ID: 5
|
||||
# # user:
|
||||
# # date: 2015-08-27 15:24:14
|
||||
# # flags: active, trace
|
||||
#
|
||||
#
|
||||
# # Session ID: 5
|
||||
# # user:
|
||||
# # date: 2015-08-27 15:24:14
|
||||
# # flags: active, trace
|
||||
#
|
||||
# trcssn=0
|
||||
# with open( f_trclst.name,'r') as f:
|
||||
# for line in f:
|
||||
@ -132,79 +134,115 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# trcssn=word
|
||||
# i=i+1
|
||||
# break
|
||||
#
|
||||
# # Result: `trcssn` is ID of active trace session.
|
||||
#
|
||||
# # Result: `trcssn` is ID of active trace session.
|
||||
# # We have to terminate trace session that is running on server BEFORE we termitane process `p_svcmgr`
|
||||
# if trcssn==0:
|
||||
# print("Error parsing trace session ID.")
|
||||
# flush_and_close( f_trclog )
|
||||
# else:
|
||||
# #####################################################################
|
||||
#
|
||||
#
|
||||
# # Preparing script for ISQL:
|
||||
#
|
||||
#
|
||||
# sql_cmd='''select count(*) from test;'''
|
||||
#
|
||||
#
|
||||
# so=sys.stdout
|
||||
# se=sys.stderr
|
||||
#
|
||||
#
|
||||
# sys.stdout = open(os.devnull, 'w')
|
||||
# sys.stderr = sys.stdout
|
||||
#
|
||||
#
|
||||
# runProgram('isql',[dsn],sql_cmd)
|
||||
#
|
||||
#
|
||||
# sys.stdout = so
|
||||
# sys.stderr = se
|
||||
#
|
||||
#
|
||||
# # do NOT reduce this delay!
|
||||
# time.sleep(2)
|
||||
#
|
||||
#
|
||||
# #####################################################################
|
||||
#
|
||||
#
|
||||
# # Stop trace session:
|
||||
#
|
||||
#
|
||||
# f_trclst=open(f_trclst.name, "a")
|
||||
# f_trclst.seek(0,2)
|
||||
# subprocess.call( [ context['fbsvcmgr_path'], "localhost:service_mgr",
|
||||
# "action_trace_stop",
|
||||
# "trc_id",trcssn
|
||||
# ],
|
||||
# stdout=f_trclst,
|
||||
# stdout=f_trclst,
|
||||
# stderr=subprocess.STDOUT
|
||||
# )
|
||||
# flush_and_close( f_trclst )
|
||||
#
|
||||
#
|
||||
# p_svcmgr.terminate()
|
||||
# flush_and_close( f_trclog )
|
||||
#
|
||||
#
|
||||
# # do NOT remove this delay:
|
||||
# time.sleep(1)
|
||||
#
|
||||
#
|
||||
# show_line = 0
|
||||
# with open(f_trclog.name) as f:
|
||||
# for line in f:
|
||||
# show_line = ( show_line + 1 if ('^' * 79) in line or show_line>0 else show_line )
|
||||
# if show_line > 1:
|
||||
# print(line)
|
||||
#
|
||||
#
|
||||
# # cleanup:
|
||||
# ##########
|
||||
# time.sleep(1)
|
||||
# cleanup( [i.name for i in (f_trclst, f_trccfg, f_trclog) ] )
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
expected_stdout_1 = """
|
||||
Select Expression
|
||||
-> Aggregate
|
||||
-> Table "TEST" Full Scan
|
||||
"""
|
||||
"""
|
||||
|
||||
def trace_session(act: Action, b: Barrier):
|
||||
cfg30 = ['# Trace config, format for 3.0. Generated auto, do not edit!',
|
||||
f'database=%[\\\\/]{act.db.db_path.name}',
|
||||
'{',
|
||||
' enabled = true',
|
||||
' time_threshold = 0',
|
||||
' log_initfini = false',
|
||||
' print_plan = true',
|
||||
' explain_plan = true',
|
||||
' log_statement_prepare = true',
|
||||
' include_filter=%(from|join)[[:whitespace:]]test%',
|
||||
'}']
|
||||
with act.connect_server() as srv:
|
||||
srv.trace.start(config='\n'.join(cfg30))
|
||||
b.wait()
|
||||
for line in srv:
|
||||
print(line)
|
||||
|
||||
@pytest.mark.version('>=3.0')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
|
||||
|
||||
def test_1(act_1: Action, capsys):
|
||||
b = Barrier(2)
|
||||
trace_thread = Thread(target=trace_session, args=[act_1, b])
|
||||
trace_thread.start()
|
||||
b.wait()
|
||||
act_1.isql(switches=[], input='select count(*) from test;')
|
||||
time.sleep(2)
|
||||
with act_1.connect_server() as srv:
|
||||
for session in list(srv.trace.sessions.keys()):
|
||||
srv.trace.stop(session_id=session)
|
||||
trace_thread.join(1.0)
|
||||
if trace_thread.is_alive():
|
||||
pytest.fail('Trace thread still alive')
|
||||
#
|
||||
show_line = 0
|
||||
for line in capsys.readouterr().out.splitlines():
|
||||
show_line = (show_line + 1 if ('^' * 79) in line or show_line>0 else show_line)
|
||||
if show_line > 1:
|
||||
print(line)
|
||||
act_1.expected_stdout = expected_stdout_1
|
||||
act_1.stdout = capsys.readouterr().out
|
||||
assert act_1.clean_stdout == act_1.clean_expected_stdout
|
||||
|
@ -2,23 +2,21 @@
#
# id: bugs.core_4461
# title: nbackup prints error messages to stdout instead stderr
# decription:
# tracker_id: CORE-4461
# min_versions: ['2.5.4']
# versions: 2.5.4
# qmid:

import pytest
from firebird.qa import db_factory, isql_act, Action
from firebird.qa import db_factory, python_act, Action

# version: 2.5.4
# resources: None

substitutions_1 = [('Failure: Database error', '')]

init_script_1 = """
-- NB: line `Failure: Database error` exists only in 2.5.x output.
"""
init_script_1 = """"""

db_1 = db_factory(sql_dialect=3, init=init_script_1)

@ -26,7 +24,8 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
#---
# runProgram('nbackup',['-user','nonExistentFoo','-pas','invalidBar','-L',dsn])
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)

act_1 = python_act('db_1', substitutions=substitutions_1)

expected_stderr_1 = """
[
@ -34,11 +33,13 @@ expected_stderr_1 = """
Your user name and password are not defined. Ask your database administrator to set up a Firebird login.
SQLCODE:-902
]
"""

@pytest.mark.version('>=2.5.4')
@pytest.mark.xfail
def test_1(db_1):
    pytest.fail("Test not IMPLEMENTED")
def test_1(act_1: Action):
    act_1.expected_stderr = expected_stderr_1
    act_1.nbackup(switches=['-user', 'nonExistentFoo', '-password', 'invalidBar',
                            '-L', act_1.db.dsn], credentials=False)
    assert act_1.clean_stderr == act_1.clean_expected_stderr
@ -2,7 +2,7 @@
|
||||
#
|
||||
# id: bugs.core_4470
|
||||
# title: gbak fails to restore database containing dependency between views and packaged functions
|
||||
# decription:
|
||||
# decription:
|
||||
# Confirmed on WI-T3.0.0.30809 Firebird 3.0 Alpha 2:
|
||||
# gbak: ERROR:action cancelled by trigger (0) to preserve data integrity
|
||||
# gbak: ERROR: could not find object for GRANT
|
||||
@ -15,14 +15,16 @@
|
||||
# 4.0.0.1633 CS: 3.438s.
|
||||
# 3.0.5.33180 SS: 2.137s.
|
||||
# 3.0.5.33178 CS: 2.490s.
|
||||
#
|
||||
#
|
||||
# tracker_id: CORE-4470
|
||||
# min_versions: ['3.0']
|
||||
# versions: 3.0
|
||||
# qmid: None
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
from pathlib import Path
|
||||
from firebird.qa import db_factory, python_act, Action, temp_file
|
||||
from firebird.driver import SrvRestoreFlag
|
||||
|
||||
# version: 3.0
|
||||
# resources: None
|
||||
@ -49,7 +51,7 @@ init_script_1 = """
|
||||
set autoddl off;
|
||||
|
||||
set term ^;
|
||||
|
||||
|
||||
create package wf as
|
||||
begin
|
||||
function cest (t dtime) returns dtime;
|
||||
@ -61,18 +63,18 @@ init_script_1 = """
|
||||
function current_xml returns dvc512;
|
||||
function relpressure(airpressureabsolute dreal,temperature dreal,altitude dreal, humidity dreal) returns dreal;
|
||||
end^
|
||||
|
||||
|
||||
set term ;^
|
||||
commit work;
|
||||
|
||||
set autoddl on;
|
||||
|
||||
|
||||
create table constants (id did generated by default as identity not null,
|
||||
typ dvc255,
|
||||
val dvc255,
|
||||
keys dbigint,
|
||||
primary key (id));
|
||||
|
||||
|
||||
create table raw (id did generated by default as identity not null,
|
||||
readtime dtime not null,
|
||||
delay dsmallint,
|
||||
@ -90,12 +92,12 @@ init_script_1 = """
|
||||
uv dsmallint,
|
||||
primary key (id),
|
||||
constraint unq1_raw unique (readtime));
|
||||
|
||||
|
||||
create table timezone (jahr dsmallint not null,
|
||||
gmt_from dtime,
|
||||
gmt_thru dtime,
|
||||
constraint pk_timezone primary key (jahr));
|
||||
|
||||
|
||||
create index raw_idx1 on raw (hum_in);
|
||||
create descending index raw_idx10 on raw (abs_pressure);
|
||||
create descending index raw_idx11 on raw (readtime);
|
||||
@ -108,7 +110,7 @@ init_script_1 = """
|
||||
create index raw_idx7 on raw (wind_gust);
|
||||
create index raw_idx8 on raw (wind_dir);
|
||||
create index raw_idx9 on raw (rain);
|
||||
|
||||
|
||||
create view meteo (timestamp_utc, timestamp_local, tempint, humint, temp, hum, wind, wind_dir, wind_gust, wind_gust_dir, dew_point, rain, rain_rate, pressure, uv_index, solar_rad) as
|
||||
select readtime, wf.fn_local_time(readtime), temp_in, hum_in, temp_out, hum_out, wind_ave / 3.6 , 22.5 * wind_dir, wind_gust / 3.6 ,
|
||||
22.5 * wind_dir, wf.dewpoint(temp_out, hum_out), cast(rain - lead(rain) over(order by readtime desc) as numeric (6,3)) , 0,
|
||||
@ -118,28 +120,28 @@ init_script_1 = """
|
||||
|
||||
set autoddl off;
|
||||
set term ^;
|
||||
|
||||
|
||||
create package body wf as
|
||||
begin
|
||||
|
||||
|
||||
function CEST (T dtime)returns dtime
|
||||
AS
|
||||
begin
|
||||
return dateadd (2 hour to t);
|
||||
end
|
||||
|
||||
|
||||
function CET (T dtime)returns dtime
|
||||
AS
|
||||
begin
|
||||
return dateadd (1 hour to t);
|
||||
end
|
||||
|
||||
|
||||
function altitude returns dreal
|
||||
as
|
||||
begin
|
||||
return (select c.val from constants c where c.typ='Altitude');
|
||||
end
|
||||
|
||||
|
||||
function fn_local_time (t dtime)returns dtime
|
||||
as
|
||||
declare variable jahr dsmallint;
|
||||
@ -156,7 +158,7 @@ init_script_1 = """
|
||||
else
|
||||
return dateadd (1 hour to t);
|
||||
end
|
||||
|
||||
|
||||
function relpressure (airpressureabsolute dreal, temperature dreal, altitude dreal, humidity dreal) returns dreal
|
||||
as
|
||||
declare variable g_n dreal;
|
||||
@ -169,7 +171,7 @@ init_script_1 = """
|
||||
declare variable e_0 dreal;
|
||||
declare variable f_rel dreal;
|
||||
declare variable e_d dreal;
|
||||
|
||||
|
||||
begin
|
||||
g_n = 9.80665;-- erdbeschleunigung (m/s^2)
|
||||
gam = 0.0065;--temperaturabnahme in k pro geopotentiellen metern (k/gpm)
|
||||
@ -183,13 +185,13 @@ init_script_1 = """
|
||||
e_d = f_rel * e_0 * exp((17.5043 * temperature) / (241.2 + temperature));--momentaner stationsdampfdruck (hpa)
|
||||
return airpressureabsolute * exp((g_n * altitude) / (r * (temperature + t_0 + c * e_d + ((gam * altitude) / 2))));
|
||||
end
|
||||
|
||||
|
||||
function yesterday returns dtime
|
||||
as
|
||||
begin
|
||||
return dateadd (-1 day to current_date);
|
||||
end
|
||||
|
||||
|
||||
function dewpoint (temp dreal, hum dsmallint)
|
||||
returns dreal
|
||||
as
|
||||
@ -204,7 +206,7 @@ init_script_1 = """
|
||||
return temp - ((100 - hum) / 5.0);
|
||||
end
|
||||
end
|
||||
|
||||
|
||||
function current_xml returns dvc512
|
||||
as
|
||||
declare variable timestamp_utc type of column meteo.timestamp_utc;
|
||||
@ -223,16 +225,16 @@ init_script_1 = """
|
||||
declare variable pressure type of column meteo.pressure;
|
||||
declare variable uv_index type of column meteo.uv_index;
|
||||
declare variable solar_rad type of column meteo.solar_rad;
|
||||
|
||||
|
||||
begin
|
||||
|
||||
|
||||
select first 1 timestamp_utc,timestamp_local, tempint, humint, temp, hum, wind, wind_dir, wind_gust, wind_gust_dir, dew_point,
|
||||
rain, rain_rate, pressure, uv_index, solar_rad
|
||||
from meteo order by timestamp_utc desc
|
||||
into :timestamp_utc, :timestamp_local, :tempint, :humint, :temp, :hum, :wind, :wind_dir, :wind_gust, :wind_gust_dir,
|
||||
:dew_point, :rain, :rain_rate, :pressure, :uv_index, :solar_rad;
|
||||
|
||||
|
||||
|
||||
|
||||
return '<current><thInt><temp>'||
|
||||
tempint||
|
||||
'</temp><humidity>'||
|
||||
@ -255,19 +257,19 @@ init_script_1 = """
|
||||
substring (timestamp_local from 1 for 19)||
|
||||
'</time></current>';
|
||||
end
|
||||
|
||||
|
||||
end^
|
||||
|
||||
|
||||
set term ;^
|
||||
commit;
|
||||
"""
|
||||
commit;
|
||||
"""
|
||||
|
||||
db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)
|
||||
|
||||
# test_script_1
|
||||
#---
|
||||
# import os
|
||||
#
|
||||
#
|
||||
# fbk = os.path.join(context['temp_directory'],'core_4470-backup.fbk')
|
||||
# fbn = os.path.join(context['temp_directory'],'core_4470-restored.fdb')
|
||||
# print('Creating backup...')
|
||||
@ -280,20 +282,26 @@ db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)
|
||||
# os.remove(fbk)
|
||||
# if os.path.isfile(fbn):
|
||||
# os.remove(fbn)
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
expected_stdout_1 = """
|
||||
Creating backup...
|
||||
Creating restore...
|
||||
METEO
|
||||
WF
|
||||
"""
|
||||
"""
|
||||
|
||||
fbk_file_1 = temp_file('test.fbk')
|
||||
|
||||
@pytest.mark.version('>=3.0')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
|
||||
|
||||
def test_1(act_1: Action, fbk_file_1: Path):
|
||||
with act_1.connect_server() as srv:
|
||||
srv.database.backup(database=str(act_1.db.db_path), backup=str(fbk_file_1))
|
||||
srv.wait()
|
||||
srv.database.restore(backup=str(fbk_file_1), database=str(act_1.db.db_path),
|
||||
flags=SrvRestoreFlag.REPLACE)
|
||||
srv.wait()
|
||||
act_1.expected_stdout = expected_stdout_1
|
||||
act_1.isql(switches=['-q'], input='show view; show package;')
|
||||
assert act_1.clean_stdout == act_1.clean_expected_stdout
|
||||
|
@ -2,12 +2,12 @@
|
||||
#
|
||||
# id: bugs.core_4472
|
||||
# title: Message "Modifying function <F> which is currently in use" when running script with AUTODDL=OFF and <F> is called from INTERNAL function declared in other unit
|
||||
# decription:
|
||||
# decription:
|
||||
# Test retrieves firebird.log TWICE: before and after running ISQL.
# Then we compare only the size of the obtained logs rather than their content (the difference in size should be zero).
|
||||
# Result on WI-V3.0.0.32239, WI-V3.0.0.32239: Ok.
|
||||
#
|
||||
# Result on WI-T3.0.0.30809 (Alpha2):
|
||||
#
|
||||
# Result on WI-T3.0.0.30809 (Alpha2):
|
||||
# Unexpected call to register plugin Remote, type 2 - ignored
|
||||
# Unexpected call to register plugin Loopback, type 2 - ignored
|
||||
# Unexpected call to register plugin Legacy_Auth, type 12 - ignored
|
||||
@ -16,18 +16,19 @@
|
||||
# Unexpected call to register plugin Arc4, type 16 - ignored
|
||||
# INET/inet_error: read errno = 10054
|
||||
# Modifying function FN_01 which is currently in use by active user requests
|
||||
#
|
||||
#
|
||||
# 13.04.2021. Adapted for run both on Windows and Linux. Checked on:
|
||||
# Windows: 3.0.8.33445, 4.0.0.2416
|
||||
# Linux: 3.0.8.33426, 4.0.0.2416
|
||||
#
|
||||
#
|
||||
# tracker_id: CORE-4472
|
||||
# min_versions: ['3.0']
|
||||
# versions: 3.0
|
||||
# qmid: None
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
from difflib import unified_diff
|
||||
from firebird.qa import db_factory, python_act, Action
|
||||
|
||||
# version: 3.0
|
||||
# resources: None
|
||||
@ -43,29 +44,29 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# import os
|
||||
# import time
|
||||
# import subprocess
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
#
|
||||
#
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def flush_and_close( file_handle ):
|
||||
# # https://docs.python.org/2/library/os.html#os.fsync
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
|
||||
# global os
|
||||
#
|
||||
#
|
||||
# file_handle.flush()
|
||||
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
|
||||
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
|
||||
# os.fsync(file_handle.fileno())
|
||||
# file_handle.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def cleanup( f_names_list ):
|
||||
# global os
|
||||
# for i in range(len( f_names_list )):
|
||||
@ -77,18 +78,18 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
|
||||
# print('type(f_names_list[i])=',type(f_names_list[i]))
|
||||
# del_name = None
|
||||
#
|
||||
#
|
||||
# if del_name and os.path.isfile( del_name ):
|
||||
# os.remove( del_name )
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# fb_log_before=open( os.path.join(context['temp_directory'],'tmp_fb_log_4472_before.log'), 'w')
|
||||
# subprocess.call([context['fbsvcmgr_path'],"localhost:service_mgr","action_get_fb_log"],
|
||||
# stdout=fb_log_before, stderr=subprocess.STDOUT)
|
||||
# flush_and_close( fb_log_before )
|
||||
#
|
||||
#
|
||||
# sqltxt='''
|
||||
# set autoddl off;
|
||||
# commit;
|
||||
@ -98,7 +99,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# return 1;
|
||||
# end
|
||||
# ^
|
||||
#
|
||||
#
|
||||
# create or alter procedure sp_01
|
||||
# as
|
||||
# declare function fn_internal_01 returns int as
|
||||
@ -110,56 +111,81 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# end
|
||||
# ^
|
||||
# set term ;^
|
||||
# commit;
|
||||
# commit;
|
||||
# '''
|
||||
#
|
||||
#
|
||||
# f_sqllog=open( os.path.join(context['temp_directory'],'tmp_isql_4472.log'), 'w')
|
||||
# f_sqllog.close()
|
||||
# runProgram('isql',[ dsn, '-q','-m','-o',f_sqllog.name],sqltxt)
|
||||
#
|
||||
#
|
||||
# fb_log_after=open( os.path.join(context['temp_directory'],'tmp_fb_log_4472_after.log'), 'w')
|
||||
# subprocess.call([context['fbsvcmgr_path'],"localhost:service_mgr", "action_get_fb_log"],
|
||||
# stdout=fb_log_after, stderr=subprocess.STDOUT)
|
||||
# flush_and_close( fb_log_after )
|
||||
#
|
||||
#
|
||||
# # This log should be EMPTY:
|
||||
# with open( f_sqllog.name,'r') as f:
|
||||
# for line in f:
|
||||
# if line.split() and not 'Database:' in line:
|
||||
# # This line must be ignored:
|
||||
# # Database: localhost/3333:C:\\FBTESTING\\qa\\fbt-repo\\tmp\\bugs.core_4472.fdb, User: SYSDBA
|
||||
# # Database: localhost/3333:C:\\FBTESTING\\qa\\fbt-repo\\tmp\\bugs.core_4472.fdb, User: SYSDBA
|
||||
# print('UNEXPECTED: ' + line)
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# # This difference should be ZERO:
|
||||
# fb_log_diff=os.path.getsize(fb_log_after.name)-os.path.getsize(fb_log_before.name)
|
||||
#
|
||||
#
|
||||
# if fb_log_diff == 0:
|
||||
# print("OK: log was not changed.")
|
||||
# else:
|
||||
# print("BAD: log was increased by "+str(fb_log_diff)+" bytes.")
|
||||
#
|
||||
#
|
||||
# #####################################################################
|
||||
# # Cleanup:
|
||||
#
|
||||
# # do NOT remove this pause otherwise some of logs will not be enable for deletion and test will finish with
|
||||
#
|
||||
# # do NOT remove this pause otherwise some of logs will not be enable for deletion and test will finish with
|
||||
# # Exception raised while executing Python test script. exception: WindowsError: 32
|
||||
# time.sleep(1)
|
||||
#
|
||||
#
|
||||
# cleanup( (fb_log_before, fb_log_after, f_sqllog ) )
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
expected_stdout_1 = """
|
||||
OK: log was not changed.
|
||||
"""
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
@pytest.mark.version('>=3.0')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
|
||||
def test_1(act_1: Action):
|
||||
script = """
|
||||
set autoddl off;
|
||||
commit;
|
||||
set term ^;
|
||||
create or alter function fn_01() returns int
|
||||
as begin
|
||||
return 1;
|
||||
end
|
||||
^
|
||||
|
||||
create or alter procedure sp_01
|
||||
as
|
||||
declare function fn_internal_01 returns int as
|
||||
begin
|
||||
if ( fn_01() > 0 ) then return 1;
|
||||
else return 0;
|
||||
end
|
||||
begin
|
||||
end
|
||||
^
|
||||
set term ;^
|
||||
commit;
|
||||
"""
|
||||
with act_1.connect_server() as srv:
|
||||
srv.info.get_log()
|
||||
log_before = srv.readlines()
|
||||
act_1.expected_stdout = ''
|
||||
act_1.isql(switches=['-q', '-m'], input=script)
|
||||
with act_1.connect_server() as srv:
|
||||
srv.info.get_log()
|
||||
log_after = srv.readlines()
|
||||
assert act_1.clean_stdout == act_1.clean_expected_stdout
|
||||
assert list(unified_diff(log_before, log_after)) == []
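    # Note: the original fbtest version compared only the sizes of the two firebird.log
    # snapshots; here the full log content is compared line by line via unified_diff.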
|
||||
|
@ -2,45 +2,34 @@
|
||||
#
|
||||
# id: bugs.core_4503
|
||||
# title: ISQL command SHOW USERS display only me
|
||||
# decription:
|
||||
# decription:
|
||||
# 29.07.2016: instead of issuing SHOW USERS which has unstable output (can be changed multiple times!)
|
||||
# it was decided to replace it with SQL query which actually is done by this command.
|
||||
# This query can be easily found in trace when we run SHOW USERS.
|
||||
# Also, we limit output to only those users who are enumerated here, so one need not worry about
# other user logins which could be left in securityN.fdb after some test failed.
|
||||
#
|
||||
#
|
||||
# 29.03.2018: changed user names, replaced count of SYSDBA attachments with literal 1.
|
||||
# Checked on:
|
||||
# fb30Cs, build 3.0.4.32924: OK, 3.781s.
|
||||
# FB30SS, build 3.0.4.32939: OK, 1.312s.
|
||||
# FB40CS, build 4.0.0.918: OK, 4.547s.
|
||||
# FB40SS, build 4.0.0.943: OK, 2.094s.
|
||||
#
|
||||
#
|
||||
# tracker_id: CORE-4503
|
||||
# min_versions: ['3.0']
|
||||
# versions: 3.0
|
||||
# qmid:
|
||||
# qmid:
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
from firebird.qa import db_factory, python_act, Action, user_factory, User
|
||||
|
||||
# version: 3.0
|
||||
# resources: None
|
||||
|
||||
substitutions_1 = []
|
||||
|
||||
init_script_1 = """
|
||||
create or alter user TMP$C4503_BILL password '123';
|
||||
create or alter user TMP$C4503_JOHN password '456';
|
||||
create or alter user TMP$C4503_MICK password '789';
|
||||
create or alter user TMP$C4503_BOSS password '000';
|
||||
|
||||
-- do NOT remove or change name 'test' - it is used in several old tests, see resources/test_user.fbr:
|
||||
-- core_1083.fbt core_1845.fbt core_1148.fbt core_2729.fbt -- all of them can create user 'TEST' and do not remove it.
|
||||
create or alter user test password 'test';
|
||||
drop user test; -- immediately remove this name
|
||||
commit;
|
||||
"""
|
||||
init_script_1 = ""
|
||||
|
||||
db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
|
||||
@ -49,46 +38,46 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# import os
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
#
|
||||
#
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
# con_0a = kdb.connect(dsn=dsn.encode())
|
||||
# con_0b = kdb.connect(dsn=dsn.encode())
|
||||
#
|
||||
#
|
||||
# con_1a = kdb.connect(dsn=dsn.encode(),user='TMP$C4503_BILL',password='123')
|
||||
# con_1b = kdb.connect(dsn=dsn.encode(),user='TMP$C4503_BILL',password='123')
|
||||
# con_1c = kdb.connect(dsn=dsn.encode(),user='TMP$C4503_BILL',password='123')
|
||||
# con_1d = kdb.connect(dsn=dsn.encode(),user='TMP$C4503_BILL',password='123')
|
||||
# con_1e = kdb.connect(dsn=dsn.encode(),user='TMP$C4503_BILL',password='123')
|
||||
#
|
||||
#
|
||||
# con_2a = kdb.connect(dsn=dsn.encode(),user='TMP$C4503_JOHN',password='456')
|
||||
# con_2b = kdb.connect(dsn=dsn.encode(),user='TMP$C4503_JOHN',password='456')
|
||||
# con_2c = kdb.connect(dsn=dsn.encode(),user='TMP$C4503_JOHN',password='456')
|
||||
# con_2d = kdb.connect(dsn=dsn.encode(),user='TMP$C4503_JOHN',password='456')
|
||||
#
|
||||
#
|
||||
# con_3a = kdb.connect(dsn=dsn.encode(),user='TMP$C4503_MICK',password='789')
|
||||
# script = '''
|
||||
# set list on;
|
||||
# -- "SHOW USERS" command actually runs following query:
|
||||
# select
|
||||
# case
|
||||
# when coalesce(mon$user, sec$user_name) = current_user
|
||||
# then '#'
|
||||
# select
|
||||
# case
|
||||
# when coalesce(mon$user, sec$user_name) = current_user
|
||||
# then '#'
|
||||
# when sec$user_name is distinct from null
|
||||
# then ' '
|
||||
# else '-'
|
||||
# then ' '
|
||||
# else '-'
|
||||
# end is_current_user
|
||||
# ,coalesce(m.mon$user, u.sec$user_name) user_name
|
||||
# ,iif( m.mon$user = upper('SYSDBA'), 1, count(m.mon$user) ) keeps_attachments
|
||||
# from mon$attachments m
|
||||
# full join sec$users u on m.mon$user = u.sec$user_name
|
||||
# where
|
||||
# coalesce(mon$system_flag, 0) = 0
|
||||
# from mon$attachments m
|
||||
# full join sec$users u on m.mon$user = u.sec$user_name
|
||||
# where
|
||||
# coalesce(mon$system_flag, 0) = 0
|
||||
# and coalesce(m.mon$user, u.sec$user_name) in ( upper('TMP$C4503_BILL'), upper('TMP$C4503_BOSS'), upper('TMP$C4503_JOHN'), upper('TMP$C4503_MICK'), upper('SYSDBA') )
|
||||
# group by mon$user, sec$user_name
|
||||
# group by mon$user, sec$user_name
|
||||
# order by coalesce(mon$user, sec$user_name);
|
||||
# commit;
|
||||
#
|
||||
#
|
||||
# drop user TMP$C4503_BILL;
|
||||
# drop user TMP$C4503_JOHN;
|
||||
# drop user TMP$C4503_MICK;
|
||||
@ -96,7 +85,8 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# '''
|
||||
# runProgram('isql',[dsn,'-q'],script)
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
expected_stdout_1 = """
|
||||
IS_CURRENT_USER #
|
||||
@ -118,11 +108,50 @@ expected_stdout_1 = """
|
||||
IS_CURRENT_USER
|
||||
USER_NAME TMP$C4503_MICK
|
||||
KEEPS_ATTACHMENTS 1
|
||||
"""
|
||||
"""
|
||||
|
||||
|
||||
user_bill = user_factory(name='TMP$C4503_BILL', password='123')
|
||||
user_john = user_factory(name='TMP$C4503_JOHN', password='456')
|
||||
user_mick = user_factory(name='TMP$C4503_MICK', password='789')
|
||||
user_boss = user_factory(name='TMP$C4503_BOSS', password='000')
|
||||
|
||||
@pytest.mark.version('>=3.0')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
|
||||
|
||||
def test_1(act_1: Action, user_bill: User, user_john: User, user_mick: User, user_boss: User):
|
||||
with act_1.db.connect() as con_0a, act_1.db.connect(), \
|
||||
act_1.db.connect(user='TMP$C4503_BILL', password='123'), \
|
||||
act_1.db.connect(user='TMP$C4503_BILL', password='123'), \
|
||||
act_1.db.connect(user='TMP$C4503_BILL', password='123'), \
|
||||
act_1.db.connect(user='TMP$C4503_BILL', password='123'), \
|
||||
act_1.db.connect(user='TMP$C4503_BILL', password='123'), \
|
||||
act_1.db.connect(user='TMP$C4503_JOHN', password='456'), \
|
||||
act_1.db.connect(user='TMP$C4503_JOHN', password='456'), \
|
||||
act_1.db.connect(user='TMP$C4503_JOHN', password='456'), \
|
||||
act_1.db.connect(user='TMP$C4503_JOHN', password='456'), \
|
||||
act_1.db.connect(user='TMP$C4503_MICK', password='789'):
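        # attachments opened above: 2 for SYSDBA, 5 for TMP$C4503_BILL, 4 for TMP$C4503_JOHN,
        # 1 for TMP$C4503_MICK; TMP$C4503_BOSS exists but has no attachments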
|
||||
#
|
||||
script = """
|
||||
set list on;
|
||||
-- "SHOW USERS" command actually runs following query:
|
||||
select
|
||||
case
|
||||
when coalesce(mon$user, sec$user_name) = current_user
|
||||
then '#'
|
||||
when sec$user_name is distinct from null
|
||||
then ' '
|
||||
else '-'
|
||||
end is_current_user
|
||||
,coalesce(m.mon$user, u.sec$user_name) user_name
|
||||
,iif( m.mon$user = upper('SYSDBA'), 1, count(m.mon$user) ) keeps_attachments
|
||||
from mon$attachments m
|
||||
full join sec$users u on m.mon$user = u.sec$user_name
|
||||
where
|
||||
coalesce(mon$system_flag, 0) = 0
|
||||
and coalesce(m.mon$user, u.sec$user_name) in ( upper('TMP$C4503_BILL'), upper('TMP$C4503_BOSS'), upper('TMP$C4503_JOHN'), upper('TMP$C4503_MICK'), upper('SYSDBA') )
|
||||
group by mon$user, sec$user_name
|
||||
order by coalesce(mon$user, sec$user_name);
|
||||
commit;
|
||||
"""
|
||||
act_1.expected_stdout = expected_stdout_1
|
||||
act_1.isql(switches=['-q'], input=script)
|
||||
assert act_1.clean_stdout == act_1.clean_expected_stdout
|
||||
|
@ -2,20 +2,20 @@
|
||||
#
|
||||
# id: bugs.core_4524
|
||||
# title: New gbak option to enable encryption during restore
|
||||
# decription:
|
||||
# decription:
|
||||
# Part of this test was copied from core_6071.fbt.
|
||||
# We create new database and try to encrypt it using IBSurgeon Demo Encryption package
|
||||
# ( https://ib-aid.com/download-demo-firebird-encryption-plugin/ ; https://ib-aid.com/download/crypt/CryptTest.zip )
|
||||
# License file plugins\\dbcrypt.conf with unlimited expiration was provided by IBSurgeon to Firebird Foundation (FF).
|
||||
# This file was previously stored on the FF test machine.
# Test assumes that this file and all necessary libraries were already stored into FB_HOME and %FB_HOME%\\plugins.
|
||||
#
|
||||
#
|
||||
# We create several generators in the test DB and get number of generators page using query to RDB$PAGES (page_type=9).
|
||||
# Also we get page_size and, using these data, we can obtain the binary content of the generators page. This content is further parsed
|
||||
# in order to verify that generators names are readable (while DB is not yet encrypted).
|
||||
#
|
||||
#
|
||||
# Then we encrypt DB and make delay after this for ~1..2 seconds BEFORE detach from database.
|
||||
#
|
||||
#
|
||||
# After this we:
|
||||
# 1. Change temp DB state to full shutdown and bring it online - in order to be sure that we will be able to drop this file later;
|
||||
# 2. Make backup of this temp DB, using gbak utility and '-KEYHOLDER <name_of_key_holder>' command switch.
|
||||
@ -23,17 +23,19 @@
|
||||
# 4. Make validation of just restored database by issuing command "gfix -v -full ..."
|
||||
# ( i.e. validate both data and metadata rather than online val which can check user data only).
|
||||
# 5. Open restored DB as binary file and attempt to read again generators names - this must fail, their names must be encrypted.
|
||||
# 6. Check that NO errors occurred on any above mentioned steps. Also check that backup and restore STDOUT logs contain expected
|
||||
# 6. Check that NO errors occurred on any above mentioned steps. Also check that backup and restore STDOUT logs contain expected
|
||||
# text about successful completion
|
||||
#
|
||||
#
|
||||
# 13.04.2021. Adapted for run both on Windows and Linux. Checked on:
|
||||
# Windows: 4.0.0.2416
|
||||
# Linux: 4.0.0.2416
|
||||
# Note: different names for encryption plugin and keyholder are used for Windows vs Linux:
|
||||
# PLUGIN_NAME = 'dbcrypt' if os.name == 'nt' else '"fbSampleDbCrypt"'
|
||||
# KHOLDER_NAME = 'KeyHolder' if os.name == 'nt' else "fbSampleKeyHolder"
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# [pcisar] 23.11.2021
|
||||
# Test not implemented because it depends on 3rd party encryption plugin.
|
||||
# tracker_id: CORE-4524
|
||||
# min_versions: ['4.0']
|
||||
# versions: 4.0
|
||||
@ -53,35 +55,35 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
|
||||
# test_script_1
|
||||
#---
|
||||
#
|
||||
#
|
||||
# import os
|
||||
# import time
|
||||
# import subprocess
|
||||
# import binascii
|
||||
# import re
|
||||
# import fdb
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def flush_and_close( file_handle ):
|
||||
# # https://docs.python.org/2/library/os.html#os.fsync
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
|
||||
# global os
|
||||
#
|
||||
#
|
||||
# file_handle.flush()
|
||||
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
|
||||
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
|
||||
# os.fsync(file_handle.fileno())
|
||||
# file_handle.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def cleanup( f_names_list ):
|
||||
# global os
|
||||
# for i in range(len( f_names_list )):
|
||||
@ -93,56 +95,56 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
|
||||
# print('type(f_names_list[i])=',type(f_names_list[i]))
|
||||
# del_name = None
|
||||
#
|
||||
#
|
||||
# if del_name and os.path.isfile( del_name ):
|
||||
# os.remove( del_name )
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def check_page_for_readable_values(dbname, gen_page_number, pg_size, check_sequence_values):
|
||||
#
|
||||
#
|
||||
# global binascii
|
||||
#
|
||||
#
|
||||
# db_handle = open( dbname, "rb")
|
||||
# db_handle.seek( gen_page_number * pg_size )
|
||||
# page_content = db_handle.read( pg_size )
|
||||
# # read_binary_content( db_handle, gen_page_number * pg_size, pg_size )
|
||||
# db_handle.close()
|
||||
# page_as_hex=binascii.hexlify( page_content )
|
||||
#
|
||||
#
|
||||
# # Iterate for each sequence value:
|
||||
# for n in check_sequence_values:
|
||||
#
|
||||
#
|
||||
# # Get HEX representation of digital value.
|
||||
# # NOTE: format( 830624, 'x') is 'caca0' contains five (odd number!) characters.
|
||||
# hex_string = format(abs(n),'x')
|
||||
#
|
||||
#
|
||||
# # Here we 'pad' hex representation to EVEN number of digits in it,
|
||||
# # otherwise binascii.hexlify fails with "Odd-length string error":
|
||||
# hex_string = ''.join( ('0' * ( len(hex_string)%2 ), hex_string ) )
|
||||
#
|
||||
#
|
||||
# # ::: NOTE :::
|
||||
# # Generator value is stored in REVERSED bytes order.
|
||||
# # dec 830624 --> hex 0x0caca0 --> 0c|ac|a0 --> stored in page as three bytes: {a0; ac; 0c}
|
||||
#
|
||||
#
|
||||
# # Decode string that is stored in variable 'hex_string' to HEX number,
|
||||
# # REVERSE its bytes and convert it to string again for further search
|
||||
# # in page content:
|
||||
# n_as_reversed_hex = binascii.hexlify( hex_string.decode('hex')[::-1] )
|
||||
#
|
||||
#
|
||||
# print(n, n_as_reversed_hex, 'FOUND.' if n_as_reversed_hex in page_as_hex else 'NOT FOUND.' )
|
||||
# # print(n, n_as_reversed_hex, 'UNEXPECTEDLY FOUND AT POS. ' + '{:5d}'.format( page_as_hex.index(n_as_reversed_hex) ) if n_as_reversed_hex in page_as_hex else 'Not found (expected).' )
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
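#
# The two steps above - locating the generators page via RDB$PAGES (page_type = 9) and
# looking for the byte image of each sequence value - use Python 2 idioms such as
# str.decode('hex'). A rough Python 3 sketch of the same idea is shown below; it is
# only an illustration, not part of the test itself:
#
#   def read_generators_page(con, db_file):
#       cur = con.cursor()
#       # page size and the first generators page (page_type = 9 in RDB$PAGES)
#       cur.execute("select m.mon$page_size, min(p.rdb$page_number) "
#                   "from mon$database m cross join rdb$pages p "
#                   "where p.rdb$page_type = 9 group by 1")
#       pg_size, gen_page_no = cur.fetchone()
#       with open(db_file, 'rb') as db:
#           db.seek(gen_page_no * pg_size)
#           return db.read(pg_size)
#
#   def gen_value_is_readable(page_content, value):
#       # generator values are stored in reversed (little-endian) byte order
#       width = max(1, (abs(value).bit_length() + 7) // 8)
#       return abs(value).to_bytes(width, 'little') in page_content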
|
||||
#
|
||||
#
|
||||
# tmpfdb='$(DATABASE_LOCATION)'+'tmp_core_4524.encrypted.fdb'
|
||||
# tmpres='$(DATABASE_LOCATION)'+'tmp_core_4524.restored.fdb'
|
||||
# tmpbkp='$(DATABASE_LOCATION)'+'tmp_core_4524.encrypted.fbk'
|
||||
#
|
||||
#
|
||||
# cleanup( (tmpfdb, tmpres) )
|
||||
#
|
||||
#
|
||||
# con = fdb.create_database( dsn = 'localhost:'+tmpfdb )
|
||||
#
|
||||
#
|
||||
# con.execute_immediate('create sequence gen_ba0bab start with 12192683')
|
||||
# con.execute_immediate('create sequence gen_badf00d start with 195948557')
|
||||
# con.execute_immediate('create sequence gen_caca0 start with 830624')
|
||||
@ -151,7 +153,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# con.execute_immediate('create sequence gen_decade start with 14600926')
|
||||
# con.execute_immediate('create sequence gen_7FFFFFFF start with 2147483647')
|
||||
# con.commit()
|
||||
#
|
||||
#
|
||||
# cur=con.cursor()
|
||||
# get_current_seq_values='''
|
||||
# execute block returns( gen_curr bigint) as
|
||||
@ -166,7 +168,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# end
|
||||
# end
|
||||
# '''
|
||||
#
|
||||
#
|
||||
# # Obtain current values of user generators:
|
||||
# # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
# cur.execute(get_current_seq_values)
|
||||
@ -174,8 +176,8 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# for r in cur:
|
||||
# check_sequence_values += r[0],
|
||||
# #print('check_sequence_values=',check_sequence_values)
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# # Obtain page size and number of generators page:
|
||||
# # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
# cur.execute('select m.mon$page_size,min(rdb$page_number) from mon$database m cross join rdb$pages p where p.rdb$page_type = 9 group by 1')
|
||||
@ -185,17 +187,17 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# gen_page_number=r[1]
|
||||
# # print(r[0],r[1])
|
||||
# cur.close()
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# # Read gen page, convert it to hex and check whether generator values can be found there or no:
|
||||
# # Expected result: YES for all values because DB not encrypted now.
|
||||
# # ~~~~~~~~~~~~~~~
|
||||
# check_page_for_readable_values(tmpfdb, gen_page_number, pg_size, check_sequence_values)
|
||||
#
|
||||
#
|
||||
# ################################################
|
||||
# ### e n c r y p t d a t a b a s e ###
|
||||
# ################################################
|
||||
#
|
||||
#
|
||||
# # 14.04.2021.
|
||||
# # Name of encryption plugin depends on OS:
|
||||
# # * for Windows we (currently) use plugin by IBSurgeon, its name is 'dbcrypt';
|
||||
@ -212,7 +214,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# time.sleep(2)
|
||||
# # ^
|
||||
# # +-------- !! ALLOW BACKGROUND ENCRYPTION PROCESS TO COMPLETE ITS JOB !!
|
||||
#
|
||||
#
|
||||
# #######################################
|
||||
# # Added 14.04.2021: check that database is actually encrypted.
|
||||
# # Column MON$DATABASE.MON$CRYPT_STATE can have following values:
|
||||
@ -226,9 +228,9 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# print( r[0] )
|
||||
# con.close()
|
||||
# cur.close()
|
||||
#
|
||||
#
|
||||
# #-------------------------- shutdown temp DB and bring it online --------------------
|
||||
#
|
||||
#
|
||||
# f_dbshut_log = open( os.path.join(context['temp_directory'],'tmp_dbshut_4524.log'), 'w')
|
||||
# subprocess.call( [ context['gfix_path'], 'localhost:'+tmpfdb, "-shut", "full", "-force", "0" ],
|
||||
# stdout = f_dbshut_log,
|
||||
@ -239,13 +241,13 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# stderr = subprocess.STDOUT
|
||||
# )
|
||||
# flush_and_close( f_dbshut_log )
|
||||
#
|
||||
#
|
||||
# #--------------------------- backup and restore --------------------------------------
|
||||
# fn_bkp_log=open( os.path.join(context['temp_directory'],'tmp_backup_4524.log'), 'w')
|
||||
# fn_bkp_err=open( os.path.join(context['temp_directory'],'tmp_backup_4524.err'), 'w')
|
||||
#
|
||||
#
|
||||
# # /var/tmp/fb40tmp/bin/gbak -b -v -keyholder fbSampleKeyHolder -crypt fbSampleDbCrypt localhost:/path/to/encrypted.fdb /path/to/encrypted.fbk
|
||||
#
|
||||
#
|
||||
# subprocess.call([ context['gbak_path']
|
||||
# ,"-b"
|
||||
# ,"-v"
|
||||
@ -255,14 +257,14 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# ,tmpbkp
|
||||
# ],
|
||||
# stdout=fn_bkp_log, stderr=fn_bkp_err)
|
||||
#
|
||||
#
|
||||
# flush_and_close( fn_bkp_log )
|
||||
# flush_and_close( fn_bkp_err )
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# fn_res_log=open( os.path.join(context['temp_directory'],'tmp_restore_4524.log'), 'w')
|
||||
# fn_res_err=open( os.path.join(context['temp_directory'],'tmp_restore_4524.err'), 'w')
|
||||
#
|
||||
#
|
||||
# # C:\\FB SS\\gbak.exe -rep -KEYHOLDER KeyHolder C:\\FBTESTING\\qa\\misc\\C4524.fbk /:C:\\FBTESTING\\qa\\misc\\c4524.restored.FDB
|
||||
# subprocess.call([ context['gbak_path']
|
||||
# ,"-rep"
|
||||
@ -272,51 +274,51 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# ,'localhost:' + tmpres
|
||||
# ],
|
||||
# stdout=fn_res_log, stderr=fn_res_err)
|
||||
#
|
||||
#
|
||||
# flush_and_close( fn_res_log )
|
||||
# flush_and_close( fn_res_err )
|
||||
#
|
||||
#
|
||||
# #-------------------------- validate just restored database --------------------
|
||||
#
|
||||
#
|
||||
# f_valid_log = open( os.path.join(context['temp_directory'],'tmp_valid_4524.log'), 'w')
|
||||
# subprocess.call( [ context['gfix_path'], 'localhost:'+tmpres, "-v", "-full" ],
|
||||
# stdout = f_valid_log,
|
||||
# stderr = subprocess.STDOUT
|
||||
# )
|
||||
# flush_and_close( f_valid_log )
|
||||
#
|
||||
#
|
||||
# #-----------------------------------------------
|
||||
#
|
||||
#
|
||||
# # Read gen page in RESTORED database, convert it to hex and check whether generator values can be found there or no.
|
||||
# # Expected result: NOT for all values because DB was encrypted.
|
||||
# # ~~~~~~~~~~~~~~~~
|
||||
# check_page_for_readable_values(tmpres, gen_page_number, pg_size, check_sequence_values)
|
||||
#
|
||||
#
|
||||
# #-----------------------------------------------
|
||||
#
|
||||
#
|
||||
# # Check that all was fine:
|
||||
#
|
||||
#
|
||||
# with open(f_dbshut_log.name,'r') as f:
|
||||
# for line in f:
|
||||
# if line.split():
|
||||
# print('UNEXPECTED SHUTDOWN OUTPUT: ' + line)
|
||||
#
|
||||
#
|
||||
# with open(fn_bkp_err.name,'r') as f:
|
||||
# for line in f:
|
||||
# if line.split():
|
||||
# print('UNEXPECTED BACKUP STDERR: ' + line)
|
||||
#
|
||||
#
|
||||
# with open(fn_res_err.name,'r') as f:
|
||||
# for line in f:
|
||||
# if line.split():
|
||||
# print('UNEXPECTED RESTORE STDERR: ' + line)
|
||||
#
|
||||
#
|
||||
# with open(f_dbshut_log.name,'r') as f:
|
||||
# for line in f:
|
||||
# if line.split():
|
||||
# print('UNEXPECTED VALIDATION OUTPUT: ' + line)
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# # gbak -b should finish with line:
|
||||
# # gbak:closing file, committing, and finishing. 512 bytes written
|
||||
# gbak_backup_finish_ptn=re.compile('gbak:closing\\s+file,\\s+committing,\\s+and\\s+finishing.*', re.IGNORECASE)
|
||||
@ -324,26 +326,26 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# for line in f:
|
||||
# if gbak_backup_finish_ptn.search(line):
|
||||
# print('EXPECTED BACKUP FINISH FOUND: '+line.upper() )
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# # gbak -c should finish with lines:
|
||||
# # gbak:finishing, closing, and going home
|
||||
# # gbak:adjusting the ONLINE and FORCED WRITES flags
|
||||
#
|
||||
#
|
||||
# gbak_restore_finish_ptn=re.compile('gbak:adjusting\\s+the\\s+ONLINE\\s+and\\s+FORCED\\s+WRITES\\s+.*', re.IGNORECASE)
|
||||
# with open(fn_res_log.name,'r') as f:
|
||||
# for line in f:
|
||||
# if gbak_restore_finish_ptn.search(line):
|
||||
# print('EXPECTED RESTORE FINISH FOUND: '+line.upper() )
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# # cleanup
|
||||
# ##########
|
||||
# time.sleep(1)
|
||||
# f_list = [ i.name for i in ( f_dbshut_log, fn_bkp_log, fn_bkp_err, fn_res_log, fn_res_err, f_valid_log ) ] + [ tmpfdb, tmpres, tmpbkp ]
|
||||
# cleanup( f_list )
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
@ -367,11 +369,9 @@ expected_stdout_1 = """
|
||||
2147483646 feffff7f NOT FOUND.
|
||||
EXPECTED BACKUP FINISH FOUND: GBAK:CLOSING FILE, COMMITTING, AND FINISHING.
|
||||
EXPECTED RESTORE FINISH FOUND: GBAK:ADJUSTING THE ONLINE AND FORCED WRITES FLAGS
|
||||
"""
|
||||
"""
|
||||
|
||||
@pytest.mark.version('>=4.0')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
|
||||
|
||||
pytest.skip("Test requires 3rd party encryption plugin")
|
||||
#pytest.fail("Test not IMPLEMENTED")
|
||||
|
@ -2,7 +2,7 @@
|
||||
#
|
||||
# id: bugs.core_4582
|
||||
# title: Within linger period one can not change some database properties
|
||||
# decription:
|
||||
# decription:
|
||||
# Confirmed on WI-T3.0.0.31374 Beta 1: running GFIX -buffers N has no effect if this is done within linger period
|
||||
# Results for 22.05.2017:
|
||||
# fb30Cs, build 3.0.3.32725: OK, 4.703ss.
|
||||
@ -11,19 +11,25 @@
|
||||
# FB40CS, build 4.0.0.645: OK, 5.047ss.
|
||||
# FB40SC, build 4.0.0.645: OK, 2.703ss.
|
||||
# FB40SS, build 4.0.0.645: OK, 2.187ss.
|
||||
#
|
||||
#
|
||||
# [pcisar] 23.11.2021
|
||||
# This test FAILs on v4.0.0.2496 as database couldn't be reverted to online state, not yet tested with 3.0
|
||||
# tracker_id: CORE-4582
|
||||
# min_versions: ['3.0']
|
||||
# versions: 3.0
|
||||
# qmid:
|
||||
# qmid:
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
from firebird.qa import db_factory, python_act, Action
|
||||
from firebird.driver import DbWriteMode, DbAccessMode, ShutdownMode, ShutdownMethod, \
|
||||
SrvStatFlag
|
||||
|
||||
# version: 3.0
|
||||
# resources: None
|
||||
|
||||
substitutions_1 = [('^((?!:::MSG:::|buffers|before|after|Attributes).)*$', ''), ('Page buffers([\t]|[ ])+', 'Page buffers '), ('Attributes([\t]|[ ])+', 'Attributes '), ('N/A', 'YES')]
|
||||
substitutions_1 = [('^((?!:::MSG:::|buffers|before|after|Attributes).)*$', ''),
|
||||
('Page buffers([\t]|[ ])+', 'Page buffers '),
|
||||
('Attributes([\t]|[ ])+', 'Attributes '), ('N/A', 'YES')]
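# The first tuple above uses a negative lookahead: a whole line matches (and is thus
# replaced with '') only when it contains none of the listed tokens, so the comparison
# effectively keeps just the lines of interest. A tiny standalone illustration of the
# idea (not used by the test itself):
#
#   import re
#   drop_unwanted = re.compile('^((?!:::MSG:::|buffers|before|after|Attributes).)*$')
#   sample = ['Page buffers            3791', 'Generation              42']
#   kept = [ln for ln in sample if not drop_unwanted.match(ln)]   # -> ['Page buffers            3791']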
|
||||
|
||||
init_script_1 = """
|
||||
-- Result of this test on WI-T3.0.0.31374 Beta 1:
|
||||
@ -40,10 +46,10 @@ init_script_1 = """
|
||||
create or alter procedure sp_get_buff returns(mon_buffers int) as
|
||||
begin
|
||||
mon_buffers = -1;
|
||||
if ( exists( select * from mon$attachments where mon$user containing 'cache writer' and mon$system_flag = 1 ) )
|
||||
if (exists(select * from mon$attachments where mon$user containing 'cache writer' and mon$system_flag = 1))
|
||||
then
|
||||
select mon$page_buffers from mon$database into mon_buffers;
|
||||
|
||||
|
||||
suspend;
|
||||
|
||||
end
|
||||
@ -52,31 +58,31 @@ init_script_1 = """
|
||||
commit;
|
||||
recreate table log(buf_before int, buf_after int);
|
||||
commit;
|
||||
"""
|
||||
"""
|
||||
|
||||
db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
|
||||
# test_script_1
|
||||
#---
|
||||
#
|
||||
#
|
||||
# import os
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
# script='''
|
||||
# insert into log(buf_before) select mon_buffers from sp_get_buff;
|
||||
# commit;
|
||||
# alter database set linger to 15;
|
||||
# alter database set linger to 15;
|
||||
# commit;
|
||||
# set list on;
|
||||
# select rdb$linger as ":::MSG::: linger_time" from rdb$database;
|
||||
# '''
|
||||
#
|
||||
#
|
||||
# print (':::MSG::: Starting ISQL setting new value for linger...')
|
||||
# runProgram('isql',[dsn],script)
|
||||
# print (':::MSG::: ISQL setting new value for linger finished.')
|
||||
#
|
||||
#
|
||||
# print (':::MSG::: Starting GFIX setting new value for page buffers...')
|
||||
# runProgram('gfix',[dsn,'-buffers','3791'])
|
||||
# runProgram('gfix',[dsn,'-w','async'])
|
||||
@ -88,20 +94,21 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# print (':::MSG::: GFIX setting new value for page buffers finished.')
|
||||
# print (':::MSG::: Starting ISQL for extract old and new value of page buffers...')
|
||||
# script='''
|
||||
# set list on;
|
||||
# set list on;
|
||||
# update log set buf_after = (select mon_buffers from sp_get_buff);
|
||||
# commit;
|
||||
# select iif( g.buf_before > 0,
|
||||
# iif( g.buf_before is distinct from g.buf_after, 'YES', 'NO!' ),
|
||||
# 'N/A'
|
||||
# select iif( g.buf_before > 0,
|
||||
# iif( g.buf_before is distinct from g.buf_after, 'YES', 'NO!' ),
|
||||
# 'N/A'
|
||||
# ) as "GFIX could change buffers ? =>"
|
||||
# from log g;
|
||||
# '''
|
||||
# runProgram('isql',[dsn],script)
|
||||
# print (':::MSG::: ISQL for extract old and new value of page finished.')
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
expected_stdout_1 = """
|
||||
:::MSG::: Starting ISQL setting new value for linger...
|
||||
@ -114,11 +121,64 @@ expected_stdout_1 = """
|
||||
:::MSG::: Starting ISQL for extract old and new value of page buffers...
|
||||
GFIX could change buffers ? => YES
|
||||
:::MSG::: ISQL for extract old and new value of page finished.
|
||||
"""
|
||||
"""
|
||||
|
||||
@pytest.mark.version('>=3.0')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
|
||||
|
||||
def test_1(act_1: Action, capsys):
|
||||
script_1 = """
|
||||
insert into log(buf_before) select mon_buffers from sp_get_buff;
|
||||
commit;
|
||||
alter database set linger to 15;
|
||||
commit;
|
||||
set list on;
|
||||
select rdb$linger as ":::MSG::: linger_time" from rdb$database;
|
||||
"""
|
||||
print (':::MSG::: Starting ISQL setting new value for linger...')
|
||||
act_1.isql(switches=[], input=script_1)
|
||||
print (':::MSG::: ISQL setting new value for linger finished.')
|
||||
print (':::MSG::: Starting GFIX setting new value for page buffers...')
|
||||
#with act_1.connect_server() as srv:
|
||||
#srv.database.set_default_cache_size(database=str(act_1.db.db_path), size=3791)
|
||||
#srv.database.set_write_mode(database=str(act_1.db.db_path), mode=DbWriteMode.ASYNC)
|
||||
#srv.database.set_access_mode(database=str(act_1.db.db_path), mode=DbAccessMode.READ_ONLY)
|
||||
#srv.database.shutdown(database=str(act_1.db.db_path), mode=ShutdownMode.SINGLE,
|
||||
#method=ShutdownMethod.DENNY_ATTACHMENTS, timeout=20)
|
||||
#srv.database.get_statistics(database=str(act_1.db.db_path), flags=SrvStatFlag.HDR_PAGES,
|
||||
#callback=print)
|
||||
#srv.database.bring_online(database=str(act_1.db.db_path))
|
||||
#srv.database.set_access_mode(database=str(act_1.db.db_path), mode=DbAccessMode.READ_WRITE)
|
||||
act_1.reset()
|
||||
act_1.gfix(switches=['-buffers', '3791', act_1.db.dsn])
|
||||
act_1.reset()
|
||||
act_1.gfix(switches=['-write','async', act_1.db.dsn])
|
||||
act_1.reset()
|
||||
act_1.gfix(switches=['-mode','read_only', act_1.db.dsn])
|
||||
act_1.reset()
|
||||
act_1.gfix(switches=['-shutdown','single', '-at', '20', act_1.db.dsn])
|
||||
act_1.reset()
|
||||
act_1.gstat(switches=['-h'])
|
||||
print(act_1.stdout)
|
||||
act_1.reset()
|
||||
act_1.gfix(switches=[act_1.db.dsn, '-online'])
|
||||
act_1.reset()
|
||||
act_1.gfix(switches=['-mode','read_write', act_1.db.dsn])
|
||||
print (':::MSG::: GFIX setting new value for page buffers finished.')
|
||||
print (':::MSG::: Starting ISQL for extract old and new value of page buffers...')
|
||||
script_2 = """
|
||||
set list on;
|
||||
update log set buf_after = (select mon_buffers from sp_get_buff);
|
||||
commit;
|
||||
select iif( g.buf_before > 0,
|
||||
iif( g.buf_before is distinct from g.buf_after, 'YES', 'NO!' ),
|
||||
'N/A'
|
||||
) as "GFIX could change buffers ? =>"
|
||||
from log g;
|
||||
"""
|
||||
act_1.reset()
|
||||
act_1.isql(switches=[], input=script_2)
|
||||
print(act_1.stdout)
|
||||
print (':::MSG::: ISQL for extract old and new value of page finished.')
|
||||
# Check
|
||||
act_1.expected_stdout = expected_stdout_1
|
||||
act_1.stdout = capsys.readouterr().out
|
||||
    assert act_1.clean_stdout == act_1.clean_expected_stdout
|
||||
|
@ -2,7 +2,7 @@
|
||||
#
|
||||
# id: bugs.core_4645
|
||||
# title: internal Firebird consistency check (cannot find tip page (165), file: tra.cpp line: 2375)
|
||||
# decription:
|
||||
# decription:
|
||||
# Both STDOUT and STDERR in this test should be empty.
|
||||
# Confirmed:
|
||||
# 1) bugcheck exception on 3.0.0.32378; 4.0.0.98:
|
||||
@ -11,14 +11,15 @@
|
||||
# Statement failed, SQLSTATE = XX000
|
||||
# internal Firebird consistency check (can't continue after bugcheck)
|
||||
# 2) normal work on 3.0.0.32471; 4.0.0.127.
|
||||
#
|
||||
#
|
||||
# tracker_id: CORE-4645
|
||||
# min_versions: ['2.5']
|
||||
# versions: 2.5.6
|
||||
# qmid: None
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
from firebird.qa import db_factory, python_act, Action
|
||||
from firebird.driver import DbAccessMode
|
||||
|
||||
# version: 2.5.6
|
||||
# resources: None
|
||||
@ -31,7 +32,7 @@ db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)
|
||||
|
||||
# test_script_1
|
||||
#---
|
||||
#
|
||||
#
|
||||
# db_conn.close()
|
||||
# runProgram('gfix',[dsn,'-user',user_name,'-pas',user_password,'-mode','read_only'])
|
||||
# script='''
|
||||
@ -41,7 +42,7 @@ db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)
|
||||
# execute block as
|
||||
# declare n int = 20000;
|
||||
# begin
|
||||
# while (n>0) do
|
||||
# while (n>0) do
|
||||
# in autonomous transaction do
|
||||
# select :n-1 from rdb$database into n;
|
||||
# end
|
||||
@ -50,14 +51,30 @@ db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)
|
||||
# commit;
|
||||
# '''
|
||||
# runProgram('isql',[dsn,'-user',user_name,'-password',user_password],script)
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
|
||||
@pytest.mark.version('>=2.5.6')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
|
||||
|
||||
def test_1(act_1: Action):
|
||||
with act_1.connect_server() as srv:
|
||||
srv.database.set_access_mode(database=str(act_1.db.db_path), mode=DbAccessMode.READ_ONLY)
|
||||
script = """
|
||||
commit;
|
||||
set transaction read committed;
|
||||
set term ^;
|
||||
execute block as
|
||||
declare n int = 20000;
|
||||
begin
|
||||
while (n>0) do
|
||||
in autonomous transaction do
|
||||
select :n-1 from rdb$database into n;
|
||||
end
|
||||
^
|
||||
set term ;^
|
||||
commit;
|
||||
"""
|
||||
act_1.isql(switches=[], input=script)
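    # expected_stdout is left at its default (empty), so the assert below checks that
    # ISQL produced no output; per the description above, a bugcheck would break this.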
|
||||
assert act_1.clean_stdout == act_1.clean_expected_stdout
|
||||
|
@ -2,37 +2,42 @@
|
||||
#
|
||||
# id: bugs.core_4648
|
||||
# title: no permission for CREATE access to DATABASE (for RDB$ADMIN)
|
||||
# decription:
|
||||
# decription:
|
||||
# tracker_id: CORE-4648
|
||||
# min_versions: ['3.0']
|
||||
# versions: 3.0
|
||||
# qmid:
|
||||
# qmid:
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
from io import BytesIO
|
||||
from pathlib import Path
|
||||
from firebird.qa import db_factory, python_act, Action, user_factory, User, temp_file
|
||||
from firebird.driver import SrvRestoreFlag
|
||||
|
||||
# version: 3.0
|
||||
# resources: None
|
||||
|
||||
substitutions_1 = []
|
||||
|
||||
init_script_1 = """
|
||||
set wng off;
|
||||
create or alter user tmp$c4648 password '123' revoke admin role;
|
||||
commit;
|
||||
revoke all on all from tmp$c4648;
|
||||
-- 'create ... grant admin role' or 'grant rdb$admin' are NOT neccessary
|
||||
-- for enabling to creating database by non-sysdba user:
|
||||
grant create database to user tmp$c4648;
|
||||
commit;
|
||||
"""
|
||||
init_script_1 = ""
|
||||
|
||||
#init_script_1 = """
|
||||
#set wng off;
|
||||
#create or alter user tmp$c4648 password '123' revoke admin role;
|
||||
#commit;
|
||||
#revoke all on all from tmp$c4648;
|
||||
#-- 'create ... grant admin role' or 'grant rdb$admin' are NOT necessary
#-- to enable creating a database by a non-sysdba user:
|
||||
#grant create database to user tmp$c4648;
|
||||
#commit;
|
||||
#"""
|
||||
|
||||
db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
|
||||
# test_script_1
|
||||
#---
|
||||
# import os
|
||||
#
|
||||
#
|
||||
# print ('Starting backup...')
|
||||
# fbk = os.path.join(context['temp_directory'],'tmp4648.fbk')
|
||||
# fdn = 'localhost:'+os.path.join(context['temp_directory'],'tmp4648.tmp')
|
||||
@ -45,15 +50,15 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# print ('Delete backup file...')
|
||||
# os.remove(fbk)
|
||||
# print ('Backup file deleted.')
|
||||
#
|
||||
#
|
||||
# script = '''
|
||||
# set list on;
|
||||
# select
|
||||
# select
|
||||
# a.mon$user as "Who am I:"
|
||||
# ,left(a.mon$remote_protocol,3) as "Used protocol:"
|
||||
# ,iif(m.mon$database_name containing 'tmp4648.tmp','YES','NO! ' || m.mon$database_name ) as "Connected to restored DB ?"
|
||||
# ,m.mon$owner as "Owner of DB is:"
|
||||
# from mon$attachments a, mon$database m
|
||||
# from mon$attachments a, mon$database m
|
||||
# where a.mon$attachment_id=current_connection;
|
||||
# commit;
|
||||
# drop database;
|
||||
@ -61,34 +66,71 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# print ('Starting ISQL using NON sysdba user account...')
|
||||
# runProgram('isql',[fdn,'-q','-user','tmp$c4648','-pas','123'],script)
|
||||
# print ('ISQL using NON sysdba user account finished.')
|
||||
#
|
||||
#
|
||||
# script='''revoke create database from user tmp$c4648;
|
||||
# drop user tmp$c4648;
|
||||
# commit;
|
||||
# '''
|
||||
# runProgram('isql',[dsn,'-q','-user',user_name,'-password',user_password],script)
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
expected_stdout_1 = """
|
||||
Starting backup...
|
||||
Backup finished.
|
||||
Starting restore using NON sysdba user account...
|
||||
Restore using NON sysdba user account finished.
|
||||
Delete backup file...
|
||||
Backup file deleted.
|
||||
Starting ISQL using NON sysdba user account...
|
||||
Who am I: TMP$C4648
|
||||
Used protocol: TCP
|
||||
Connected to restored DB ? YES
|
||||
Owner of DB is: TMP$C4648
|
||||
ISQL using NON sysdba user account finished.
|
||||
"""
|
||||
"""
|
||||
|
||||
user_1 = user_factory(name='tmp$c4648', password='123')
|
||||
temp_db_1 = temp_file('tmp4648.fdb')
|
||||
|
||||
@pytest.mark.version('>=3.0')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
|
||||
|
||||
def test_1(act_1: Action, user_1: User, temp_db_1: Path, capsys):
|
||||
with act_1.db.connect() as con:
|
||||
c = con.cursor()
|
||||
        #-- 'create ... grant admin role' or 'grant rdb$admin' are NOT necessary
        #-- to enable creating a database by a non-sysdba user:
|
||||
c.execute('grant create database to user tmp$c4648')
|
||||
con.commit()
|
||||
#
|
||||
print ('Starting backup...')
|
||||
backup = BytesIO()
|
||||
with act_1.connect_server() as srv:
|
||||
srv.database.local_backup(database=str(act_1.db.db_path), backup_stream=backup)
|
||||
print ('Backup finished.')
|
||||
backup.seek(0)
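    # rewind the in-memory backup stream so that local_restore (run below under the
    # non-SYSDBA account) reads it from the start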
|
||||
with act_1.connect_server(user=user_1.name, password=user_1.password) as srv:
|
||||
print ('Starting restore using NON sysdba user account...')
|
||||
srv.database.local_restore(database=str(temp_db_1), backup_stream=backup,
|
||||
flags=SrvRestoreFlag.REPLACE)
|
||||
print ('Restore using NON sysdba user account finished.')
|
||||
#
|
||||
script = """
|
||||
set list on;
|
||||
select
|
||||
a.mon$user as "Who am I:"
|
||||
,left(a.mon$remote_protocol,3) as "Used protocol:"
|
||||
,iif(m.mon$database_name containing 'tmp4648.fdb','YES','NO! ' || m.mon$database_name ) as "Connected to restored DB ?"
|
||||
,m.mon$owner as "Owner of DB is:"
|
||||
from mon$attachments a, mon$database m
|
||||
where a.mon$attachment_id=current_connection;
|
||||
commit;
|
||||
"""
|
||||
print ('Starting ISQL using NON sysdba user account...')
|
||||
act_1.isql(switches=['-q', '-user', 'tmp$c4648', '-pas', '123', f'localhost:{temp_db_1}'],
|
||||
connect_db=False, input=script)
|
||||
print(act_1.stdout)
|
||||
print ('ISQL using NON sysdba user account finished.')
|
||||
act_1.reset()
|
||||
act_1.expected_stdout = expected_stdout_1
|
||||
act_1.stdout = capsys.readouterr().out
|
||||
assert act_1.clean_stdout == act_1.clean_expected_stdout
|
||||
|
@ -2,21 +2,25 @@
|
||||
#
|
||||
# id: bugs.core_4707
|
||||
# title: Implement ability to validate tables and indices online (without exclusive access to database)
|
||||
# decription:
|
||||
# decription:
|
||||
# Checked on: 4.0.0.1635 SS: 7.072s; 4.0.0.1633 CS: 7.923s; 3.0.5.33180 SS: 6.599s; 3.0.5.33178 CS: 7.189s. 2.5.9.27119 SS: 5.951s; 2.5.9.27146 SC: 5.748s.
|
||||
#
|
||||
#
|
||||
# tracker_id: CORE-4707
|
||||
# min_versions: ['2.5.5']
|
||||
# versions: 2.5.5
|
||||
# qmid: None
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
import subprocess
|
||||
import time
|
||||
from pathlib import Path
|
||||
from firebird.qa import db_factory, python_act, Action, temp_file
|
||||
|
||||
# version: 2.5.5
|
||||
# resources: None
|
||||
|
||||
substitutions_1 = [('[\\d]{2}:[\\d]{2}:[\\d]{2}.[\\d]{2}', ''), ('Relation [\\d]{3,4}', 'Relation')]
|
||||
substitutions_1 = [('[\\d]{2}:[\\d]{2}:[\\d]{2}.[\\d]{2}', ''),
|
||||
('Relation [\\d]{3,4}', 'Relation')]
|
||||
|
||||
init_script_1 = """
|
||||
set term ^;
|
||||
@ -33,16 +37,16 @@ init_script_1 = """
|
||||
recreate table test2(id int primary key using index test2_pk, s varchar(1000), t computed by (s) );
|
||||
recreate table test3(id int);
|
||||
commit;
|
||||
|
||||
|
||||
insert into test1(id, s) select gen_id(g,1), rpad('', 1000, gen_id(g,0) ) from rdb$types rows 100;
|
||||
insert into test2(id, s) select id, s from test1;
|
||||
commit;
|
||||
|
||||
|
||||
create index test2_s on test2(s);
|
||||
create index test2_c on test2 computed by(s);
|
||||
create index test2_t on test2 computed by(t);
|
||||
commit;
|
||||
"""
|
||||
"""
|
||||
|
||||
db_1 = db_factory(sql_dialect=3, init=init_script_1)
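# The legacy block below (kept commented out) drives online validation through fbsvcmgr
# while a concurrent ISQL session holds row-level locks. A minimal sketch of the same
# service call using the Action helper from this commit could look like the following;
# treat it as an assumption about the eventual implementation, not the final code:
#
#   def run_online_validation(act: Action):
#       # online validation does not need exclusive access; 'val_lock_timeout' limits
#       # how long a locked relation is waited for before it is reported and skipped
#       act.svcmgr(switches=['action_validate', 'dbname', str(act.db.db_path),
#                            'val_lock_timeout', '1'])
#       return act.stdout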
|
||||
|
||||
@ -53,32 +57,32 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# from subprocess import Popen
|
||||
# import time
|
||||
# from fdb import services
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
#
|
||||
#
|
||||
# # Obtain engine version:
|
||||
# engine = str(db_conn.engine_version) # convert to text because 'float' object has no attribute 'startswith'
|
||||
# db_file = db_conn.database_name
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# #-----------------------------------
|
||||
#
|
||||
#
|
||||
# def flush_and_close(file_handle):
|
||||
# # https://docs.python.org/2/library/os.html#os.fsync
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
|
||||
# global os
|
||||
#
|
||||
#
|
||||
# file_handle.flush()
|
||||
# os.fsync(file_handle.fileno())
|
||||
#
|
||||
#
|
||||
# file_handle.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def cleanup( f_names_list ):
|
||||
# global os
|
||||
# for i in range(len( f_names_list )):
|
||||
@ -86,10 +90,10 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# os.remove( f_names_list[i] )
|
||||
# if os.path.isfile( f_names_list[i]):
|
||||
# print('ERROR: can not remove file ' + f_names_list[i])
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# # Following script will hang for several seconds (see 'lock timeout' argument - and this will serve as a pause
|
||||
# # during which we can launch fbsvcmgr to validate database:
|
||||
# lock_sql='''
|
||||
@ -101,9 +105,9 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# end ^
|
||||
# set term ;^
|
||||
# commit;
|
||||
#
|
||||
#
|
||||
# set transaction wait;
|
||||
#
|
||||
#
|
||||
# delete from test1;
|
||||
# insert into test3(id) values(1);
|
||||
# set list on;
|
||||
@ -121,47 +125,47 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# set term ;^
|
||||
# select 'EB with pause finished.' as msg_2 from rdb$database;
|
||||
# ''' % (user_name, user_password)
|
||||
#
|
||||
#
|
||||
# f_hang_sql=open( os.path.join(context['temp_directory'],'tmp_4707_hang.sql'), 'w')
|
||||
# f_hang_sql.write(lock_sql)
|
||||
# flush_and_close( f_hang_sql )
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# ################ ##############################################################################
|
||||
# # Make asynchronous call of ISQL which will stay several seconds in pause due to row-level lock
|
||||
# # #############################################################################################
|
||||
# # Execute a child program in a new process, redirecting STDERR to the same target as of STDOUT:
|
||||
# f_hang_log=open( os.path.join(context['temp_directory'],'tmp_4707_hang.log'), 'w')
|
||||
# p_hang = Popen([context['isql_path'], dsn, "-i", f_hang_sql.name],stdout=f_hang_log, stderr=subprocess.STDOUT)
|
||||
#
|
||||
# # Here we should wait while ISQL will establish its connect (in separate child window, call asynchronous) and
|
||||
#
|
||||
# # Here we should wait while ISQL will establish its connect (in separate child window, call asynchronous) and
|
||||
# # stay in pause:
|
||||
# time.sleep(2)
|
||||
#
|
||||
#
|
||||
# #############################################################################################
|
||||
# # Make SYNC. call of fbsvcmgr in order to validate database which has locks on some relations
|
||||
# #############################################################################################
|
||||
# f_svc_log=open( os.path.join(context['temp_directory'],'tmp_4707_svc.log'), 'w')
|
||||
# subprocess.call([ context['fbsvcmgr_path'], 'localhost:service_mgr','action_validate','dbname', db_file,'val_lock_timeout','1'],stdout=f_svc_log, stderr=subprocess.STDOUT)
|
||||
# flush_and_close( f_svc_log )
|
||||
#
|
||||
#
|
||||
# #######################################################
|
||||
# # TERMINATE separate (child) process of ISQL that hangs
|
||||
# #######################################################
|
||||
# p_hang.terminate()
|
||||
# flush_and_close( f_hang_log )
|
||||
#
|
||||
#
|
||||
# with open( f_hang_log.name,'r') as f:
|
||||
# print(f.read())
|
||||
#
|
||||
#
|
||||
# with open( f_svc_log.name,'r') as f:
|
||||
# print(f.read())
|
||||
#
|
||||
#
|
||||
# # cleanup:
|
||||
# ##########
|
||||
# time.sleep(1)
|
||||
# cleanup( [i.name for i in (f_hang_sql, f_hang_log, f_svc_log) ] )
|
||||
#
|
||||
#
|
||||
# ## ||||||||||||||||||||||||||||
|
||||
# ## ###################################||| FB 4.0+, SS and SC |||##############################
|
||||
# ## ||||||||||||||||||||||||||||
|
||||
@ -177,10 +181,11 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# con4cleanup=fdb.connect( dsn = dsn, user = user_name, password = user_password )
|
||||
# con4cleanup.execute_immediate('delete from mon$attachments where mon$attachment_id != current_connection')
|
||||
# con4cleanup.commit()
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
expected_stdout_1 = """
|
||||
ISQL_MSG Starting EB with infinite pause.
|
||||
@ -199,11 +204,67 @@ expected_stdout_1 = """
|
||||
08:37:03.17 Acquire relation lock failed
|
||||
08:37:03.17 Relation 130 (TEST3) : 1 ERRORS found
|
||||
08:37:03.17 Validation finished
|
||||
"""
|
||||
"""
|
||||
|
||||
hang_script_1 = temp_file('hang_script.sql')
|
||||
hang_output_1 = temp_file('hang_script.out')
|
||||
|
||||
@pytest.mark.version('>=2.5.5')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
def test_1(act_1: Action, hang_script_1: Path, hang_output_1: Path, capsys, request):
|
||||
# Finalizer for FB4
|
||||
def drop_connections():
|
||||
with act_1.db.connect() as con4cleanup:
|
||||
con4cleanup.execute_immediate('delete from mon$attachments where mon$attachment_id != current_connection')
|
||||
con4cleanup.commit()
|
||||
|
||||
request.addfinalizer(drop_connections)
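# The finalizer registered above runs during teardown even if the test body below
# fails, so the extra attachment created via ES/EDS is always dropped.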
|
||||
# Following script will hang for several seconds (see 'lock timeout' argument - and this will serve as a pause
|
||||
# during which we can launch fbsvcmgr to validate database:
|
||||
hang_script_1.write_text(f"""
|
||||
set term ^;
|
||||
execute block as
|
||||
begin
|
||||
execute statement 'drop role tmp$r4707';
|
||||
when any do begin end
|
||||
end ^
|
||||
set term ;^
|
||||
commit;
|
||||
|
||||
set transaction wait;
|
||||
|
||||
delete from test1;
|
||||
insert into test3(id) values(1);
|
||||
set list on;
|
||||
select 'Starting EB with infinite pause.' as isql_msg from rdb$database;
|
||||
set term ^;
|
||||
execute block as
|
||||
begin
|
||||
execute statement 'update test1 set id=-id'
|
||||
on external 'localhost:' || rdb$get_context('SYSTEM','DB_NAME')
|
||||
as user '{act_1.db.user}' password '{act_1.db.password}'
|
||||
role 'TMP$R4707' -- this will force to create new attachment, and its Tx will be paused on INFINITE time.
|
||||
;
|
||||
when any do begin end
|
||||
end ^
|
||||
set term ;^
|
||||
select 'EB with pause finished.' as msg_2 from rdb$database;
|
||||
""")
|
||||
# Make asynchronous call of ISQL which will stay several seconds in pause due to row-level lock
|
||||
with open(hang_output_1, mode='w') as hang_out:
|
||||
p_hang_sql = subprocess.Popen([act_1.vars['isql'], '-i', str(hang_script_1),
|
||||
'-user', act_1.db.user,
|
||||
'-password', act_1.db.password, act_1.db.dsn],
|
||||
stdout=hang_out, stderr=subprocess.STDOUT)
|
||||
try:
|
||||
time.sleep(2)
|
||||
# Make SYNC. call of fbsvcmgr in order to validate database which has locks on some relations
|
||||
act_1.svcmgr(switches=['action_validate', 'dbname', str(act_1.db.db_path),
|
||||
'val_lock_timeout', '1'])
|
||||
finally:
|
||||
p_hang_sql.terminate()
|
||||
#
|
||||
print(hang_output_1.read_text())
|
||||
print(act_1.stdout)
|
||||
act_1.expected_stdout = expected_stdout_1
|
||||
act_1.stdout = capsys.readouterr().out
|
||||
assert act_1.clean_stdout == act_1.clean_expected_stdout
|
||||
|
@ -2,19 +2,22 @@
|
||||
#
|
||||
# id: bugs.core_4715
|
||||
# title: Restore of shadowed database fails using -k ("restore without shadow") switch
|
||||
# decription:
|
||||
# decription: [pcisar] 23.11.2021
|
||||
# For unknown reason, on v4.0.0.2496 the gstat -h does not report an active shadow
|
||||
# and the test thus fails.
|
||||
# tracker_id: CORE-4715
|
||||
# min_versions: ['3.0']
|
||||
# versions: 3.0
|
||||
# qmid: None
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
from pathlib import Path
|
||||
from firebird.qa import db_factory, python_act, Action, temp_file
|
||||
|
||||
# version: 3.0
|
||||
# resources: None
|
||||
|
||||
substitutions_1 = [('^((?!HASH_IN_SOURCE|RDB\\$SHADOW_NUMBER|HASH_IN_RESTORED|Attributes).)*$', ''), ('[ ]+', ' '), ('[\t]*', ' ')]
|
||||
substitutions_1 = [('^((?!HASH_IN_SOURCE|RDB\\$SHADOW_NUMBER|HASH_IN_RESTORED|Attributes).)*$', '')]
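# The single pattern above is a whole-line negative lookahead: a line that does NOT
# contain any of the listed tokens matches '^((?!...).)*$' completely and is replaced
# by an empty string, so only HASH_IN_SOURCE / RDB$SHADOW_NUMBER / Attributes /
# HASH_IN_RESTORED lines take part in the comparison. A quick check (assuming the
# pair is applied per line with re.sub):
#
#   import re
#   flt = re.compile('^((?!HASH_IN_SOURCE|RDB\\$SHADOW_NUMBER|HASH_IN_RESTORED|Attributes).)*$')
#   flt.sub('', 'Generation 42')                                       # -> '' (line dropped)
#   flt.sub('', 'Attributes            force write, active shadow')    # -> kept unchanged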
|
||||
|
||||
init_script_1 = """
|
||||
-- Confirmed on WI-T3.0.0.31374:
|
||||
@ -24,14 +27,14 @@ init_script_1 = """
|
||||
-- gbak:Exiting before completion due to errors
|
||||
recreate table test(s varchar(30));
|
||||
commit;
|
||||
"""
|
||||
"""
|
||||
|
||||
db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)
|
||||
|
||||
# test_script_1
|
||||
#---
|
||||
# import os
|
||||
#
|
||||
#
|
||||
# shd=os.path.join(context['temp_directory'],'core_4715.shd')
|
||||
# script = '''create shadow 1 '%s'; commit; insert into test select 'line #'||lpad(row_number()over(), 3, '0') from rdb$types rows 200; commit; set list on; select hash(list(s)) hash_in_source from test; select * from rdb$files;''' % shd
|
||||
# runProgram('isql',[dsn,'-q','-user',user_name,'-password',user_password],script)
|
||||
@ -46,9 +49,10 @@ db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)
|
||||
# os.remove(fbk)
|
||||
# if os.path.isfile(fbn):
|
||||
# os.remove(fbn)
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
expected_stdout_1 = """
|
||||
Creating shadow...
|
||||
@ -56,11 +60,30 @@ HASH_IN_SOURCE 1499836372373901520
|
||||
RDB$SHADOW_NUMBER 1
|
||||
Attributes force write, active shadow
|
||||
HASH_IN_RESTORED 1499836372373901520
|
||||
"""
|
||||
"""
|
||||
|
||||
fbk_1 = temp_file('core_4715-shadowed.fbk')
|
||||
fbn_1 = temp_file('core_4715-restored.fdb')
|
||||
|
||||
@pytest.mark.version('>=3.0')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
|
||||
|
||||
def test_1(act_1: Action, fbk_1: Path, fbn_1: Path, capsys):
|
||||
act_1.isql(switches=['-q'],
|
||||
input=f'''create shadow 1 '{fbn_1.with_suffix('.shd')}'; commit; insert into test select 'line #'||lpad(row_number()over(), 3, '0') from rdb$types rows 200; commit; set list on; select hash(list(s)) hash_in_source from test; select * from rdb$files;''')
|
||||
print(act_1.stdout)
|
||||
act_1.reset()
|
||||
act_1.gstat(switches=['-h'])
|
||||
print(act_1.stdout)
|
||||
act_1.reset()
|
||||
act_1.gbak(switches=['-b', act_1.db.dsn, str(fbk_1)])
|
||||
act_1.reset()
|
||||
act_1.gbak(switches=['-rep', '-k', str(fbk_1), str(fbn_1)])
|
||||
act_1.reset()
|
||||
act_1.isql(switches=['-q', '-user', act_1.db.user, '-password', act_1.db.password,
|
||||
str(fbn_1)], connect_db=False,
|
||||
input='set list on; select hash(list(s)) hash_in_restored from test;')
|
||||
print(act_1.stdout)
|
||||
#
|
||||
act_1.reset()
|
||||
act_1.expected_stdout = expected_stdout_1
|
||||
act_1.stdout = capsys.readouterr().out
|
||||
assert act_1.clean_stdout == act_1.clean_expected_stdout
|
||||
|
@ -2,28 +2,28 @@
|
||||
#
|
||||
# id: bugs.core_4731
|
||||
# title: Prohibit an ability to issue DML or DDL statements on RDB$ tables
|
||||
# decription:
|
||||
# decription:
|
||||
# Integral test for verifying ability to change system tables by non-privileged user and by those
|
||||
# who have been granted with RDB$ADMIN role.
|
||||
# Main idea: read system tables (hereafter - 'ST') metadata and generate all possible DML and DDL
|
||||
# statements that are intended to:
|
||||
# Main idea: read system tables (hereafter - 'ST') metadata and generate all possible DML and DDL
|
||||
# statements that are intended to:
|
||||
# a) restrict ST by creating new table with foreign key to selected ST (if it has PK or UK);
|
||||
# b) change data by issuing INSERT / UPDATE / DELETE statements; also try SELECT ... WITH LOCK;
|
||||
# c) change metadata: add column, alter column (drop NULL constraint, add new constraint, add DEFAULT value),
|
||||
# drop column;
|
||||
# d) aux. actions: attempt to drop ST.
|
||||
# d) aux. actions: attempt to drop ST.
|
||||
# *** 11-apr-2018: EXCLUDED attempt to create index on ST: now it is allowed, see CORE-5746 ***
|
||||
# e) make indirect changes: apply ALTER SEQUENCE statement for system generators
|
||||
#
|
||||
#
|
||||
# Test contains following statements and procedures:
|
||||
# 1) creating two users, one of them is granted with role RDB$ADMIN.
|
||||
# 1) creating two users, one of them is granted with role RDB$ADMIN.
|
||||
# Both these users are granted to create/alter/drop any kinds of database objects.
|
||||
# 2) creating several user objects (domain, exception, collation, sequence, master/detail tables, trigger,
|
||||
# 2) creating several user objects (domain, exception, collation, sequence, master/detail tables, trigger,
|
||||
# view, standalone procedure and standalone function and package). These objects are created in order
|
||||
# to add some data in system tables that can be later actually affected by vulnerable expressions;
|
||||
# 3) proc sp_gen_expr_for_creating_fkeys:
|
||||
# reads definition of every system table and, if it has PK/UK, then generates expressions for item "a":
|
||||
# they will create completely new table with set of fields which is appropriate to build FOREIGN KEY
|
||||
# they will create completely new table with set of fields which is appropriate to build FOREIGN KEY
|
||||
# to selected ST. Generated expressions are added to special table `vulnerable_on_sys_tables`;
|
||||
# 4) proc sp_gen_expr_for_direct_change:
|
||||
# reads definition of every system table and generates DML and DDL expressions for items "b" ... "e" described
|
||||
@ -36,11 +36,11 @@
|
||||
# 6) two calls of sp_run_vulnerable_expressions: one for non-privileged user and second for user with role RDB$ADMIN.
|
||||
# 7) select values of raised gdscodes (distinct) in order to check that only ONE gdscode occured (335544926).
|
||||
# 8) select expressions that were PASSED without exceptions.
|
||||
#
|
||||
#
|
||||
# Checked on:
|
||||
# 3.0.4.32947: OK, SS: 22s, CS: 37s
|
||||
# 4.0.0.955: OK, SS: 35s, CS: 33s
|
||||
#
|
||||
#
|
||||
# REFACTORED 18.02.2020: most of initial code was moved into $files_location/core_4731.sql; changed test_type to 'Python'.
|
||||
# Checked 18.02.2020 after refactoring:
|
||||
# 4.0.0.1773 SS: 11.759s.
|
||||
@ -49,14 +49,16 @@
|
||||
# 3.0.6.33247 SS: 8.431s.
|
||||
# 3.0.6.33247 SC: 11.419s.
|
||||
# 3.0.6.33247 CS: 10.846s.
|
||||
#
|
||||
#
|
||||
# tracker_id: CORE-4731
|
||||
# min_versions: ['3.0']
|
||||
# versions: 3.0
|
||||
# qmid: None
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
from pathlib import Path
|
||||
from firebird.qa import db_factory, python_act, Action, user_factory, User
|
||||
from firebird.driver import ShutdownMode, ShutdownMethod
|
||||
|
||||
# version: 3.0
|
||||
# resources: None
|
||||
@ -69,38 +71,38 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
|
||||
# test_script_1
|
||||
#---
|
||||
#
|
||||
#
|
||||
# import os
|
||||
# import sys
|
||||
# import subprocess
|
||||
# import time
|
||||
# from fdb import services
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
#
|
||||
#
|
||||
# this_db = db_conn.database_name
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
# dba_privileged_name = 'tmp_c4731_cooldba'
|
||||
# non_privileged_name = 'tmp_c4731_manager'
|
||||
#
|
||||
#
|
||||
# #-----------------------------------
|
||||
#
|
||||
#
|
||||
# def flush_and_close(file_handle):
|
||||
# # https://docs.python.org/2/library/os.html#os.fsync
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
|
||||
# global os
|
||||
#
|
||||
#
|
||||
# file_handle.flush()
|
||||
# os.fsync(file_handle.fileno())
|
||||
#
|
||||
#
|
||||
# file_handle.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def cleanup( f_names_list ):
|
||||
# global os
|
||||
# for i in range(len( f_names_list )):
|
||||
@ -108,91 +110,91 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# os.remove( f_names_list[i] )
|
||||
# if os.path.isfile( f_names_list[i]):
|
||||
# print('ERROR: can not remove file ' + f_names_list[i])
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# f_sql=open(os.path.join(context['files_location'],'core_4731.sql'),'r')
|
||||
# sql_for_prepare = f_sql.read()
|
||||
# f_sql.close()
|
||||
#
|
||||
#
|
||||
# f_pre_sql = open( os.path.join(context['temp_directory'],'tmp_core_4731_pre.sql'), 'w')
|
||||
# f_pre_sql.write( sql_for_prepare % dict(globals(), **locals()) )
|
||||
# flush_and_close( f_pre_sql )
|
||||
#
|
||||
#
|
||||
# f_pre_log = open( '.'.join( (os.path.splitext( f_pre_sql.name )[0], 'log') ), 'w')
|
||||
# f_pre_err = open( '.'.join( (os.path.splitext( f_pre_sql.name )[0], 'err') ), 'w')
|
||||
# subprocess.call( [ context['isql_path'], dsn, '-q', '-i', f_pre_sql.name ], stdout = f_pre_log, stderr = f_pre_err)
|
||||
# flush_and_close( f_pre_log )
|
||||
# flush_and_close( f_pre_err )
|
||||
#
|
||||
#
|
||||
# runProgram( context['gfix_path'],[dsn, '-shut','full','-force','0'] )
|
||||
# runProgram( context['gfix_path'],[dsn, '-online'] )
|
||||
#
|
||||
#
|
||||
# sql_run='''
|
||||
# -- ###################################################################################
|
||||
# -- R U N A S N O N - P R I V I L E G E D U S E R
|
||||
# -- R U N A S N O N - P R I V I L E G E D U S E R
|
||||
# -- ###################################################################################
|
||||
# execute procedure sp_run_vulnerable_expressions('%(non_privileged_name)s', '123', 'NONE');
|
||||
#
|
||||
#
|
||||
# -- Note: as of build 3.0.31810, we can SKIP restoring of 'pure-state' of RDB$ tables
|
||||
# -- after this SP because non-privileged user can NOT change enything.
|
||||
# -- after this SP because non-privileged user can NOT change enything.
|
||||
# -- All his attempts should FAIL, system tables should be in unchanged state.
|
||||
#
|
||||
#
|
||||
# set list off;
|
||||
# set heading off;
|
||||
#
|
||||
#
|
||||
# select '-- Executed with role: '||trim(( select actual_role from vulnerable_on_sys_tables rows 1 ))
|
||||
# ||'. Expressions that passes WITHOUT errors:' as msg
|
||||
# ||'. Expressions that passes WITHOUT errors:' as msg
|
||||
# from rdb$database
|
||||
# ;
|
||||
#
|
||||
#
|
||||
# commit; -- 11-04-2018, do not remove!
|
||||
# set transaction no wait;
|
||||
#
|
||||
#
|
||||
# set list on;
|
||||
# select count(*) as "-- count_of_passed: "
|
||||
# from v_passed;
|
||||
#
|
||||
#
|
||||
# set list on;
|
||||
# select * from v_passed;
|
||||
#
|
||||
#
|
||||
# set list on;
|
||||
# select distinct vulnerable_gdscode as "-- gdscode list for blocked:"
|
||||
# from vulnerable_on_sys_tables
|
||||
# where vulnerable_gdscode is distinct from -1;
|
||||
#
|
||||
#
|
||||
# -- #########################################################################################
|
||||
# -- R U N A S U S E R W H O I S G R A N T E D W I T H R B D $ A D M I N
|
||||
# -- #########################################################################################
|
||||
# execute procedure sp_run_vulnerable_expressions('%(dba_privileged_name)s', '123', 'RDB$ADMIN');
|
||||
#
|
||||
#
|
||||
# set list off;
|
||||
# set heading off;
|
||||
#
|
||||
#
|
||||
# select '-- Executed with role: '||trim(( select actual_role from vulnerable_on_sys_tables rows 1 ))
|
||||
# ||'. Expressions that passes WITHOUT errors:' as msg
|
||||
# ||'. Expressions that passes WITHOUT errors:' as msg
|
||||
# from rdb$database
|
||||
# ;
|
||||
# commit; -- 11-04-2018, do not remove!
|
||||
#
|
||||
#
|
||||
# set list on;
|
||||
# select count(*) as "-- count_of_passed: "
|
||||
# from v_passed;
|
||||
#
|
||||
#
|
||||
# set list on;
|
||||
# select * from v_passed;
|
||||
#
|
||||
#
|
||||
# set list on;
|
||||
# select distinct vulnerable_gdscode as "-- gdscode list for blocked:"
|
||||
# from vulnerable_on_sys_tables
|
||||
# where vulnerable_gdscode is distinct from -1;
|
||||
#
|
||||
#
|
||||
# ----------------
|
||||
# commit;
|
||||
#
|
||||
#
|
||||
# connect '%(dsn)s' user '%(user_name)s' password '%(user_password)s';
|
||||
#
|
||||
#
|
||||
# -- ||||||||||||||||||||||||||||
|
||||
# -- ###################################||| FB 4.0+, SS and SC |||##############################
|
||||
# -- ||||||||||||||||||||||||||||
|
||||
@ -206,26 +208,26 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# -- SQLCODE: -901 / lock time-out on wait transaction / object <this_test_DB> is in use
|
||||
# -- #############################################################################################
|
||||
# delete from mon$attachments where mon$attachment_id != current_connection;
|
||||
# commit;
|
||||
#
|
||||
# commit;
|
||||
#
|
||||
# drop user %(dba_privileged_name)s;
|
||||
# drop user %(non_privileged_name)s;
|
||||
# commit;
|
||||
# ''' % dict(globals(), **locals())
|
||||
#
|
||||
#
|
||||
# f_sql_run = open( os.path.join(context['temp_directory'],'tmp_core_4731_run.sql'), 'w')
|
||||
# f_sql_run.write( sql_run % dict(globals(), **locals()) )
|
||||
# flush_and_close( f_sql_run )
|
||||
#
|
||||
#
|
||||
# f_run_log = open( '.'.join( (os.path.splitext( f_sql_run.name )[0], 'log') ), 'w')
|
||||
# f_run_err = open( '.'.join( (os.path.splitext( f_sql_run.name )[0], 'err') ), 'w')
|
||||
# subprocess.call( [ context['isql_path'], dsn, '-q', '-i', f_sql_run.name ], stdout = f_run_log, stderr = f_run_err)
|
||||
# flush_and_close( f_run_log )
|
||||
# flush_and_close( f_run_err )
|
||||
#
|
||||
#
|
||||
# # Check results:
|
||||
# # ==============
|
||||
#
|
||||
#
|
||||
# # 1. Print UNEXPECTED output:
|
||||
# #############################
|
||||
# for f in (f_pre_log, f_pre_err):
|
||||
@ -233,26 +235,27 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# for line in f:
|
||||
# if line.strip():
|
||||
# print( 'UNEXPECTED '+('STDOUT' if f == f_pre_log else 'STDERR')+' WHEN PREPARE DB: ' + line )
|
||||
#
|
||||
#
|
||||
# with open( f_run_err.name,'r') as f:
|
||||
# for line in f:
|
||||
# if line.strip():
|
||||
# print( 'UNEXPECTED STDERR WHEN RUN: ' + line )
|
||||
#
|
||||
#
|
||||
# # 2. Print EXPECTED output:
|
||||
# ###########################
|
||||
# with open( f_run_log.name,'r') as f:
|
||||
# for line in f:
|
||||
# if line.strip():
|
||||
# print( line )
|
||||
#
|
||||
#
|
||||
# # Cleanup
|
||||
# #########
|
||||
# cleanup( [ i.name for i in (f_pre_sql,f_pre_log,f_pre_err,f_sql_run,f_run_log,f_run_err) ] )
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
expected_stdout_1 = """
|
||||
-- Executed with role: NONE. Expressions that passes WITHOUT errors:
|
||||
@ -285,11 +288,104 @@ expected_stdout_1 = """
|
||||
VULNERABLE_EXPR update RDB$TYPES t set t.RDB$TYPE_NAME = 'C' where coalesce(rdb$system_flag,0)=0 rows 1 returning t.rdb$db_key; -- length of returned rdb$dbkey=8
|
||||
VULNERABLE_EXPR update RDB$TYPES t set t.RDB$TYPE_NAME = null where coalesce(rdb$system_flag,0)=0 rows 1 returning t.rdb$db_key; -- length of returned rdb$dbkey=8
|
||||
-- gdscode list for blocked: 335544926
|
||||
"""
|
||||
"""
|
||||
|
||||
dba_privileged_user = user_factory(name='tmp_c4731_cooldba', password='123')
|
||||
non_privileged_user = user_factory(name='tmp_c4731_manager', password='123')
|
||||
|
||||
@pytest.mark.version('>=3.0')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
def test_1(act_1: Action, dba_privileged_user: User, non_privileged_user: User, capsys):
|
||||
# Run prepare script
|
||||
prep_script = (act_1.vars['files'] / 'core_4731.sql').read_text()
|
||||
prep_script = prep_script % {'dba_privileged_name': dba_privileged_user.name,
|
||||
'non_privileged_name': non_privileged_user.name}
|
||||
act_1.isql(switches=['-q'], input=prep_script)
|
||||
#
|
||||
with act_1.connect_server() as srv:
|
||||
srv.database.shutdown(database=str(act_1.db.db_path), mode=ShutdownMode.FULL,
|
||||
method=ShutdownMethod.FORCED, timeout=0)
|
||||
srv.database.bring_online(database=str(act_1.db.db_path))
|
||||
#
|
||||
test_script = f"""
|
||||
-- ###################################################################################
|
||||
-- R U N A S N O N - P R I V I L E G E D U S E R
|
||||
-- ###################################################################################
|
||||
execute procedure sp_run_vulnerable_expressions('{non_privileged_user.name}', '123', 'NONE');
|
||||
|
||||
-- Note: as of build 3.0.31810, we can SKIP restoring of 'pure-state' of RDB$ tables
|
||||
-- after this SP because non-privileged user can NOT change anything.
|
||||
-- All his attempts should FAIL, system tables should be in unchanged state.
|
||||
|
||||
set list off;
|
||||
set heading off;
|
||||
|
||||
select '-- Executed with role: '||trim(( select actual_role from vulnerable_on_sys_tables rows 1 ))
|
||||
||'. Expressions that passes WITHOUT errors:' as msg
|
||||
from rdb$database
|
||||
;
|
||||
|
||||
commit; -- 11-04-2018, do not remove!
|
||||
set transaction no wait;
|
||||
|
||||
set list on;
|
||||
select count(*) as "-- count_of_passed: "
|
||||
from v_passed;
|
||||
|
||||
set list on;
|
||||
select * from v_passed;
|
||||
|
||||
set list on;
|
||||
select distinct vulnerable_gdscode as "-- gdscode list for blocked:"
|
||||
from vulnerable_on_sys_tables
|
||||
where vulnerable_gdscode is distinct from -1;
|
||||
|
||||
-- #########################################################################################
|
||||
-- R U N A S U S E R W H O I S G R A N T E D W I T H R B D $ A D M I N
|
||||
-- #########################################################################################
|
||||
execute procedure sp_run_vulnerable_expressions('{dba_privileged_user.name}', '123', 'RDB$ADMIN');
|
||||
|
||||
set list off;
|
||||
set heading off;
|
||||
|
||||
select '-- Executed with role: '||trim(( select actual_role from vulnerable_on_sys_tables rows 1 ))
|
||||
||'. Expressions that passes WITHOUT errors:' as msg
|
||||
from rdb$database
|
||||
;
|
||||
commit; -- 11-04-2018, do not remove!
|
||||
|
||||
set list on;
|
||||
select count(*) as "-- count_of_passed: "
|
||||
from v_passed;
|
||||
|
||||
set list on;
|
||||
select * from v_passed;
|
||||
|
||||
set list on;
|
||||
select distinct vulnerable_gdscode as "-- gdscode list for blocked:"
|
||||
from vulnerable_on_sys_tables
|
||||
where vulnerable_gdscode is distinct from -1;
|
||||
|
||||
----------------
|
||||
commit;
|
||||
|
||||
connect '{act_1.db.dsn}' user '{act_1.db.user}' password '{act_1.db.password}';
|
||||
|
||||
-- ||||||||||||||||||||||||||||
|
||||
-- ###################################||| FB 4.0+, SS and SC |||##############################
|
||||
-- ||||||||||||||||||||||||||||
|
||||
-- If we check SS or SC and ExtConnPoolLifeTime > 0 (config parameter FB 4.0+) then current
|
||||
-- DB (bugs.core_NNNN.fdb) will be 'captured' by firebird.exe process and fbt_run utility
|
||||
-- will not able to drop this database at the final point of test.
|
||||
-- Moreover, DB file will be hold until all activity in firebird.exe completed and AFTER this
|
||||
-- we have to wait for <ExtConnPoolLifeTime> seconds after it (discussion and small test see
|
||||
-- in the letter to hvlad and dimitr 13.10.2019 11:10).
|
||||
-- This means that one need to kill all connections to prevent from exception on cleanup phase:
|
||||
-- SQLCODE: -901 / lock time-out on wait transaction / object <this_test_DB> is in use
|
||||
-- #############################################################################################
|
||||
delete from mon$attachments where mon$attachment_id != current_connection;
|
||||
commit;
|
||||
"""
|
||||
act_1.reset()
|
||||
act_1.expected_stdout = expected_stdout_1
|
||||
act_1.isql(switches=['-q'], input=test_script)
|
||||
assert act_1.clean_stdout == act_1.clean_expected_stdout
|
||||
|
@ -2,41 +2,41 @@
|
||||
#
|
||||
# id: bugs.core_4743
|
||||
# title: Granted role does not work with non-ascii username
|
||||
# decription:
|
||||
# decription:
|
||||
# Test creates non-ascii user and role, and also several kind of DB objects (table, procedure, function etc).
|
||||
# Then role is granted to user, and privileges for DB objects are granted to this role.
|
||||
# All these actions are done in ISQL which is launched as separate (child) process.
|
||||
# No errors must be raised in it (see 'f_ddl_log' - it must remain empty).
|
||||
#
|
||||
#
|
||||
# Further, we try to establish connection to the test DB using non-ascii user and role.
|
||||
#
|
||||
#
|
||||
# ::: NB :::
|
||||
# Attempt to use ISQL for connect with non-ascii login will FAIL with:
|
||||
# Statement failed, SQLSTATE = 22021
|
||||
# Bad international character in tag isc_dpb_user_name
|
||||
# -Cannot transliterate character between character sets
|
||||
# -Invalid or incomplete multibyte or wide character
|
||||
#
|
||||
#
|
||||
# Fortunately, this can be done without problems using fdb.connect().
|
||||
#
|
||||
#
|
||||
# After connect, we obtain:
|
||||
# * name of current user and his role (both of them must be non-ascii);
|
||||
# * privileges that was granted to this user (see query to v_current_privileges);
|
||||
#
|
||||
#
|
||||
# Finally, we disconnect, generate SQL script for drop this user and run ISQL for this
|
||||
# (we have to do this because it seems that there is no way to drop NON-ASCII user via FDB Services).
|
||||
#
|
||||
#
|
||||
# NOTE: Python package 'io' is used here instead of codecs (the latter is obsolete in Python).
|
||||
#
|
||||
#
|
||||
# Checked on: 4.0.0.2416 (Windows and Linux)
|
||||
#
|
||||
# tracker_id:
|
||||
#
|
||||
# tracker_id:
|
||||
# min_versions: ['4.0']
|
||||
# versions: 4.0
|
||||
# qmid: bugs.core_4743
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
from firebird.qa import db_factory, python_act, Action, user_factory, User
|
||||
|
||||
# version: 4.0
|
||||
# resources: None
|
||||
@ -49,32 +49,32 @@ db_1 = db_factory(charset='UTF8', sql_dialect=3, init=init_script_1)
|
||||
|
||||
# test_script_1
|
||||
#---
|
||||
#
|
||||
#
|
||||
# import os
|
||||
# import io
|
||||
# #import codecs
|
||||
# import subprocess
|
||||
# import time
|
||||
#
|
||||
#
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def flush_and_close( file_handle ):
|
||||
# # https://docs.python.org/2/library/os.html#os.fsync
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
|
||||
# global os
|
||||
#
|
||||
#
|
||||
# file_handle.flush()
|
||||
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
|
||||
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
|
||||
# os.fsync(file_handle.fileno())
|
||||
# file_handle.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def cleanup( f_names_list ):
|
||||
# global os
|
||||
# for i in range(len( f_names_list )):
|
||||
@ -86,23 +86,23 @@ db_1 = db_factory(charset='UTF8', sql_dialect=3, init=init_script_1)
|
||||
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
|
||||
# print('type(f_names_list[i])=',type(f_names_list[i]))
|
||||
# del_name = None
|
||||
#
|
||||
#
|
||||
# if del_name and os.path.isfile( del_name ):
|
||||
# os.remove( del_name )
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# sql_txt=''' set bail on;
|
||||
# set names utf8;
|
||||
# connect '%(dsn)s' user '%(user_name)s' password '%(user_password)s';
|
||||
#
|
||||
#
|
||||
# create or alter user "Вася Пупкин" password '123' using plugin Srp;
|
||||
# create role "Старший дворник";
|
||||
# commit;
|
||||
#
|
||||
#
|
||||
# grant "Старший дворник" to "Вася Пупкин";
|
||||
# commit;
|
||||
#
|
||||
#
|
||||
# create table "Документы"(id int primary key, pid int references "Документы");
|
||||
# create exception "НЕ_число" 'Ваша строка не может быть преобразована в число.';
|
||||
# create sequence "ИД_документа";
|
||||
@ -116,7 +116,7 @@ db_1 = db_factory(charset='UTF8', sql_dialect=3, init=init_script_1)
|
||||
# return 0;
|
||||
# end
|
||||
# ^
|
||||
#
|
||||
#
|
||||
# create or alter package "Утилиты" as
|
||||
# begin
|
||||
# procedure pg_sp_worker;
|
||||
@ -131,7 +131,7 @@ db_1 = db_factory(charset='UTF8', sql_dialect=3, init=init_script_1)
|
||||
# ^
|
||||
# set term ;^
|
||||
# commit;
|
||||
#
|
||||
#
|
||||
# create or alter view v_current_privileges as
|
||||
# select
|
||||
# g.rdb$user as who_is_granted
|
||||
@ -166,7 +166,7 @@ db_1 = db_factory(charset='UTF8', sql_dialect=3, init=init_script_1)
|
||||
# from rdb$user_privileges g
|
||||
# where g.rdb$user in( current_user, current_role )
|
||||
# group by 1,2,3;
|
||||
#
|
||||
#
|
||||
# grant select on v_current_privileges to "Старший дворник";
|
||||
# grant select,insert,update,delete,references on "Документы" to "Старший дворник";
|
||||
# grant usage on exception "НЕ_число" to "Старший дворник";
|
||||
@ -178,7 +178,7 @@ db_1 = db_factory(charset='UTF8', sql_dialect=3, init=init_script_1)
|
||||
# grant alter any table to "Старший дворник";
|
||||
# grant drop any table to "Старший дворник";
|
||||
# commit;
|
||||
#
|
||||
#
|
||||
# /*
|
||||
# DO NOT try to use ISQL for connecting using a non-ascii user name! It will fail with:
|
||||
# =====
|
||||
@ -191,26 +191,26 @@ db_1 = db_factory(charset='UTF8', sql_dialect=3, init=init_script_1)
|
||||
# Instead, FDB connect() method must be used for this.
|
||||
# */
|
||||
# ''' % dict(globals(), **locals())
|
||||
#
|
||||
#
|
||||
# f_ddl_sql = open( os.path.join(context['temp_directory'], 'tmp_4743_utf8_ddl.sql'), 'w' )
|
||||
# f_ddl_sql.write( sql_txt )
|
||||
# flush_and_close( f_ddl_sql )
|
||||
#
|
||||
#
|
||||
# f_ddl_log = open( os.path.splitext(f_ddl_sql.name)[0]+'.log', 'w')
|
||||
# subprocess.call( [ context['isql_path'], '-q', '-i', f_ddl_sql.name ],
|
||||
# stdout = f_ddl_log,
|
||||
# stderr = subprocess.STDOUT
|
||||
# )
|
||||
# flush_and_close( f_ddl_log )
|
||||
#
|
||||
#
|
||||
# with io.open(f_ddl_log.name, 'r', encoding='utf8' ) as f:
|
||||
# result_log = f.readlines()
|
||||
#
|
||||
#
|
||||
# for i in result_log:
|
||||
# print( i.encode('utf8') ) # do not miss '.encode()' here, otherwise get: "ordinal not in range(128)"
|
||||
#
|
||||
#
|
||||
# f_run_log = io.open( os.path.join(context['temp_directory'], 'tmp_4743_utf8_run.log'), 'w', encoding = 'utf8' )
|
||||
#
|
||||
#
|
||||
# con = fdb.connect(dsn = dsn, user = "Вася Пупкин", password = '123', role = 'Старший дворник', charset = 'utf8', utf8params = True)
|
||||
# cur = con.cursor()
|
||||
# cur.execute('select m.mon$user,m.mon$role from mon$attachments m where m.mon$attachment_id = current_connection')
|
||||
@ -218,20 +218,20 @@ db_1 = db_factory(charset='UTF8', sql_dialect=3, init=init_script_1)
|
||||
# for r in cur:
|
||||
# for i in range(0,len(col)):
|
||||
# f_run_log.write( ' '.join((col[i][0],':',r[i], '\\n')) )
|
||||
#
|
||||
#
|
||||
# cur.execute('select v.* from v_current_privileges v')
|
||||
# col = cur.description
|
||||
# for r in cur:
|
||||
# for i in range(0,len(col)):
|
||||
# if 'privilege:' not in col[i][0] or 'privilege:' in col[i][0] and r[i] == 'YES':
|
||||
# f_run_log.write( ' '.join((col[i][0],':',r[i], '\\n')) )
|
||||
#
|
||||
#
|
||||
# flush_and_close( f_run_log )
|
||||
#
|
||||
#
|
||||
# # Check that privileges actually work for current (non-ascii) user / role:
|
||||
# #####################################
|
||||
# # All following actions must not raise any exception:
|
||||
#
|
||||
#
|
||||
# '''
|
||||
# ### DEFERRED ###
|
||||
# Got exception on Linux:
|
||||
@ -242,19 +242,19 @@ db_1 = db_factory(charset='UTF8', sql_dialect=3, init=init_script_1)
|
||||
# - "Документы"
|
||||
# -104
|
||||
# 335544569
|
||||
#
|
||||
#
|
||||
# con.execute_immediate('insert into "Документы"(id) values(gen_id("ИД_документа",1))')
|
||||
# cur.callproc('"Хранимка"')
|
||||
# cur.execute('select "СтрВЧисло"(?) from rdb$database', (123,))
|
||||
# for r in cur:
|
||||
# pass
|
||||
#
|
||||
#
|
||||
# cur.callproc('"Утилиты".pg_sp_worker')
|
||||
# '''
|
||||
#
|
||||
#
|
||||
# cur.close()
|
||||
# con.close()
|
||||
#
|
||||
#
|
||||
# # Generate SQL script for DROP non-ascii user.
|
||||
# ##############################################
|
||||
# sql_txt='''
|
||||
@ -268,41 +268,123 @@ db_1 = db_factory(charset='UTF8', sql_dialect=3, init=init_script_1)
|
||||
# commit;
|
||||
# select count(*) non_ascii_user_after_drop from sec$users where sec$user_name ='Вася Пупкин';
|
||||
# ''' % dict(globals(), **locals())
|
||||
#
|
||||
#
|
||||
# f_drop_sql = open( os.path.join(context['temp_directory'], 'tmp_4743_utf8_drop.sql'), 'w' )
|
||||
# f_drop_sql.write( sql_txt )
|
||||
# flush_and_close( f_drop_sql )
|
||||
#
|
||||
#
|
||||
# f_drop_log = open( os.path.splitext(f_drop_sql.name)[0]+'.log', 'w')
|
||||
# subprocess.call( [ context['isql_path'], '-q', '-i', f_drop_sql.name ],
|
||||
# stdout = f_drop_log,
|
||||
# stderr = subprocess.STDOUT
|
||||
# )
|
||||
# flush_and_close( f_drop_log )
|
||||
#
|
||||
#
|
||||
# with io.open(f_run_log.name, 'r', encoding='utf8' ) as f:
|
||||
# result_in_utf8 = f.readlines()
|
||||
#
|
||||
#
|
||||
# for i in result_in_utf8:
|
||||
# print( i.encode('utf8') )
|
||||
#
|
||||
#
|
||||
# with open(f_drop_log.name,'r') as f:
|
||||
# for line in f:
|
||||
# print(line)
|
||||
#
|
||||
#
|
||||
# # cleanup:
|
||||
# ###########
|
||||
# time.sleep(2)
|
||||
#
|
||||
# # DO NOT use here: cleanup( (f_ddl_sql, f_ddl_log, f_drop_sql, f_drop_log, f_run_log) ) --
|
||||
#
|
||||
# # DO NOT use here: cleanup( (f_ddl_sql, f_ddl_log, f_drop_sql, f_drop_log, f_run_log) ) --
|
||||
# # Unrecognized type of element: <closed file 'C:\\FBTESTING\\qa\\fbt-repo\\tmp\\tmp_4743_utf8_run.log', mode 'wb' at 0x0000000005A20780> - can not be treated as file.
|
||||
# # type(f_names_list[i])= <type 'instance'>Traceback (most recent call last):
|
||||
#
|
||||
#
|
||||
# cleanup( [i.name for i in (f_ddl_sql, f_ddl_log, f_drop_sql, f_drop_log, f_run_log)] )
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
ddl_script_1 = """
|
||||
grant "Старший дворник" to "Вася Пупкин";
|
||||
commit;
|
||||
|
||||
create table "Документы"(id int primary key, pid int references "Документы");
|
||||
create exception "НЕ_число" 'Ваша строка не может быть преобразована в число.';
|
||||
create sequence "ИД_документа";
|
||||
set term ^;
|
||||
create procedure "Хранимка" as
|
||||
begin
|
||||
end
|
||||
^
|
||||
create function "СтрВЧисло"(a_text varchar(100)) returns int as
|
||||
begin
|
||||
return 0;
|
||||
end
|
||||
^
|
||||
|
||||
create or alter package "Утилиты" as
|
||||
begin
|
||||
procedure pg_sp_worker;
|
||||
end
|
||||
^
|
||||
recreate package body "Утилиты" as
|
||||
begin
|
||||
procedure pg_sp_worker as
|
||||
begin
|
||||
end
|
||||
end
|
||||
^
|
||||
set term ;^
|
||||
commit;
|
||||
|
||||
create or alter view v_current_privileges as
|
||||
select
|
||||
g.rdb$user as who_is_granted
|
||||
,g.rdb$relation_name as obj_name
|
||||
,decode( g.rdb$object_type
|
||||
,0,'table'
|
||||
,1,'view'
|
||||
,2,'trigger'
|
||||
,5,'procedure'
|
||||
,7,'exception'
|
||||
,9,'domain'
|
||||
,11,'charset'
|
||||
,13,'role'
|
||||
,14,'generator'
|
||||
,15,'function'
|
||||
,16,'blob filt'
|
||||
,18,'package'
|
||||
,22,'systable'
|
||||
,cast(g.rdb$object_type as varchar(50))
|
||||
) as obj_type
|
||||
,max(iif(g.rdb$privilege='S','YES',' ')) as "privilege:select"
|
||||
,max(iif(g.rdb$privilege='I','YES',' ')) as "privilege:insert"
|
||||
,max(iif(g.rdb$privilege='U','YES',' ')) as "privilege:update"
|
||||
,max(iif(g.rdb$privilege='D','YES',' ')) as "privilege:delete"
|
||||
,max(iif(g.rdb$privilege='G','YES',' ')) as "privilege:usage"
|
||||
,max(iif(g.rdb$privilege='X','YES',' ')) as "privilege:exec"
|
||||
,max(iif(g.rdb$privilege='R','YES',' ')) as "privilege:refer"
|
||||
,max(iif(g.rdb$privilege='C','YES',' ')) as "privilege:create"
|
||||
,max(iif(g.rdb$privilege='L','YES',' ')) as "privilege:alter"
|
||||
,max(iif(g.rdb$privilege='O','YES',' ')) as "privilege:drop"
|
||||
,max(iif(g.rdb$privilege='M','YES',' ')) as "privilege:member"
|
||||
from rdb$user_privileges g
|
||||
where g.rdb$user in( current_user, current_role )
|
||||
group by 1,2,3;
|
||||
|
||||
grant select on v_current_privileges to "Старший дворник";
|
||||
grant select,insert,update,delete,references on "Документы" to "Старший дворник";
|
||||
grant usage on exception "НЕ_число" to "Старший дворник";
|
||||
grant usage on sequence "ИД_документа" to "Старший дворник";
|
||||
grant execute on procedure "Хранимка" to "Старший дворник";
|
||||
grant execute on function "СтрВЧисло" to "Старший дворник";
|
||||
grant execute on package "Утилиты" to "Старший дворник";
|
||||
grant create table to "Старший дворник";
|
||||
grant alter any table to "Старший дворник";
|
||||
grant drop any table to "Старший дворник";
|
||||
commit;
|
||||
"""
|
||||
|
||||
expected_stdout_1 = """
|
||||
MON$USER : Вася Пупкин
|
||||
@ -358,14 +440,31 @@ expected_stdout_1 = """
|
||||
OBJ_NAME : Хранимка
|
||||
OBJ_TYPE : procedure
|
||||
privilege:exec : YES
|
||||
"""
|
||||
|
||||
NON_ASCII_USER_BEFORE_DROP 1
|
||||
NON_ASCII_USER_AFTER_DROP 0
|
||||
"""
|
||||
non_ascii_user = user_factory(name='"Вася Пупкин"', password='123', encoding='utf8')
|
||||
|
||||
@pytest.mark.version('>=4.0')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
|
||||
|
||||
def test_1(act_1: Action, non_ascii_user: User, capsys):
|
||||
with act_1.test_role('"Старший дворник"', charset='utf8'):
|
||||
act_1.isql(switches=['-b', '-q'], input=ddl_script_1)
|
||||
print(act_1.stdout)
|
||||
with act_1.db.connect(user=non_ascii_user.name, password=non_ascii_user.password,
|
||||
role='"Старший дворник"') as con:
|
||||
cur = con.cursor()
|
||||
cur.execute('select m.mon$user,m.mon$role from mon$attachments m where m.mon$attachment_id = current_connection')
|
||||
col = cur.description
|
||||
for r in cur:
|
||||
for i in range(len(col)):
|
||||
print(' '.join((col[i][0], ':', r[i])))
|
||||
cur.execute("select v.* from v_current_privileges v")
|
||||
col = cur.description
|
||||
for r in cur:
|
||||
for i in range(len(col)):
|
||||
if 'privilege:' not in col[i][0] or 'privilege:' in col[i][0] and r[i] == 'YES':
|
||||
print(' '.join((col[i][0], ':', r[i])))
|
||||
#
|
||||
act_1.reset()
|
||||
act_1.expected_stdout = expected_stdout_1
|
||||
act_1.stdout = capsys.readouterr().out
|
||||
assert act_1.clean_stdout == act_1.clean_expected_stdout
|
||||
|
@ -2,25 +2,26 @@
|
||||
#
|
||||
# id: bugs.core_4754
|
||||
# title: Bugcheck 167 (invalid SEND request) while working with GTT from several attachments (using EXECUTE STATEMENT ... ON EXTERNAL and different roles)
|
||||
# decription:
|
||||
# decription:
|
||||
# We start two transactions and do DML ('insert ...') in 1st and DDL ('create index ...') in second.
|
||||
# Then we issue COMMIT in DDL transaction. This should raise 'lock conflict ... table "<name>" is in use'.
|
||||
# On 2.5.4 this COMMIT in DDL did NOT raise any error and subsequent reconnect and DML raised bugcheck.
|
||||
#
|
||||
#
|
||||
# Checked on 2.5.6.27013, 3.0.1.32524 - works OK.
|
||||
# Bugcheck can be reproduced on 2.5.4.26856.
|
||||
#
|
||||
# PS. Old ticket name:
|
||||
# Manipulations with GTT from several attachments (using ES/EDS and different roles) leads to:
|
||||
#
|
||||
# PS. Old ticket name:
|
||||
# Manipulations with GTT from several attachments (using ES/EDS and different roles) leads to:
|
||||
# "internal Firebird consistency check (invalid SEND request (167), file: JrdStatement.cpp line: 325)"
|
||||
#
|
||||
#
|
||||
# tracker_id: CORE-4754
|
||||
# min_versions: ['2.5.5']
|
||||
# versions: 2.5.5
|
||||
# qmid:
|
||||
# qmid:
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
from firebird.qa import db_factory, python_act, Action
|
||||
from firebird.driver import TPB, Isolation, TraAccessMode, DatabaseError
|
||||
|
||||
# version: 2.5.5
|
||||
# resources: None
|
||||
@ -30,26 +31,26 @@ substitutions_1 = []
|
||||
init_script_1 = """
|
||||
recreate global temporary table gtt_session(x int, y int) on commit preserve rows;
|
||||
commit;
|
||||
"""
|
||||
"""
|
||||
|
||||
db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
|
||||
# test_script_1
|
||||
#---
|
||||
#
|
||||
#
|
||||
# import os
|
||||
# import subprocess
|
||||
# import fdb
|
||||
#
|
||||
#
|
||||
# db_conn.close()
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
# #db_file="$(DATABASE_LOCATION)bugs.core_4754.fdb"
|
||||
#
|
||||
#
|
||||
# customTPB = ( [ fdb.isc_tpb_read_committed, fdb.isc_tpb_rec_version, fdb.isc_tpb_nowait ] )
|
||||
# con1 = fdb.connect(dsn=dsn)
|
||||
# #print(con1.firebird_version)
|
||||
#
|
||||
#
|
||||
# tx1a=con1.trans( default_tpb = customTPB )
|
||||
# tx1b=con1.trans( default_tpb = customTPB )
|
||||
# cur1a = tx1a.cursor()
|
||||
@ -64,12 +65,12 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# print('Error-1:')
|
||||
# msg = e[0]
|
||||
# print(msg)
|
||||
#
|
||||
#
|
||||
# con1.close()
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# # ---------------------------------------------------------------
|
||||
#
|
||||
#
|
||||
# if not msg.split():
|
||||
# # 2.5.5: control should NOT pass here at all!
|
||||
# con2 = fdb.connect(dsn=dsn)
|
||||
@ -93,22 +94,48 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# print('Error-2:')
|
||||
# print(e[0])
|
||||
# con2.close()
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
expected_stdout_1 = """
|
||||
Error-1:
|
||||
Error while commiting transaction:
|
||||
- SQLCODE: -901
|
||||
- lock conflict on no wait transaction
|
||||
- unsuccessful metadata update
|
||||
- object TABLE "GTT_SESSION" is in use
|
||||
"""
|
||||
Error-1:
|
||||
lock conflict on no wait transaction
|
||||
-unsuccessful metadata update
|
||||
-object TABLE "GTT_SESSION" is in use
|
||||
"""
|
||||
|
||||
@pytest.mark.version('>=2.5.5')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
|
||||
|
||||
def test_1(act_1: Action, capsys):
|
||||
custom_tpb = TPB(isolation=Isolation.READ_COMMITTED_RECORD_VERSION,
|
||||
access_mode=TraAccessMode.WRITE, lock_timeout=0).get_buffer()
|
||||
with act_1.db.connect() as con1:
|
||||
tx1a = con1.transaction_manager(custom_tpb)
|
||||
cur1a = tx1a.cursor()
|
||||
tx1b = con1.transaction_manager(custom_tpb)
|
||||
cur1b = tx1b.cursor()
|
||||
try:
|
||||
cur1a.execute("insert into gtt_session select rand()*10, rand()*10 from rdb$types")
|
||||
cur1b.execute("create index gtt_session_x_y on gtt_session computed by (x+y)")
|
||||
tx1b.commit() # WI-V2.5.6.27013 issues here: lock conflict on no wait transaction unsuccessful metadata update object TABLE "GTT_SESSION" is in use -901 335544345
|
||||
tx1a.commit()
|
||||
except DatabaseError as e:
|
||||
print('Error-1:')
|
||||
msg = e.args[0]
|
||||
print(msg)
|
||||
#
|
||||
if not msg.split():
|
||||
# 2.5.5: control should NOT pass here at all!
|
||||
with act_1.db.connect() as con2:
|
||||
try:
|
||||
tx2a = con2.transaction_manager()
|
||||
cur2a = tx2a.cursor()
|
||||
cur2a.execute("insert into gtt_session select rand()*11, rand()*11 from rdb$types")
|
||||
except DatabaseError as e:
|
||||
print('Error-2:')
|
||||
print(e.args[0])
|
||||
#
|
||||
act_1.expected_stdout = expected_stdout_1
|
||||
act_1.stdout = capsys.readouterr().out
|
||||
assert act_1.clean_stdout == act_1.clean_expected_stdout
|
||||
|
@ -2,22 +2,26 @@
|
||||
#
|
||||
# id: bugs.core_4760
|
||||
# title: Can not create user with non-ascii (multi-byte) characters in it's name
|
||||
# decription:
|
||||
# decription:
|
||||
# User with name Εὐκλείδης ('Euclid') encoded in UTF8 is used in this test.
|
||||
#
|
||||
#
|
||||
# NB-1: connection is made using FDB connect() method: ISQL (and also its CONNECT statement) has
|
||||
# problems when trying to use non-ascii names.
|
||||
# NB-2: separate SQL script is generated for DROP this user.
|
||||
#
|
||||
#
|
||||
# Checked on: 4.0.0.2416 (Windows and Linux)
|
||||
#
|
||||
# tracker_id:
|
||||
#
|
||||
# [pcisar] 24.11.2021
|
||||
# 1. This problem is covered by test for core_4743 as side effect
|
||||
# 2. For the sake of completeness, it was reimplemented by simply using
|
||||
# user_factory fixture.
|
||||
# tracker_id:
|
||||
# min_versions: ['4.0']
|
||||
# versions: 4.0
|
||||
# qmid: bugs.core_4760
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
from firebird.qa import db_factory, python_act, Action, user_factory, User
|
||||
|
||||
# version: 4.0
|
||||
# resources: None
|
||||
@ -30,31 +34,31 @@ db_1 = db_factory(charset='UTF8', sql_dialect=3, init=init_script_1)
|
||||
|
||||
# test_script_1
|
||||
#---
|
||||
#
|
||||
#
|
||||
# import os
|
||||
# import io
|
||||
# import subprocess
|
||||
# import time
|
||||
#
|
||||
#
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def flush_and_close( file_handle ):
|
||||
# # https://docs.python.org/2/library/os.html#os.fsync
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
|
||||
# global os
|
||||
#
|
||||
#
|
||||
# file_handle.flush()
|
||||
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
|
||||
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
|
||||
# os.fsync(file_handle.fileno())
|
||||
# file_handle.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def cleanup( f_names_list ):
|
||||
# global os
|
||||
# for i in range(len( f_names_list )):
|
||||
@ -66,38 +70,38 @@ db_1 = db_factory(charset='UTF8', sql_dialect=3, init=init_script_1)
|
||||
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
|
||||
# print('type(f_names_list[i])=',type(f_names_list[i]))
|
||||
# del_name = None
|
||||
#
|
||||
#
|
||||
# if del_name and os.path.isfile( del_name ):
|
||||
# os.remove( del_name )
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# sql_txt=''' set bail on;
|
||||
# set names utf8;
|
||||
# connect '%(dsn)s' user '%(user_name)s' password '%(user_password)s';
|
||||
#
|
||||
#
|
||||
# create or alter user "Εὐκλείδης" password '123' using plugin Srp;
|
||||
# ''' % dict(globals(), **locals())
|
||||
#
|
||||
#
|
||||
# f_ddl_sql = open( os.path.join(context['temp_directory'], 'tmp_4760_utf8_ddl.sql'), 'w' )
|
||||
# f_ddl_sql.write( sql_txt )
|
||||
# flush_and_close( f_ddl_sql )
|
||||
#
|
||||
#
|
||||
# f_ddl_log = open( os.path.splitext(f_ddl_sql.name)[0]+'.log', 'w')
|
||||
# subprocess.call( [ context['isql_path'], '-q', '-i', f_ddl_sql.name ],
|
||||
# stdout = f_ddl_log,
|
||||
# stderr = subprocess.STDOUT
|
||||
# )
|
||||
# flush_and_close( f_ddl_log )
|
||||
#
|
||||
#
|
||||
# with io.open(f_ddl_log.name, 'r', encoding='utf8' ) as f:
|
||||
# result_log = f.readlines()
|
||||
#
|
||||
#
|
||||
# for i in result_log:
|
||||
# print( i.encode('utf8') ) # do not miss '.encode()' here, otherwise get: "ordinal not in range(128)"
|
||||
#
|
||||
#
|
||||
# f_run_log = io.open( os.path.join(context['temp_directory'], 'tmp_4760_utf8_run.log'), 'w', encoding = 'utf8' )
|
||||
#
|
||||
#
|
||||
# con = fdb.connect(dsn = dsn, user = "Εὐκλείδης", password = '123', charset = 'utf8', utf8params = True)
|
||||
# cur = con.cursor()
|
||||
# cur.execute('select m.mon$user from mon$attachments m where m.mon$attachment_id = current_connection')
|
||||
@ -105,11 +109,11 @@ db_1 = db_factory(charset='UTF8', sql_dialect=3, init=init_script_1)
|
||||
# for r in cur:
|
||||
# for i in range(0,len(col)):
|
||||
# f_run_log.write( ' '.join((col[i][0],':',r[i], '\\n')) )
|
||||
#
|
||||
#
|
||||
# cur.close()
|
||||
# con.close()
|
||||
# flush_and_close(f_run_log)
|
||||
#
|
||||
#
|
||||
# # Generate SQL script for DROP non-ascii user.
|
||||
# ##############################################
|
||||
# sql_txt='''
|
||||
@ -123,52 +127,55 @@ db_1 = db_factory(charset='UTF8', sql_dialect=3, init=init_script_1)
|
||||
# commit;
|
||||
# select count(*) non_ascii_user_after_drop from sec$users where sec$user_name ='Εὐκλείδης';
|
||||
# ''' % dict(globals(), **locals())
|
||||
#
|
||||
#
|
||||
# f_drop_sql = open( os.path.join(context['temp_directory'], 'tmp_4760_utf8_drop.sql'), 'w' )
|
||||
# f_drop_sql.write( sql_txt )
|
||||
# flush_and_close( f_drop_sql )
|
||||
#
|
||||
#
|
||||
# f_drop_log = open( os.path.splitext(f_drop_sql.name)[0]+'.log', 'w')
|
||||
# subprocess.call( [ context['isql_path'], '-q', '-i', f_drop_sql.name ],
|
||||
# stdout = f_drop_log,
|
||||
# stderr = subprocess.STDOUT
|
||||
# )
|
||||
# flush_and_close( f_drop_log )
|
||||
#
|
||||
#
|
||||
# with io.open(f_run_log.name, 'r', encoding='utf8' ) as f:
|
||||
# result_in_utf8 = f.readlines()
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# for i in result_in_utf8:
|
||||
# print( i.encode('utf8') )
|
||||
#
|
||||
#
|
||||
# with open(f_drop_log.name,'r') as f:
|
||||
# for line in f:
|
||||
# print(line)
|
||||
#
|
||||
#
|
||||
# # cleanup:
|
||||
# ###########
|
||||
# time.sleep(2)
|
||||
#
|
||||
# # DO NOT use here: cleanup( (f_ddl_sql, f_ddl_log, f_drop_sql, f_drop_log, f_run_log) ) --
|
||||
#
|
||||
# # DO NOT use here: cleanup( (f_ddl_sql, f_ddl_log, f_drop_sql, f_drop_log, f_run_log) ) --
|
||||
# # Unrecognized type of element: <closed file 'C:\\FBTESTING\\qa\\fbt-repo\\tmp\\tmp_4760_utf8_run.log', mode 'wb' at 0x0000000005A20780> - can not be treated as file.
|
||||
# # type(f_names_list[i])= <type 'instance'>Traceback (most recent call last):
|
||||
#
|
||||
#
|
||||
# cleanup( [i.name for i in (f_ddl_sql, f_ddl_log, f_drop_sql, f_drop_log, f_run_log)] )
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)

expected_stdout_1 = """
MON$USER : Εὐκλείδης
NON_ASCII_USER_BEFORE_DROP 1
NON_ASCII_USER_AFTER_DROP 0
"""
act_1 = python_act('db_1', substitutions=substitutions_1)

#expected_stdout_1 = """
#MON$USER : Εὐκλείδης
#NON_ASCII_USER_BEFORE_DROP 1
#NON_ASCII_USER_AFTER_DROP 0
#"""

non_ascii_user = user_factory(name='"Εὐκλείδης"', password='123', encoding='utf8')

@pytest.mark.version('>=4.0')
@pytest.mark.xfail
def test_1(db_1):
    pytest.fail("Test not IMPLEMENTED")
def test_1(act_1: Action, non_ascii_user: User):
    with act_1.db.connect(user=non_ascii_user.name, password=non_ascii_user.password) as con:
        pass

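The ported test above only proves that a connection as the non-ascii user can be opened. A fuller check would ask the server who it is connected as, which is what the retired expected output kept above was comparing against. The sketch below is only an illustration, not part of the commit: the function name is hypothetical and it assumes the UTF8 connection charset implied by the db_1 fixture (created with charset='UTF8').

def test_1_mon_user_sketch(act_1: Action, non_ascii_user: User):
    # Hypothetical extension of test_1: verify MON$ATTACHMENTS reports the non-ascii login.
    with act_1.db.connect(user=non_ascii_user.name, password=non_ascii_user.password) as con:
        c = con.cursor()
        c.execute('select mon$user from mon$attachments where mon$attachment_id = current_connection')
        # MON$USER is a CHAR column, so trailing blanks are stripped before comparing.
        assert c.fetchone()[0].strip() == 'Εὐκλείδης'
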
@ -2,26 +2,26 @@
|
||||
#
|
||||
# id: bugs.core_4766
|
||||
# title: AV when trying to manage users list using EXECUTE STATEMENT on behalf of non-sysdba user which has RDB$ADMIN role
|
||||
# decription:
|
||||
# decription:
|
||||
# 05.01.2020. Refactored in order to make its code more flexible because 3.0 and 4.0 have significant differences in stdout/stderr.
|
||||
# Common SQL code was stored in fbt-repo
|
||||
# iles\\core_4766.sql with embedding variable names there from here:
|
||||
# %(current_auth_plugin)s, %(dsn)s et al.
|
||||
#
|
||||
#
|
||||
# Content of this file is stored in variable 'sql_text' and this variable is changed using common Python rule for substitutions:
|
||||
# sql_text % dict(globals(), **locals()
|
||||
#
|
||||
#
|
||||
# Then we save this variable to temporarily .sql script and run it.
|
||||
# This action is done for two possible values of auth plugin (see var. current_auth_plugin): Srp and Legacy_UserManager.
|
||||
#
|
||||
#
|
||||
# As result, we have to compare only expected* data for different FB major versions.
|
||||
#
|
||||
#
|
||||
# ::: NB :::
|
||||
# Only Legacy_UserManager is checked for FB 4.0. Srp has totally different behaviour, at least for 4.0.0.1714.
|
||||
# Sent letter to dimitr and alex, 05.01.2020 22:00.
|
||||
#
|
||||
#
|
||||
# Crash is reproduced on WI-T3.0.0.31374 Firebird 3.0 Beta 1 (build 24-nov-2014).
|
||||
#
|
||||
#
|
||||
# Checked on:
|
||||
# 4.0.0.1743 SS: 1.452s.
|
||||
# 4.0.0.1740 SC: 1.870s.
|
||||
@ -29,20 +29,28 @@
|
||||
# 3.0.6.33236 SS: 1.220s.
|
||||
# 3.0.5.33221 SC: 5.416s.
|
||||
# 3.0.5.33084 CS: 2.891s.
|
||||
#
|
||||
#
|
||||
#
|
||||
# [pcisar] 24.11.2021
|
||||
# On FB v4.0.0.2496 this test fails as provided script file raises error in
|
||||
# execute block->execute statement->create/drop user:
|
||||
# Statement failed, SQLSTATE = 28000
|
||||
# Your user name and password are not defined. Ask your database administrator to set up a Firebird login.
|
||||
# -At block line: 3, col: 9
|
||||
# Variant for FB 3 not yet implemented.
|
||||
#
|
||||
# tracker_id: CORE-4766
|
||||
# min_versions: ['3.0']
|
||||
# versions: 3.0, 4.0
|
||||
# qmid: None
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
from firebird.qa import db_factory, python_act, Action
|
||||
|
||||
# version: 3.0
|
||||
# resources: None
|
||||
|
||||
substitutions_1 = [('TCPv.*', 'TCP'), ('.*After line \\d+.*', ''), ('find/delete', 'delete'), ('TABLE PLG\\$.*', 'TABLE PLG')]
|
||||
substitutions_1 = [('TCPv.*', 'TCP'), ('.*After line \\d+.*', ''),
|
||||
('find/delete', 'delete'), ('TABLE PLG\\$.*', 'TABLE PLG')]
|
||||
|
||||
init_script_1 = """"""
|
||||
|
||||
@ -50,65 +58,66 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
|
||||
# test_script_1
|
||||
#---
|
||||
#
|
||||
#
|
||||
# import os
|
||||
# import sys
|
||||
# import subprocess
|
||||
# from fdb import services
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
#
|
||||
#
|
||||
# this_db = db_conn.database_name
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def flush_and_close(file_handle):
|
||||
# # https://docs.python.org/2/library/os.html#os.fsync
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
|
||||
# global os
|
||||
#
|
||||
#
|
||||
# file_handle.flush()
|
||||
# if file_handle.mode not in ('r', 'rb'):
|
||||
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
|
||||
# os.fsync(file_handle.fileno())
|
||||
# file_handle.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def cleanup( f_names_list ):
|
||||
# global os
|
||||
# for i in range(len( f_names_list )):
|
||||
# if os.path.isfile( f_names_list[i]):
|
||||
# os.remove( f_names_list[i] )
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# f_sql=open(os.path.join(context['files_location'],'core_4766.sql'),'r')
|
||||
# sql_text = f_sql.read()
|
||||
# f_sql.close()
|
||||
#
|
||||
#
|
||||
# for current_auth_plugin in ('Srp', 'Legacy_UserManager'):
|
||||
# f_sql_chk = open( os.path.join(context['temp_directory'],'tmp_core_4766.' + current_auth_plugin[:3] + '.sql'), 'w')
|
||||
# f_sql_chk.write( sql_text % dict(globals(), **locals()) )
|
||||
# flush_and_close( f_sql_chk )
|
||||
#
|
||||
#
|
||||
# f_sql_log = open( '.'.join( (os.path.splitext( f_sql_chk.name )[0], 'log') ), 'w')
|
||||
# subprocess.call( [ context['isql_path'], '-q', '-i', f_sql_chk.name ], stdout = f_sql_log, stderr = subprocess.STDOUT)
|
||||
# flush_and_close( f_sql_log )
|
||||
#
|
||||
#
|
||||
# with open( f_sql_log.name,'r') as f:
|
||||
# for line in f:
|
||||
# if line.strip():
|
||||
# print( current_auth_plugin[:3] + ': ' + line )
|
||||
#
|
||||
#
|
||||
# cleanup( (f_sql_log.name, f_sql_chk.name) )
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)

act_1 = python_act('db_1', substitutions=substitutions_1)

expected_stdout_1 = """
Srp: BOSS_SEC_NAME TMP_4766_BOSS
@ -141,18 +150,19 @@ expected_stdout_1 = """
Leg: find/delete record error
Leg: -no permission for DELETE access to TABLE PLG$VIEW_USERS

"""
"""

@pytest.mark.version('>=3.0')
@pytest.mark.version('>=3.0,<4')
@pytest.mark.xfail
def test_1(db_1):
def test_1(act_1: Action):
    pytest.fail("Test not IMPLEMENTED")

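The FB 3.x branch above is still the bare pytest.fail stub, in line with the note that the variant for FB 3 is not yet implemented. Purely as a sketch (the function name is hypothetical and the Srp output would have to be re-verified before enabling anything like this), a port could follow test_2 below and walk both plugins the way the retired fbtest code did:

def test_1_sketch(act_1: Action, capsys):
    # Hypothetical FB 3.x port, mirroring test_2 further down but checking both plugins.
    sql_text = (act_1.vars['files'] / 'core_4766.sql').read_text()
    subs = {'dsn': act_1.db.dsn, 'user_name': act_1.db.user,
            'user_password': act_1.db.password, 'current_auth_plugin': None}
    for current_auth_plugin in ['Srp', 'Legacy_UserManager']:
        subs['current_auth_plugin'] = current_auth_plugin
        act_1.isql(switches=['-q'], input=sql_text % subs)
        for line in act_1.stdout.splitlines():
            if line.strip():
                print(current_auth_plugin[:3] + ': ' + line)
        act_1.reset()
    act_1.expected_stdout = expected_stdout_1
    act_1.stdout = capsys.readouterr().out
    assert act_1.clean_stdout == act_1.clean_expected_stdout
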
||||
# version: 4.0
# resources: None

substitutions_2 = [('TCPv.*', 'TCP'), ('.*After line \\d+.*', ''), ('find/delete', 'delete'), ('TABLE PLG\\$.*', 'TABLE PLG')]
substitutions_2 = [('TCPv.*', 'TCP'), ('.*After line \\d+.*', ''),
                   ('find/delete', 'delete'), ('TABLE PLG\\$.*', 'TABLE PLG')]

init_script_2 = """"""

@ -160,67 +170,67 @@ db_2 = db_factory(sql_dialect=3, init=init_script_2)
|
||||
|
||||
# test_script_2
|
||||
#---
|
||||
#
|
||||
#
|
||||
# import os
|
||||
# import sys
|
||||
# import subprocess
|
||||
# from fdb import services
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
#
|
||||
#
|
||||
# this_db = db_conn.database_name
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def flush_and_close(file_handle):
|
||||
# # https://docs.python.org/2/library/os.html#os.fsync
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
|
||||
# global os
|
||||
#
|
||||
#
|
||||
# file_handle.flush()
|
||||
# if file_handle.mode not in ('r', 'rb'):
|
||||
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
|
||||
# os.fsync(file_handle.fileno())
|
||||
# file_handle.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def cleanup( f_names_list ):
|
||||
# global os
|
||||
# for i in range(len( f_names_list )):
|
||||
# if os.path.isfile( f_names_list[i]):
|
||||
# os.remove( f_names_list[i] )
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# f_sql=open(os.path.join(context['files_location'],'core_4766.sql'),'r')
|
||||
# sql_text = f_sql.read()
|
||||
# f_sql.close()
|
||||
#
|
||||
#
|
||||
# # ::: NB :::
|
||||
# # Only Legacy_UserManager is checked for FB 4.0. Srp has totally different behaviour, at least for 4.0.0.1714.
|
||||
# # Sent letter to dimitr and alex, 05.01.2020 22:00.
|
||||
#
|
||||
#
|
||||
# for current_auth_plugin in ('Legacy_UserManager',):
|
||||
# f_sql_chk = open( os.path.join(context['temp_directory'],'tmp_core_4766.' + current_auth_plugin[:3] + '.sql'), 'w')
|
||||
# f_sql_chk.write( sql_text % dict(globals(), **locals()) )
|
||||
# flush_and_close( f_sql_chk )
|
||||
#
|
||||
#
|
||||
# f_sql_log = open( '.'.join( (os.path.splitext( f_sql_chk.name )[0], 'log') ), 'w')
|
||||
# subprocess.call( [ context['isql_path'], '-q', '-i', f_sql_chk.name ], stdout = f_sql_log, stderr = subprocess.STDOUT)
|
||||
# flush_and_close( f_sql_log )
|
||||
#
|
||||
#
|
||||
# with open( f_sql_log.name,'r') as f:
|
||||
# for line in f:
|
||||
# if line.strip():
|
||||
# print( current_auth_plugin[:3] + ': ' + line )
|
||||
#
|
||||
#
|
||||
# cleanup( (f_sql_log.name, f_sql_chk.name) )
|
||||
#
|
||||
#
|
||||
# '''
|
||||
# 'substitutions':[
|
||||
# ('TCPv.*', 'TCP'),
|
||||
@ -229,10 +239,11 @@ db_2 = db_factory(sql_dialect=3, init=init_script_2)
|
||||
# ('TABLE PLG\\$.*', 'TABLE PLG')
|
||||
# ]
|
||||
# '''
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#---
#act_2 = python_act('db_2', test_script_2, substitutions=substitutions_2)

act_2 = python_act('db_2', substitutions=substitutions_2)

expected_stdout_2 = """
Leg: BOSS_SEC_NAME TMP_4766_BOSS
@ -252,11 +263,24 @@ expected_stdout_2 = """
Leg: find/delete record error
Leg: -no permission for DELETE access to TABLE PLG$VIEW_USERS
Leg: -Effective user is TMP_4766_BOSS
"""
"""

@pytest.mark.version('>=4.0')
@pytest.mark.xfail
def test_2(db_2):
    pytest.fail("Test not IMPLEMENTED")


def test_2(act_2: Action, capsys):
    sql_text = (act_2.vars['files'] / 'core_4766.sql').read_text()
    # ::: NB :::
    # Only Legacy_UserManager is checked for FB 4.0. Srp has totally different behaviour, at least for 4.0.0.1714.
    # Sent letter to dimitr and alex, 05.01.2020 22:00.
    subs = {'dsn': act_2.db.dsn, 'user_name': act_2.db.user, 'user_password': act_2.db.password,
            'current_auth_plugin': None,}
    for current_auth_plugin in ['Legacy_UserManager', ]:
        subs['current_auth_plugin'] = current_auth_plugin
        act_2.isql(switches=['-q'], input=sql_text % subs)
        for line in act_2.stdout.splitlines():
            if line.strip():
                print(current_auth_plugin[:3] + ': ' + line)
    #
    act_2.reset()
    act_2.expected_stdout = expected_stdout_2
    act_2.stdout = capsys.readouterr().out
    assert act_2.clean_stdout == act_2.clean_expected_stdout

@ -2,18 +2,18 @@
|
||||
#
|
||||
# id: bugs.core_4768
|
||||
# title: CREATE USER ... TAGS ( argument_1 = 'value1', ..., argument_N = 'valueN' ) - wrong results of statement when there are many arguments
|
||||
# decription:
|
||||
# decription:
|
||||
# Checked on:
|
||||
# FB30SS, build 3.0.4.32985: OK, 7.672s.
|
||||
# FB40SS, build 4.0.0.1000: OK, 13.094s.
|
||||
#
|
||||
#
|
||||
# tracker_id: CORE-4768
|
||||
# min_versions: ['3.0']
|
||||
# versions: 3.0
|
||||
# qmid: None
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
from firebird.qa import db_factory, python_act, Action, user_factory, User
|
||||
|
||||
# version: 3.0
|
||||
# resources: None
|
||||
@ -26,52 +26,52 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
|
||||
# test_script_1
|
||||
#---
|
||||
#
|
||||
#
|
||||
# import os
|
||||
# import subprocess
|
||||
# import time
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def flush_and_close(file_handle):
|
||||
# # https://docs.python.org/2/library/os.html#os.fsync
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
|
||||
# global os
|
||||
#
|
||||
#
|
||||
# file_handle.flush()
|
||||
# if file_handle.mode not in ('r', 'rb'):
|
||||
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
|
||||
# os.fsync(file_handle.fileno())
|
||||
# file_handle.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def cleanup( f_names_list ):
|
||||
# global os
|
||||
# for i in range(len( f_names_list )):
|
||||
# if os.path.isfile( f_names_list[i]):
|
||||
# os.remove( f_names_list[i] )
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# TAGS_COUNT=100000
|
||||
# #^^^^^^^^^^^^^^^^--- TAGS COUNT: THRESHOLD
|
||||
#
|
||||
#
|
||||
# f_chk_sql=open( os.path.join(context['temp_directory'],'tmp_tags_4768.sql'), 'w')
|
||||
# f_chk_sql.write('set bail on;\\n')
|
||||
# f_chk_sql.write("create or alter user tmp$c4768_1 password '123' using plugin Srp tags (\\n")
|
||||
#
|
||||
#
|
||||
# for i in range(0,TAGS_COUNT):
|
||||
# f_chk_sql.write( (' ,' if i>0 else ' ') + 'arg_'+str(i)+"='val"+str(i)+"'\\n" )
|
||||
# f_chk_sql.write(');\\n')
|
||||
# f_chk_sql.write('commit;\\n')
|
||||
#
|
||||
#
|
||||
# sql_check='''set count on;
|
||||
# set list on;
|
||||
# select
|
||||
@ -91,54 +91,81 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# drop user tmp$c4768_1 using plugin Srp;
|
||||
# commit;
|
||||
# '''
|
||||
#
|
||||
#
|
||||
# f_chk_sql.write(sql_check)
|
||||
# f_chk_sql.write('commit;\\n')
|
||||
#
|
||||
#
|
||||
# flush_and_close( f_chk_sql )
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# f_tags_log = open( os.path.join(context['temp_directory'],'tmp_tags_4768.log'), 'w')
|
||||
# f_tags_err = open( os.path.join(context['temp_directory'],'tmp_tags_4768.err'), 'w')
|
||||
#
|
||||
#
|
||||
# subprocess.call( [ context['isql_path'], dsn, "-q", "-i", f_chk_sql.name],
|
||||
# stdout = f_tags_log,
|
||||
# stderr = f_tags_err
|
||||
# )
|
||||
#
|
||||
#
|
||||
# flush_and_close( f_tags_log )
|
||||
# flush_and_close( f_tags_err )
|
||||
#
|
||||
#
|
||||
# with open(f_tags_log.name) as f:
|
||||
# for line in f:
|
||||
# print(line)
|
||||
#
|
||||
#
|
||||
# with open(f_tags_err.name) as f:
|
||||
# for line in f:
|
||||
# print('UNEXPECTED STDERR: ' + line)
|
||||
#
|
||||
#
|
||||
# # CLEANUP:
|
||||
# ##########
|
||||
# time.sleep(1)
|
||||
# cleanup( [i.name for i in ( f_chk_sql, f_tags_log, f_tags_err ) ] )
|
||||
#
|
||||
#
|
||||
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)

act_1 = python_act('db_1', substitutions=substitutions_1)

expected_stdout_1 = """
USR_NAME TMP$C4768_1
SEC_PLUGIN Srp
USR_NAME TMP$C4768_1
SEC_PLUGIN Srp
TAG_MIN ARG_0
VAL_MIN VAL0
TAG_MAX ARG_99999
VAL_MAX VAL99999
TAG_CNT 100000
Records affected: 1
"""
"""

test_user_1 = user_factory(name='tmp$c4768_1', password='123')

@pytest.mark.version('>=3.0')
@pytest.mark.xfail
def test_1(db_1):
    pytest.fail("Test not IMPLEMENTED")


def test_1(act_1: Action, test_user_1: User):
    TAGS_COUNT = 100000
    check_lines = ['set bail on;',
                   "create or alter user tmp$c4768_1 password '123' using plugin Srp tags ("]
    for i in range(TAGS_COUNT):
        check_lines.append(f"{' ,' if i > 0 else ' '}arg_{i}='val{i}'")
        #check_lines.append((' ,' if i>0 else ' ') + 'arg_' + str(i) + "='val" + str(i) + "'")
    check_lines.append(');')
    check_lines.append('commit;')
    test_script = '\n'.join(check_lines) + """
    set count on;
    set list on;
    select
        u.sec$user_name as usr_name
        ,u.sec$plugin sec_plugin
        ,upper(min( a.sec$key )) tag_min
        ,upper(min( a.sec$value )) val_min
        ,upper(max( a.sec$key )) tag_max
        ,upper(max( a.sec$value )) val_max
        ,count(*) tag_cnt
    from sec$users u
    left join sec$user_attributes a on u.sec$user_name = a.sec$user_name
    where u.sec$user_name = upper('tmp$c4768_1')
    group by 1,2 ;
    commit;
    """
    act_1.expected_stdout = expected_stdout_1
    act_1.isql(switches=['-q'], input=test_script)
    assert act_1.clean_stdout == act_1.clean_expected_stdout

@ -2,26 +2,26 @@
|
||||
#
|
||||
# id: bugs.core_4821
|
||||
# title: grant create database to ROLE does not work: "no permission for CREATE access to DATABASE ..."
|
||||
# decription:
|
||||
# decription:
|
||||
# ::: NOTE ::: Test requires that databases.conf contains 'RemoteAccess = true' for security.db
|
||||
# This line is added there by scenario '<QA_HOME>
|
||||
# undaily.bat' every time when check new FB snapshot.
|
||||
#
|
||||
#
|
||||
# We make connection to security.db and create there user and role, with granting this role to user.
|
||||
# (so, ROLE also is stored in the security.db).
|
||||
# Then we grant privilege to create DB to just created role.
|
||||
# After this we check that:
|
||||
# * user can NOT create database if 'ROLE <this_role>' missed in the 'create database' statement;
|
||||
# * user _CAN_ create database when issues: create database ... user ... password ... ROLE <this_role>
|
||||
#
|
||||
#
|
||||
# Then we create backup of just created database and check that:
|
||||
# * user can NOT restore it until specifying '-ROLE <this_role>' in gbak command line;
|
||||
# * user _CAN_ restore database when issues: gbak -rep ... -user ... -pas ... -ROLE <this_role>;
|
||||
# * the same pair of checks is performed for fbsvcmgr invocation;
|
||||
#
|
||||
#
|
||||
# Finally, we connect again to security.db and drop <this_role>. After this restore must not be allowed
|
||||
# even when "ROLE <this_role>" is specified in commands gbak or fbsvcmgr.
|
||||
#
|
||||
#
|
||||
# Checked on:
|
||||
# 4.0.0.1713 SS: 5.250s.
|
||||
# 4.0.0.1346 SC: 5.734s.
|
||||
@ -29,23 +29,41 @@
|
||||
# 3.0.5.33218 SS: 4.313s.
|
||||
# 3.0.5.33084 SC: 4.031s.
|
||||
# 3.0.5.33212 CS: 7.672s.
|
||||
#
|
||||
#
|
||||
# 13.04.2021. Adapted for run both on Windows and Linux. Checked on:
|
||||
# Windows: 3.0.8.33445, 4.0.0.2416
|
||||
# Linux: 3.0.8.33426, 4.0.0.2416
|
||||
#
|
||||
#
|
||||
# [pcisar] 24.11.2021
|
||||
# This test FAILs because it's not possible to grant create database to role tmp$db_creator
|
||||
# although it exists (in test database):
|
||||
# Statement failed, SQLSTATE = 28000
|
||||
# unsuccessful metadata update
|
||||
# -GRANT failed
|
||||
# -SQL role TMP$DB_CREATOR does not exist
|
||||
# -in security database
|
||||
#
|
||||
# tracker_id: CORE-4821
|
||||
# min_versions: ['3.0.5']
|
||||
# versions: 3.0.5
|
||||
# qmid: None
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from firebird.qa import db_factory, python_act, Action, temp_file, user_factory, User
|
||||
|
||||
# version: 3.0.5
|
||||
# resources: None
|
||||
|
||||
substitutions_1 = [('no permission for CREATE access to DATABASE .*', 'no permission for CREATE access to DATABASE'), ('gbak: ERROR:failed to create database .*', 'gbak: ERROR:failed to create database'), ('-failed to create database .*', '-failed to create database'), ('CREATED_DB_NAME .*', 'CREATED_DB_NAME'), ('FDB_RESTORED_USING_GBAK .*', 'FDB_RESTORED_USING_GBAK'), ('FDB_RESTORED_USING_SMGR .*', 'FDB_RESTORED_USING_SMGR')]
|
||||
substitutions_1 = [('no permission for CREATE access to DATABASE .*',
|
||||
'no permission for CREATE access to DATABASE'),
|
||||
('gbak: ERROR:failed to create database .*',
|
||||
'gbak: ERROR:failed to create database'),
|
||||
('-failed to create database .*', '-failed to create database'),
|
||||
('CREATED_DB_NAME .*', 'CREATED_DB_NAME'),
|
||||
('FDB_RESTORED_USING_GBAK .*', 'FDB_RESTORED_USING_GBAK'),
|
||||
('FDB_RESTORED_USING_SMGR .*', 'FDB_RESTORED_USING_SMGR')]
|
||||
|
||||
init_script_1 = """"""
|
||||
|
||||
@ -53,32 +71,32 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
|
||||
# test_script_1
|
||||
#---
|
||||
#
|
||||
#
|
||||
# import os
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
#
|
||||
#
|
||||
# this_db = db_conn.database_name
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def flush_and_close( file_handle ):
|
||||
# # https://docs.python.org/2/library/os.html#os.fsync
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
|
||||
# global os
|
||||
#
|
||||
#
|
||||
# file_handle.flush()
|
||||
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
|
||||
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
|
||||
# os.fsync(file_handle.fileno())
|
||||
# file_handle.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def cleanup( f_names_list ):
|
||||
# global os
|
||||
# for i in range(len( f_names_list )):
|
||||
@ -90,22 +108,22 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
|
||||
# print('type(f_names_list[i])=',type(f_names_list[i]))
|
||||
# del_name = None
|
||||
#
|
||||
#
|
||||
# if del_name and os.path.isfile( del_name ):
|
||||
# os.remove( del_name )
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# fdb_test1 = os.path.join(context['temp_directory'],'tmp_4821_test1.fdb')
|
||||
# fdb_test2 = os.path.join(context['temp_directory'],'tmp_4821_test2.fdb')
|
||||
# fbk_name = os.path.join(context['temp_directory'],'tmp_4821_test2.fbk')
|
||||
# fdb_restored_using_gbak = os.path.join(context['temp_directory'],'tmp_4821_restored.gbak.fdb')
|
||||
# fdb_restored_using_smgr = os.path.join(context['temp_directory'],'tmp_4821_restored.smgr.fdb')
|
||||
# fdb_restored_unexpected = os.path.join(context['temp_directory'],'tmp_4821_restored.wo_grant.fdb')
|
||||
#
|
||||
#
|
||||
# f_list = [fdb_test1, fdb_test2, fbk_name, fdb_restored_using_gbak, fdb_restored_using_smgr, fdb_restored_unexpected]
|
||||
# cleanup( f_list )
|
||||
#
|
||||
#
|
||||
# sql_init='''
|
||||
# set wng off;
|
||||
# set bail on;
|
||||
@ -126,9 +144,9 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# commit;
|
||||
# --show grants;
|
||||
# '''
|
||||
#
|
||||
#
|
||||
# runProgram('isql', [ 'localhost:security.db' ], sql_init)
|
||||
#
|
||||
#
|
||||
# sql_test='''
|
||||
# create database 'localhost:%(fdb_test1)s' user tmp$c4821_boss password '123';
|
||||
# select mon$database_name as created_db_name from mon$database;
|
||||
@ -137,60 +155,62 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# set list on;
|
||||
# select mon$database_name as created_db_name from mon$database;
|
||||
# ''' % locals()
|
||||
#
|
||||
#
|
||||
# runProgram('isql', [ '-q' ], sql_test)
|
||||
#
|
||||
#
|
||||
# # Must PASS because user tmp$c4821_boss is the owner of this DB:
|
||||
# runProgram('gbak', [ '-b', 'localhost:' + fdb_test2, fbk_name, '-user', 'tmp$c4821_boss', '-pas', '123'] )
|
||||
#
|
||||
#
|
||||
# # Must FAIL because we do not specify role, with text:
|
||||
# # "gbak: ERROR:no permission for CREATE access to DATABASE ... / gbak: ERROR:failed to create database localhost:tmp_4821_restored.gbak.fdb"
|
||||
# runProgram('gbak', [ '-rep', fbk_name, 'localhost:'+fdb_restored_using_gbak, '-user', 'tmp$c4821_boss', '-pas', '123'] )
|
||||
#
|
||||
#
|
||||
# # Must PASS because we DO specify role:
|
||||
# runProgram('gbak', [ '-rep', fbk_name, 'localhost:'+fdb_restored_using_gbak, '-user', 'tmp$c4821_boss', '-pas', '123', '-role', 'tmp$db_creator'] )
|
||||
#
|
||||
#
|
||||
# runProgram('isql', [ 'localhost:'+fdb_restored_using_gbak ], 'set list on; select mon$database_name as fdb_restored_using_gbak from mon$database;')
|
||||
#
|
||||
#
|
||||
# # Must FAIL because we do not specify role, with text: "no permission for CREATE access to DATABASE ... / failed to create database tmp_4821_restored.smgr.fdb"
|
||||
# runProgram('fbsvcmgr', [ 'localhost:service_mgr', 'user', 'tmp$c4821_boss', 'password', '123', 'action_restore', 'res_replace', 'bkp_file', fbk_name, 'dbname', fdb_restored_using_smgr ] )
|
||||
#
|
||||
#
|
||||
# # Must PASS because we DO specify role:
|
||||
# runProgram('fbsvcmgr', [ 'localhost:service_mgr', 'user', 'tmp$c4821_boss', 'password', '123', 'role', 'tmp$db_creator', 'action_restore', 'res_replace', 'bkp_file', fbk_name, 'dbname', fdb_restored_using_smgr ] )
|
||||
#
|
||||
#
|
||||
# runProgram('isql', [ 'localhost:'+fdb_restored_using_smgr ], 'set list on; select mon$database_name as fdb_restored_using_smgr from mon$database;')
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# # ATTENTION: now we DROP role and check that after this action user tmp$c4821_boss will not be allowed to restore DB
|
||||
# #############################
|
||||
# runProgram('isql', [ 'localhost:security.db' ], 'drop role tmp$db_creator; commit;')
|
||||
#
|
||||
#
|
||||
# # Must FAIL because there is no more role which was allowed to create DB
|
||||
# # Error text: "gbak: ERROR:no permission for CREATE ... TMP_4821_RESTORED.WO_GRANT.FDB"
|
||||
# runProgram('gbak', [ '-rep', fbk_name, 'localhost:'+fdb_restored_unexpected, '-user', 'tmp$c4821_boss', '-pas', '123', '-role', 'tmp$db_creator'] )
|
||||
#
|
||||
#
|
||||
# # Must FAIL (because of the same reason), with text: "no permission for CREATE access to DATABASE TMP_4821_RESTORED.WO_GRANT.FDB"
|
||||
# runProgram('fbsvcmgr', [ 'localhost:service_mgr', 'user', 'tmp$c4821_boss', 'password', '123', 'role', 'tmp$db_creator', 'action_restore', 'res_replace', 'bkp_file', fbk_name, 'dbname', fdb_restored_unexpected ] )
|
||||
#
|
||||
#
|
||||
# # CLEANUP:
|
||||
# ##########
|
||||
#
|
||||
#
|
||||
# sql_fini='''
|
||||
# drop user tmp$c4821_boss using plugin Srp;
|
||||
# commit;
|
||||
# '''
|
||||
# runProgram('isql', [ 'localhost:security.db' ], sql_fini)
|
||||
#
|
||||
#
|
||||
# cleanup( f_list )
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
expected_stdout_1 = """
|
||||
CREATED_DB_NAME /opt/scripts/qa-rundaily/dbg-repo/tmp/tmp_4821_test2.fdb
|
||||
FDB_RESTORED_USING_GBAK /opt/scripts/qa-rundaily/dbg-repo/tmp/tmp_4821_restored.gbak.fdb
|
||||
FDB_RESTORED_USING_SMGR /opt/scripts/qa-rundaily/dbg-repo/tmp/tmp_4821_restored.smgr.fdb
|
||||
"""
|
||||
"""
|
||||
|
||||
expected_stderr_1 = """
|
||||
Statement failed, SQLSTATE = 28000
|
||||
no permission for CREATE access to DATABASE
|
||||
@ -205,12 +225,87 @@ expected_stderr_1 = """
|
||||
gbak:Exiting before completion due to errors
|
||||
no permission for CREATE access to DATABASE
|
||||
-failed to create database
|
||||
-Exiting before completion due to errors
|
||||
"""
|
||||
-Exiting before completion due to errors
|
||||
"""
|
||||
|
||||
test_user_1 = user_factory(name='tmp$c4821_boss', password='123')
|
||||
|
||||
fdb_test1 = temp_file('tmp_4821_test1.fdb')
|
||||
fdb_test2 = temp_file('tmp_4821_test2.fdb')
|
||||
fbk_name = temp_file('tmp_4821_test2.fbk')
|
||||
fdb_restored_using_gbak = temp_file('tmp_4821_restored.gbak.fdb')
|
||||
fdb_restored_using_smgr = temp_file('tmp_4821_restored.smgr.fdb')
|
||||
fdb_restored_unexpected = temp_file('tmp_4821_restored.no_grant.fdb')
|
||||
|
||||
@pytest.mark.version('>=3.0.5')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
|
||||
|
||||
def test_1(act_1: Action, test_user_1: User, capsys, fdb_test1: Path, fdb_test2: Path,
|
||||
fbk_name: Path, fdb_restored_using_gbak: Path, fdb_restored_using_smgr: Path,
|
||||
fdb_restored_unexpected: Path):
|
||||
with act_1.test_role('tmp$db_creator'):
|
||||
with act_1.db.connect() as con:
|
||||
#con.execute_immediate('revoke all on all from tmp$c4821_boss')
|
||||
con.execute_immediate('grant create database to role tmp$db_creator')
|
||||
con.execute_immediate('grant tmp$db_creator to tmp$c4821_boss')
|
||||
con.commit()
|
||||
#
|
||||
sql_test = f"""
|
||||
create database 'localhost:{fdb_test1}' user tmp$c4821_boss password '123';
|
||||
select mon$database_name as created_db_name from mon$database;
|
||||
rollback;
|
||||
create database 'localhost:{fdb_test2}' user tmp$c4821_boss password '123' role tmp$db_creator;
|
||||
set list on;
|
||||
select mon$database_name as created_db_name from mon$database;
|
||||
"""
|
||||
act_1.isql(switches=['-q'], input=sql_test)
|
||||
print(act_1.stdout)
|
||||
# Must PASS because user tmp$c4821_boss is the owner of this DB:
|
||||
act_1.reset()
|
||||
act_1.gbak(switches=['-b', '-user', 'tmp$c4821_boss', '-pas', '123',
|
||||
f'localhost:{fdb_test2}', str(fbk_name)],
|
||||
credentials=False)
|
||||
# Must FAIL because we do not specify role, with text:
|
||||
# "gbak: ERROR:no permission for CREATE access to DATABASE ... / gbak: ERROR:failed to create database localhost:tmp_4821_restored.gbak.fdb"
|
||||
act_1.reset()
|
||||
act_1.expected_stderr = "Must FAIL because we do not specify role"
|
||||
act_1.gbak(switches=['-rep', '-user', 'tmp$c4821_boss', '-pas', '123',
|
||||
str(fbk_name), f'localhost:{fdb_restored_using_gbak}'],
|
||||
credentials=False)
|
||||
print(act_1.stderr, file=sys.stderr)
|
||||
# Must PASS because we DO specify role:
|
||||
act_1.reset()
|
||||
act_1.gbak(switches=['-rep', '-user', 'tmp$c4821_boss', '-pas', '123', '-role', 'tmp$db_creator',
|
||||
str(fbk_name), f'localhost:{fdb_restored_using_gbak}'],
|
||||
credentials=False)
|
||||
#
|
||||
act_1.reset()
|
||||
act_1.isql(switches=['-user', act_1.db.user, '-password', act_1.db.password,
|
||||
f'localhost:{fdb_restored_using_gbak}'], connect_db=False,
|
||||
input='set list on; select mon$database_name as fdb_restored_using_gbak from mon$database;')
|
||||
print(act_1.stdout)
|
||||
# Must FAIL because we do not specify role, with text: "no permission for CREATE access to DATABASE ... / failed to create database tmp_4821_restored.smgr.fdb"
|
||||
act_1.reset()
|
||||
act_1.expected_stderr = "Must FAIL because we do not specify role"
|
||||
act_1.svcmgr(switches=['localhost:service_mgr', 'user', 'tmp$c4821_boss', 'password', '123',
|
||||
'action_restore', 'res_replace', 'bkp_file', str(fbk_name),
|
||||
'dbname', str(fdb_restored_using_smgr)], connect_mngr=False)
|
||||
print(act_1.stderr, file=sys.stderr)
|
||||
# Must PASS because we DO specify role:
|
||||
act_1.reset()
|
||||
act_1.svcmgr(switches=['localhost:service_mgr', 'user', 'tmp$c4821_boss', 'password', '123',
|
||||
'role', 'tmp$db_creator', 'action_restore', 'res_replace',
|
||||
'bkp_file', str(fbk_name), 'dbname', str(fdb_restored_using_smgr)],
|
||||
connect_mngr=False)
|
||||
#
|
||||
act_1.reset()
|
||||
act_1.isql(switches=['-user', act_1.db.user, '-password', act_1.db.password,
f'localhost:{fdb_restored_using_smgr}'], connect_db=False,
input='set list on; select mon$database_name as fdb_restored_using_smgr from mon$database;')
|
||||
print(act_1.stdout)
|
||||
#
|
||||
act_1.reset()
|
||||
act_1.expected_stdout = expected_stdout_1
|
||||
act_1.expected_stderr = expected_stderr_1
|
||||
act_1.stdout = capsys.readouterr().out
|
||||
act_1.stderr = capsys.readouterr().err
|
||||
assert act_1.clean_stdout == act_1.clean_expected_stdout
|
||||
assert act_1.clean_stderr == act_1.clean_expected_stderr
|
||||
|
@ -2,7 +2,7 @@
|
||||
#
|
||||
# id: bugs.core_4840
|
||||
# title: Transactions with isc_tpb_autocommit can hang the server
|
||||
# decription:
|
||||
# decription:
|
||||
# Test creates trivial SP and comment for it (in UTF8 with multi-byte characters) in single Tx with autocommit = true.
|
||||
# Confirmed:
|
||||
# 1. Crash on LI-V3.0.0.32173, WI-V3.0.0.32239
|
||||
@ -12,20 +12,21 @@
|
||||
# internal Firebird consistency check (wrong record version (185), file: vio.cpp line: 3823)
|
||||
# ===
|
||||
# (after this FB instance is unavaliable; no any *other* database can be attached).
|
||||
#
|
||||
#
|
||||
# 2. Normal work on LI-V3.0.0.32239 Rev: 62705
|
||||
# Example of TPB creation can be found here:
|
||||
# http://www.firebirdsql.org/file/documentation/drivers_documentation/python/3.3.0/beyond-python-db-api.html
|
||||
# List of allowed TPB parameters: C:\\Python27\\Lib\\site-packages
|
||||
# db\\ibase.py
|
||||
#
|
||||
# db\\ibase.py
|
||||
#
|
||||
# tracker_id: CORE-4840
|
||||
# min_versions: ['2.5.0']
|
||||
# versions: 2.5
|
||||
# qmid: None
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
from firebird.qa import db_factory, python_act, Action
|
||||
from firebird.driver import TPB, Isolation
|
||||
|
||||
# version: 2.5
|
||||
# resources: None
|
||||
@ -39,20 +40,20 @@ db_1 = db_factory(charset='UTF8', sql_dialect=3, init=init_script_1)
|
||||
# test_script_1
|
||||
#---
|
||||
# import fdb
|
||||
#
|
||||
#
|
||||
# db_conn.close()
|
||||
# conn = kdb.connect(dsn=dsn.encode(),user='SYSDBA',password='masterkey')
|
||||
#
|
||||
#
|
||||
# customTPB = ( [ fdb.isc_tpb_read_committed, fdb.isc_tpb_rec_version, fdb.isc_tpb_autocommit ] )
|
||||
#
|
||||
#
|
||||
# conn.begin( tpb=customTPB )
|
||||
# xcur=conn.cursor()
|
||||
#
|
||||
#
|
||||
# sp_ddl="create or alter procedure sp_test (in1 integer, in2 float)" + "returns (" + " out1 varchar(20)," + " out2 double precision, " + " out3 integer" + ") as " + " declare x integer;" + "begin" + " out1 = 'out string';" + " out2 = 2 * in2;" + " out3 = 3 * in1;" + " suspend; " + "end"
|
||||
#
|
||||
#
|
||||
# sp_rem="comment on procedure sp_test is 'Det är inte alla präster, som göra prästgården eller dess torp till änkesäte åt orkeslösa trotjänarinnor, hade biskopen en gång sagt om Helenas prost.'"
|
||||
#
|
||||
# ### 27.01.2016: fdb 1.5 will produce
|
||||
#
|
||||
# ### 27.01.2016: fdb 1.5 will produce
|
||||
# ### ===
|
||||
# ### ReferenceError:
|
||||
# ### weakly-referenced object no longer exists
|
||||
@ -62,30 +63,56 @@ db_1 = db_factory(charset='UTF8', sql_dialect=3, init=init_script_1)
|
||||
# ###xcur.execute(xcmd)
|
||||
# ###xcmd=xcur.prep( sp_rem )
|
||||
# ###xcur.execute(xcmd)
|
||||
#
|
||||
#
|
||||
# # For confirmation of ticket issues it is enough to do just this:
|
||||
#
|
||||
#
|
||||
# xcur.execute(sp_ddl)
|
||||
# xcur.execute(sp_rem)
|
||||
#
|
||||
#
|
||||
# conn.commit()
|
||||
# conn.close()
|
||||
#
|
||||
#
|
||||
# sqltxt="set list on; select * from sp_test(12345, 3.1415926);"
|
||||
# runProgram('isql',[dsn,'-user',user_name,'-pas',user_password,'-q'],sqltxt)
|
||||
#
|
||||
#
|
||||
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)

act_1 = python_act('db_1', substitutions=substitutions_1)

expected_stdout_1 = """
OUT1 out string
OUT2 6.283185005187988
OUT3 37035
"""
"""

@pytest.mark.version('>=2.5')
@pytest.mark.xfail
def test_1(db_1):
    pytest.fail("Test not IMPLEMENTED")


def test_1(act_1: Action):
    with act_1.db.connect() as con:
        custom_tpb = TPB(isolation=Isolation.READ_COMMITTED_RECORD_VERSION, auto_commit=True).get_buffer()
        con.begin(custom_tpb)
        c = con.cursor()
        sp_ddl = """
            create or alter procedure sp_test (in1 integer, in2 float)
            returns (
                out1 varchar(20),
                out2 double precision,
                out3 integer
            ) as
                declare x integer;
            begin
                out1 = 'out string';
                out2 = 2 * in2;
                out3 = 3 * in1;
                suspend;
            end
        """
        #
        sp_rem = "comment on procedure sp_test is 'Det är inte alla präster, som göra prästgården eller dess torp till änkesäte åt orkeslösa trotjänarinnor, hade biskopen en gång sagt om Helenas prost.'"
        # For confirmation of ticket issues it is enough to do just this:
        c.execute(sp_ddl)
        c.execute(sp_rem)
        con.commit()
    #
    act_1.expected_stdout = expected_stdout_1
    act_1.isql(switches=['-q'], input='set list on; select * from sp_test(12345, 3.1415926);')
    assert act_1.clean_stdout == act_1.clean_expected_stdout

@ -2,39 +2,46 @@
|
||||
#
|
||||
# id: bugs.core_4855
|
||||
# title: Online validation during DML activity in other connection leads to message "Error while trying to read from file" and "page in use during flush (210), file: cch.cpp line: 2672"
|
||||
# decription:
|
||||
# decription:
|
||||
# In order to check ticket issues this test does following:
|
||||
# 1. Change on test database FW to OFF - this will increase DML performance.
|
||||
# 2. Create two tables: one for inserting rows ('test') and second to serve as 'signal' to stop DML:
|
||||
# inserts will be done until 2nd table (with name = 'stop') is empty.
|
||||
# 3. Adds to SQL script DML (execute block) that will be used in ISQL session #1: it inserts rows
|
||||
# into 'test' and checks after each inserting table 'stop' on presence there at least one row.
|
||||
# 3. Adds to SQL script DML (execute block) that will be used in ISQL session #1: it inserts rows
|
||||
# into 'test' and checks after each inserting table 'stop' on presence there at least one row.
|
||||
# This 'stop-row' will be inserted into 'stop' table in another ISQL session.
|
||||
# 4. Launches ISQL connection #1 in separate (child) process. This ISQL will start 'heavy DML'.
|
||||
# 5. Proceeds several online-validation actions by using synchronous call of 'FBSVCMGR action_validate'.
|
||||
# Adds result of each validation to log.
|
||||
# 6. Launches ISQL connection #2 in separate (child) process and give to this session trivial job:
|
||||
# 'insert into stop(id) values(1); commit;'. This will cause ISQL session #1 to stop its activity
|
||||
# 'insert into stop(id) values(1); commit;'. This will cause ISQL session #1 to stop its activity
|
||||
# because it runs in transaction with TIL = RC.
|
||||
# 7. Outputs log of ISQL-1 and online validation results.
|
||||
#
|
||||
#
|
||||
# Tested on WI-V3.0.0.32008, SS, SC and CS. Result: OK.
|
||||
# Updated since WI-V3.0.0.32064: reduced key-length from extremely huge (1000) to "normal" value 36,
|
||||
# Updated since WI-V3.0.0.32064: reduced key-length from extremely huge (1000) to "normal" value 36,
|
||||
# otherwise get 'Statement failed, SQLSTATE = 54000 / Implementation limit exceeded / -Maximum index level reached'.
|
||||
# (since CORE-4914 was fixed, see: http://sourceforge.net/p/firebird/code/62266 )
|
||||
#
|
||||
#
|
||||
# tracker_id: CORE-4855
|
||||
# min_versions: ['3.0']
|
||||
# versions: 3.0
|
||||
# qmid: None
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
import subprocess
|
||||
import time
|
||||
from pathlib import Path
|
||||
from firebird.qa import db_factory, python_act, Action, temp_file
|
||||
from firebird.driver import DbWriteMode
|
||||
|
||||
# version: 3.0
|
||||
# resources: None
|
||||
|
||||
substitutions_1 = [('[0-9][0-9]:[0-9][0-9]:[0-9][0-9].[0-9][0-9]', ''), ('Relation [0-9]{3,4}', 'Relation'), ('Statement failed, SQLSTATE = HY008', ''), ('operation was cancelled', ''), ('After line .*', '')]
|
||||
substitutions_1 = [('[0-9][0-9]:[0-9][0-9]:[0-9][0-9].[0-9][0-9]', ''),
|
||||
('Relation [0-9]{3,4}', 'Relation'),
|
||||
('Statement failed, SQLSTATE = HY008', ''),
|
||||
('operation was cancelled', ''), ('After line .*', '')]
|
||||
|
||||
init_script_1 = """"""
|
||||
|
||||
@ -42,41 +49,41 @@ db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)
|
||||
|
||||
# test_script_1
|
||||
#---
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# import os
|
||||
# import subprocess
|
||||
# from subprocess import Popen
|
||||
# #import signal
|
||||
# import time
|
||||
# from fdb import services
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
#
|
||||
#
|
||||
# # Obtain engine version:
|
||||
# engine = str(db_conn.engine_version) # convert to text because 'float' object has no attribute 'startswith'
|
||||
# dbname=db_conn.database_name
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def flush_and_close(file_handle):
|
||||
# # https://docs.python.org/2/library/os.html#os.fsync
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
|
||||
# global os
|
||||
#
|
||||
#
|
||||
# file_handle.flush()
|
||||
# if file_handle.mode not in ('r', 'rb'):
|
||||
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
|
||||
# os.fsync(file_handle.fileno())
|
||||
# file_handle.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def cleanup( f_names_list ):
|
||||
# global os
|
||||
# for i in range(len( f_names_list )):
|
||||
@ -87,37 +94,37 @@ db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)
|
||||
# else:
|
||||
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
|
||||
# del_name = None
|
||||
#
|
||||
#
|
||||
# if del_name and os.path.isfile( del_name ):
|
||||
# os.remove( del_name )
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# #####################################################################
|
||||
# # Change database FW to OFF in order to increase speed of insertions and output its header info:
|
||||
#
|
||||
#
|
||||
# fwoff_log=open( os.path.join(context['temp_directory'],'tmp_fw_off_4855.log'), 'w')
|
||||
# subprocess.call([ context['fbsvcmgr_path'], "localhost:service_mgr",
|
||||
# "action_properties",
|
||||
# "prp_write_mode", "prp_wm_async",
|
||||
# "dbname", dbname ],
|
||||
# stdout=fwoff_log, stderr=subprocess.STDOUT)
|
||||
#
|
||||
#
|
||||
# fwoff_log.seek(0,2)
|
||||
# subprocess.call([ context['fbsvcmgr_path'], "localhost:service_mgr",
|
||||
# "action_db_stats",
|
||||
# "dbname", dbname, "sts_hdr_pages"],
|
||||
# stdout=fwoff_log, stderr=subprocess.STDOUT)
|
||||
# flush_and_close( fwoff_log )
|
||||
#
|
||||
#
|
||||
# #####################################################################
|
||||
# # Preparing script for ISQL that will do 'heavy DML':
|
||||
#
|
||||
#
|
||||
# sql_cmd='''
|
||||
# recreate sequence g;
|
||||
# recreate table test(id int, s varchar( 36 ) unique using index test_s_unq);
|
||||
# recreate table stop(id int);
|
||||
# commit;
|
||||
# commit;
|
||||
# set list on;
|
||||
# set transaction read committed;
|
||||
# set term ^;
|
||||
@ -134,34 +141,34 @@ db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)
|
||||
# set term ;^
|
||||
# commit;
|
||||
# '''
|
||||
#
|
||||
#
|
||||
# f_heavy_dml_cmd=open( os.path.join(context['temp_directory'],'tmp_isql_4855.sql'), 'w')
|
||||
# f_heavy_dml_cmd.write(sql_cmd)
|
||||
# flush_and_close( f_heavy_dml_cmd )
|
||||
#
|
||||
#
|
||||
# #####################################################################
|
||||
# # Starting ISQL in separate process with doing 'heavy DML' (bulk-inserts) until table 'stop'
|
||||
# # Starting ISQL in separate process with doing 'heavy DML' (bulk-inserts) until table 'stop'
|
||||
# # remains empty (this table will get one row in separate ISQL session, see below p_stopper):
|
||||
#
|
||||
#
|
||||
# f_heavy_dml_log=open( os.path.join(context['temp_directory'],'tmp_isql_4855.log'), 'w')
|
||||
# p_heavy_dml = Popen([ context['isql_path'] , dsn, "-i", f_heavy_dml_cmd.name ], stdout=f_heavy_dml_log, stderr=subprocess.STDOUT)
|
||||
#
|
||||
# # Here we have to wait for sure that ISQL could establish its connect and starts DML
|
||||
#
|
||||
# # Here we have to wait for sure that ISQL could establish its connect and starts DML
|
||||
# # before we will run online-validation:
|
||||
#
|
||||
#
|
||||
# # 16.03.2016: increased time delay because under some circumstances ISQL could not establish connect
|
||||
# # and this lead validation to start verify table TEST (which was not expected).
|
||||
# # Detected many times on CS/SC.
|
||||
#
|
||||
#
|
||||
# time.sleep(4)
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# #####################################################################
|
||||
# # Doing online-validation.
|
||||
# # Use subprocess.call() with waiting in main thread for it will finish.
|
||||
#
|
||||
# # Use subprocess.call() with waiting in main thread for it will finish.
|
||||
#
|
||||
# val_log=open( os.path.join(context['temp_directory'],'tmp_onval_4855.log'), 'w')
|
||||
#
|
||||
#
|
||||
# val_log.write('Iteration #1:\\n')
|
||||
# val_log.seek(0,2)
|
||||
# subprocess.call( [ context['fbsvcmgr_path'], "localhost:service_mgr",
|
||||
@ -170,11 +177,11 @@ db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)
|
||||
# ],
|
||||
# stdout=val_log, stderr=subprocess.STDOUT
|
||||
# )
|
||||
#
|
||||
#
|
||||
# time.sleep(2)
|
||||
#
|
||||
#
|
||||
# # Iteration #2:
|
||||
#
|
||||
#
|
||||
# val_log.seek(0,2)
|
||||
# val_log.write('\\n\\nIteration #2:\\n')
|
||||
# val_log.seek(0,2)
|
||||
@ -183,43 +190,43 @@ db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)
|
||||
# "dbname",dbname],
|
||||
# stdout=val_log, stderr=subprocess.STDOUT)
|
||||
# flush_and_close( val_log )
|
||||
#
|
||||
#
|
||||
# #####################################################################
|
||||
#
|
||||
#
|
||||
# # Stopping ISQL that is doing now 'heavy DML' (bulk-inserts):
|
||||
#
|
||||
#
|
||||
# f_stopper_cmd=open( os.path.join(context['temp_directory'],'tmp_stop_4855.sql'), 'w')
|
||||
# f_stopper_cmd.write('insert into stop(id) values(1); commit;')
|
||||
# flush_and_close( f_stopper_cmd )
|
||||
# p_stopper = subprocess.call([ context['isql_path'], dsn, "-i", f_stopper_cmd.name])
|
||||
#
|
||||
#
|
||||
# # Stop working ISQL. NB: in rare cases this can lead to:
|
||||
# # + Statement failed, SQLSTATE = HY008
|
||||
# # + operation was cancelled
|
||||
# # + After line ... in file .../tmp_isql_4855.sql
|
||||
#
|
||||
#
|
||||
# p_heavy_dml.terminate()
|
||||
# flush_and_close( f_heavy_dml_log )
|
||||
#
|
||||
#
|
||||
# with open( f_heavy_dml_log.name,'r') as f:
|
||||
# print(f.read())
|
||||
#
|
||||
#
|
||||
# with open( val_log.name,'r') as f:
|
||||
# print(f.read())
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# #####################################################################
|
||||
# # Cleanup:
|
||||
# time.sleep(1)
|
||||
# cleanup( (fwoff_log, val_log, f_heavy_dml_cmd, f_heavy_dml_log, f_stopper_cmd) )
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
expected_stdout_1 = """
|
||||
INSERTED_ROWS OK, LOT OF.
|
||||
Iteration #1:
|
||||
21:16:28.31 Validation started
|
||||
21:16:28.31 Relation 128 (TEST)
|
||||
@ -238,11 +245,63 @@ Iteration #2:
|
||||
21:16:35.09 process pointer page 0 of 1
|
||||
21:16:35.09 Relation 129 (STOP) is ok
|
||||
21:16:35.09 Validation finished
|
||||
"""
|
||||
INSERTED_ROWS OK, LOT OF.
|
||||
"""
|
||||
|
||||
heavy_script_1 = temp_file('heavy_script.sql')
|
||||
heavy_output_1 = temp_file('heavy_script.out')
|
||||
|
||||
@pytest.mark.version('>=3.0')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
|
||||
def test_1(act_1: Action, heavy_script_1: Path, heavy_output_1: Path, capsys):
|
||||
# Change database FW to OFF in order to increase speed of insertions and output its header info
|
||||
with act_1.connect_server() as srv:
|
||||
srv.database.set_write_mode(database=str(act_1.db.db_path), mode=DbWriteMode.ASYNC)
|
||||
# Preparing script for ISQL that will do 'heavy DML'
|
||||
heavy_script_1.write_text("""
|
||||
recreate sequence g;
|
||||
recreate table test(id int, s varchar( 36 ) unique using index test_s_unq);
|
||||
recreate table stop(id int);
|
||||
commit;
|
||||
set list on;
|
||||
set transaction read committed;
|
||||
set term ^;
|
||||
execute block returns( inserted_rows varchar(20) ) as
|
||||
begin
|
||||
while ( not exists(select * from stop) ) do
|
||||
begin
|
||||
insert into test(id, s) values( gen_id(g,1), rpad('', 36, uuid_to_char(gen_uuid())) );
|
||||
end
|
||||
inserted_rows = iif(gen_id(g,0) > 0, 'OK, LOT OF.', 'FAIL: ZERO!');
|
||||
suspend;
|
||||
end
|
||||
^
|
||||
set term ;^
|
||||
commit;
|
||||
""")
|
||||
with open(heavy_output_1, mode='w') as heavy_out:
|
||||
p_heavy_sql = subprocess.Popen([act_1.vars['isql'], '-i', str(heavy_script_1),
|
||||
'-user', act_1.db.user,
|
||||
'-password', act_1.db.password, act_1.db.dsn],
|
||||
stdout=heavy_out, stderr=subprocess.STDOUT)
|
||||
try:
|
||||
time.sleep(4)
|
||||
# Run validation twice
|
||||
with act_1.connect_server() as srv:
|
||||
print('Iteration #1:')
|
||||
srv.database.validate(database=str(act_1.db.db_path), lock_timeout=1,
|
||||
callback=print)
|
||||
print('Iteration #2:')
|
||||
srv.database.validate(database=str(act_1.db.db_path), lock_timeout=1,
|
||||
callback=print)
|
||||
# Stopping ISQL that is doing now 'heavy DML' (bulk-inserts):
|
||||
act_1.isql(switches=[], input='insert into stop(id) values(1); commit;')
|
||||
finally:
|
||||
p_heavy_sql.terminate()
|
||||
#
|
||||
print(heavy_output_1.read_text())
|
||||
# Check
|
||||
act_1.reset()
|
||||
act_1.expected_stdout = expected_stdout_1
|
||||
act_1.stdout = capsys.readouterr().out
|
||||
assert act_1.clean_stdout == act_1.clean_expected_stdout
|
||||
|
||||
|
@ -2,9 +2,9 @@
|
||||
#
|
||||
# id: bugs.core_4864
|
||||
# title: CREATE DATABASE fail with ISQL
|
||||
# decription:
|
||||
# decription:
|
||||
# Test obtains full path to $fb_home via FBSVCMGR info_get_env.
|
||||
# Then it makes copy of file 'databases.conf' that is in $fb_home directory because
|
||||
# Then it makes copy of file 'databases.conf' that is in $fb_home directory because
|
||||
# following lines will be added to that 'databases.conf':
|
||||
# ===
|
||||
# tmp_alias_4864 = ...
|
||||
@ -12,7 +12,7 @@
|
||||
# SecurityDatabase = tmp_alias_4864
|
||||
# }
|
||||
# ===
|
||||
# Then we run ISQL and give to it command to create database which definition
|
||||
# Then we run ISQL and give to it command to create database which definition
|
||||
# should be taken from 'databases.conf', as it was explained in the ticket by Alex:
|
||||
# ===
|
||||
# create database 'tmp_alias_4864' user 'SYSDBA';
|
||||
@ -21,9 +21,9 @@
|
||||
# to be sure that we really got proper result.
|
||||
# .............................................
|
||||
# ::: NB :::
|
||||
# It is impossible to check ability to create new user in new database that was made by such way:
|
||||
# It is impossible to check ability to create new user in new database that was made by such way:
|
||||
# plugin 'Srp' is required that currently is replaced before any test with 'Legacy' one.
|
||||
#
|
||||
#
|
||||
# tracker_id: CORE-4864
|
||||
# min_versions: ['3.0']
|
||||
# versions: 3.0
|
||||
@ -43,36 +43,36 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
|
||||
# test_script_1
|
||||
#---
|
||||
#
|
||||
#
|
||||
# import os
|
||||
# import time
|
||||
# import shutil
|
||||
# import subprocess
|
||||
# from subprocess import Popen
|
||||
# from fdb import services
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
#
|
||||
#
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def flush_and_close(file_handle):
|
||||
# # https://docs.python.org/2/library/os.html#os.fsync
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
|
||||
# global os
|
||||
#
|
||||
#
|
||||
# file_handle.flush()
|
||||
# if file_handle.mode not in ('r', 'rb'):
|
||||
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
|
||||
# os.fsync(file_handle.fileno())
|
||||
# file_handle.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def cleanup( f_names_list ):
|
||||
# global os
|
||||
# for i in range(len( f_names_list )):
|
||||
@ -83,31 +83,31 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# else:
|
||||
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
|
||||
# del_name = None
|
||||
#
|
||||
#
|
||||
# if del_name and os.path.isfile( del_name ):
|
||||
# os.remove( del_name )
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# svc = services.connect(host='localhost')
|
||||
# fb_home=svc.get_home_directory()
|
||||
# svc.close()
|
||||
#
|
||||
#
|
||||
# dbconf = os.path.join(fb_home,'databases.conf')
|
||||
# dbcbak = os.path.join(fb_home,'databases.bak')
|
||||
# shutil.copy2( dbconf, dbcbak )
|
||||
#
|
||||
#
|
||||
# tmp_fdb = os.path.join(context['temp_directory'],'tmp_4864.fdb')
|
||||
#
|
||||
#
|
||||
# cleanup( (tmp_fdb,) )
|
||||
#
|
||||
#
|
||||
# f_dbconf=open(dbconf,'a')
|
||||
# f_dbconf.seek(0, 2)
|
||||
# f_dbconf.write("\\n\\n# Created temply by fbtest, CORE-4864. Should be removed auto.")
|
||||
# f_dbconf.write("\\n\\ntmp_alias_4864 = " + tmp_fdb )
|
||||
# f_dbconf.write("\\n{\\n SecurityDatabase = tmp_alias_4864 \\n}\\n")
|
||||
# f_dbconf.close()
|
||||
#
|
||||
#
|
||||
# isql_script='''
|
||||
# create database 'tmp_alias_4864' user 'SYSDBA';
|
||||
# set list on;
|
||||
@ -125,45 +125,43 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# drop database;
|
||||
# quit;
|
||||
# '''
|
||||
#
|
||||
#
|
||||
# f_isql_cmd=open( os.path.join(context['temp_directory'],'tmp_create_4864.sql'), 'w')
|
||||
# f_isql_cmd.write( isql_script )
|
||||
# flush_and_close( f_isql_cmd )
|
||||
#
|
||||
#
|
||||
# f_isql_log=open( os.path.join(context['temp_directory'],'tmp_create_4864.log'), 'w')
|
||||
# subprocess.call([context['isql_path'],"-q","-i",f_isql_cmd.name], stdout=f_isql_log, stderr=subprocess.STDOUT)
|
||||
# flush_and_close( f_isql_log )
|
||||
#
|
||||
#
|
||||
# shutil.move(dbcbak, dbconf)
|
||||
#
|
||||
#
|
||||
# with open( f_isql_log.name,'r') as f:
|
||||
# print(f.read())
|
||||
#
|
||||
#
|
||||
# #####################################################################
|
||||
# # Cleanup:
|
||||
#
|
||||
# # do NOT remove this pause otherwise some of logs will not be enable for deletion and test will finish with
|
||||
#
|
||||
# # do NOT remove this pause otherwise some of logs will not be enable for deletion and test will finish with
|
||||
# # Exception raised while executing Python test script. exception: WindowsError: 32
|
||||
# time.sleep(1)
|
||||
# cleanup( (f_isql_log, f_isql_cmd, tmp_fdb) )
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)

expected_stdout_1 = """
MON$SEC_DATABASE Self
MON$SEC_DATABASE Self
MON$ATTACHMENT_NAME tmp_alias_4864
MON$USER SYSDBA
MON$USER SYSDBA
MON$REMOTE_PROTOCOL <null>
MON$REMOTE_ADDRESS <null>
MON$REMOTE_PROCESS <null>
MON$AUTH_METHOD User name in DPB
"""
"""

@pytest.mark.version('>=3.0')
@pytest.mark.xfail
def test_1(db_1):
    pytest.fail("Test not IMPLEMENTED")


    pytest.skip("Test requires manipulation with databases.conf")
    #pytest.fail("Test not IMPLEMENTED")

@ -2,21 +2,21 @@
|
||||
#
|
||||
# id: bugs.core_4879
|
||||
# title: Minor inconvenience in user management via services API
|
||||
# decription:
|
||||
# decription:
|
||||
# Confirmed weird error message on 3.0.0.31374.
|
||||
# Command:
|
||||
# fbsvcmgr.exe localhost:service_mgr user sysdba password masterkey action_add_user dbname employee sec_username foo sec_password 123
|
||||
# - failed with:
|
||||
# unexpected item in service parameter block, expected isc_spb_sec_username
|
||||
# Checked on 4.0.0.2307; 3.0.8.33393.
|
||||
#
|
||||
#
|
||||
# tracker_id: CORE-4879
|
||||
# min_versions: ['3.0.0']
|
||||
# versions: 3.0
|
||||
# qmid: None
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
from firebird.qa import db_factory, python_act, Action
|
||||
|
||||
# version: 3.0
|
||||
# resources: None
|
||||
@ -33,24 +33,24 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# import subprocess
|
||||
# from subprocess import Popen
|
||||
# import time
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def flush_and_close(file_handle):
|
||||
# # https://docs.python.org/2/library/os.html#os.fsync
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
|
||||
# global os
|
||||
#
|
||||
#
|
||||
# file_handle.flush()
|
||||
# if file_handle.mode not in ('r', 'rb'):
|
||||
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
|
||||
# os.fsync(file_handle.fileno())
|
||||
# file_handle.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def cleanup( f_names_list ):
|
||||
# global os
|
||||
# for i in range(len( f_names_list )):
|
||||
@ -58,15 +58,15 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# os.remove( f_names_list[i] )
|
||||
# if os.path.isfile( f_names_list[i]):
|
||||
# print('ERROR: can not remove file ' + f_names_list[i])
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
#
|
||||
#
|
||||
# this_fdb=db_conn.database_name
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
# f_svc_log=open( os.path.join(context['temp_directory'],'tmp_4879_fbsvc.log'), "w", buffering = 0)
|
||||
# # C:\\FB\\old.3b1
|
||||
# bsvcmgr.exe localhost:service_mgr user sysdba password masterkey action_add_user dbname employee sec_username foo sec_password 123
|
||||
@ -74,33 +74,39 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# stdout=f_svc_log,
|
||||
# stderr=subprocess.STDOUT
|
||||
# )
|
||||
#
|
||||
#
|
||||
# subprocess.call( [context['fbsvcmgr_path'], "localhost:service_mgr", "action_delete_user", "dbname", this_fdb, "sec_username", "TMP$C4879" ],
|
||||
# stdout=f_svc_log,
|
||||
# stderr=subprocess.STDOUT
|
||||
# )
|
||||
#
|
||||
#
|
||||
# flush_and_close(f_svc_log)
|
||||
#
|
||||
#
|
||||
# with open( f_svc_log.name,'r') as f:
|
||||
# for line in f:
|
||||
# if line.split():
|
||||
# print('UNEXPECTED OUTPUT in '+f_svc_log.name+': '+line)
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# # CLEANUP
|
||||
# #########
|
||||
# time.sleep(1)
|
||||
# cleanup( (f_svc_log.name,) )
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
@pytest.mark.version('>=3.0')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
|
||||
|
||||
def test_1(act_1: Action):
|
||||
act_1.expected_stderr = 'We do not expect any error output'
|
||||
act_1.svcmgr(switches=['action_add_user', 'dbname', str(act_1.db.db_path),
|
||||
'sec_username', 'TMP$C4879', 'sec_password', '123'])
|
||||
# There should be no output at all
|
||||
assert act_1.clean_stderr == ''
|
||||
assert act_1.clean_stdout == ''
|
||||
# add_user passed, so remove it
|
||||
act_1.reset()
|
||||
act_1.svcmgr(switches=['action_delete_user', 'dbname', str(act_1.db.db_path),
|
||||
'sec_username', 'TMP$C4879'])
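
A possible extension of the check above, kept only as a sketch: between the add and the delete calls the presence of the freshly created user could be verified as well. This assumes fbsvcmgr's action_display_user action accepts the same dbname/sec_username switches as the calls above; it is not something this test currently relies on.

# Hypothetical extra step, not performed by test_1: confirm TMP$C4879 is visible
# right after action_add_user and before action_delete_user.
def check_user_visible(act_1):
    act_1.reset()
    act_1.svcmgr(switches=['action_display_user', 'dbname', str(act_1.db.db_path),
                           'sec_username', 'TMP$C4879'])
    assert 'TMP$C4879' in act_1.stdout.upper()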
|
||||
|
@ -2,37 +2,39 @@
|
||||
#
|
||||
# id: bugs.core_4880
|
||||
# title: Increase speed of creating package when number of its functions more than several hundreds
|
||||
# decription:
|
||||
# decription:
|
||||
# This test uses TWO auto-generated scripts, both of them have been packed due to their size in files/core_4880.zip
|
||||
# and are unpacked at runtime here.
|
||||
# First script, 'core_4880_fnc.tmp', creates 5'000 STANDALONE functions and adds into log timestamps of start and finish.
|
||||
# Second script, 'core_4880_pkg.tmp', creates PACKAGE with head and body also of 5'000 functions and also adds into log
|
||||
# and are unpacked at runtime here.
|
||||
# First script, 'core_4880_fnc.tmp', creates 5'000 STANDALONE functions and adds into log timestamps of start and finish.
|
||||
# Second script, 'core_4880_pkg.tmp', creates PACKAGE with head and body also of 5'000 functions and also adds into log
|
||||
# timestamps for start of 'create package' and finish of 'create package BODY' statements.
|
||||
# Both scripts use simplest body of functions.
|
||||
#
|
||||
#
|
||||
#                 After both scripts finish, the number of seconds is compared for creation of:
|
||||
# 1) standalone functions and 2) package header and body with the same number of functions.
|
||||
# Then, we evaluate maxValue and minValue in this pair and result of division: maxValue / minValue, casted to num(12,2).
|
||||
#
|
||||
#
|
||||
# Numerous runs showed that this ratio (N) is about 1.2 ... 1.5, and it never was more than 1.8.
|
||||
#                 It was decided to use N = 2 as the max acceptable ratio between the creation time of the package and of the standalone functions.
|
||||
#                 If either kind of object (package or standalone functions) takes more than N times longer to create than the other, expected_stdout
|
||||
# will contain phrase about regression.
|
||||
#
|
||||
#
|
||||
# Checked on WI-V3.0.0.32008, machine: P-IV 3.0 Ghz RAM 2Gb, OS = Win XP.
|
||||
# Duration of test on that machine is about 45-55 seconds.
|
||||
#
|
||||
#
|
||||
# 13.04.2021. Adapted for run both on Windows and Linux. Checked on:
|
||||
# Windows: 3.0.8.33445, 4.0.0.2416
|
||||
# Linux: 3.0.8.33426, 4.0.0.2416
|
||||
#
|
||||
#
|
||||
# tracker_id: CORE-4880
|
||||
# min_versions: ['3.0']
|
||||
# versions: 3.0
|
||||
# qmid:
|
||||
# qmid:
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
from zipfile import Path
|
||||
from firebird.qa import db_factory, python_act, Action
|
||||
from firebird.driver import DbWriteMode
|
||||
|
||||
# version: 3.0
|
||||
# resources: None
|
||||
@ -41,15 +43,15 @@ substitutions_1 = []
|
||||
|
||||
init_script_1 = """
|
||||
recreate table log(
|
||||
standalone_func_beg timestamp default null
|
||||
,standalone_func_end timestamp default null
|
||||
,pkg_head_n_body_beg timestamp default null
|
||||
,pkg_head_n_body_end timestamp default null
|
||||
standalone_func_beg timestamp default null,
|
||||
standalone_func_end timestamp default null,
|
||||
pkg_head_n_body_beg timestamp default null,
|
||||
pkg_head_n_body_end timestamp default null
|
||||
);
|
||||
commit;
|
||||
insert into log default values;
|
||||
commit;
|
||||
"""
|
||||
"""
|
||||
|
||||
db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
|
||||
@ -57,26 +59,26 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
#---
|
||||
# import os
|
||||
# import zipfile
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
#
|
||||
#
|
||||
# db_conn.close()
|
||||
# runProgram('gfix',['-w','async',dsn])
|
||||
#
|
||||
#
|
||||
# zf = zipfile.ZipFile( os.path.join(context['files_location'],'core_4880.zip') )
|
||||
# zf.extractall( context['temp_directory'] )
|
||||
# zf.close()
|
||||
# runProgram('isql',[dsn,'-q','-i', os.path.join(context['temp_directory'],'core_4880_fnc.tmp') ])
|
||||
# runProgram('isql',[dsn,'-q','-i', os.path.join(context['temp_directory'],'core_4880_pkg.tmp') ])
|
||||
#
|
||||
#
|
||||
# os.remove( os.path.join(context['temp_directory'],'core_4880_fnc.tmp') )
|
||||
# os.remove( os.path.join(context['temp_directory'],'core_4880_pkg.tmp') )
|
||||
#
|
||||
# script="""set list on;
|
||||
#
|
||||
# script="""set list on;
|
||||
# set term ^;
|
||||
# execute block as
|
||||
# begin
|
||||
# execute block as
|
||||
# begin
|
||||
# rdb$set_context('USER_SESSION', 'MAX_ACCEPTABLE_RATIO', '2');
|
||||
# -- ^
|
||||
# -- #################
|
||||
@ -85,21 +87,21 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# end
|
||||
# ^
|
||||
# set term ;^
|
||||
#
|
||||
# select iif( x.ratio < cast( rdb$get_context('USER_SESSION', 'MAX_ACCEPTABLE_RATIO') as int ),
|
||||
# 'Ratio is acceptable',
|
||||
# 'Regression, ratio >= ' || rdb$get_context('USER_SESSION', 'MAX_ACCEPTABLE_RATIO') || 'x'
|
||||
#
|
||||
# select iif( x.ratio < cast( rdb$get_context('USER_SESSION', 'MAX_ACCEPTABLE_RATIO') as int ),
|
||||
# 'Ratio is acceptable',
|
||||
# 'Regression, ratio >= ' || rdb$get_context('USER_SESSION', 'MAX_ACCEPTABLE_RATIO') || 'x'
|
||||
# ) as result_msg
|
||||
# --, x.*
|
||||
# from (
|
||||
# select
|
||||
# select
|
||||
# standalone_func_sec
|
||||
# ,pkg_head_n_body_sec
|
||||
# ,cast( iif( pkg_head_n_body_sec > standalone_func_sec, 1.00 * pkg_head_n_body_sec / standalone_func_sec, 1.00 * standalone_func_sec / pkg_head_n_body_sec ) as numeric(12,2) ) as ratio
|
||||
# ,cast( 1.00 * pkg_head_n_body_sec / standalone_func_sec as numeric(12,2) ) package_vs_standalone
|
||||
# ,cast( 1.00 * standalone_func_sec / pkg_head_n_body_sec as numeric(12,2) ) standalone_vs_package
|
||||
# from (
|
||||
# select
|
||||
# select
|
||||
# nullif( datediff(second from standalone_func_beg to standalone_func_end), 0) standalone_func_sec
|
||||
# ,nullif( datediff(second from pkg_head_n_body_beg to pkg_head_n_body_end), 0) pkg_head_n_body_sec
|
||||
# from log
|
||||
@ -108,15 +110,63 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# """
|
||||
# runProgram('isql',[dsn,'-q'],script)
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
expected_stdout_1 = """
|
||||
RESULT_MSG Ratio is acceptable
|
||||
"""
|
||||
"""
|
||||
|
||||
@pytest.mark.version('>=3.0')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
|
||||
def test_1(act_1: Action):
|
||||
with act_1.connect_server() as srv:
|
||||
srv.database.set_write_mode(database=str(act_1.db.db_path), mode=DbWriteMode.ASYNC)
|
||||
# Read the FNC script from the zip file and execute it
|
||||
script_file = Path(act_1.vars['files'] / 'core_4880.zip',
|
||||
at='core_4880_fnc.tmp')
|
||||
act_1.script = script_file.read_text()
|
||||
act_1.execute()
|
||||
# Read the PKG script from the zip file and execute it
|
||||
script_file = Path(act_1.vars['files'] / 'core_4880.zip',
|
||||
at='core_4880_pkg.tmp')
|
||||
act_1.script = script_file.read_text()
|
||||
act_1.execute()
|
||||
# Check
|
||||
test_script = """
|
||||
set list on;
|
||||
set term ^;
|
||||
execute block as
|
||||
begin
|
||||
rdb$set_context('USER_SESSION', 'MAX_ACCEPTABLE_RATIO', '2');
|
||||
-- ^
|
||||
-- #################
|
||||
-- T H R E S H O L D
|
||||
-- #################
|
||||
end
|
||||
^
|
||||
set term ;^
|
||||
|
||||
select iif(x.ratio < cast( rdb$get_context('USER_SESSION', 'MAX_ACCEPTABLE_RATIO') as int ),
|
||||
'Ratio is acceptable',
|
||||
'Regression, ratio >= ' || rdb$get_context('USER_SESSION', 'MAX_ACCEPTABLE_RATIO') || 'x'
|
||||
) as result_msg
|
||||
--, x.*
|
||||
from (
|
||||
select
|
||||
standalone_func_sec,
|
||||
pkg_head_n_body_sec,
|
||||
cast(iif( pkg_head_n_body_sec > standalone_func_sec, 1.00 * pkg_head_n_body_sec / standalone_func_sec, 1.00 * standalone_func_sec / pkg_head_n_body_sec ) as numeric(12,2) ) as ratio,
|
||||
cast(1.00 * pkg_head_n_body_sec / standalone_func_sec as numeric(12,2)) package_vs_standalone,
|
||||
cast(1.00 * standalone_func_sec / pkg_head_n_body_sec as numeric(12,2)) standalone_vs_package
|
||||
from (
|
||||
select
|
||||
nullif(datediff(second from standalone_func_beg to standalone_func_end), 0) standalone_func_sec,
|
||||
nullif(datediff(second from pkg_head_n_body_beg to pkg_head_n_body_end), 0) pkg_head_n_body_sec
|
||||
from log
|
||||
)
|
||||
) x;
|
||||
"""
|
||||
act_1.reset()
|
||||
act_1.expected_stdout = expected_stdout_1
|
||||
act_1.isql(switches=['-q'], input=test_script)
|
||||
assert act_1.clean_stdout == act_1.clean_expected_stdout
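
For readers who do not want to trace through the SQL, the pass/fail rule evaluated by test_script above boils down to one comparison: take the slower and the faster of the two creation times, divide, and compare with the MAX_ACCEPTABLE_RATIO of 2 from the description. A plain-Python sketch of that arithmetic (the sample durations are invented; in the test they come from the datediff(second ...) values in the LOG table):

# Same verdict as the iif(x.ratio < ...) expression in test_script, in plain Python.
MAX_ACCEPTABLE_RATIO = 2

def verdict(standalone_func_sec: int, pkg_head_n_body_sec: int) -> str:
    slower = max(standalone_func_sec, pkg_head_n_body_sec)
    faster = min(standalone_func_sec, pkg_head_n_body_sec)
    ratio = slower / faster
    return ('Ratio is acceptable' if ratio < MAX_ACCEPTABLE_RATIO
            else f'Regression, ratio >= {MAX_ACCEPTABLE_RATIO}x')

print(verdict(20, 27))   # 27 / 20 = 1.35 -> 'Ratio is acceptable'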
|
||||
|
@ -2,10 +2,9 @@
|
||||
#
|
||||
# id: bugs.core_4882
|
||||
# title: ISQL input command (or -i option) reads large (> 64K) lines incorrectly
|
||||
# decription:
|
||||
# decription:
|
||||
# This test verifies ability of parsing multiple statements that are going as `single-lined stream`.
|
||||
# Source for this test is file `fbt-repo
|
||||
# iles\\core_4882.sql`.
|
||||
# Source for this test is file `fbt-repo\\files\\core_4882.sql`.
|
||||
# It contains almost all source code of test that emulates OLTP workload - without initial script for data filling.
|
||||
# This test can be found here: svn://svn.code.sf.net/p/firebird/code/qa/oltp-emul/
|
||||
# Three files were taken from it: oltp30_DDL.sql, oltp30_sp.sql and oltp_main_filling.sql - with total size ~600 Kb.
|
||||
@ -15,14 +14,14 @@
|
||||
# NOTE: before this file also contained lines with bulk of begin..end blocks but since CORE-4884 was fixed that number
|
||||
# is limited to 512. With this limit single-line statement of begin-end blocks will have length less than 64K. For that
|
||||
# reason these lines were removed from here to the test for CORE-4884.
|
||||
#
|
||||
#
|
||||
# tracker_id: CORE-4882
|
||||
# min_versions: ['3.0']
|
||||
# versions: 3.0
|
||||
# qmid:
|
||||
# qmid:
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
from firebird.qa import db_factory, python_act, Action
|
||||
|
||||
# version: 3.0
|
||||
# resources: None
|
||||
@ -36,13 +35,14 @@ db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)
|
||||
# test_script_1
|
||||
#---
|
||||
# import os
|
||||
#
|
||||
#
|
||||
# db_conn.close()
|
||||
# scriptfile=open(os.path.join(context['files_location'],'core_4882.sql'),'r')
|
||||
# scriptfile.close()
|
||||
# runProgram('isql',[dsn,'-user',user_name,'-pas',user_password,'-i',scriptfile.name])
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
expected_stdout_1 = """
|
||||
MSG oltp30_DDL.sql start
|
||||
@ -51,11 +51,12 @@ expected_stdout_1 = """
|
||||
MSG oltp30_sp.sql finish
|
||||
MSG oltp_main_filling.sql start
|
||||
MSG oltp_main_filling.sql finish
|
||||
"""
|
||||
"""
|
||||
|
||||
@pytest.mark.version('>=3.0')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
def test_1(act_1: Action):
|
||||
act_1.expected_stdout = expected_stdout_1
|
||||
act_1.isql(switches=[], input_file=act_1.vars['files'] / 'core_4882.sql')
|
||||
assert act_1.clean_stdout == act_1.clean_expected_stdout
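
A rough feel for the 64K boundary mentioned in the description, purely for orientation (the real input is files/core_4882.sql, which the test feeds unchanged): flattening statements onto one line crosses 64 KiB quickly, while the 512 begin..end blocks referred to above stay well below it.

# Illustrative arithmetic only; 'begin end ' is a stand-in for the real blocks.
one_block = 'begin end '                 # 10 characters per block
limit = 64 * 1024                        # the 64K line limit from the ticket
print(limit // len(one_block))           # 6553 such blocks fit into one 64 KiB line
print(len(one_block * 512))              # 5120 characters for the 512-block cap noted above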
|
||||
|
||||
|
||||
|
@ -2,7 +2,7 @@
|
||||
#
|
||||
# id: bugs.core_4889
|
||||
# title: FBSVCMGR with `action_trace_start` prevents in 3.0 SuperServer from connecting using local protocol
|
||||
# decription:
|
||||
# decription:
|
||||
# Confirmed failing to create embedded attach on build 31948.
|
||||
# Confirmed successful work on build 32268, architectures: SS, SC and CS.
|
||||
# 10.12.2019. Additional check:
|
||||
@ -10,18 +10,20 @@
|
||||
# 4.0.0.1685 CS: 12.078s.
|
||||
# 3.0.5.33206 SS: 10.827s.
|
||||
# 3.0.5.33206 CS: 11.793s.
|
||||
#
|
||||
#
|
||||
# 13.04.2021. Adapted for run both on Windows and Linux. Checked on:
|
||||
# Windows: 3.0.8.33445, 4.0.0.2416
|
||||
# Linux: 3.0.8.33426, 4.0.0.2416
|
||||
#
|
||||
#
|
||||
# tracker_id: CORE-4889
|
||||
# min_versions: ['3.0']
|
||||
# versions: 3.0
|
||||
# qmid: None
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
import time
|
||||
from threading import Thread, Barrier
|
||||
from firebird.qa import db_factory, python_act, Action
|
||||
|
||||
# version: 3.0
|
||||
# resources: None
|
||||
@ -34,36 +36,36 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
|
||||
# test_script_1
|
||||
#---
|
||||
#
|
||||
#
|
||||
# import os
|
||||
# import subprocess
|
||||
# from subprocess import Popen
|
||||
# import time
|
||||
#
|
||||
#
|
||||
# fdb_file='$(DATABASE_LOCATION)bugs.core_4889.fdb'
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
#
|
||||
#
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def flush_and_close( file_handle ):
|
||||
# # https://docs.python.org/2/library/os.html#os.fsync
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
|
||||
# global os
|
||||
#
|
||||
#
|
||||
# file_handle.flush()
|
||||
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
|
||||
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
|
||||
# os.fsync(file_handle.fileno())
|
||||
# file_handle.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def cleanup( f_names_list ):
|
||||
# global os
|
||||
# for i in range(len( f_names_list )):
|
||||
@ -75,19 +77,19 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
|
||||
# print('type(f_names_list[i])=',type(f_names_list[i]))
|
||||
# del_name = None
|
||||
#
|
||||
#
|
||||
# if del_name and os.path.isfile( del_name ):
|
||||
# os.remove( del_name )
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# # Prepare config for trace session that will be launched by call of FBSVCMGR:
|
||||
# ################
|
||||
# txt = '''database= %[\\\\\\\\/]bugs.core_4889.fdb
|
||||
# {
|
||||
# enabled = true
|
||||
# time_threshold = 0
|
||||
# time_threshold = 0
|
||||
# log_errors = true
|
||||
# log_statement_finish = true
|
||||
# }
|
||||
@ -95,36 +97,36 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# trc_cfg=open( os.path.join(context['temp_directory'],'tmp_trace_4889.cfg'), 'w')
|
||||
# trc_cfg.write(txt)
|
||||
# flush_and_close( trc_cfg )
|
||||
#
|
||||
#
|
||||
# #####################################################################
|
||||
# # Async. launch of trace session using FBSVCMGR action_trace_start:
|
||||
#
|
||||
#
|
||||
# trc_log = open( os.path.join(context['temp_directory'],'tmp_trace_4889.log'), 'w')
|
||||
#
|
||||
#
|
||||
# # Execute a child program in a new process, redirecting STDERR to the same target as of STDOUT:
|
||||
# p_svcmgr = Popen( [context['fbsvcmgr_path'], "localhost:service_mgr",
|
||||
# "action_trace_start","trc_cfg", trc_cfg.name],
|
||||
# stdout=trc_log, stderr=subprocess.STDOUT)
|
||||
#
|
||||
#
|
||||
# # Wait! Trace session is initialized not instantly!
|
||||
# time.sleep(2)
|
||||
#
|
||||
#
|
||||
# #####################################################################
|
||||
#
|
||||
#
|
||||
# # Determine active trace session ID (for further stop):
|
||||
#
|
||||
#
|
||||
# trc_lst = open( os.path.join(context['temp_directory'],'tmp_trace_4889.lst'), 'w')
|
||||
# subprocess.call([context['fbsvcmgr_path'], "localhost:service_mgr",
|
||||
# "action_trace_list"],
|
||||
# stdout=trc_lst, stderr=subprocess.STDOUT
|
||||
# )
|
||||
# flush_and_close( trc_lst )
|
||||
#
|
||||
# # Session ID: 5
|
||||
# # user:
|
||||
# # date: 2015-08-27 15:24:14
|
||||
# # flags: active, trace
|
||||
#
|
||||
#
|
||||
# # Session ID: 5
|
||||
# # user:
|
||||
# # date: 2015-08-27 15:24:14
|
||||
# # flags: active, trace
|
||||
#
|
||||
# trcssn=0
|
||||
# with open( trc_lst.name,'r') as f:
|
||||
# for line in f:
|
||||
@ -135,43 +137,43 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# trcssn=word
|
||||
# i=i+1
|
||||
# break
|
||||
#
|
||||
# # Result: `trcssn` is ID of active trace session.
|
||||
#
|
||||
# # Result: `trcssn` is ID of active trace session.
|
||||
# # We have to terminate trace session that is running on server BEFORE we termitane process `p_svcmgr`
|
||||
# if trcssn==0:
|
||||
# print("Error parsing trace session ID.")
|
||||
# flush_and_close( trc_log )
|
||||
#
|
||||
#
|
||||
# else:
|
||||
# #####################################################################
|
||||
#
|
||||
#
|
||||
# # Preparing script for ISQL:
|
||||
#
|
||||
#
|
||||
# sql_cmd='''
|
||||
# set list on;
|
||||
# set list on;
|
||||
# set count on;
|
||||
# select
|
||||
# select
|
||||
# iif(a.mon$remote_protocol is null, 'internal', 'remote') as connection_protocol,
|
||||
# iif(a.mon$remote_process is null, 'internal', 'remote') as connection_process,
|
||||
# iif(a.mon$remote_pid is null, 'internal', 'remote') as connection_remote_pid,
|
||||
# a.mon$auth_method as auth_method -- should be: 'User name in DPB'
|
||||
# from rdb$database r
|
||||
# left join mon$attachments a on a.mon$attachment_id = current_connection and a.mon$system_flag = 0;
|
||||
# commit;
|
||||
# commit;
|
||||
# '''
|
||||
#
|
||||
#
|
||||
# isql_cmd=open( os.path.join(context['temp_directory'],'tmp_isql_4889.sql'), 'w')
|
||||
# isql_cmd.write(sql_cmd)
|
||||
# flush_and_close( isql_cmd )
|
||||
#
|
||||
#
|
||||
# #######################################################################
|
||||
#
|
||||
# # Async. launch ISQL process with EMBEDDED connect.
|
||||
#
|
||||
# # Async. launch ISQL process with EMBEDDED connect.
|
||||
# # ::::: NB :::::
|
||||
# # Confirmed that this action:
|
||||
# # works fine on WI-V3.0.0.31940, build 14-jul-2015
|
||||
# # **HANGS** on WI-V3.0.0.31948, build 16-jul-2015
|
||||
#
|
||||
#
|
||||
# isql_log=open( os.path.join(context['temp_directory'],'tmp_isql_4889.log'), 'w')
|
||||
# p_isql = Popen( [ context['isql_path'] , fdb_file,
|
||||
# "-user", "tmp$no$such$user$4889",
|
||||
@ -179,17 +181,17 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# stdout=isql_log,
|
||||
# stderr=subprocess.STDOUT
|
||||
# )
|
||||
#
|
||||
#
|
||||
# # do NOT remove this delay:
|
||||
# time.sleep(5)
|
||||
#
|
||||
#
|
||||
# p_isql.terminate()
|
||||
# flush_and_close( isql_log )
|
||||
#
|
||||
#
|
||||
# #####################################################################
|
||||
#
|
||||
#
|
||||
# # Stop trace session:
|
||||
#
|
||||
#
|
||||
# trc_lst=open(trc_lst.name, "a")
|
||||
# trc_lst.seek(0,2)
|
||||
# subprocess.call([ context['fbsvcmgr_path'], "localhost:service_mgr",
|
||||
@ -197,17 +199,17 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# stdout=trc_lst, stderr=subprocess.STDOUT
|
||||
# )
|
||||
# flush_and_close( trc_lst )
|
||||
#
|
||||
#
|
||||
# p_svcmgr.terminate()
|
||||
# flush_and_close( trc_log )
|
||||
#
|
||||
#
|
||||
# # do NOT remove this delay:
|
||||
# time.sleep(2)
|
||||
#
|
||||
#
|
||||
# #####################################################################
|
||||
#
|
||||
#
|
||||
# # Output logs:
|
||||
#
|
||||
#
|
||||
# i=0
|
||||
# with open( trc_log.name,'r') as f:
|
||||
# for line in f:
|
||||
@ -217,34 +219,35 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# i=2
|
||||
# print("OK: found text in trace related to EMBEDDED connect.")
|
||||
# break
|
||||
#
|
||||
#
|
||||
# if not i==2:
|
||||
# print("FAILED to found text in trace related to EMBEDDED connect.")
|
||||
#
|
||||
#
|
||||
# if os.path.getsize(isql_log.name) == 0:
|
||||
# print("FAILED to print log from EMBEDDED connect: log is EMPTY.")
|
||||
# else:
|
||||
# with open( isql_log.name,'r') as f:
|
||||
# print(f.read())
|
||||
# f.close()
|
||||
#
|
||||
#
|
||||
# # do NOT remove this pause otherwise log of trace will not be enable for deletion and test will finish with
|
||||
#
|
||||
#
|
||||
# # do NOT remove this pause otherwise log of trace will not be enable for deletion and test will finish with
|
||||
# # Exception raised while executing Python test script. exception: WindowsError: 32
|
||||
#
|
||||
#
|
||||
# # On WI-V3.0.0.31948 final output was:
|
||||
# # FAILED to found text in trace related to EMBEDDED connect.
|
||||
# # FAILED to print log from EMBEDDED connect: log is EMPTY.
|
||||
#
|
||||
#
|
||||
# #####################################################################
|
||||
#
|
||||
#
|
||||
# # Cleanup:
|
||||
# time.sleep(1)
|
||||
# cleanup( (trc_lst, trc_cfg, trc_log,isql_cmd, isql_log) )
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
expected_stdout_1 = """
|
||||
OK: found text in trace related to EMBEDDED connect.
|
||||
@ -253,11 +256,66 @@ expected_stdout_1 = """
|
||||
CONNECTION_REMOTE_PID internal
|
||||
AUTH_METHOD User name in DPB
|
||||
Records affected: 1
|
||||
"""
|
||||
"""
|
||||
|
||||
def trace_session(act: Action, b: Barrier):
|
||||
cfg30 = ['# Trace config, format for 3.0. Generated auto, do not edit!',
|
||||
f'database=%[\\\\/]{act.db.db_path.name}',
|
||||
'{',
|
||||
' enabled = true',
|
||||
' time_threshold = 0',
|
||||
' log_initfini = false',
|
||||
' log_errors = true',
|
||||
' log_statement_finish = true',
|
||||
'}']
|
||||
with act.connect_server() as srv:
|
||||
srv.trace.start(config='\n'.join(cfg30))
|
||||
b.wait()
|
||||
for line in srv:
|
||||
print(line)
|
||||
|
||||
@pytest.mark.version('>=3.0')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
|
||||
|
||||
def test_1(act_1: Action, capsys):
|
||||
b = Barrier(2)
|
||||
trace_thread = Thread(target=trace_session, args=[act_1, b])
|
||||
trace_thread.start()
|
||||
b.wait()
|
||||
isq_script = """
|
||||
set list on;
|
||||
set count on;
|
||||
select
|
||||
iif(a.mon$remote_protocol is null, 'internal', 'remote') as connection_protocol,
|
||||
iif(a.mon$remote_process is null, 'internal', 'remote') as connection_process,
|
||||
iif(a.mon$remote_pid is null, 'internal', 'remote') as connection_remote_pid,
|
||||
a.mon$auth_method as auth_method -- should be: 'User name in DPB'
|
||||
from rdb$database r
|
||||
left join mon$attachments a on a.mon$attachment_id = current_connection and a.mon$system_flag = 0;
|
||||
commit;
|
||||
"""
|
||||
act_1.isql(switches=['-n', '-user', 'tmp$no$such$user$4889', str(act_1.db.db_path)],
|
||||
connect_db=False, input=isq_script)
|
||||
output = act_1.stdout
|
||||
with act_1.connect_server() as srv:
|
||||
for session in list(srv.trace.sessions.keys()):
|
||||
srv.trace.stop(session_id=session)
|
||||
trace_thread.join(1.0)
|
||||
if trace_thread.is_alive():
|
||||
pytest.fail('Trace thread still alive')
|
||||
trace_log = capsys.readouterr().out
|
||||
#
|
||||
# Process logs
|
||||
i = 0
|
||||
for line in trace_log.splitlines():
|
||||
if ') EXECUTE_STATEMENT_FINISH' in line:
|
||||
i = 1
|
||||
if i == 1 and '1 records fetched' in line:
|
||||
i = 2
|
||||
print("OK: found text in trace related to EMBEDDED connect.")
|
||||
break
|
||||
if not i == 2:
|
||||
print("FAILED to found text in trace related to EMBEDDED connect.")
|
||||
print(output if output else "FAILED to print log from EMBEDDED connect: log is EMPTY.")
|
||||
# Check
|
||||
act_1.expected_stdout = expected_stdout_1
|
||||
act_1.stdout = capsys.readouterr().out
|
||||
assert act_1.clean_stdout == act_1.clean_expected_stdout
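
The Thread/Barrier arrangement in trace_session/test_1 reappears almost verbatim in the core_4927 test later in this commit, so the shape of the pattern is worth spelling out once. The sketch below only rearranges calls already used above (connect_server, trace.start, trace.sessions, trace.stop); it is an illustration of the pattern, not a proposal to change the tests.

# One thread starts the trace session and streams its output; the main thread
# waits on the Barrier until the session is live, runs the workload, then stops
# every trace session and joins the reader thread.
from threading import Thread, Barrier

def run_with_trace(act, config: str, workload):
    def pump(b: Barrier):
        with act.connect_server() as srv:
            srv.trace.start(config=config)
            b.wait()                 # tell the main thread the session is up
            for line in srv:         # stream trace output until the session stops
                print(line)
    b = Barrier(2)
    reader = Thread(target=pump, args=[b])
    reader.start()
    b.wait()
    try:
        workload()                   # e.g. the embedded isql call from test_1
    finally:
        with act.connect_server() as srv:
            for session in list(srv.trace.sessions.keys()):
                srv.trace.stop(session_id=session)
        reader.join(1.0)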
|
||||
|
@ -2,15 +2,15 @@
|
||||
#
|
||||
# id: bugs.core_4899
|
||||
# title: GFIX -online: message "IProvider::attachDatabase failed when loading mapping cache" appears in Classic (only) if access uses remote protocol
|
||||
# decription:
|
||||
#
|
||||
# decription:
|
||||
#
|
||||
# tracker_id: CORE-4899
|
||||
# min_versions: ['3.0']
|
||||
# versions: 3.0
|
||||
# qmid: None
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
from firebird.qa import db_factory, python_act, Action
|
||||
|
||||
# version: 3.0
|
||||
# resources: None
|
||||
@ -23,83 +23,117 @@ db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)
|
||||
|
||||
# test_script_1
|
||||
#---
|
||||
#
|
||||
#
|
||||
# import os
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
#
|
||||
#
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
# fdb='$(DATABASE_LOCATION)bugs.core_4899.fdb'
|
||||
# fdx=os.path.join(context['temp_directory'],'tmp_copy_4899.fdb')
|
||||
#
|
||||
#
|
||||
# if os.path.isfile(fdx):
|
||||
# os.remove(fdx)
|
||||
#
|
||||
#
|
||||
# script="create database 'localhost:%s';" % fdx
|
||||
# runProgram('isql',['-q'],script)
|
||||
# # --------------------- I ----------------
|
||||
#
|
||||
#
|
||||
# #shutil.copy2( fdb, fdx )
|
||||
#
|
||||
#
|
||||
# # Trying to move database to OFFLINE:
|
||||
#
|
||||
#
|
||||
# runProgram('gfix',['-shut', 'full', '-force', '0', fdx])
|
||||
#
|
||||
#
|
||||
# runProgram('gstat',['-h',fdx])
|
||||
#
|
||||
#
|
||||
# # Trying to move database online using LOCAL protocol:
|
||||
# runProgram('gfix',['-online',fdx])
|
||||
#
|
||||
#
|
||||
# # gfix attachment via local protocol reflects with following lines in trace:
|
||||
# # 2015-08-24T18:30:03.2580 (2516:012417E0) ATTACH_DATABASE
|
||||
# # C:\\MIX\\FIREBIRD\\QA\\FBT-REPO\\TMP\\CORE4899-TMP.FDB (ATT_9, SYSDBA:NONE, NONE, <internal>)
|
||||
#
|
||||
#
|
||||
# runProgram('gstat',['-h',fdx])
|
||||
#
|
||||
#
|
||||
# if os.path.isfile(fdx):
|
||||
# os.remove(fdx)
|
||||
#
|
||||
#
|
||||
# # --------------------- II ---------------
|
||||
#
|
||||
#
|
||||
# #shutil.copy2( fdb, fdx )
|
||||
# runProgram('isql',['-q'],script)
|
||||
#
|
||||
#
|
||||
# runProgram('gfix',['-shut', 'full', '-force', '0', fdx])
|
||||
# runProgram('gstat',['-h',fdx])
|
||||
#
|
||||
#
|
||||
# # Trying to move database online using REMOTE protocol:
|
||||
# runProgram('gfix',['-online','localhost:'+fdx])
|
||||
#
|
||||
#
|
||||
# # Note: gfix attachment via remote protocol refects with following lines in trace:
|
||||
# # 2015-08-24T18:30:03.8520 (3256:01B526A8) ATTACH_DATABASE
|
||||
# # C:\\MIX\\FIREBIRD\\QA\\FBT-REPO\\TMP\\CORE4899-TMP.FDB (ATT_9, SYSDBA:NONE, NONE, TCPv4:127.0.0.1)
|
||||
# # C:\\MIX
|
||||
# irebird
|
||||
# b30\\gfix.exe:1448
|
||||
#
|
||||
# # C:\\MIX\\firebird\\db30\\gfix.exe:1448
|
||||
#
|
||||
# runProgram('gstat',['-h',fdx])
|
||||
#
|
||||
#
|
||||
# if os.path.isfile(fdx):
|
||||
# os.remove(fdx)
|
||||
#
|
||||
#
|
||||
# #, 'substitutions':[('^((?!Attributes).)*$',''),('[\\s]+',' ')]
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
expected_stdout_1 = """
|
||||
Attributes force write, full shutdown
|
||||
Attributes force write
|
||||
Attributes force write, full shutdown
|
||||
Attributes force write
|
||||
"""
|
||||
"""
|
||||
|
||||
@pytest.mark.version('>=3.0')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
|
||||
|
||||
def test_1(act_1: Action, capsys):
|
||||
# Trying to move database to OFFLINE:
|
||||
act_1.gfix(switches=['-shut', 'full', '-force', '0', str(act_1.db.db_path)])
|
||||
print(act_1.stdout)
|
||||
act_1.reset()
|
||||
act_1.gstat(switches=['-h', str(act_1.db.db_path)], connect_db=False)
|
||||
print(act_1.stdout)
|
||||
# Trying to move database online using LOCAL protocol:
|
||||
act_1.reset()
|
||||
act_1.gfix(switches=['-online', str(act_1.db.db_path)])
|
||||
print(act_1.stdout)
|
||||
# gfix attachment via local protocol is reflected by the following lines in trace:
|
||||
# 2015-08-24T18:30:03.2580 (2516:012417E0) ATTACH_DATABASE
|
||||
# C:\\MIX\\FIREBIRD\\QA\\FBT-REPO\\TMP\\CORE4899-TMP.FDB (ATT_9, SYSDBA:NONE, NONE, <internal>)
|
||||
act_1.reset()
|
||||
act_1.gstat(switches=['-h', str(act_1.db.db_path)], connect_db=False)
|
||||
print(act_1.stdout)
|
||||
# --------------------- II ---------------
|
||||
act_1.reset()
|
||||
act_1.gfix(switches=['-shut', 'full', '-force', '0', str(act_1.db.db_path)])
|
||||
print(act_1.stdout)
|
||||
act_1.reset()
|
||||
act_1.gstat(switches=['-h', str(act_1.db.db_path)], connect_db=False)
|
||||
print(act_1.stdout)
|
||||
# Trying to move database online using REMOTE protocol:
|
||||
act_1.reset()
|
||||
act_1.gfix(switches=['-online', f'localhost:{act_1.db.db_path}'])
|
||||
print(act_1.stdout)
|
||||
# Note: gfix attachment via remote protocol is reflected by the following lines in trace:
|
||||
# 2015-08-24T18:30:03.8520 (3256:01B526A8) ATTACH_DATABASE
|
||||
# C:\\MIX\\FIREBIRD\\QA\\FBT-REPO\\TMP\\CORE4899-TMP.FDB (ATT_9, SYSDBA:NONE, NONE, TCPv4:127.0.0.1)
|
||||
# C:\\MIX\\firebird\\db30\\gfix.exe:1448
|
||||
act_1.reset()
|
||||
act_1.gstat(switches=['-h', str(act_1.db.db_path)], connect_db=False)
|
||||
print(act_1.stdout)
|
||||
# Check
|
||||
act_1.reset()
|
||||
act_1.expected_stdout = expected_stdout_1
|
||||
act_1.stdout = capsys.readouterr().out
|
||||
assert act_1.clean_stdout == act_1.clean_expected_stdout
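
How the gstat header output shrinks to the four Attributes lines of expected_stdout_1 is easiest to see on a sample: the substitution pair hinted at in the commented-out section above (blank out every line that does not contain 'Attributes', then collapse whitespace) leaves exactly one short line per gstat call. The header lines below are abbreviated samples, not a full gstat dump.

# Effect of the substitutions [('^((?!Attributes).)*$',''), ('[\s]+',' ')]
# on a (shortened) gstat -h header.
import re

sample = """Database header page information:
        Flags                   0
        Attributes              force write, full shutdown
"""
only_attr = '\n'.join(re.sub(r'^((?!Attributes).)*$', '', line)
                      for line in sample.splitlines())
print(re.sub(r'[\s]+', ' ', only_attr).strip())
# -> Attributes force write, full shutdown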
|
||||
|
@ -2,29 +2,33 @@
|
||||
#
|
||||
# id: bugs.core_4904
|
||||
# title: Index corruption when add data in long-key-indexed field
|
||||
# decription:
|
||||
# decription:
|
||||
# In order to check ticket issues this test does following:
|
||||
# 1. Change on test database FW to OFF - this will increase DML performance.
|
||||
# 2. Create table with indexed field of length = maximum that is allowed by
|
||||
# 2. Create table with indexed field of length = maximum that is allowed by
|
||||
# current FB implementation (page_size / 4 - 9 bytes).
|
||||
# 3. Try to insert enough number of records in this table - this should cause
|
||||
# runtime exception SQLSTATE = 54000, "Maximum index level reached"
|
||||
# 4. Start validation of database: index should NOT be corrupted in its report.
|
||||
#
|
||||
#
|
||||
# Checked on WI-V3.0.0.32140 (CS, SC); WI-V3.0.0.32157 - official RC1 (SS)
|
||||
#
|
||||
#
|
||||
# tracker_id: CORE-4904
|
||||
# min_versions: ['3.0']
|
||||
# versions: 3.0
|
||||
# qmid: None
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
from firebird.qa import db_factory, python_act, Action
|
||||
from firebird.driver import DbWriteMode
|
||||
|
||||
# version: 3.0
|
||||
# resources: None
|
||||
|
||||
substitutions_1 = [('[0-9][0-9]:[0-9][0-9]:[0-9][0-9].[0-9][0-9]', ''), ('Maximum index .* reached', 'Maximum index reached'), ('Relation [0-9]{3,4}', 'Relation'), ('After line .*', ''), ('-At block line: [\\d]+, col: [\\d]+', '-At block line')]
|
||||
substitutions_1 = [('[0-9][0-9]:[0-9][0-9]:[0-9][0-9].[0-9][0-9]', ''),
|
||||
('Maximum index .* reached', 'Maximum index reached'),
|
||||
('Relation [0-9]{3,4}', 'Relation'), ('After line .*', ''),
|
||||
('-At block line: [\\d]+, col: [\\d]+', '-At block line')]
|
||||
|
||||
init_script_1 = """"""
|
||||
|
||||
@ -32,34 +36,34 @@ db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)
|
||||
|
||||
# test_script_1
|
||||
#---
|
||||
#
|
||||
#
|
||||
# import os
|
||||
# import subprocess
|
||||
# import time
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
#
|
||||
#
|
||||
# db_name = db_conn.database_name
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def flush_and_close(file_handle):
|
||||
# # https://docs.python.org/2/library/os.html#os.fsync
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
|
||||
# global os
|
||||
#
|
||||
#
|
||||
# file_handle.flush()
|
||||
# if file_handle.mode not in ('r', 'rb'):
|
||||
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
|
||||
# os.fsync(file_handle.fileno())
|
||||
# file_handle.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def cleanup( f_names_list ):
|
||||
# global os
|
||||
# for i in range(len( f_names_list )):
|
||||
@ -70,16 +74,16 @@ db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)
|
||||
# else:
|
||||
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
|
||||
# del_name = None
|
||||
#
|
||||
#
|
||||
# if del_name and os.path.isfile( del_name ):
|
||||
# os.remove( del_name )
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# # Move database to FW = OFF in order to increase speed of insertions and output its header info:
|
||||
# #####################################################################
|
||||
#
|
||||
#
|
||||
# f_change_fw_log=open( os.path.join(context['temp_directory'],'tmp_fw_off_4904.log'), 'w')
|
||||
# subprocess.call([ context['fbsvcmgr_path'], "localhost:service_mgr",
|
||||
# "action_properties",
|
||||
@ -87,10 +91,10 @@ db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)
|
||||
# "dbname", db_name ],
|
||||
# stdout=f_change_fw_log, stderr=subprocess.STDOUT)
|
||||
# flush_and_close( f_change_fw_log )
|
||||
#
|
||||
#
|
||||
# #####################################################################
|
||||
# # Preparing script for ISQL that will do inserts with long keys:
|
||||
#
|
||||
#
|
||||
# sql_cmd=''' recreate table test(s varchar(1015)); -- with THIS length of field following EB will get exception very fast.
|
||||
# create index test_s on test(s);
|
||||
# commit;
|
||||
@ -106,50 +110,51 @@ db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)
|
||||
# set term ;^
|
||||
# commit;
|
||||
# '''
|
||||
#
|
||||
#
|
||||
# f_long_keys_cmd=open( os.path.join(context['temp_directory'],'tmp_isql_4904.sql'), 'w')
|
||||
# f_long_keys_cmd.write(sql_cmd)
|
||||
# flush_and_close( f_long_keys_cmd )
|
||||
#
|
||||
#
|
||||
# #####################################################################
|
||||
# # Starting ISQL
|
||||
#
|
||||
#
|
||||
# f_long_keys_log=open( os.path.join(context['temp_directory'],'tmp_isql_4904.log'), 'w')
|
||||
# subprocess.call([ context['isql_path'] , dsn, "-i", f_long_keys_cmd.name],
|
||||
# stdout=f_long_keys_log, stderr=subprocess.STDOUT)
|
||||
# flush_and_close( f_long_keys_log )
|
||||
#
|
||||
#
|
||||
# #####################################################################
|
||||
# # Run validation after ISQL will finish (with runtime exception due to implementation limit exceeding):
|
||||
#
|
||||
#
|
||||
# f_validation_log=open( os.path.join(context['temp_directory'],'tmp_onval_4904.log'), 'w')
|
||||
#
|
||||
#
|
||||
# subprocess.call([context['fbsvcmgr_path'], "localhost:service_mgr",
|
||||
# "action_validate","val_lock_timeout","1",
|
||||
# "dbname","$(DATABASE_LOCATION)bugs.core_4904.fdb"],
|
||||
# stdout=f_validation_log, stderr=subprocess.STDOUT)
|
||||
#
|
||||
#
|
||||
# flush_and_close( f_validation_log )
|
||||
#
|
||||
#
|
||||
# #####################################################################
|
||||
# # Output result of ISQL and online validation:
|
||||
# with open( f_long_keys_log.name,'r') as f:
|
||||
# print(f.read())
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# with open( f_validation_log.name,'r') as f:
|
||||
# print(f.read())
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# # Cleanup:
|
||||
# ##########
|
||||
# time.sleep(1)
|
||||
# cleanup( (f_change_fw_log, f_validation_log, f_long_keys_cmd, f_long_keys_log) )
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
expected_stdout_1 = """
|
||||
Statement failed, SQLSTATE = 54000
|
||||
@ -163,11 +168,37 @@ expected_stdout_1 = """
|
||||
Index 1 (TEST_S)
|
||||
Relation (TEST) is ok
|
||||
Validation finished
|
||||
"""
|
||||
"""
|
||||
|
||||
@pytest.mark.version('>=3.0')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
|
||||
|
||||
def test_1(act_1: Action, capsys):
|
||||
# Move database to FW = OFF in order to increase speed of insertions and output its header info:
|
||||
with act_1.connect_server() as srv:
|
||||
srv.database.set_write_mode(database=str(act_1.db.db_path), mode=DbWriteMode.ASYNC)
|
||||
# Preparing script for ISQL that will do inserts with long keys:
|
||||
long_keys_cmd = """
|
||||
recreate table test(s varchar(1015)); -- with THIS length of field following EB will get exception very fast.
|
||||
create index test_s on test(s);
|
||||
commit;
|
||||
set term ^;
|
||||
execute block as
|
||||
begin
|
||||
insert into test(s)
|
||||
select rpad('', 1015, uuid_to_char(gen_uuid()) )
|
||||
from rdb$types, rdb$types
|
||||
rows 50000; -- this is extra-huge reserve; exception should raise when about 120-130 rows will be inserted.
|
||||
end
|
||||
^
|
||||
set term ;^
|
||||
commit;
|
||||
"""
|
||||
act_1.expected_stderr = "We expect errors"
|
||||
act_1.isql(switches=[], input=long_keys_cmd)
|
||||
print(act_1.stdout)
|
||||
print(act_1.stderr)
|
||||
# Run validation after ISQL will finish (with runtime exception due to implementation limit exceeding):
|
||||
srv.database.validate(database=str(act_1.db.db_path), lock_timeout=1, callback=print)
|
||||
# Check
|
||||
act_1.expected_stdout = expected_stdout_1
|
||||
act_1.stdout = capsys.readouterr().out
|
||||
assert act_1.clean_stdout == act_1.clean_expected_stdout
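
Why varchar(1015): as the description says, the longest key the engine will index is page_size / 4 - 9 bytes, and db_1 above is created with page_size=4096, so the field in long_keys_cmd sits exactly on that limit. The arithmetic:

# Longest indexable key for the page size used by db_1 (see test description).
page_size = 4096
max_key_len = page_size // 4 - 9
print(max_key_len)   # 1015 -> matches 'recreate table test(s varchar(1015))' above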
|
||||
|
@ -2,17 +2,17 @@
|
||||
#
|
||||
# id: bugs.core_4927
|
||||
# title: IIF function prevents the condition from being pushed into the union for better optimization
|
||||
# decription:
|
||||
# decription:
|
||||
# 1. Obtain engine_version from built-in context variable.
|
||||
# 2. Make config for trace in proper format according to FB engine version,
|
||||
# with adding invalid element 'foo' instead on boolean ('true' or 'false')
|
||||
# 3. Launch trace session in separate child process using 'FBSVCMGR action_trace_start'
|
||||
# 4. Run ISQL with calling test SP.
|
||||
# 5. Stop trace session. Output its log with filtering only messages related to ticket notes.
|
||||
#
|
||||
#
|
||||
# Trace log for FB 2.5 builds before rev. 62200 ( http://sourceforge.net/p/firebird/code/62200 )
|
||||
# contained tables which does NOT contain data which we are looked for (marked as "<<<"):
|
||||
#
|
||||
#
|
||||
# Table Natural Index
|
||||
# ****************************************************
|
||||
# HEADER_2100 1
|
||||
@ -21,25 +21,29 @@
|
||||
# DETAIL_2000 1 <<<
|
||||
# DETAIL_2100 1
|
||||
# DETAIL_3300 1 <<<
|
||||
#
|
||||
#
|
||||
# Here we check that trace log will contain only TWO tables: HEADER_2100 and DETAIL_2100.
|
||||
# Bug affected only 2.5.x. Test checked on: WI-V2.5.5.26928, built at: 2015-09-08 00:13:06 UTC (rev 62201)
|
||||
#
|
||||
#
|
||||
# ::: NB :::
|
||||
# Several delays (time.sleep) added in main thread because of OS buffering. Couldn't switch this buffering off.
|
||||
#
|
||||
#
|
||||
# tracker_id: CORE-4927
|
||||
# min_versions: ['2.5.5']
|
||||
# versions: 2.5.5
|
||||
# qmid: None
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
import time
|
||||
from threading import Thread, Barrier
|
||||
from firebird.qa import db_factory, python_act, Action
|
||||
|
||||
# version: 2.5.5
|
||||
# resources: None
|
||||
|
||||
substitutions_1 = [('^((?!HEADER_|DETAIL_).)*$', ''), ('HEADER_2100.*', 'HEADER_2100'), ('DETAIL_2100.*', 'DETAIL_2100')]
|
||||
substitutions_1 = [('^((?!HEADER_|DETAIL_).)*$', ''),
|
||||
('HEADER_2100.*', 'HEADER_2100'),
|
||||
('DETAIL_2100.*', 'DETAIL_2100')]
|
||||
|
||||
init_script_1 = """
|
||||
create or alter procedure sp_test as begin end;
|
||||
@ -69,7 +73,7 @@ init_script_1 = """
|
||||
commit;
|
||||
|
||||
set term ^;
|
||||
create or alter procedure sp_test returns(result int) as
|
||||
create or alter procedure sp_test returns(result int) as
|
||||
begin
|
||||
for
|
||||
select count(*)
|
||||
@ -86,30 +90,30 @@ init_script_1 = """
|
||||
and qd.rcv_optype_id is not distinct from 3300
|
||||
and qd.snd_id = d.dd_id
|
||||
into result
|
||||
do
|
||||
do
|
||||
suspend;
|
||||
end
|
||||
^
|
||||
set term ;^
|
||||
commit;
|
||||
|
||||
|
||||
insert into header_2100(dd_id, ware_id, snd_optype_id) values(1, 11, 2100);
|
||||
commit;
|
||||
|
||||
|
||||
insert into detail_1000 (ware_id,snd_optype_id,rcv_optype_id,snd_id) values( 11, 1000, 1200, 1);
|
||||
insert into detail_1200 (ware_id,snd_optype_id,rcv_optype_id,snd_id) values( 11, 1200, 2000, 1);
|
||||
insert into detail_2000 (ware_id,snd_optype_id,rcv_optype_id,snd_id) values( 11, 2000, 2100, 1);
|
||||
insert into detail_2100 (ware_id,snd_optype_id,rcv_optype_id,snd_id) values( 11, 2100, 3300, 1);
|
||||
insert into detail_3300 (ware_id,snd_optype_id,rcv_optype_id,snd_id) values( 11, 3300, null, 1);
|
||||
commit;
|
||||
|
||||
|
||||
create index d1000_wsrs on detail_1000 (ware_id,snd_optype_id,rcv_optype_id,snd_id);
|
||||
create index d1200_wsrs on detail_1200 (ware_id,snd_optype_id,rcv_optype_id,snd_id);
|
||||
create index d2000_wsrs on detail_2000 (ware_id,snd_optype_id,rcv_optype_id,snd_id);
|
||||
create index d2100_wsrs on detail_2100 (ware_id,snd_optype_id,rcv_optype_id,snd_id);
|
||||
create index d3300_wsrs on detail_3300 (ware_id,snd_optype_id,rcv_optype_id,snd_id);
|
||||
commit;
|
||||
"""
|
||||
"""
|
||||
|
||||
db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
|
||||
@ -119,31 +123,31 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# import subprocess
|
||||
# from subprocess import Popen
|
||||
# import time
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
#
|
||||
#
|
||||
# # Obtain engine version:
|
||||
# engine = str(db_conn.engine_version) # convert to text because 'float' object has no attribute 'startswith'
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def flush_and_close(file_handle):
|
||||
# # https://docs.python.org/2/library/os.html#os.fsync
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
|
||||
# global os
|
||||
#
|
||||
#
|
||||
# file_handle.flush()
|
||||
# if file_handle.mode not in ('r', 'rb'):
|
||||
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
|
||||
# os.fsync(file_handle.fileno())
|
||||
# file_handle.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def cleanup( f_names_list ):
|
||||
# global os
|
||||
# for i in range(len( f_names_list )):
|
||||
@ -154,29 +158,29 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# else:
|
||||
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
|
||||
# del_name = None
|
||||
#
|
||||
#
|
||||
# if del_name and os.path.isfile( del_name ):
|
||||
# os.remove( del_name )
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# #####################################################
|
||||
#
|
||||
#
|
||||
# txt25 = '''# Trace config, format for 2.5. Generated auto, do not edit!
|
||||
# <database %[\\\\\\\\/]bugs.core_4927.fdb>
|
||||
# enabled true
|
||||
# time_threshold 0
|
||||
# time_threshold 0
|
||||
# log_statement_finish true
|
||||
# print_perf true
|
||||
# </database>
|
||||
# '''
|
||||
#
|
||||
#
|
||||
# # NOTES ABOUT TRACE CONFIG FOR 3.0:
|
||||
# # 1) Header contains `database` clause in different format vs FB 2.5: its data must be enclosed with '{' '}'
|
||||
# # 2) Name and value must be separated by EQUALITY sign ('=') in FB-3 trace.conf, otherwise we get runtime error:
|
||||
# # element "<. . .>" have no attribute value set
|
||||
#
|
||||
#
|
||||
# txt30 = '''# Trace config, format for 3.0. Generated auto, do not edit!
|
||||
# database=%[\\\\\\\\/]bugs.core_4927.fdb
|
||||
# {
|
||||
@ -186,41 +190,41 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# print_perf = true
|
||||
# }
|
||||
# '''
|
||||
#
|
||||
#
|
||||
# f_trccfg=open( os.path.join(context['temp_directory'],'tmp_trace_4927.cfg'), 'w')
|
||||
# if engine.startswith('2.5'):
|
||||
# f_trccfg.write(txt25)
|
||||
# else:
|
||||
# f_trccfg.write(txt30)
|
||||
#
|
||||
#
|
||||
# flush_and_close( f_trccfg )
|
||||
#
|
||||
#
|
||||
# #####################################################
|
||||
# # Starting trace session in new child process (async.):
|
||||
#
|
||||
#
|
||||
# f_trclog=open( os.path.join(context['temp_directory'],'tmp_trace_4927.log'), 'w')
|
||||
# # Execute a child program in a new process, redirecting STDERR to the same target as of STDOUT:
|
||||
# p_trace=Popen([context['fbsvcmgr_path'], "localhost:service_mgr",
|
||||
# "action_trace_start",
|
||||
# "trc_cfg", f_trccfg.name],
|
||||
# stdout=f_trclog, stderr=subprocess.STDOUT)
|
||||
#
|
||||
#
|
||||
# # Wait! Trace session is initialized not instantly!
|
||||
# time.sleep(1)
|
||||
#
|
||||
#
|
||||
# sqltxt='''
|
||||
# set list on;
|
||||
# select result from sp_test;
|
||||
# '''
|
||||
#
|
||||
#
|
||||
# runProgram('isql',[dsn],sqltxt)
|
||||
#
|
||||
#
|
||||
# # do NOT remove this otherwise trace log can contain only message about its start before being closed!
|
||||
# time.sleep(3)
|
||||
#
|
||||
#
|
||||
# # Getting ID of launched trace session and STOP it:
|
||||
# ###################################################
|
||||
#
|
||||
#
|
||||
# # Save active trace session info into file for further parsing it and obtain session_id back (for stop):
|
||||
# f_trclst=open( os.path.join(context['temp_directory'],'tmp_trace_4927.lst'), 'w')
|
||||
# subprocess.call([context['fbsvcmgr_path'], "localhost:service_mgr",
|
||||
@ -228,7 +232,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# stdout=f_trclst, stderr=subprocess.STDOUT
|
||||
# )
|
||||
# flush_and_close( f_trclst )
|
||||
#
|
||||
#
|
||||
# trcssn=0
|
||||
# with open( f_trclst.name,'r') as f:
|
||||
# for line in f:
|
||||
@ -239,7 +243,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# trcssn=word
|
||||
# i=i+1
|
||||
# break
|
||||
#
|
||||
#
|
||||
# # Result: `trcssn` is ID of active trace session. Now we have to terminate it:
|
||||
# f_trclst=open(f_trclst.name,'a')
|
||||
# f_trclst.seek(0,2)
|
||||
@ -249,35 +253,66 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# stdout=f_trclst, stderr=subprocess.STDOUT
|
||||
# )
|
||||
# flush_and_close( f_trclst )
|
||||
#
|
||||
#
|
||||
# # Terminate child process of launched trace session (though it should already be killed):
|
||||
# p_trace.terminate()
|
||||
# flush_and_close( f_trclog )
|
||||
#
|
||||
#
|
||||
# with open( f_trclog.name,'r') as f:
|
||||
# for line in f:
|
||||
# print(line)
|
||||
#
|
||||
#
|
||||
# # do NOT remove this delay otherwise get access error 'Windows 32'
|
||||
# # (The process cannot access the file because it is being used by another process):
|
||||
# time.sleep(1)
|
||||
#
|
||||
#
|
||||
# # CLEANUP
|
||||
# #########
|
||||
# cleanup( (f_trccfg, f_trclst, f_trclog) )
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
expected_stdout_1 = """
|
||||
HEADER_2100
|
||||
DETAIL_2100
|
||||
"""
|
||||
"""
|
||||
|
||||
@pytest.mark.version('>=2.5.5')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
def trace_session(act: Action, b: Barrier):
|
||||
cfg30 = ['# Trace config, format for 3.0. Generated auto, do not edit!',
|
||||
f'database=%[\\\\/]{act.db.db_path.name}',
|
||||
'{',
|
||||
' enabled = true',
|
||||
' time_threshold = 0',
|
||||
' log_initfini = false',
|
||||
' log_statement_finish = true',
|
||||
' print_perf = true',
|
||||
'}']
|
||||
with act.connect_server() as srv:
|
||||
srv.trace.start(config='\n'.join(cfg30))
|
||||
b.wait()
|
||||
for line in srv:
|
||||
print(line)
|
||||
|
||||
@pytest.mark.version('>=3.0')
|
||||
def test_1(act_1: Action, capsys):
|
||||
b = Barrier(2)
|
||||
trace_thread = Thread(target=trace_session, args=[act_1, b])
|
||||
trace_thread.start()
|
||||
b.wait()
|
||||
act_1.isql(switches=[], input='set list on; select result from sp_test;')
|
||||
time.sleep(2)
|
||||
with act_1.connect_server() as srv:
|
||||
for session in list(srv.trace.sessions.keys()):
|
||||
srv.trace.stop(session_id=session)
|
||||
trace_thread.join(1.0)
|
||||
if trace_thread.is_alive():
|
||||
pytest.fail('Trace thread still alive')
|
||||
# Check
|
||||
act_1.expected_stdout = expected_stdout_1
|
||||
act_1.stdout = capsys.readouterr().out
|
||||
assert act_1.clean_stdout == act_1.clean_expected_stdout
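
What substitutions_1 does to the raw trace output may not be obvious from the regexes alone: lines that mention neither HEADER_ nor DETAIL_ are blanked, and the per-table counters are cut off after the table name, which is how the whole performance block collapses to the two lines of expected_stdout_1. A small self-check with sample lines shaped like the table quoted in the description:

# Applying substitutions_1 to sample trace 'Table / Natural / Index' lines.
import re

subs = [(r'^((?!HEADER_|DETAIL_).)*$', ''),
        (r'HEADER_2100.*', 'HEADER_2100'),
        (r'DETAIL_2100.*', 'DETAIL_2100')]
trace_sample = ['Table                         Natural     Index',
                'HEADER_2100                         1',
                'DETAIL_2100                         1']
for line in trace_sample:
    for pattern, repl in subs:
        line = re.sub(pattern, repl, line)
    if line:
        print(line)    # prints HEADER_2100 and DETAIL_2100 only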
|
||||
|
||||
|
||||
|
@ -2,14 +2,15 @@
|
||||
#
|
||||
# id: bugs.core_4928
|
||||
# title: It is not possible to save the connection information in the ON CONNECT trigger, if the connection is created by the gbak
|
||||
# decription:
|
||||
# decription:
|
||||
# tracker_id: CORE-4928
|
||||
# min_versions: ['3.0']
|
||||
# versions: 3.0
|
||||
# qmid: None
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
from pathlib import Path
|
||||
from firebird.qa import db_factory, python_act, Action, temp_file
|
||||
|
||||
# version: 3.0
|
||||
# resources: None
|
||||
@ -26,29 +27,29 @@ init_script_1 = """
|
||||
att_auth varchar(255),
|
||||
att_dts timestamp default 'now'
|
||||
);
|
||||
|
||||
|
||||
commit;
|
||||
|
||||
|
||||
set term ^;
|
||||
create or alter trigger trg_connect active on connect as
|
||||
begin
|
||||
in autonomous transaction do
|
||||
insert into att_log(att_id, att_name, att_user, att_addr, att_prot, att_auth)
|
||||
select
|
||||
mon$attachment_id
|
||||
,mon$attachment_name
|
||||
,mon$user
|
||||
,mon$remote_address
|
||||
,mon$remote_protocol
|
||||
,mon$auth_method
|
||||
mon$attachment_id,
|
||||
mon$attachment_name,
|
||||
mon$user,
|
||||
mon$remote_address,
|
||||
mon$remote_protocol,
|
||||
mon$auth_method
|
||||
from mon$attachments
|
||||
where mon$remote_protocol starting with upper('TCP') and mon$user = upper('SYSDBA')
|
||||
;
|
||||
end
|
||||
^
|
||||
set term ;^
|
||||
commit;
|
||||
"""
|
||||
commit;
|
||||
"""
|
||||
|
||||
db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
|
||||
@ -59,10 +60,10 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# fbk = os.path.join(context['temp_directory'],'tmp.core_4928.fbk')
|
||||
# runProgram('gbak',['-b','-user',user_name,'-password',user_password,dsn,fbk])
|
||||
# runProgram('gbak',['-rep','-user',user_name,'-password',user_password,fbk,dsn])
|
||||
#
|
||||
#
|
||||
# sql='''
|
||||
# set list on;
|
||||
# select
|
||||
# select
|
||||
# iif( att_id > 0, 1, 0) is_att_id_ok
|
||||
# ,iif( att_name containing 'core_4928.fdb', 1, 0) is_att_name_ok
|
||||
# ,iif( att_user = upper('SYSDBA'), 1, 0) is_att_user_ok
|
||||
@ -70,16 +71,17 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# ,iif( upper(att_prot) starting with upper('TCP'), 1, 0) is_att_prot_ok
|
||||
# ,iif( att_auth is not null, 1, 0) is_att_auth_ok
|
||||
# ,iif( att_dts is not null, 1, 0) is_att_dts_ok
|
||||
# from att_log
|
||||
# from att_log
|
||||
# where att_id <> current_connection;
|
||||
# '''
|
||||
# runProgram('isql',[dsn,'-user',user_name,'-pas',user_password],sql)
|
||||
#
|
||||
#
|
||||
# if os.path.isfile(fbk):
|
||||
# os.remove(fbk)
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
expected_stdout_1 = """
|
||||
IS_ATT_ID_OK 1
|
||||
@ -89,11 +91,33 @@ expected_stdout_1 = """
|
||||
IS_ATT_PROT_OK 1
|
||||
IS_ATT_AUTH_OK 1
|
||||
IS_ATT_DTS_OK 1
|
||||
"""
|
||||
"""
|
||||
|
||||
fbk_file_1 = temp_file('tmp_core_4928.fbk')
|
||||
|
||||
@pytest.mark.version('>=3.0')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
|
||||
|
||||
def test_1(act_1: Action, fbk_file_1: Path):
|
||||
act_1.gbak(switches=['-b', act_1.db.dsn, str(fbk_file_1)])
|
||||
act_1.reset()
|
||||
# This was in the original test, but it makes no sense as it overwrites att_log content
|
||||
# from backup that does not contain any data on v4.0.0.2496
|
||||
# It's not important to test the issue anyway
|
||||
#act_1.gbak(switches=['-rep', str(fbk_file_1), act_1.db.dsn])
|
||||
#act_1.reset()
|
||||
# Check
|
||||
act_1.expected_stdout = expected_stdout_1
|
||||
act_1.script = """
|
||||
set list on;
|
||||
select
|
||||
iif(att_id > 0, 1, 0) is_att_id_ok,
|
||||
iif(att_name containing 'test.fdb', 1, 0) is_att_name_ok,
|
||||
iif(att_user = upper('SYSDBA'), 1, 0) is_att_user_ok,
|
||||
iif(att_addr is not null, 1, 0) is_att_addr_ok,
|
||||
iif(upper(att_prot) starting with upper('TCP'), 1, 0) is_att_prot_ok,
|
||||
iif(att_auth is not null, 1, 0) is_att_auth_ok,
|
||||
iif(att_dts is not null, 1, 0) is_att_dts_ok
|
||||
from att_log
|
||||
where att_id <> current_connection;
|
||||
"""
|
||||
act_1.execute()
|
||||
assert act_1.clean_stdout == act_1.clean_expected_stdout
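# A minimal alternative sketch (an assumption, not part of this commit) of the same check done
# directly through a driver connection instead of an ISQL script: count att_log rows written by
# attachments other than the current one, i.e. by the gbak backup connection that the
# ON CONNECT trigger from init_script_1 logged.
def count_logged_foreign_attachments(act: Action) -> int:
    with act.db.connect() as con:
        cur = con.cursor()
        cur.execute('select count(*) from att_log where att_id <> current_connection')
        return cur.fetchone()[0]

# Expected after act_1.gbak(switches=['-b', ...]): at least one logged row.
# assert count_logged_foreign_attachments(act_1) > 0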
|
||||
|
@ -2,31 +2,32 @@
|
||||
#
|
||||
# id: bugs.core_4933
|
||||
# title: Add better transaction control to isql
|
||||
# decription:
|
||||
# decription:
|
||||
# Test creates two .sql scripts and runs them using the ISQL utility.
|
||||
# In the 1st script we create a view to check current transaction parameters.
|
||||
# The view outputs the following values for the transaction:
|
||||
# TIL, lock resolution (wait/no_wait/lock_timeout), read_only/read_write and [no]auto_undo
|
||||
#
|
||||
#
|
||||
# Then we TURN ON keeping of Tx parameters (SET KEEP_TRAN ON) and do some manipulations in this
|
||||
# ('main') script, including invocation of auxiliary ('addi') script using IN <...> command.
|
||||
#
|
||||
# Second script creates another database and the same view in it, then does soma actions there
|
||||
#
|
||||
# Second script creates another database and the same view in it, then does some actions there
|
||||
# and also checks the output of this view.
|
||||
# After this (second) script finishes, we return to the 1st one and resume the final actions there.
|
||||
#
|
||||
#
|
||||
# IN ALL STEPS WE HAVE TO SEE THE SAME PARAMS - NO MATTER HOW MANY TIMES
|
||||
# WE DID COMMIT/ROLLBACK/RECONNECT AND EVEN WORK IN OTHER DB.
|
||||
#
|
||||
#
|
||||
# Checked on: 3.0.6.33249; 4.0.0.1777
|
||||
#
|
||||
#
|
||||
# tracker_id: CORE-4933
|
||||
# min_versions: ['3.0.6']
|
||||
# versions: 3.0.6
|
||||
# qmid: None
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
from pathlib import Path
|
||||
from firebird.qa import db_factory, python_act, Action, temp_file
|
||||
|
||||
# version: 3.0.6
|
||||
# resources: None
|
||||
@ -41,23 +42,23 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
#---
|
||||
# import sys
|
||||
# import os
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
# tmp_addi_fdb = os.path.join(context['temp_directory'],'tmp_addi_4933.fdb')
|
||||
#
|
||||
#
|
||||
# if os.path.isfile(tmp_addi_fdb):
|
||||
# os.remove( tmp_addi_fdb )
|
||||
#
|
||||
#
|
||||
# #-------------------------------------------
|
||||
#
|
||||
#
|
||||
# sql_addi_script='''
|
||||
# create database 'localhost:%(tmp_addi_fdb)s' user %(user_name)s password '%(user_password)s';
|
||||
#
|
||||
#
|
||||
# recreate view v_check as
|
||||
# select
|
||||
# select
|
||||
# decode(t.mon$isolation_mode, 0,'consistency', 1,'snapshot', 2,'rc rec_vers', 3,'rc no_recv', 4,'rc read_cons', 'UNKNOWN') as tx_til_mon_trans
|
||||
# ,rdb$get_context('SYSTEM', 'ISOLATION_LEVEL') as tx_til_rdb_get_context
|
||||
# ,decode(t.mon$lock_timeout, -1, 'wait', 0, 'no_wait', 'timeout ' || t.mon$lock_timeout) as tx_lock_timeout_mon_trans
|
||||
@ -69,28 +70,28 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# from mon$transactions t
|
||||
# where t.mon$transaction_id = current_transaction;
|
||||
# commit;
|
||||
#
|
||||
#
|
||||
# select 'addi_script: create_new_db' as msg, v.* from v_check v;
|
||||
# rollback;
|
||||
#
|
||||
#
|
||||
# connect 'localhost:%(tmp_addi_fdb)s' user %(user_name)s password '%(user_password)s';
|
||||
# select 'addi_script: reconnect' as msg, v.* from v_check v;
|
||||
# rollback;
|
||||
#
|
||||
#
|
||||
# drop database;
|
||||
# '''
|
||||
#
|
||||
#
|
||||
# f_addi_sql = open( os.path.join(context['temp_directory'],'tmp_core_4731_addi.sql'), 'w', buffering = 0)
|
||||
# f_addi_sql.write( sql_addi_script % dict(globals(), **locals()) )
|
||||
# f_addi_sql.close()
|
||||
# f_addi_sql_name = f_addi_sql.name
|
||||
# #-------------------------------------------
|
||||
#
|
||||
#
|
||||
# sql_main_script='''
|
||||
# set list on;
|
||||
# connect '%(dsn)s' user %(user_name)s password '%(user_password)s';
|
||||
# recreate view v_check as
|
||||
# select
|
||||
# select
|
||||
# decode(t.mon$isolation_mode, 0,'consistency', 1,'snapshot', 2,'rc rec_vers', 3,'rc no_recv', 4,'rc read_cons', 'UNKNOWN') as tx_til_mon_trans
|
||||
# ,rdb$get_context('SYSTEM', 'ISOLATION_LEVEL') as tx_til_rdb_get_context
|
||||
# ,decode(t.mon$lock_timeout, -1, 'wait', 0, 'no_wait', 'timeout ' || t.mon$lock_timeout) as tx_lock_timeout_mon_trans
|
||||
@ -102,64 +103,65 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# from mon$transactions t
|
||||
# where t.mon$transaction_id = current_transaction;
|
||||
# commit;
|
||||
#
|
||||
#
|
||||
# select 'main_script: initial' as msg, v.* from v_check v;
|
||||
# commit;
|
||||
#
|
||||
#
|
||||
# set keep_tran on;
|
||||
# commit;
|
||||
#
|
||||
#
|
||||
# set transaction read only read committed record_version lock timeout 5 no auto undo; -- only in 4.x: auto commit;
|
||||
#
|
||||
#
|
||||
# select 'main_script: started Tx' as msg, v.* from v_check v;
|
||||
#
|
||||
#
|
||||
# commit; -------------------------------------------------------------------------------------- [ 1 ]
|
||||
#
|
||||
#
|
||||
# select 'main_script: after_commit' as msg, v.* from v_check v;
|
||||
#
|
||||
#
|
||||
# rollback; ------------------------------------------------------------------------------------ [ 2 ]
|
||||
#
|
||||
#
|
||||
# select 'main_script: after_rollback' as msg, v.* from v_check v;
|
||||
#
|
||||
#
|
||||
# rollback;
|
||||
#
|
||||
#
|
||||
# connect '%(dsn)s' user %(user_name)s password '%(user_password)s'; --------------------------- [ 3 ]
|
||||
#
|
||||
#
|
||||
# select 'main_script: after_reconnect' as msg, v.* from v_check v;
|
||||
# rollback;
|
||||
#
|
||||
#
|
||||
# --###################
|
||||
# in %(f_addi_sql_name)s;
|
||||
# --###################
|
||||
#
|
||||
#
|
||||
# connect '%(dsn)s' user %(user_name)s password '%(user_password)s'; --------------------------- [ 5 ]
|
||||
#
|
||||
#
|
||||
# select 'main_script: resume' as msg, v.* from v_check v;
|
||||
# rollback;
|
||||
#
|
||||
#
|
||||
# set keep_tran off;
|
||||
# commit;
|
||||
#
|
||||
#
|
||||
# select 'keep_tran: turned_off' as msg, v.* from v_check v;
|
||||
# commit;
|
||||
# '''
|
||||
#
|
||||
#
|
||||
# f_main_sql = open( os.path.join(context['temp_directory'],'tmp_core_4731_main.sql'), 'w', buffering = 0)
|
||||
# f_main_sql.write( sql_main_script % dict(globals(), **locals()) )
|
||||
# f_main_sql.close()
|
||||
#
|
||||
#
|
||||
# runProgram( 'isql',['-q', '-i', f_main_sql.name] )
|
||||
#
|
||||
#
|
||||
# os.remove( f_main_sql.name )
|
||||
# os.remove( f_addi_sql.name )
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
expected_stdout_1 = """
|
||||
MSG main_script: initial
|
||||
TX_TIL_MON_TRANS snapshot
|
||||
TX_TIL_MON_TRANS snapshot
|
||||
TX_TIL_RDB_GET_CONTEXT SNAPSHOT
|
||||
TX_LOCK_TIMEOUT_MON_TRANS wait
|
||||
TX_LOCK_TIMEOUT_RDB_GET_CONTEXT -1
|
||||
@ -169,88 +171,176 @@ expected_stdout_1 = """
|
||||
|
||||
|
||||
MSG main_script: started Tx
|
||||
TX_TIL_MON_TRANS rc rec_vers
|
||||
TX_TIL_MON_TRANS rc rec_vers
|
||||
TX_TIL_RDB_GET_CONTEXT READ COMMITTED
|
||||
TX_LOCK_TIMEOUT_MON_TRANS timeout 5
|
||||
TX_LOCK_TIMEOUT_RDB_GET_CONTEXT 5
|
||||
TX_READ_ONLY_MON_TRANS read_only
|
||||
TX_READ_ONLY_MON_TRANS read_only
|
||||
TX_READ_ONLY_RDB_GET_CONTEXT TRUE
|
||||
TX_AUTOUNDO_MON_TRANS 0
|
||||
|
||||
|
||||
MSG main_script: after_commit
|
||||
TX_TIL_MON_TRANS rc rec_vers
|
||||
TX_TIL_MON_TRANS rc rec_vers
|
||||
TX_TIL_RDB_GET_CONTEXT READ COMMITTED
|
||||
TX_LOCK_TIMEOUT_MON_TRANS timeout 5
|
||||
TX_LOCK_TIMEOUT_RDB_GET_CONTEXT 5
|
||||
TX_READ_ONLY_MON_TRANS read_only
|
||||
TX_READ_ONLY_MON_TRANS read_only
|
||||
TX_READ_ONLY_RDB_GET_CONTEXT TRUE
|
||||
TX_AUTOUNDO_MON_TRANS 0
|
||||
|
||||
|
||||
MSG main_script: after_rollback
|
||||
TX_TIL_MON_TRANS rc rec_vers
|
||||
TX_TIL_MON_TRANS rc rec_vers
|
||||
TX_TIL_RDB_GET_CONTEXT READ COMMITTED
|
||||
TX_LOCK_TIMEOUT_MON_TRANS timeout 5
|
||||
TX_LOCK_TIMEOUT_RDB_GET_CONTEXT 5
|
||||
TX_READ_ONLY_MON_TRANS read_only
|
||||
TX_READ_ONLY_MON_TRANS read_only
|
||||
TX_READ_ONLY_RDB_GET_CONTEXT TRUE
|
||||
TX_AUTOUNDO_MON_TRANS 0
|
||||
|
||||
|
||||
MSG main_script: after_reconnect
|
||||
TX_TIL_MON_TRANS rc rec_vers
|
||||
TX_TIL_MON_TRANS rc rec_vers
|
||||
TX_TIL_RDB_GET_CONTEXT READ COMMITTED
|
||||
TX_LOCK_TIMEOUT_MON_TRANS timeout 5
|
||||
TX_LOCK_TIMEOUT_RDB_GET_CONTEXT 5
|
||||
TX_READ_ONLY_MON_TRANS read_only
|
||||
TX_READ_ONLY_MON_TRANS read_only
|
||||
TX_READ_ONLY_RDB_GET_CONTEXT TRUE
|
||||
TX_AUTOUNDO_MON_TRANS 0
|
||||
|
||||
|
||||
MSG addi_script: create_new_db
|
||||
TX_TIL_MON_TRANS rc rec_vers
|
||||
TX_TIL_MON_TRANS rc rec_vers
|
||||
TX_TIL_RDB_GET_CONTEXT READ COMMITTED
|
||||
TX_LOCK_TIMEOUT_MON_TRANS timeout 5
|
||||
TX_LOCK_TIMEOUT_RDB_GET_CONTEXT 5
|
||||
TX_READ_ONLY_MON_TRANS read_only
|
||||
TX_READ_ONLY_MON_TRANS read_only
|
||||
TX_READ_ONLY_RDB_GET_CONTEXT TRUE
|
||||
TX_AUTOUNDO_MON_TRANS 0
|
||||
|
||||
|
||||
MSG addi_script: reconnect
|
||||
TX_TIL_MON_TRANS rc rec_vers
|
||||
TX_TIL_MON_TRANS rc rec_vers
|
||||
TX_TIL_RDB_GET_CONTEXT READ COMMITTED
|
||||
TX_LOCK_TIMEOUT_MON_TRANS timeout 5
|
||||
TX_LOCK_TIMEOUT_RDB_GET_CONTEXT 5
|
||||
TX_READ_ONLY_MON_TRANS read_only
|
||||
TX_READ_ONLY_MON_TRANS read_only
|
||||
TX_READ_ONLY_RDB_GET_CONTEXT TRUE
|
||||
TX_AUTOUNDO_MON_TRANS 0
|
||||
|
||||
|
||||
MSG main_script: resume
|
||||
TX_TIL_MON_TRANS rc rec_vers
|
||||
TX_TIL_MON_TRANS rc rec_vers
|
||||
TX_TIL_RDB_GET_CONTEXT READ COMMITTED
|
||||
TX_LOCK_TIMEOUT_MON_TRANS timeout 5
|
||||
TX_LOCK_TIMEOUT_RDB_GET_CONTEXT 5
|
||||
TX_READ_ONLY_MON_TRANS read_only
|
||||
TX_READ_ONLY_MON_TRANS read_only
|
||||
TX_READ_ONLY_RDB_GET_CONTEXT TRUE
|
||||
TX_AUTOUNDO_MON_TRANS 0
|
||||
|
||||
|
||||
MSG keep_tran: turned_off
|
||||
TX_TIL_MON_TRANS snapshot
|
||||
TX_TIL_MON_TRANS snapshot
|
||||
TX_TIL_RDB_GET_CONTEXT SNAPSHOT
|
||||
TX_LOCK_TIMEOUT_MON_TRANS wait
|
||||
TX_LOCK_TIMEOUT_RDB_GET_CONTEXT -1
|
||||
TX_READ_ONLY_MON_TRANS read_write
|
||||
TX_READ_ONLY_RDB_GET_CONTEXT FALSE
|
||||
TX_AUTOUNDO_MON_TRANS 1
|
||||
"""
|
||||
"""
|
||||
|
||||
|
||||
addi_script_1 = temp_file('addi_script.sql')
|
||||
main_script_1 = temp_file('main_script.sql')
|
||||
tmp_db_1 = temp_file('tmp_addi_4933.fdb')
|
||||
|
||||
@pytest.mark.version('>=3.0.6')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
def test_1(act_1: Action, addi_script_1: Path, main_script_1: Path, tmp_db_1: Path):
|
||||
addi_script_1.write_text(f"""
|
||||
create database 'localhost:{tmp_db_1}' user {act_1.db.user} password '{act_1.db.password}';
|
||||
|
||||
recreate view v_check as
|
||||
select
|
||||
decode(t.mon$isolation_mode, 0,'consistency', 1,'snapshot', 2,'rc rec_vers', 3,'rc no_recv', 4,'rc read_cons', 'UNKNOWN') as tx_til_mon_trans,
|
||||
rdb$get_context('SYSTEM', 'ISOLATION_LEVEL') as tx_til_rdb_get_context,
|
||||
decode(t.mon$lock_timeout, -1, 'wait', 0, 'no_wait', 'timeout ' || t.mon$lock_timeout) as tx_lock_timeout_mon_trans,
|
||||
rdb$get_context('SYSTEM', 'LOCK_TIMEOUT') as tx_lock_timeout_rdb_get_context,
|
||||
iif(t.mon$read_only=1,'read_only','read_write') as tx_read_only_mon_trans,
|
||||
rdb$get_context('SYSTEM', 'READ_ONLY') as tx_read_only_rdb_get_context,
|
||||
t.mon$auto_undo as tx_autoundo_mon_trans
|
||||
-- only in FB 4.x+: ,t.mon$auto_commit as tx_autocommit_mon_trans
|
||||
from mon$transactions t
|
||||
where t.mon$transaction_id = current_transaction;
|
||||
commit;
|
||||
|
||||
select 'addi_script: create_new_db' as msg, v.* from v_check v;
|
||||
rollback;
|
||||
|
||||
connect 'localhost:{tmp_db_1}' user {act_1.db.user} password '{act_1.db.password}';
|
||||
select 'addi_script: reconnect' as msg, v.* from v_check v;
|
||||
rollback;
|
||||
|
||||
drop database;
|
||||
""")
|
||||
main_script_1.write_text(f"""
|
||||
set list on;
|
||||
connect '{act_1.db.dsn}' user {act_1.db.user} password '{act_1.db.password}';
|
||||
recreate view v_check as
|
||||
select
|
||||
decode(t.mon$isolation_mode, 0,'consistency', 1,'snapshot', 2,'rc rec_vers', 3,'rc no_recv', 4,'rc read_cons', 'UNKNOWN') as tx_til_mon_trans,
|
||||
rdb$get_context('SYSTEM', 'ISOLATION_LEVEL') as tx_til_rdb_get_context,
|
||||
decode(t.mon$lock_timeout, -1, 'wait', 0, 'no_wait', 'timeout ' || t.mon$lock_timeout) as tx_lock_timeout_mon_trans,
|
||||
rdb$get_context('SYSTEM', 'LOCK_TIMEOUT') as tx_lock_timeout_rdb_get_context,
|
||||
iif(t.mon$read_only=1,'read_only','read_write') as tx_read_only_mon_trans,
|
||||
rdb$get_context('SYSTEM', 'READ_ONLY') as tx_read_only_rdb_get_context,
|
||||
t.mon$auto_undo as tx_autoundo_mon_trans
|
||||
-- only 4.x: ,t.mon$auto_commit as tx_autocommit_mon_trans
|
||||
from mon$transactions t
|
||||
where t.mon$transaction_id = current_transaction;
|
||||
commit;
|
||||
|
||||
select 'main_script: initial' as msg, v.* from v_check v;
|
||||
commit;
|
||||
|
||||
set keep_tran on;
|
||||
commit;
|
||||
|
||||
set transaction read only read committed record_version lock timeout 5 no auto undo; -- only in 4.x: auto commit;
|
||||
|
||||
select 'main_script: started Tx' as msg, v.* from v_check v;
|
||||
|
||||
commit; -------------------------------------------------------------------------------------- [ 1 ]
|
||||
|
||||
select 'main_script: after_commit' as msg, v.* from v_check v;
|
||||
|
||||
rollback; ------------------------------------------------------------------------------------ [ 2 ]
|
||||
|
||||
select 'main_script: after_rollback' as msg, v.* from v_check v;
|
||||
|
||||
rollback;
|
||||
|
||||
connect '{act_1.db.dsn}' user {act_1.db.user} password '{act_1.db.password}'; --------------------------- [ 3 ]
|
||||
|
||||
select 'main_script: after_reconnect' as msg, v.* from v_check v;
|
||||
rollback;
|
||||
|
||||
--###################
|
||||
in {addi_script_1};
|
||||
--###################
|
||||
|
||||
connect '{act_1.db.dsn}' user {act_1.db.user} password '{act_1.db.password}'; --------------------------- [ 5 ]
|
||||
|
||||
select 'main_script: resume' as msg, v.* from v_check v;
|
||||
rollback;
|
||||
|
||||
set keep_tran off;
|
||||
commit;
|
||||
|
||||
select 'keep_tran: turned_off' as msg, v.* from v_check v;
|
||||
commit;
|
||||
""")
|
||||
# Check
|
||||
act_1.expected_stdout = expected_stdout_1
|
||||
act_1.isql(switches=['-q'], input_file=main_script_1)
|
||||
assert act_1.clean_stdout == act_1.clean_expected_stdout
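# A small illustrative sketch (not part of this commit) that mirrors the DECODE logic of the
# v_check view above, so the expected TX_TIL_MON_TRANS / TX_LOCK_TIMEOUT_MON_TRANS values are
# easy to reproduce by hand; the mapping is taken verbatim from the SQL in main_script_1.
ISOLATION_MODE_NAMES = {0: 'consistency', 1: 'snapshot', 2: 'rc rec_vers',
                        3: 'rc no_recv', 4: 'rc read_cons'}

def decode_lock_timeout(mon_lock_timeout: int) -> str:
    # -1 => wait, 0 => no_wait, otherwise "timeout N" (same rule as the view)
    if mon_lock_timeout == -1:
        return 'wait'
    if mon_lock_timeout == 0:
        return 'no_wait'
    return f'timeout {mon_lock_timeout}'

# Example: "set transaction ... lock timeout 5" => mon$lock_timeout = 5 => 'timeout 5',
# which matches the TX_LOCK_TIMEOUT_MON_TRANS lines in expected_stdout_1.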
|
||||
|
@ -2,9 +2,9 @@
|
||||
#
|
||||
# id: bugs.core_4964
|
||||
# title: Real errors during connect to security database are hidden by Srp user manager. Errors should be logged no matter what AuthServer is used.
|
||||
# decription:
|
||||
# decription:
|
||||
# Test obtains full path to $fb_home via FBSVCMGR info_get_env.
|
||||
# Then it makes copy of file 'databases.conf' that is in $fb_home directory because
|
||||
# Then it makes copy of file 'databases.conf' that is in $fb_home directory because
|
||||
# following lines will be added to that 'databases.conf':
|
||||
# ===
|
||||
# tmp_alias_4964 = ...
|
||||
@ -13,18 +13,18 @@
|
||||
# }
|
||||
# ===
|
||||
# NB: we intentionally put reference to file that for sure does exist but is INVALID for usage as fdb: 'firebird.msg'
|
||||
#
|
||||
#
|
||||
# Then we:
|
||||
# 1) obtain content of server firebird.log
|
||||
# 2) try to make connect to alias 'tmp_alias_4964' and (as expected) get error.
|
||||
# 3) wait a little and obtain again content of server firebird.log
|
||||
#
|
||||
#
|
||||
# Finally we restore original databases.conf and check that:
|
||||
# 1) Client error message contains phrase about need to check server firebird.log for details.
|
||||
# 2) Difference of firebird.log contains messages that engine could not attach to password database
|
||||
# because it is invalid (we specify 'firebird.msg' as security_db in databases.conf for test database,
|
||||
# 2) Difference of firebird.log contains messages that engine could not attach to password database
|
||||
# because it is invalid (we specify 'firebird.msg' as security_db in databases.conf for test database,
|
||||
# and of course this is not valid database)
|
||||
#
|
||||
#
|
||||
# Client always get message with gdscode = 335545106 and sqlcode=-902.
|
||||
# Error text in firebird.log depends on what plugin is used for authentification:
|
||||
# 1) Legacy:
|
||||
@ -34,16 +34,16 @@
|
||||
# file <...> is not a valid database
|
||||
# 2) Srp:
|
||||
# Authentication error
|
||||
# file C:\\FBSS\\FIREBIRD.MSG is not a valid database
|
||||
#
|
||||
# file C:\\FB\\SS\\FIREBIRD.MSG is not a valid database
|
||||
#
|
||||
# Checked for:
|
||||
# FB30SS, build 3.0.4.32972: OK, 3.360s.
|
||||
# FB40SS, build 4.0.0.977: OK, 3.485s.
|
||||
#
|
||||
#
|
||||
# Refactored 05.01.2020 (firebird.conf now contains Srp as first plugin in UserManager parameter):
|
||||
# 4.0.0.1714 SS: 2.922s; 4.0.0.1714 SC: 5.563s; 4.0.0.1714 CS: 9.172s.
|
||||
# 3.0.5.33221 SS: 2.015s; 3.0.5.33221 SC: 3.469s; 3.0.5.33221 CS: 6.173s.
|
||||
#
|
||||
#
|
||||
# tracker_id: CORE-4964
|
||||
# min_versions: ['3.0']
|
||||
# versions: 3.0
|
||||
@ -55,7 +55,8 @@ from firebird.qa import db_factory, isql_act, Action
|
||||
# version: 3.0
|
||||
# resources: None
|
||||
|
||||
substitutions_1 = [('FILE.*FIREBIRD.MSG', 'FILE FIREBIRD.MSG'), ('CLIENT_MSG: 335545106L', 'CLIENT_MSG: 335545106')]
|
||||
substitutions_1 = [('FILE.*FIREBIRD.MSG', 'FILE FIREBIRD.MSG'),
|
||||
('CLIENT_MSG: 335545106L', 'CLIENT_MSG: 335545106')]
|
||||
|
||||
init_script_1 = """"""
|
||||
|
||||
@ -63,7 +64,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
|
||||
# test_script_1
|
||||
#---
|
||||
#
|
||||
#
|
||||
# import os
|
||||
# import subprocess
|
||||
# from subprocess import Popen
|
||||
@ -73,29 +74,29 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# import shutil
|
||||
# import re
|
||||
# from fdb import services
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
#
|
||||
#
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def flush_and_close(file_handle):
|
||||
# # https://docs.python.org/2/library/os.html#os.fsync
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
|
||||
# global os
|
||||
#
|
||||
#
|
||||
# file_handle.flush()
|
||||
# if file_handle.mode not in ('r', 'rb'):
|
||||
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
|
||||
# os.fsync(file_handle.fileno())
|
||||
# file_handle.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def cleanup( f_names_list ):
|
||||
# global os
|
||||
# for i in range(len( f_names_list )):
|
||||
@ -106,16 +107,16 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# else:
|
||||
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
|
||||
# del_name = None
|
||||
#
|
||||
#
|
||||
# if del_name and os.path.isfile( del_name ):
|
||||
# os.remove( del_name )
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def svc_get_fb_log( f_fb_log ):
|
||||
#
|
||||
#
|
||||
# global subprocess
|
||||
#
|
||||
#
|
||||
# subprocess.call( [ context['fbsvcmgr_path'],
|
||||
# "localhost:service_mgr",
|
||||
# "action_get_fb_log"
|
||||
@ -123,26 +124,26 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# stdout=f_fb_log, stderr=subprocess.STDOUT
|
||||
# )
|
||||
# return
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# svc = services.connect(host='localhost')
|
||||
# fb_home=svc.get_home_directory()
|
||||
# svc.close()
|
||||
#
|
||||
#
|
||||
# dts = datetime.datetime.now().strftime("%y%m%d_%H%M%S")
|
||||
#
|
||||
#
|
||||
# dbconf = os.path.join(fb_home, 'databases.conf')
|
||||
# fbconf = os.path.join(fb_home, 'firebird.conf')
|
||||
#
|
||||
#
|
||||
# dbcbak = os.path.join(fb_home, 'databases_'+dts+'.bak')
|
||||
# fbcbak = os.path.join(fb_home, 'firebird_'+dts+'.bak')
|
||||
#
|
||||
#
|
||||
# shutil.copy2( dbconf, dbcbak )
|
||||
# shutil.copy2( fbconf, fbcbak )
|
||||
#
|
||||
#
|
||||
# tmp_fdb=os.path.join(context['temp_directory'],'tmp_4964.fdb')
|
||||
#
|
||||
#
|
||||
# f_dbconf=open(dbconf,'a')
|
||||
# f_dbconf.seek(0, 2)
|
||||
# f_dbconf.write("\\n\\n# Temporarily added by fbtest, CORE-4964. Should be removed auto:")
|
||||
@ -151,29 +152,29 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# f_dbconf.write("\\n{\\n SecurityDatabase = $(dir_conf)/firebird.msg\\n}")
|
||||
# f_dbconf.write("\\n#" + '='*60 )
|
||||
# f_dbconf.close()
|
||||
#
|
||||
#
|
||||
# f_fbconf=open(fbconf,'r')
|
||||
# fbconf_content=f_fbconf.readlines()
|
||||
# f_fbconf.close()
|
||||
# for i,s in enumerate( fbconf_content ):
|
||||
# if s.lower().lstrip().startswith( 'wirecrypt'.lower() ):
|
||||
# fbconf_content[i] = '# <temply commented> ' + s
|
||||
#
|
||||
#
|
||||
# fbconf_content.append('\\n# Temporarily added by fbtest, CORE-4964. Should be removed auto:')
|
||||
# fbconf_content.append("\\n#" + '='*30 )
|
||||
# fbconf_content.append('\\nWireCrypt = Disabled')
|
||||
# fbconf_content.append("\\n#" + '='*30 )
|
||||
#
|
||||
#
|
||||
# f_fbconf=open(fbconf,'w')
|
||||
# f_fbconf.writelines( fbconf_content )
|
||||
# flush_and_close( f_fbconf )
|
||||
#
|
||||
#
|
||||
# f_fblog_before=open( os.path.join(context['temp_directory'],'tmp_4964_fblog_before.txt'), 'w')
|
||||
# svc_get_fb_log( f_fblog_before )
|
||||
# flush_and_close( f_fblog_before )
|
||||
#
|
||||
#
|
||||
# f_connect_log=open( os.path.join(context['temp_directory'],'tmp_connect_4964.log'), 'w')
|
||||
#
|
||||
#
|
||||
# try:
|
||||
# # Try to connect to 'firebird.msg' which is obviously not a database file:
|
||||
# ###################################
|
||||
@ -183,37 +184,37 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# except Exception,e:
|
||||
# for x in e:
|
||||
# f_connect_log.write( repr(x)+'\\n' )
|
||||
#
|
||||
#
|
||||
# flush_and_close( f_connect_log )
|
||||
#
|
||||
#
|
||||
# f_fblog_after=open( os.path.join(context['temp_directory'],'tmp_4964_fblog_after.txt'), 'w')
|
||||
# svc_get_fb_log( f_fblog_after )
|
||||
# flush_and_close( f_fblog_after )
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# # RESTORE original config:
|
||||
# ##########################
|
||||
# shutil.move( dbcbak, dbconf )
|
||||
# shutil.move( fbcbak, fbconf )
|
||||
#
|
||||
#
|
||||
# # Compare firebird.log versions BEFORE and AFTER this test:
|
||||
# ######################
|
||||
#
|
||||
#
|
||||
# oldfb=open(f_fblog_before.name, 'r')
|
||||
# newfb=open(f_fblog_after.name, 'r')
|
||||
#
|
||||
#
|
||||
# difftext = ''.join(difflib.unified_diff(
|
||||
# oldfb.readlines(),
|
||||
# oldfb.readlines(),
|
||||
# newfb.readlines()
|
||||
# ))
|
||||
# oldfb.close()
|
||||
# newfb.close()
|
||||
#
|
||||
#
|
||||
# f_diff_txt=open( os.path.join(context['temp_directory'],'tmp_4964_diff.txt'), 'w')
|
||||
# f_diff_txt.write(difftext)
|
||||
# flush_and_close( f_diff_txt )
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# allowed_patterns = (
|
||||
# re.compile('cannot\\s+attach\\s+to\\s+password+\\s+database\\.*', re.IGNORECASE)
|
||||
# ,re.compile('error\\s+in\\s+isc_attach_database\\(\\)\\s+API\\.*', re.IGNORECASE)
|
||||
@ -222,30 +223,30 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# ,re.compile('335545106')
|
||||
# ,re.compile('-902')
|
||||
# )
|
||||
#
|
||||
#
|
||||
# with open( f_connect_log.name,'r') as f:
|
||||
# for line in f:
|
||||
# match2some = filter( None, [ p.search(line) for p in allowed_patterns ] )
|
||||
# if match2some:
|
||||
# print( 'CLIENT_MSG: ' + line.upper() )
|
||||
#
|
||||
#
|
||||
# with open( f_diff_txt.name,'r') as f:
|
||||
# for line in f:
|
||||
# if line.startswith('+'):
|
||||
# match2some = filter( None, [ p.search(line) for p in allowed_patterns ] )
|
||||
# if match2some:
|
||||
# print( 'FIREBIRD.LOG: ' + (' '.join(line.split()).upper()) )
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# # Cleanup:
|
||||
# ##########
|
||||
# # do NOT remove this pause otherwise some of logs will not be enable for deletion and test will finish with
|
||||
# # do NOT remove this pause otherwise some of logs will not be enable for deletion and test will finish with
|
||||
# # Exception raised while executing Python test script. exception: WindowsError: 32
|
||||
# time.sleep(1)
|
||||
# cleanup( (f_connect_log, f_diff_txt, f_fblog_before, f_fblog_after) )
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
@ -255,11 +256,9 @@ expected_stdout_1 = """
|
||||
CLIENT_MSG: 335545106L
|
||||
FIREBIRD.LOG: + AUTHENTICATION ERROR
|
||||
FIREBIRD.LOG: + FILE FIREBIRD.MSG IS NOT A VALID DATABASE
|
||||
"""
|
||||
"""
|
||||
|
||||
@pytest.mark.version('>=3.0')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
|
||||
|
||||
pytest.skip("Test requires manipulation with databases.conf")
|
||||
#pytest.fail("Test not IMPLEMENTED")
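# The commented-out implementation above relies on diffing two firebird.log snapshots and
# scanning only the ADDED lines for known error patterns. A minimal standalone sketch of that
# technique (patterns and variable names here are illustrative assumptions, not the test's
# real files):
import difflib
import re

def new_log_lines_matching(old_log_text: str, new_log_text: str, patterns):
    diff = difflib.unified_diff(old_log_text.splitlines(), new_log_text.splitlines())
    added = [line[1:] for line in diff if line.startswith('+') and not line.startswith('+++')]
    return [line for line in added if any(p.search(line) for p in patterns)]

# Example usage with patterns similar to the old code:
# patterns = [re.compile('cannot\\s+attach\\s+to\\s+password\\s+database', re.IGNORECASE),
#             re.compile('not\\s+a\\s+valid\\s+database', re.IGNORECASE)]
# hits = new_log_lines_matching(log_before, log_after, patterns)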
|
||||
|
@ -2,17 +2,17 @@
|
||||
#
|
||||
# id: bugs.core_4977
|
||||
# title: Detach using Linux client takes much longer than from Windows
|
||||
# decription:
|
||||
# decription:
|
||||
# # *** NOTE ***
|
||||
# # We measure APPROXIMATE time that is required for detaching from database by evaluating number of seconds that passed
|
||||
# # from UNIX standard epoch time inside ISQL and writing it to log. After returning control from ISQL we evaluate again
|
||||
# # that number by calling Python 'time.time()' - and it will return a value up to current UTC time, i.e. it WILL take in
|
||||
# # account local timezone from OS settings (this is so at least on Windows). Thus we have to add/subtract the time shift
|
||||
# # between UTC and local time - this is done by 'time.timezone' summand.
|
||||
# # On PC-host with CPU 3.0 GHz and 2Gb RAM) in almost all cases difference was less than 1000 ms, so it was decided
|
||||
# # between UTC and local time - this is done via the 'time.timezone' offset.
|
||||
# # On PC-host with CPU 3.0 GHz and 2Gb RAM) in almost all cases difference was less than 1000 ms, so it was decided
|
||||
# # to set MAX_DETACH_TIME_THRESHOLD = 1200 ms.
|
||||
# # Tested on WI-V3.0.0.32140 (SS/SC/CC).
|
||||
#
|
||||
#
|
||||
# Results for 22.05.2017:
|
||||
# fb30Cs, build 3.0.3.32725: OK, 1.796ss.
|
||||
# fb30SC, build 3.0.3.32725: OK, 1.047ss.
|
||||
@ -20,18 +20,19 @@
|
||||
# FB40CS, build 4.0.0.645: OK, 2.032ss.
|
||||
# FB40SC, build 4.0.0.645: OK, 1.188ss.
|
||||
# FB40SS, build 4.0.0.645: OK, 1.157ss.
|
||||
#
|
||||
#
|
||||
# 13.04.2021. Adapted for run both on Windows and Linux. Checked on:
|
||||
# Windows: 3.0.8.33445, 4.0.0.2416
|
||||
# Linux: 3.0.8.33426, 4.0.0.2416
|
||||
#
|
||||
#
|
||||
# tracker_id: CORE-4977
|
||||
# min_versions: ['3.0']
|
||||
# versions: 3.0
|
||||
# qmid: None
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
import time
|
||||
from firebird.qa import db_factory, python_act, Action
|
||||
|
||||
# version: 3.0
|
||||
# resources: None
|
||||
@ -44,37 +45,37 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
|
||||
# test_script_1
|
||||
#---
|
||||
#
|
||||
#
|
||||
# import os
|
||||
# import subprocess
|
||||
# import time
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
#
|
||||
#
|
||||
# ############################################
|
||||
# ### d e f i n e t h r e s h o l d ###
|
||||
# ############################################
|
||||
# MAX_DETACH_TIME_THRESHOLD=1200
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def flush_and_close( file_handle ):
|
||||
# # https://docs.python.org/2/library/os.html#os.fsync
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
|
||||
# global os
|
||||
#
|
||||
#
|
||||
# file_handle.flush()
|
||||
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
|
||||
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
|
||||
# os.fsync(file_handle.fileno())
|
||||
# file_handle.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def cleanup( f_names_list ):
|
||||
# global os
|
||||
# for i in range(len( f_names_list )):
|
||||
@ -86,34 +87,34 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
|
||||
# print('type(f_names_list[i])=',type(f_names_list[i]))
|
||||
# del_name = None
|
||||
#
|
||||
#
|
||||
# if del_name and os.path.isfile( del_name ):
|
||||
# os.remove( del_name )
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# sqltxt='''
|
||||
# set list on;
|
||||
# select datediff(second from timestamp '01.01.1970 00:00:00.000' to current_timestamp) as " "
|
||||
# set list on;
|
||||
# select datediff(second from timestamp '01.01.1970 00:00:00.000' to current_timestamp) as " "
|
||||
# from rdb$types rows 1;
|
||||
# '''
|
||||
#
|
||||
#
|
||||
# f_isql_cmd=open( os.path.join(context['temp_directory'],'tmp_4977.sql'), 'w')
|
||||
# f_isql_cmd.write(sqltxt)
|
||||
# flush_and_close( f_isql_cmd )
|
||||
#
|
||||
#
|
||||
# ms_before_detach=0
|
||||
#
|
||||
#
|
||||
# f_isql_log = open( os.path.join(context['temp_directory'],'tmp_4977.log'), 'w')
|
||||
# f_isql_err = open( os.path.join(context['temp_directory'],'tmp_4977.err'), 'w')
|
||||
#
|
||||
#
|
||||
# subprocess.call( [context['isql_path'], dsn, "-i", f_isql_cmd.name ],
|
||||
# stdout = f_isql_log,
|
||||
# stderr = f_isql_err
|
||||
# )
|
||||
# flush_and_close( f_isql_log )
|
||||
# flush_and_close( f_isql_err )
|
||||
#
|
||||
#
|
||||
# with open( f_isql_log.name,'r') as f:
|
||||
# for line in f:
|
||||
# # ::: NB ::: do NOT remove "and line.split()[0].isdigit()" if decide to replace subprocess.call()
|
||||
@ -121,28 +122,38 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# # String like: 'Database ....' does appear first in log instead of result!
|
||||
# if line.split() and line.split()[0].isdigit():
|
||||
# ms_before_detach=int( line.split()[0] )
|
||||
#
|
||||
#
|
||||
# detach_during_ms = int( (time.time() - ms_before_detach - time.timezone) * 1000 )
|
||||
#
|
||||
#
|
||||
# if detach_during_ms < MAX_DETACH_TIME_THRESHOLD:
|
||||
# print('Detach performed fast enough: less than threshold.')
|
||||
# else:
|
||||
# print('Detach lasted too long time: %s ms, MAX_DETACH_TIME_THRESHOLD is %s ms' % (detach_during_ms, MAX_DETACH_TIME_THRESHOLD) )
|
||||
#
|
||||
#
|
||||
# # cleanup:
|
||||
# time.sleep(1)
|
||||
# cleanup((f_isql_log, f_isql_err, f_isql_cmd))
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
expected_stdout_1 = """
|
||||
Detach performed fast enough: less than threshold.
|
||||
"""
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
@pytest.mark.version('>=3.0')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
|
||||
|
||||
def test_1(act_1: Action):
|
||||
MAX_DETACH_TIME_THRESHOLD=1200
|
||||
act_1.script = """
|
||||
set list on;
|
||||
select datediff(second from timestamp '01.01.1970 00:00:00.000' to current_timestamp) as " "
|
||||
from rdb$types rows 1;
|
||||
"""
|
||||
act_1.execute()
|
||||
ms_before_detach = 0
|
||||
for line in act_1.stdout.splitlines():
|
||||
# ::: NB ::: do NOT remove "and line.split()[0].isdigit()" if decide to replace subprocess.call()
|
||||
# with pipe-way like: runProgram('isql',[dsn,'-q','-o',sqllog.name], sqltxt) !!
|
||||
# String like: 'Database ....' does appear first in log instead of result!
|
||||
splitted = line.split()
|
||||
if splitted and splitted[0].isdigit():
|
||||
ms_before_detach = int(splitted[0])
|
||||
detach_during_ms = int((time.time() - ms_before_detach - time.timezone) * 1000)
|
||||
assert detach_during_ms < MAX_DETACH_TIME_THRESHOLD
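# A tiny worked example (illustrative, not part of this commit) of the time arithmetic used in
# test_1 above: ISQL reports seconds from '01.01.1970 00:00:00.000' to CURRENT_TIMESTAMP, i.e.
# epoch seconds expressed in LOCAL time, while time.time() returns epoch seconds in UTC, so
# time.timezone (UTC minus local time, in seconds, ignoring DST) has to be subtracted before
# taking the difference.
import time

def detach_duration_ms(isql_local_epoch_seconds: int) -> int:
    # same formula as in test_1 above
    return int((time.time() - isql_local_epoch_seconds - time.timezone) * 1000)

# E.g. with time.timezone == -3600 (UTC+1) and an ISQL value captured one second ago,
# the result is roughly 1000 ms.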
|
||||
|
@ -2,36 +2,36 @@
|
||||
#
|
||||
# id: bugs.core_4998
|
||||
# title: Both client and server could not close connection after failed authentification
|
||||
# decription:
|
||||
# decription:
|
||||
# Reproduced on 3.0.0.32136 RC1 with firebird.conf:
|
||||
# AuthServer = Legacy_Auth,Srp
|
||||
# AuthClient = Srp,Legacy_Auth
|
||||
# ::: NB-1 :::
|
||||
# ::: NB-1 :::
|
||||
# In order to get this environment, the test temporarily CHANGES firebird.conf
|
||||
# Test will restore original firebird.conf in the end.
|
||||
#
|
||||
# ::: NB-2 :::
|
||||
#
|
||||
# ::: NB-2 :::
|
||||
# We have to prepare auxiliary Python script to be executed in SEPARATE (NEW!) execution context,
|
||||
# otherwise firebird.log is filled with messages "errno = 10054" only after this test completely finished.
|
||||
# See variable 'f_python_separate_exec_context' - it points to this temp .py file.
|
||||
# This aux Python script is called like this:
|
||||
# os.system( f_python_separate_exec_context )
|
||||
#
|
||||
# It contains three attempts to make connection with invalid passwords.
|
||||
#
|
||||
# It contains three attempts to make connection with invalid passwords.
|
||||
# Exceptions ('Your user/password not defined...') are suppressed, we need only make these attempts to check
|
||||
# that no new records will be added to firebird.log (as it is confirmed to be in 3.0.0.32136 RC1).
|
||||
#
|
||||
#
|
||||
# File firebird.log is compared BEFORE and AFTER os.system( f_python_separate_exec_context ).
|
||||
# No new messages related to 10054 error should occur during this test in firebird.log.
|
||||
#
|
||||
#
|
||||
# 3.0.0.32366 RC2 - works OK.
|
||||
#
|
||||
#
|
||||
# Also checked on:
|
||||
# 30Cs, build 3.0.4.32972: OK, 6.172s.
|
||||
# 30SS, build 3.0.4.32972: OK, 4.375s.
|
||||
# 40CS, build 4.0.0.955: OK, 7.281s.
|
||||
# 40SS, build 4.0.0.977: OK, 4.704s.
|
||||
#
|
||||
#
|
||||
# tracker_id: CORE-4998
|
||||
# min_versions: ['3.0.0']
|
||||
# versions: 3.0
|
||||
@ -60,35 +60,35 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# import re
|
||||
# import shutil
|
||||
# from fdb import services
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
#
|
||||
#
|
||||
# THIS_DSN = dsn
|
||||
# DBAUSR = user_name
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
# svc = services.connect(host='localhost')
|
||||
# fb_home=svc.get_home_directory()
|
||||
# svc.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def flush_and_close(file_handle):
|
||||
# # https://docs.python.org/2/library/os.html#os.fsync
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
|
||||
# global os
|
||||
#
|
||||
#
|
||||
# file_handle.flush()
|
||||
# if file_handle.mode not in ('r', 'rb'):
|
||||
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
|
||||
# os.fsync(file_handle.fileno())
|
||||
# file_handle.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def cleanup( f_names_list ):
|
||||
# global os
|
||||
# for i in range(len( f_names_list )):
|
||||
@ -99,16 +99,16 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# else:
|
||||
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
|
||||
# del_name = None
|
||||
#
|
||||
#
|
||||
# if del_name and os.path.isfile( del_name ):
|
||||
# os.remove( del_name )
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def svc_get_fb_log( f_fb_log ):
|
||||
#
|
||||
#
|
||||
# global subprocess
|
||||
#
|
||||
#
|
||||
# subprocess.call( [ context['fbsvcmgr_path'],
|
||||
# "localhost:service_mgr",
|
||||
# "action_get_fb_log"
|
||||
@ -116,43 +116,43 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# stdout=f_fb_log, stderr=subprocess.STDOUT
|
||||
# )
|
||||
# return
|
||||
#
|
||||
#
|
||||
# ###########################################################################################
|
||||
#
|
||||
#
|
||||
# dts = datetime.datetime.now().strftime("%y%m%d_%H%M%S")
|
||||
#
|
||||
#
|
||||
# fbconf = os.path.join( fb_home, 'firebird.conf')
|
||||
# fbcbak = os.path.join( fb_home, 'firebird_'+dts+'.bak')
|
||||
#
|
||||
#
|
||||
# shutil.copy2( fbconf, fbcbak )
|
||||
#
|
||||
#
|
||||
# f_fbconf = open(fbconf,'r')
|
||||
# fbconf_content=f_fbconf.readlines()
|
||||
# f_fbconf.close()
|
||||
#
|
||||
#
|
||||
# for i,s in enumerate( fbconf_content ):
|
||||
# if s.lower().lstrip().startswith( 'wirecrypt'.lower() ):
|
||||
# fbconf_content[i] = '# <temply commented> ' + s
|
||||
# if s.lower().lstrip().startswith( 'AuthClient'.lower() ):
|
||||
# fbconf_content[i] = '# <temply commented> ' + s
|
||||
#
|
||||
#
|
||||
# fbconf_content.append('\\n# Temporarily added by fbtest, CORE-4998. Should be removed auto:')
|
||||
# fbconf_content.append("\\n#" + '='*30 )
|
||||
# fbconf_content.append('\\nAuthClient = Srp,Legacy_Auth')
|
||||
# fbconf_content.append("\\n#" + '='*30 )
|
||||
#
|
||||
#
|
||||
# f_fbconf=open(fbconf,'w')
|
||||
# f_fbconf.writelines( fbconf_content )
|
||||
# flush_and_close( f_fbconf )
|
||||
#
|
||||
#
|
||||
# ###########################################################################################
|
||||
#
|
||||
#
|
||||
# f_fblog_before=open( os.path.join(context['temp_directory'],'tmp_4998_fblog_before.txt'), 'w')
|
||||
# svc_get_fb_log( f_fblog_before )
|
||||
# flush_and_close( f_fblog_before )
|
||||
#
|
||||
#
|
||||
# other_exec_context_python_text = '''import fdb
|
||||
#
|
||||
#
|
||||
# for i in range(0,3):
|
||||
# con1 = None
|
||||
# try:
|
||||
@ -164,76 +164,76 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# con1.close()
|
||||
# exit(0)
|
||||
# ''' % locals()
|
||||
#
|
||||
#
|
||||
# f_python_separate_exec_context = os.path.join(context['temp_directory'], 'tmp_core_4998_try_connect_with_invalid_passwords.py')
|
||||
#
|
||||
#
|
||||
# f = open( f_python_separate_exec_context, 'w')
|
||||
# f.write( other_exec_context_python_text )
|
||||
# flush_and_close( f )
|
||||
#
|
||||
#
|
||||
# ########################################################################################################
|
||||
# ### l a u n c h P y t h o n i n a n o t h e r e x e c u t i o n c o n t e x t ###
|
||||
# ########################################################################################################
|
||||
#
|
||||
#
|
||||
# # 17.06.2018. We have to add full path and name of interpretep (e.g. 'C:\\Python27\\python.exe')
|
||||
# # because it can appear that OS will not be able to recognize how to handle .py files!
|
||||
# # sys.executable - returns full path to Python exe,
|
||||
#
|
||||
# # sys.executable - returns full path to Python exe,
|
||||
#
|
||||
# os.system( sys.executable + ' ' + f_python_separate_exec_context )
|
||||
#
|
||||
#
|
||||
# time.sleep(1)
|
||||
#
|
||||
#
|
||||
# f_fblog_after=open( os.path.join(context['temp_directory'],'tmp_4998_fblog_after.txt'), 'w')
|
||||
# svc_get_fb_log( f_fblog_after )
|
||||
# flush_and_close( f_fblog_after )
|
||||
#
|
||||
#
|
||||
# # RESTORE original config:
|
||||
# ##########################
|
||||
# shutil.move( fbcbak, fbconf)
|
||||
#
|
||||
#
|
||||
# # Compare firebird.log versions BEFORE and AFTER this test:
|
||||
# ######################
|
||||
#
|
||||
#
|
||||
# oldfb=open(f_fblog_before.name, 'r')
|
||||
# newfb=open(f_fblog_after.name, 'r')
|
||||
#
|
||||
#
|
||||
# difftext = ''.join(difflib.unified_diff(
|
||||
# oldfb.readlines(),
|
||||
# oldfb.readlines(),
|
||||
# newfb.readlines()
|
||||
# ))
|
||||
# oldfb.close()
|
||||
# newfb.close()
|
||||
#
|
||||
#
|
||||
# f_diff_txt=open( os.path.join(context['temp_directory'],'tmp_4998_diff.txt'), 'w')
|
||||
# f_diff_txt.write(difftext)
|
||||
# flush_and_close( f_diff_txt )
|
||||
#
|
||||
#
|
||||
# # INET/inet_error: read errno = 10054
|
||||
#
|
||||
#
|
||||
# allowed_patterns = (
|
||||
# re.compile('\\.*inet_error\\:{0,1}\\s{0,}read\\s+errno\\s{0,}\\={0,}\\s{0,}10054\\.*', re.IGNORECASE),
|
||||
# )
|
||||
#
|
||||
#
|
||||
# with open( f_diff_txt.name,'r') as f:
|
||||
# for line in f:
|
||||
# if line.startswith('+'):
|
||||
# match2some = filter( None, [ p.search(line) for p in allowed_patterns ] )
|
||||
# if match2some:
|
||||
# print( 'UNEXPECTED TEXT IN FIREBIRD.LOG: ' + (' '.join(line.split()).upper()) )
|
||||
#
|
||||
#
|
||||
# #####################################################################
|
||||
# # Cleanup:
|
||||
# time.sleep(1)
|
||||
# cleanup( (f_diff_txt,f_fblog_before,f_fblog_after, f_python_separate_exec_context) )
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
|
||||
@pytest.mark.version('>=3.0')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
pytest.skip("Test requires manipulation with firebird.conf")
|
||||
#pytest.fail("Test not IMPLEMENTED")
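# The skipped implementation would need the equivalent of the old fdb helper script shown in
# the comments above: a few connection attempts with an intentionally wrong password whose
# exceptions are swallowed. A minimal sketch with the new driver (a sketch under the assumption
# that firebird.driver.connect() and DatabaseError are available; the DSN and password are
# placeholders):
from firebird.driver import connect, DatabaseError

def make_failed_login_attempts(dsn: str, attempts: int = 3) -> None:
    for _ in range(attempts):
        try:
            with connect(dsn, user='SYSDBA', password='invalid-password-for-core-4998'):
                pass
        except DatabaseError:
            # 'Your user name and password are not defined ...' is expected here; the point is
            # only to trigger failed authentication on the server side.
            pass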
|
||||
|
||||
|
||||
|
@ -2,15 +2,15 @@
|
||||
#
|
||||
# id: bugs.core_5028
|
||||
# title: Report the remote port number in MON$ATTACHMENTS
|
||||
# decription:
|
||||
#
|
||||
# decription:
|
||||
#
|
||||
# tracker_id: CORE-5028
|
||||
# min_versions: ['3.0']
|
||||
# versions: 3.0
|
||||
# qmid: None
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
from firebird.qa import db_factory, python_act, Action
|
||||
|
||||
# version: 3.0
|
||||
# resources: None
|
||||
@ -30,29 +30,43 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# select iif(port > 0, 'OK', 'BAD') as port_value
|
||||
# from (
|
||||
# select cast(substring(mon$remote_address from 1 + position('/' in mon$remote_address)) as int) as port
|
||||
# from mon$attachments
|
||||
# from mon$attachments
|
||||
# where mon$attachment_id = current_connection
|
||||
# )
|
||||
# '''
|
||||
#
|
||||
#
|
||||
# # On previous FB versions <sqlcmd> will raise exception:
|
||||
# # Statement failed, SQLSTATE = 22018
|
||||
# # conversion error from string "192.168.43.154"
|
||||
#
|
||||
#
|
||||
# cur.execute(sqlcmd)
|
||||
# for r in cur:
|
||||
# print(r[0])
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
expected_stdout_1 = """
|
||||
OK
|
||||
"""
|
||||
"""
|
||||
|
||||
@pytest.mark.version('>=3.0')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
def test_1(act_1: Action, capsys):
|
||||
with act_1.db.connect() as con:
|
||||
c = con.cursor()
|
||||
cmd = """
|
||||
select iif(port > 0, 'OK', 'BAD') as port_value
|
||||
from (
|
||||
select cast(substring(mon$remote_address from 1 + position('/' in mon$remote_address)) as int) as port
|
||||
from mon$attachments
|
||||
where mon$attachment_id = current_connection)
|
||||
"""
|
||||
for row in c.execute(cmd):
|
||||
print(row[0])
|
||||
# Check
|
||||
act_1.expected_stdout = expected_stdout_1
|
||||
act_1.stdout = capsys.readouterr().out
|
||||
assert act_1.clean_stdout == act_1.clean_expected_stdout
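# A short worked example (not part of this commit) of what the SQL above extracts:
# mon$remote_address looks like '192.168.43.154/53212', so everything after '/' is the remote
# port (the port shown here is an illustrative value); the same parsing in plain Python:
def remote_port(mon_remote_address: str) -> int:
    # position('/' ...) + substring(...) in the SQL == split on the first '/' here
    return int(mon_remote_address.split('/', 1)[1])

# remote_port('192.168.43.154/53212') -> 53212, which is > 0, hence PORT_VALUE = 'OK'.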
|
||||
|
||||
|
||||
|
@ -2,9 +2,9 @@
|
||||
#
|
||||
# id: bugs.core_5034
|
||||
# title: At least 5 seconds delay on disconnect could happen if disconnect happens close after Event Manager initialization
|
||||
# decription:
|
||||
# decription:
|
||||
# This test uses Python multiprocessing package in order to spawn multiple processes with trivial job: attach/detach from DB.
|
||||
# Number processes ('planned_attachments') and of iterations for each of them ('reconnect_count') depends on FB architecture
|
||||
# Number processes ('planned_attachments') and of iterations for each of them ('reconnect_count') depends on FB architecture
|
||||
# which is obtained by call of SP sys_get_fb_arch.
|
||||
# For ClassicServer a weird effect was detected: when the number of processes is more than ~34-35, some Python processes may not
|
||||
# finish (but CS processes are gone and thus database has no attachments).
|
||||
@ -26,29 +26,32 @@
|
||||
# In order to properly detect records that correspond to post-apply scripts (with INSERT statements) special integer field 'SEQ'
|
||||
# is used: it will have even values for event when we are 'just before detach' (i.e. "inside" database connection) and odd values
|
||||
# when we are 'just after' (i.e. when connection has been just closed). The view 'v_top10_slow' is used to display connects which
|
||||
# were closed too slow.
|
||||
# Normally, this view should return ONE record with text 'Detaches were fast'.
|
||||
# were closed too slow.
|
||||
# Normally, this view should return ONE record with text 'Detaches were fast'.
|
||||
# Otherwise it will return concrete values of time that was spent on detach process, in milliseconds.
|
||||
# #######################
|
||||
# ### A C H T U N G ###
|
||||
# #######################
|
||||
# Following parameters: 'planned_attachments' and 'reconnect_count' affects on test result **VERY** strong, especially on Classic.
|
||||
# You may need to change them if test results are unstable. Do NOT make value of 'planned_attachments' more than 33-34 on Classic!
|
||||
#
|
||||
#
|
||||
# Successfully checked on build 32276, SS/SC/CS.
|
||||
# Confirmed error on WI-V3.0.0.32134, Cs: "MonitoringData: Cannot initialize the shared memory region / sh_mem_length_mapped is 0"
|
||||
#
|
||||
#
|
||||
# 23.11.2016
|
||||
# Increased threshold to 6000 ms instead of old 5000 ms -- see
|
||||
# Increased threshold to 6000 ms instead of old 5000 ms -- see
|
||||
# http://web.firebirdsql.org/download/prerelease/results/archive/3.0.2.32630/bugs.core_5034.html
|
||||
#
|
||||
#
|
||||
# [pcisar] 26.11.2021
|
||||
# New implementation should be done using multiprocessing Python module
|
||||
# Postponed for later due to complexity
|
||||
# tracker_id: CORE-5034
|
||||
# min_versions: ['3.0']
|
||||
# versions: 3.0
|
||||
# qmid:
|
||||
# qmid:
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
from firebird.qa import db_factory, python_act, Action
|
||||
|
||||
# version: 3.0
|
||||
# resources: None
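# Per the note above, a future implementation would use the multiprocessing module instead of
# the old os.system()-driven helper script. A minimal sketch of that shape (worker count, DSN
# and credentials are placeholders/assumptions, not the values this test will finally use):
from multiprocessing import Process

def attach_detach_worker(dsn: str, user: str, password: str, reconnect_count: int) -> None:
    # imported inside the worker so the function can be used as a spawned process target
    from firebird.driver import connect
    for _ in range(reconnect_count):
        with connect(dsn, user=user, password=password):
            pass  # the ON DISCONNECT trigger fires on exit and writes to log4detach

def run_workers(dsn: str, user: str, password: str, workers: int = 10, reconnect_count: int = 5):
    procs = [Process(target=attach_detach_worker, args=(dsn, user, password, reconnect_count))
             for _ in range(workers)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()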
|
||||
@ -83,7 +86,7 @@ init_script_1 = """
|
||||
declare v_fetches_beg bigint;
|
||||
declare v_fetches_end bigint;
|
||||
begin
|
||||
|
||||
|
||||
-- Aux SP for detect FB architecture.
|
||||
|
||||
select a.mon$server_pid, a.mon$remote_protocol
|
||||
@ -94,11 +97,11 @@ init_script_1 = """
|
||||
if ( att_protocol is null ) then
|
||||
fb_arch = 'Embedded';
|
||||
else if ( upper(current_user) = upper('SYSDBA')
|
||||
and rdb$get_context('SYSTEM','ENGINE_VERSION') NOT starting with '2.5'
|
||||
and exists(select * from mon$attachments a
|
||||
and rdb$get_context('SYSTEM','ENGINE_VERSION') NOT starting with '2.5'
|
||||
and exists(select * from mon$attachments a
|
||||
where a.mon$remote_protocol is null
|
||||
and upper(a.mon$user) in ( upper('Cache Writer'), upper('Garbage Collector'))
|
||||
)
|
||||
)
|
||||
) then
|
||||
fb_arch = 'SuperServer';
|
||||
else
|
||||
@ -112,7 +115,7 @@ init_script_1 = """
|
||||
from mon$io_stats i
|
||||
where i.mon$stat_group = 0 -- db_level
|
||||
into v_fetches_beg;
|
||||
|
||||
|
||||
execute statement v_test_sttm
|
||||
on external
|
||||
'localhost:' || rdb$get_context('SYSTEM', 'DB_NAME')
|
||||
@ -121,19 +124,19 @@ init_script_1 = """
|
||||
password a_connect_with_pwd
|
||||
role left('R' || replace(uuid_to_char(gen_uuid()),'-',''),31)
|
||||
into ext_server_pid;
|
||||
|
||||
|
||||
in autonomous transaction do
|
||||
select i.mon$page_fetches
|
||||
from mon$io_stats i
|
||||
where i.mon$stat_group = 0 -- db_level
|
||||
into v_fetches_end;
|
||||
|
||||
fb_arch = iif( cur_server_pid is distinct from ext_server_pid,
|
||||
'Classic',
|
||||
iif( v_fetches_beg is not distinct from v_fetches_end,
|
||||
'SuperClassic',
|
||||
|
||||
fb_arch = iif( cur_server_pid is distinct from ext_server_pid,
|
||||
'Classic',
|
||||
iif( v_fetches_beg is not distinct from v_fetches_end,
|
||||
'SuperClassic',
|
||||
'SuperServer'
|
||||
)
|
||||
)
|
||||
);
|
||||
end
|
||||
|
||||
@ -141,7 +144,7 @@ init_script_1 = """
|
||||
|
||||
suspend;
|
||||
|
||||
end
|
||||
end
|
||||
|
||||
^ -- sys_get_fb_arch
|
||||
set term ;^
|
||||
@ -168,9 +171,9 @@ init_script_1 = """
|
||||
while ( i < n) do
|
||||
begin
|
||||
v_stt = 'drop role R_' || lpad(i, 3, '0');
|
||||
begin
|
||||
begin
|
||||
execute statement v_stt;
|
||||
when any do begin end
|
||||
when any do begin end
|
||||
end
|
||||
|
||||
v_stt = 'create role R_' || lpad(i, 3, '0');
|
||||
@ -187,12 +190,12 @@ init_script_1 = """
|
||||
|
||||
create or alter trigger trg_disc active on disconnect position 0 as
|
||||
begin
|
||||
|
||||
|
||||
POST_EVENT 'FOO';
|
||||
|
||||
if ( current_user = 'TMP$C5034' and rdb$get_context('USER_SESSION','INITIAL_DDL') is null )
|
||||
if ( current_user = 'TMP$C5034' and rdb$get_context('USER_SESSION','INITIAL_DDL') is null )
|
||||
then
|
||||
in autonomous transaction do
|
||||
in autonomous transaction do
|
||||
insert into log4detach default values;
|
||||
end
|
||||
^
|
||||
@ -200,15 +203,15 @@ init_script_1 = """
|
||||
commit;
|
||||
|
||||
create or alter view v_top10_slow as
|
||||
select distinct msg
|
||||
select distinct msg
|
||||
from
|
||||
(
|
||||
select iif( detach_ms > max_detach_ms,
|
||||
'Slow detaches > '|| max_detach_ms ||' ms detected: ' || detach_ms || ' ms, from ' || dts_before_detach || ' to '||dts_after_detach,
|
||||
'All detaches not exceeded threshold'
|
||||
select iif( detach_ms > max_detach_ms,
|
||||
'Slow detaches > '|| max_detach_ms ||' ms detected: ' || detach_ms || ' ms, from ' || dts_before_detach || ' to '||dts_after_detach,
|
||||
'All detaches not exceeded threshold'
|
||||
) as msg
|
||||
from (
|
||||
select
|
||||
select
|
||||
rno
|
||||
,datediff(millisecond from min(t0) to min(t1)) as detach_ms
|
||||
,min(t0) as dts_before_detach
|
||||
@ -240,40 +243,38 @@ init_script_1 = """
|
||||
rows 10
|
||||
);
|
||||
commit;
|
||||
"""
|
||||
"""
|
||||
|
||||
db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
|
||||
# test_script_1
|
||||
#---
|
||||
#
|
||||
#
|
||||
# import os
|
||||
# import fdb
|
||||
# import time
|
||||
# import subprocess
|
||||
# from multiprocessing import Process
|
||||
# from fdb import services
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
#
|
||||
#
|
||||
# # Get FB architecture:
|
||||
# xcur=db_conn.cursor()
|
||||
# xcur.execute("select fb_arch from sys_get_fb_arch;")
|
||||
#
|
||||
#
|
||||
# for r in xcur:
|
||||
# fb_arch = r[0].split()[0]
|
||||
#
|
||||
#
|
||||
# dbfile=db_conn.database_name
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
# ISQL_BINARY = context['isql_path']
|
||||
# svc = services.connect(host='localhost')
|
||||
# FB_HOME = os.path.normpath( svc.get_home_directory() ) # 'c:\firebird\' --> 'c:\firebird' (i.e. remove trailing backslash if needed)
|
||||
|
||||
|
||||
# FB_HOME = os.path.normpath( svc.get_home_directory() ) # 'c:\firebird\' --> 'c:\firebird' (i.e. remove trailing backslash if needed)
|
||||
# svc.close()
|
||||
#
|
||||
#
|
||||
# if os.name == 'nt':
|
||||
# # For Windows we assume that client library is always in FB_HOME dir:
|
||||
# FB_CLNT=os.path.join(FB_HOME, 'fbclient.dll')
|
||||
@ -281,26 +282,26 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# # For Linux client library will be searched in 'lib' subdirectory of FB_HOME:
|
||||
# # con=fdb.connect( dsn='localhost:employee', user='SYSDBA', password='masterkey', fb_library_name='/var/tmp/fb40tmp/lib/libfbclient.so')
|
||||
# FB_CLNT=os.path.join(FB_HOME, 'lib', 'libfbclient.so' )
|
||||
#
|
||||
#
|
||||
# FBT_TEMP_DIR = os.path.normpath(context['temp_directory'])
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def flush_and_close(file_handle):
|
||||
# # https://docs.python.org/2/library/os.html#os.fsync
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
|
||||
# global os
|
||||
#
|
||||
#
|
||||
# file_handle.flush()
|
||||
# if file_handle.mode not in ('r', 'rb'):
|
||||
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
|
||||
# os.fsync(file_handle.fileno())
|
||||
# file_handle.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def cleanup( f_names_list ):
|
||||
# global os
|
||||
# for i in range(len( f_names_list )):
|
||||
@ -311,12 +312,12 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# else:
|
||||
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
|
||||
# del_name = None
|
||||
#
|
||||
#
|
||||
# if del_name and os.path.isfile( del_name ):
|
||||
# os.remove( del_name )
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# if fb_arch=='Classic':
|
||||
# # {20,20}=72"; {32,10}=52" - but can hang!; {30,8)=46"; {30,15}=93"; {33,10}=91"; {35,5}=no-return-from-python!
|
||||
# planned_attachments=30
|
||||
@ -329,16 +330,16 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# # SS: {30,10}=35"; {50,20}=54"; {40,30}=57"; {75,30}=70"; {80,10}=42"
|
||||
# planned_attachments=80
|
||||
# reconnect_count=10
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# subprocess.check_output([ context['fbsvcmgr_path'], "localhost:service_mgr",
|
||||
# "action_properties",
|
||||
# "prp_write_mode", "prp_wm_async",
|
||||
# "dbname", dbfile ], stderr=subprocess.STDOUT)
|
||||
#
|
||||
#
|
||||
# # GENERATE CODE FOR EXECUTING IN SEPARATE EXECUTABLE PYTHON CONTEXT
|
||||
# ###################################################################
|
||||
#
|
||||
#
|
||||
# f_parallel_txt='''import os
|
||||
# import fdb
|
||||
# import time
|
||||
@ -347,120 +348,121 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# import subprocess
|
||||
# from subprocess import Popen
|
||||
# from multiprocessing import Process
|
||||
#
|
||||
#
|
||||
# def attach_detach(a_dsn, a_fb_client, v_temp_dir, reconnect_count, process_seq):
|
||||
# f_detach_info_sql = open( os.path.join(v_temp_dir, 'tmp_5034_after_detach_' + str(process_seq).zfill(3) + '.log'), 'w')
|
||||
# v_role = 'R_'+str(process_seq).zfill(3)
|
||||
# v_sqllst = []
|
||||
#
|
||||
#
|
||||
# for i in range(0,reconnect_count):
|
||||
# v_seq = 100000 + (1+process_seq) * 1000 + 2*i
|
||||
#
|
||||
#
|
||||
# att = fdb.connect( dsn = a_dsn, fb_library_name = a_fb_client, user='TMP$C5034', password='123',role = v_role )
|
||||
#
|
||||
#
|
||||
# # Trigger 'trg_disc' will add row to LOG4DETACH table.
|
||||
# # Column RNO will have value = 'R_nnnn' (for worker #0 --> 'R_000', #1 --> 'R_001' etc),
|
||||
# # i.e. it will be NOT null for the timestamp when we are DISCONNECTED from the database:
|
||||
# att.close()
|
||||
#
|
||||
#
|
||||
# v_seq = v_seq + 1
|
||||
#
|
||||
#
|
||||
# # Catch current timestamp (we just returned from DB connect) and store it in the list:
|
||||
# v_sqllst.append( "insert into log4detach(dts, rno, seq) values( '%%s', '%%s', %%s ); " %% ( datetime.strftime(datetime.now(), '%%Y-%%m-%%d %%H:%%M:%%S.%%f')[:23], v_role, v_seq ) )
|
||||
#
|
||||
#
|
||||
# # Current worker COMPLETED <reconnect_count> iterations of connect/disconnect,
|
||||
# # now we can save timestamps that were stored in the list just after each detach
|
||||
# # to the text file for further executing it by ISQL:
|
||||
# f_detach_info_sql.write("\\\\n".join(v_sqllst))
|
||||
# f_detach_info_sql.write( '\\\\n' )
|
||||
#
|
||||
#
|
||||
# merge_sql="merge into log4detach t using ( select id, %%s + 2 * (row_number()over(order by id)-1) seq from log4detach d where rno='%%s' and seq is null ) s on (s.id = t.id) when matched then update set t.seq = s.seq;" %% ( 100000 + (1+process_seq) * 1000, v_role )
|
||||
#
|
||||
#
|
||||
# f_detach_info_sql.write( ' '.join(merge_sql.split()).lstrip() )
|
||||
# f_detach_info_sql.write( '\\\\n' )
|
||||
# f_detach_info_sql.write( 'commit;' )
|
||||
# f_detach_info_sql.close()
|
||||
#
|
||||
#
|
||||
# planned_attachments = %(planned_attachments)s
|
||||
#
|
||||
#
|
||||
# if __name__ == '__main__':
|
||||
# p_list=[]
|
||||
# v_fb_home = r'%(FB_HOME)s'
|
||||
# v_temp_dir = r'%(FBT_TEMP_DIR)s'
|
||||
# v_dsn = r'%(dsn)s'
|
||||
# v_fb_client = r'%(FB_CLNT)s'
|
||||
#
|
||||
#
|
||||
# for i in range(0, planned_attachments):
|
||||
# # Python multi-processing feature:
|
||||
# ##################################
|
||||
# p_i = Process(target=attach_detach, args=( v_dsn, v_fb_client, v_temp_dir, %(reconnect_count)s, i, ))
|
||||
# p_i.start()
|
||||
# p_list.append(p_i)
|
||||
#
|
||||
#
|
||||
# for i in range(len(p_list)):
|
||||
# p_list[i].join()
|
||||
#
|
||||
#
|
||||
# # All completed
|
||||
#
|
||||
#
|
||||
# f_detach_info_sql = open( os.path.join(v_temp_dir, 'tmp_5034_after_detach_all.sql'), 'w')
|
||||
# for i in range(len(p_list)):
|
||||
# f_detach_log_i = os.path.join(v_temp_dir, 'tmp_5034_after_detach_' + str(i).zfill(3) + '.log')
|
||||
# with open(f_detach_log_i, 'r') as s:
|
||||
# f_detach_info_sql.write(s.read()+'\\\\n\\\\n\\\\n')
|
||||
# os.remove(f_detach_log_i)
|
||||
#
|
||||
#
|
||||
# f_detach_info_sql.flush()
|
||||
# os.fsync(f_detach_info_sql.fileno())
|
||||
# f_detach_info_sql.close()
|
||||
#
|
||||
#
|
||||
# # subprocess.call( [ os.path.join( v_fb_home, 'isql'), v_dsn, '-user', '%(user_name)s', '-password', '%(user_password)s', '-nod', '-n', '-i', f_detach_info_sql.name] )
|
||||
# subprocess.call( [ r'%(ISQL_BINARY)s', v_dsn, '-user', '%(user_name)s', '-password', '%(user_password)s', '-nod', '-n', '-i', f_detach_info_sql.name] )
|
||||
#
|
||||
#
|
||||
# os.remove(f_detach_info_sql.name)
|
||||
# ''' % dict(globals(), **locals())
|
||||
# # (context['temp_directory'].replace('\\\\','\\\\\\\\'), planned_attachments, dsn, reconnect_count, dsn)
|
||||
#
|
||||
#
|
||||
# f_parallel_py=open( os.path.join(context['temp_directory'],'tmp_5034_after_detach.py'), 'w')
|
||||
# f_parallel_py.write(f_parallel_txt)
|
||||
# flush_and_close( f_parallel_py )
|
||||
#
|
||||
#
|
||||
# ########################################################################################################
|
||||
# ### l a u n c h P y t h o n i n a n o t h e r e x e c u t i o n c o n t e x t ###
|
||||
# ########################################################################################################
|
||||
# runProgram( sys.executable, [f_parallel_py.name] )
|
||||
#
|
||||
#
|
||||
# time.sleep(2)
|
||||
#
|
||||
#
|
||||
# f_top_slow_sql=open( os.path.join(context['temp_directory'], 'tmp_5034_slowest_detaches.sql'), 'w')
|
||||
# f_top_slow_sql.write('drop user tmp$c5034; commit; set count on; set heading off; select * from v_top10_slow; commit;')
|
||||
# flush_and_close( f_top_slow_sql )
|
||||
#
|
||||
#
|
||||
# f_top_slow_log=open( os.path.join(context['temp_directory'], 'tmp_5034_slowest_detaches.log'), 'w')
|
||||
# subprocess.call( [ context['isql_path'], dsn, "-nod", "-n", "-i", f_top_slow_sql.name], stdout=f_top_slow_log, stderr=subprocess.STDOUT)
|
||||
# flush_and_close( f_top_slow_log )
|
||||
#
|
||||
#
|
||||
# time.sleep(1)
|
||||
#
|
||||
#
|
||||
# with open(f_top_slow_log.name) as f:
|
||||
# print(f.read())
|
||||
#
|
||||
#
|
||||
# # Cleanup.
|
||||
# #########
|
||||
# time.sleep(1)
|
||||
# cleanup( (f_top_slow_sql,f_top_slow_log,f_parallel_py,f_parallel_py.name+'c') )
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
expected_stdout_1 = """
|
||||
All detaches not exceeded threshold
|
||||
Records affected: 1
|
||||
"""
|
||||
"""
|
||||
|
||||
@pytest.mark.version('>=3.0')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
def test_1(act_1: Action):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
|
||||
|
||||
|
@ -2,20 +2,22 @@
|
||||
#
|
||||
# id: bugs.core_5039
|
||||
# title: Connecting to service with invalid servicename yields incorrect error message
|
||||
# decription:
|
||||
# 28.01.2019.
|
||||
# decription:
|
||||
# 28.01.2019.
|
||||
# Name of service manager is ignored in FB 4.0, see http://tracker.firebirdsql.org/browse/CORE-5883
|
||||
# ("service_mgr" to be cleaned out from connection string completely...")
|
||||
# Disabled this test from running on FB 4.0: added record to '%FBT_REPO%\\tests\\qa4x-exclude-list.txt'.
|
||||
# Added EMPTY section for FB version 4.0 in this .fbt as one more way to protect from running.
|
||||
#
|
||||
# [pcisar] 26.11.2021
|
||||
# "Empty" 4.0 version was removed completelly, as it's not needed with pytest
|
||||
#
|
||||
# tracker_id: CORE-5039
|
||||
# min_versions: ['3.0']
|
||||
# versions: 3.0, 4.0
|
||||
# qmid: None
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
from firebird.qa import db_factory, python_act, Action
|
||||
|
||||
# version: 3.0
|
||||
# resources: None
|
||||
@ -28,45 +30,23 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
|
||||
# test_script_1
|
||||
#---
|
||||
#
|
||||
#
|
||||
# db_conn.close()
|
||||
# runProgram('fbsvcmgr',['localhost:qwe_mnb_zxc_9','user','SYSDBA','password','masterkey','info_server_version'])
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
expected_stderr_1 = """
|
||||
Cannot attach to services manager
|
||||
-service qwe_mnb_zxc_9 is not defined
|
||||
"""
|
||||
|
||||
@pytest.mark.version('>=3.0')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
|
||||
|
||||
# version: 4.0
|
||||
# resources: None
|
||||
|
||||
substitutions_2 = []
|
||||
|
||||
init_script_2 = """"""
|
||||
|
||||
db_2 = db_factory(sql_dialect=3, init=init_script_2)
|
||||
|
||||
test_script_2 = """
|
||||
-- This section was intentionally left empty.
|
||||
-- No message should be in expected_* sections.
|
||||
-- It is STRONGLY RECOMMENDED to add this ticket
|
||||
-- in the 'excluded-list' file:
|
||||
-- %FBT_REPO%\\tests\\qa4x-exclude-list.txt
|
||||
"""
|
||||
|
||||
act_2 = isql_act('db_2', test_script_2, substitutions=substitutions_2)
|
||||
|
||||
|
||||
@pytest.mark.version('>=4.0')
|
||||
def test_2(act_2: Action):
|
||||
act_2.execute()
|
||||
"""
|
||||
|
||||
@pytest.mark.version('>=3.0,<4')
|
||||
def test_1(act_1: Action):
|
||||
act_1.expected_stderr = expected_stderr_1
|
||||
act_1.svcmgr(switches=['localhost:qwe_mnb_zxc_9', 'user', 'SYSDBA',
|
||||
'password', 'masterkey', 'info_server_version'],
|
||||
connect_mngr=False)
|
||||
assert act_1.clean_stderr == act_1.clean_expected_stderr
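A note on the call above, as a reading of the framework rather than a documented guarantee: connect_mngr=False appears to stop the svcmgr helper from prepending the default 'localhost:service_mgr' address and credentials, so the deliberately invalid service name 'qwe_mnb_zxc_9' reaches fbsvcmgr unchanged and the expected 'service ... is not defined' error is produced.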
|
||||
|
@ -2,18 +2,18 @@
|
||||
#
|
||||
# id: bugs.core_5061
|
||||
# title: ISQL plan output is unexpectedly truncated after a query is simplified to become shorter
|
||||
# decription:
|
||||
# decription:
|
||||
# Start of discussion: letter to dimitr, 30-dec-2015 13:57; its subject refers to core-4708.
|
||||
# It was found that the explained plan produced by ISQL ends unexpectedly on WI-V3.0.0.32256.
|
||||
# This test uses that query, but instead of verifying the plan text itself (which can be changed in the future)
|
||||
# This test uses that query, but instead of verifying the plan text itself (which can be changed in the future)
|
||||
# it is sufficient to check only that plan does NOT contain lines with ellipsis or 'truncated' or 'error'.
|
||||
# This means that the 'expected_stdout' section must be EMPTY. Otherwise expected_stdout will contain info
|
||||
# This means that the 'expected_stdout' section must be EMPTY. Otherwise expected_stdout will contain info
|
||||
# about error or invalid plan.
|
||||
#
|
||||
#
|
||||
# tracker_id: CORE-5061
|
||||
# min_versions: ['3.0']
|
||||
# versions: 3.0
|
||||
# qmid:
|
||||
# qmid:
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
@ -29,11 +29,11 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
|
||||
# test_script_1
|
||||
#---
|
||||
#
|
||||
#
|
||||
# import os
|
||||
# import subprocess
|
||||
# import time
|
||||
#
|
||||
#
|
||||
# sql_text=''' set list on;
|
||||
# set explain on;
|
||||
# set planonly;
|
||||
@ -45,54 +45,54 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# select r.i+1 from r1 r where r.i < 2
|
||||
# )
|
||||
# --select count(*) from r1;
|
||||
#
|
||||
#
|
||||
# ,r2 as (
|
||||
# select first 1 row_number() over() i
|
||||
# select first 1 row_number() over() i
|
||||
# from r1 ra
|
||||
# full join r1 rb on rb.i=ra.i
|
||||
# group by ra.i
|
||||
# having count(*)>0
|
||||
#
|
||||
# full join r1 rb on rb.i=ra.i
|
||||
# group by ra.i
|
||||
# having count(*)>0
|
||||
#
|
||||
# union all
|
||||
#
|
||||
#
|
||||
# select rx.i+1 from r2 rx
|
||||
# where rx.i+1 <= 2
|
||||
# )
|
||||
# --select count(*) from r2
|
||||
# ,r3 as (
|
||||
# select first 1 row_number() over() i
|
||||
# select first 1 row_number() over() i
|
||||
# from r2 ra
|
||||
# full join r2 rb on rb.i=ra.i
|
||||
# group by ra.i
|
||||
# having count(*)>0
|
||||
#
|
||||
# full join r2 rb on rb.i=ra.i
|
||||
# group by ra.i
|
||||
# having count(*)>0
|
||||
#
|
||||
# union all
|
||||
#
|
||||
#
|
||||
# select rx.i+1 from r3 rx
|
||||
# where rx.i+1 <= 2
|
||||
# )
|
||||
# --select count(*) from r3
|
||||
# ,r4 as (
|
||||
# select first 1 row_number() over() i
|
||||
# select first 1 row_number() over() i
|
||||
# from r3 ra
|
||||
# full join r3 rb on rb.i=ra.i
|
||||
# group by ra.i
|
||||
# having count(*)>0
|
||||
#
|
||||
# full join r3 rb on rb.i=ra.i
|
||||
# group by ra.i
|
||||
# having count(*)>0
|
||||
#
|
||||
# union all
|
||||
#
|
||||
#
|
||||
# select rx.i+1 from r4 rx
|
||||
# where rx.i+1 <= 2
|
||||
# )
|
||||
# ,rn as (
|
||||
# select row_number() over() i
|
||||
# from rdb$database r full join rdb$database r2 on r2.rdb$relation_id=r.rdb$relation_id
|
||||
# group by r.rdb$relation_id
|
||||
# having count(*)>0
|
||||
# order by r.rdb$relation_id
|
||||
# select row_number() over() i
|
||||
# from rdb$database r full join rdb$database r2 on r2.rdb$relation_id=r.rdb$relation_id
|
||||
# group by r.rdb$relation_id
|
||||
# having count(*)>0
|
||||
# order by r.rdb$relation_id
|
||||
# rows 1 to 1
|
||||
# )
|
||||
# select
|
||||
# select
|
||||
# char_length(mon$explained_plan)
|
||||
# ,(select count(*) from r4)
|
||||
# ,(select count(*) from rn)
|
||||
@ -100,20 +100,20 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# from mon$statements
|
||||
# ;
|
||||
# '''
|
||||
#
|
||||
#
|
||||
# sqltxt=open( os.path.join(context['temp_directory'],'tmp_sql_5061.sql'), 'w')
|
||||
# sqltxt.write(sql_text)
|
||||
# sqltxt.close()
|
||||
#
|
||||
#
|
||||
# sqllog=open( os.path.join(context['temp_directory'],'tmp_sql_5061.log'), 'w')
|
||||
# subprocess.call( [ context['isql_path'], dsn,'-user',user_name,'-pas',user_password,'-q', '-i', sqltxt.name],
|
||||
# stdout=sqllog,
|
||||
# stderr=subprocess.STDOUT
|
||||
# )
|
||||
# sqllog.close()
|
||||
#
|
||||
#
|
||||
# # Check content of files: 1st should contain name of temporarily created user, 2nd should contain error during getting FB log:
|
||||
#
|
||||
#
|
||||
# i=0
|
||||
# with open( sqllog.name,'r') as f:
|
||||
# for line in f:
|
||||
@ -121,23 +121,94 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# if '...' in line or 'truncated' in line or 'error' in line:
|
||||
# print("Plan is truncated or empty. Found at line "+str(i))
|
||||
# break
|
||||
#
|
||||
# # Do not remove this pause: on Windows closing of handles can take some (small) time.
|
||||
#
|
||||
# # Do not remove this pause: on Windows closing of handles can take some (small) time.
|
||||
# # Otherwise Windows(32) access error can raise here.
|
||||
# time.sleep(1)
|
||||
#
|
||||
#
|
||||
# if os.path.isfile(sqltxt.name):
|
||||
# os.remove(sqltxt.name)
|
||||
# if os.path.isfile(sqllog.name):
|
||||
# os.remove(sqllog.name)
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
test_script_1 = """
|
||||
set list on;
|
||||
set explain on;
|
||||
set planonly;
|
||||
set blob all;
|
||||
with recursive
|
||||
r1 as (
|
||||
select 1 as i from rdb$database
|
||||
union all
|
||||
select r.i+1 from r1 r where r.i < 2
|
||||
)
|
||||
--select count(*) from r1;
|
||||
|
||||
,r2 as (
|
||||
select first 1 row_number() over() i
|
||||
from r1 ra
|
||||
full join r1 rb on rb.i=ra.i
|
||||
group by ra.i
|
||||
having count(*)>0
|
||||
|
||||
union all
|
||||
|
||||
select rx.i+1 from r2 rx
|
||||
where rx.i+1 <= 2
|
||||
)
|
||||
--select count(*) from r2
|
||||
,r3 as (
|
||||
select first 1 row_number() over() i
|
||||
from r2 ra
|
||||
full join r2 rb on rb.i=ra.i
|
||||
group by ra.i
|
||||
having count(*)>0
|
||||
|
||||
union all
|
||||
|
||||
select rx.i+1 from r3 rx
|
||||
where rx.i+1 <= 2
|
||||
)
|
||||
--select count(*) from r3
|
||||
,r4 as (
|
||||
select first 1 row_number() over() i
|
||||
from r3 ra
|
||||
full join r3 rb on rb.i=ra.i
|
||||
group by ra.i
|
||||
having count(*)>0
|
||||
|
||||
union all
|
||||
|
||||
select rx.i+1 from r4 rx
|
||||
where rx.i+1 <= 2
|
||||
)
|
||||
,rn as (
|
||||
select row_number() over() i
|
||||
from rdb$database r full join rdb$database r2 on r2.rdb$relation_id=r.rdb$relation_id
|
||||
group by r.rdb$relation_id
|
||||
having count(*)>0
|
||||
order by r.rdb$relation_id
|
||||
rows 1 to 1
|
||||
)
|
||||
select
|
||||
char_length(mon$explained_plan)
|
||||
,(select count(*) from r4)
|
||||
,(select count(*) from rn)
|
||||
--,(select count(*) from rn)
|
||||
from mon$statements
|
||||
;
|
||||
"""
|
||||
|
||||
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
|
||||
@pytest.mark.version('>=3.0')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
|
||||
|
||||
def test_1(act_1: Action):
|
||||
act_1.execute()
|
||||
i = 0
|
||||
for line in act_1.stdout.splitlines():
|
||||
i += 1
|
||||
if '...' in line or 'truncated' in line or 'error' in line:
|
||||
pytest.fail(f"Plan is truncated or empty. Found at line {i}")
|
||||
|
@ -2,36 +2,46 @@
|
||||
#
|
||||
# id: bugs.core_5062
|
||||
# title: CHAR_TO_UUID on column with index throws expression evaluation not supported Human readable UUID argument for CHAR_TO_UUID must be of exact length 36
|
||||
# decription:
|
||||
# decription:
|
||||
# tracker_id: CORE-5062
|
||||
# min_versions: ['2.5.6']
|
||||
# versions: 2.5.6
|
||||
# qmid: None
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
from firebird.qa import db_factory, python_act, Action
|
||||
|
||||
# version: 2.5.6
|
||||
# resources: None
|
||||
|
||||
substitutions_1 = []
|
||||
|
||||
init_script_1 = """"""
|
||||
init_script_1 = """
|
||||
recreate table test_uuid(
|
||||
datavalue int,
|
||||
uuid char(16) character set octets,
|
||||
constraint test_uuid_unq unique(uuid)
|
||||
);
|
||||
commit;
|
||||
insert into test_uuid(datavalue, uuid) values( 1, char_to_uuid('57F2B8C7-E1D8-4B61-9086-C66D1794F2D9') );
|
||||
--insert into test_uuid(datavalue, uuid) values( 2, char_to_uuid('37F2B8C3-E1D8-4B31-9083-C33D1794F2D3') );
|
||||
commit;
|
||||
"""
|
||||
|
||||
db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
|
||||
# test_script_1
|
||||
#---
|
||||
#
|
||||
#
|
||||
# # ::: NB ::: Could not reproduce ticket issue on x86.
|
||||
# # Checked on: WI-V2.5.4.26856, WI-V3.0.0.31948 (Python = 2.7 x86, fdb = 1.5).
|
||||
#
|
||||
#
|
||||
# import fdb
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
# sql_ddl='''recreate table test_uuid(
|
||||
# datavalue int,
|
||||
# uuid char(16) character set octets,
|
||||
# datavalue int,
|
||||
# uuid char(16) character set octets,
|
||||
# constraint test_uuid_unq unique(uuid)
|
||||
# );
|
||||
# commit;
|
||||
@ -39,32 +49,37 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# --insert into test_uuid(datavalue, uuid) values( 2, char_to_uuid('37F2B8C3-E1D8-4B31-9083-C33D1794F2D3') );
|
||||
# commit;
|
||||
# '''
|
||||
#
|
||||
#
|
||||
# runProgram('isql',['-user',user_name, '-pas',user_password, dsn],sql_ddl)
|
||||
#
|
||||
#
|
||||
# con2 = fdb.connect(dsn=dsn, user=user_name, password=user_password, charset='utf8')
|
||||
#
|
||||
#
|
||||
# xcur2 = con2.cursor()
|
||||
# psSel = xcur2.prep("select datavalue from test_uuid where uuid = char_to_uuid(?)")
|
||||
#
|
||||
# psSel = xcur2.stmt("select datavalue from test_uuid where uuid = char_to_uuid(?)")
|
||||
#
|
||||
# print ( psSel.plan )
|
||||
# xcur2.execute(psSel, [('57F2B8C7-E1D8-4B61-9086-C66D1794F2D9')])
|
||||
# for row in xcur2:
|
||||
# print( row[0] )
|
||||
#
|
||||
#
|
||||
# con2.close()
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
expected_stdout_1 = """
|
||||
PLAN (TEST_UUID INDEX (TEST_UUID_UNQ))
|
||||
1
|
||||
"""
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
@pytest.mark.version('>=2.5.6')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
# Not needed for new implementation
|
||||
#expected_stdout_1 = """
|
||||
#PLAN (TEST_UUID INDEX (TEST_UUID_UNQ))
|
||||
#1
|
||||
#"""
|
||||
|
||||
@pytest.mark.version('>=3.0')
|
||||
def test_1(act_1: Action):
|
||||
with act_1.db.connect() as con:
|
||||
c = con.cursor()
|
||||
stmt = c.prepare("select datavalue from test_uuid where uuid = char_to_uuid(?)")
|
||||
assert stmt.plan == 'PLAN (TEST_UUID INDEX (TEST_UUID_UNQ))'
|
||||
result = c.execute(stmt, ['57F2B8C7-E1D8-4B61-9086-C66D1794F2D9']).fetchall()
|
||||
assert result == [(1, )]
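For context: the regression tracked by CORE-5062 was that a parameterized char_to_uuid(?) comparison against the indexed OCTETS column raised 'expression evaluation not supported', so the two assertions above verify both that the unique index TEST_UUID_UNQ is actually chosen in the plan and that the lookup returns the expected row.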
|
||||
|
||||
|
@ -1,47 +0,0 @@
|
||||
#coding:utf-8
|
||||
#
|
||||
# id: bugs.core_5068
|
||||
# title: gbak with invalid parameter crashes FB
|
||||
# decription:
|
||||
# Confirmed crash on 2.5.5.26952, but only when use 'gbak' utility (with services call).
|
||||
# As of fbsvcmgr, it works correct and reports error: Unknown switch "res_user_all_space".
|
||||
# Output when use gbak is:
|
||||
# gbak:unknown switch "USER_ALL_SPACE"
|
||||
# gbak: ERROR:Unable to complete network request to host "localhost".
|
||||
# gbak: ERROR: Error reading data from the connection.
|
||||
# gbak:Exiting before completion due to errors
|
||||
#
|
||||
# Checked on WI-V2.5.6.26962 - works OK.
|
||||
# No test needed for 3.0 thus only stub code present here in 'firebird_version': '3.0' section.
|
||||
#
|
||||
# tracker_id: CORE-5068
|
||||
# min_versions: ['2.5.5']
|
||||
# versions: 3.0
|
||||
# qmid:
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
|
||||
# version: 3.0
|
||||
# resources: None
|
||||
|
||||
substitutions_1 = []
|
||||
|
||||
init_script_1 = """"""
|
||||
|
||||
db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
|
||||
# test_script_1
|
||||
#---
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
|
||||
@pytest.mark.version('>=3.0')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
|
||||
|
@ -2,7 +2,7 @@
|
||||
#
|
||||
# id: bugs.core_5075
|
||||
# title: Regression. Trigger on DISCONNECT with dynamic SQL (ES 'insert into ...'): 1) does not work in 3.0; 2) leads FB to crash when it is recreated
|
||||
# decription:
|
||||
# decription:
|
||||
# Test does following:
|
||||
# * obtains firebird.log as it was _before_ actions;
|
||||
# * stores initial script for creating DB objects in file <f_sql_init> for further applying it twice (see ticket);
|
||||
@ -12,17 +12,18 @@
|
||||
# * print variable 'sqlres';
|
||||
# * obtains firebird.log as it is _after_ actions;
|
||||
# * compare two firebird.log versions - diff must be empty.
|
||||
#
|
||||
#
|
||||
# Checked on 3.0.0.32281, SS/SC/CS.
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# tracker_id: CORE-5075
|
||||
# min_versions: ['3.0']
|
||||
# versions: 3.0
|
||||
# qmid: None
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
from difflib import unified_diff
|
||||
from firebird.qa import db_factory, python_act, Action
|
||||
|
||||
# version: 3.0
|
||||
# resources: None
|
||||
@ -40,28 +41,28 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# from subprocess import Popen, PIPE, STDOUT
|
||||
# import time
|
||||
# import difflib
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def flush_and_close(file_handle):
|
||||
# # https://docs.python.org/2/library/os.html#os.fsync
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
|
||||
# global os
|
||||
#
|
||||
#
|
||||
# file_handle.flush()
|
||||
# if file_handle.mode not in ('r', 'rb'):
|
||||
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
|
||||
# os.fsync(file_handle.fileno())
|
||||
# file_handle.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def cleanup( f_names_list ):
|
||||
# global os
|
||||
# for i in range(len( f_names_list )):
|
||||
@ -72,17 +73,17 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# else:
|
||||
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
|
||||
# del_name = None
|
||||
#
|
||||
#
|
||||
# if del_name and os.path.isfile( del_name ):
|
||||
# os.remove( del_name )
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# def svc_get_fb_log( f_fb_log ):
|
||||
#
|
||||
#
|
||||
# import subprocess
|
||||
#
|
||||
#
|
||||
# subprocess.call([ context['fbsvcmgr_path'],
|
||||
# "localhost:service_mgr",
|
||||
# "action_get_fb_log"
|
||||
@ -90,48 +91,48 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# stdout=f_fb_log, stderr=subprocess.STDOUT
|
||||
# )
|
||||
# return
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# f_fblog_before=open( os.path.join(context['temp_directory'],'tmp_5075_fblog_before.txt'), 'w')
|
||||
# svc_get_fb_log( f_fblog_before )
|
||||
# flush_and_close( f_fblog_before )
|
||||
#
|
||||
#
|
||||
# sqltxt='''set term ^;
|
||||
# create or alter trigger trg_connect active on connect position 0 as
|
||||
# begin
|
||||
# end
|
||||
# ^
|
||||
#
|
||||
#
|
||||
# create or alter trigger trg_disc active on disconnect position 0 as
|
||||
# begin
|
||||
# end
|
||||
# ^
|
||||
# set term ;^
|
||||
# commit;
|
||||
#
|
||||
#
|
||||
# recreate sequence g;
|
||||
# recreate table log(
|
||||
# event_id int generated by default as identity constraint pk_log primary key,
|
||||
# event_name varchar(20),
|
||||
# event_name varchar(20),
|
||||
# when_it_was timestamp default 'now'
|
||||
# );
|
||||
# commit;
|
||||
#
|
||||
#
|
||||
# set term ^;
|
||||
# execute block as
|
||||
# begin
|
||||
# rdb$set_context('USER_SESSION','INITIAL_DDL','1');
|
||||
# end
|
||||
# ^
|
||||
#
|
||||
#
|
||||
# create or alter trigger trg_connect active on connect position 0 as
|
||||
# begin
|
||||
# execute statement 'insert into log(event_name) values(''connect'')'
|
||||
# with autonomous transaction;
|
||||
# end
|
||||
# ^
|
||||
#
|
||||
#
|
||||
# create or alter trigger trg_disc active on disconnect position 0 as
|
||||
# begin
|
||||
# if ( rdb$get_context('USER_SESSION','INITIAL_DDL') is null ) then
|
||||
@ -140,76 +141,77 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# end
|
||||
# ^
|
||||
# set term ;^
|
||||
# commit;
|
||||
# commit;
|
||||
# '''
|
||||
#
|
||||
#
|
||||
# f_sql_init=open( os.path.join(context['temp_directory'],'tmp_5075_init.sql'), 'w')
|
||||
# f_sql_init.write(sqltxt)
|
||||
# flush_and_close( f_sql_init )
|
||||
#
|
||||
#
|
||||
# sqlres=subprocess.check_output([context['isql_path'], dsn, "-nod", "-i", f_sql_init.name], stderr=subprocess.STDOUT)
|
||||
# print(sqlres) # Must be empty (no errors)
|
||||
#
|
||||
# sqltxt='''set list on;set count on; select event_id, event_name from log;
|
||||
#
|
||||
# sqltxt='''set list on;set count on; select event_id, event_name from log;
|
||||
# '''
|
||||
#
|
||||
#
|
||||
# # http://stackoverflow.com/questions/8475290/how-do-i-write-to-a-python-subprocess-stdin
|
||||
#
|
||||
#
|
||||
# sqlres=''
|
||||
#
|
||||
#
|
||||
# p = Popen([context['isql_path'], dsn], stdout=PIPE, stdin=PIPE, stderr=subprocess.STDOUT )
|
||||
# sqlres += p.communicate(input=sqltxt)[0]
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# p = Popen([context['isql_path'], dsn], stdout=PIPE, stdin=PIPE, stderr=subprocess.STDOUT )
|
||||
# sqlres += p.communicate(input=sqltxt)[0]
|
||||
#
|
||||
#
|
||||
# p = Popen([context['isql_path'], dsn], stdout=PIPE, stdin=PIPE, stderr=subprocess.STDOUT )
|
||||
# sqlres += p.communicate(input=sqltxt)[0]
|
||||
#
|
||||
#
|
||||
# print(sqlres)
|
||||
#
|
||||
#
|
||||
# sqlres=subprocess.check_output([context['isql_path'], dsn, "-nod", "-i", f_sql_init.name], stderr=subprocess.STDOUT)
|
||||
# print(sqlres) # Must be empty (no errors)
|
||||
#
|
||||
#
|
||||
# p = Popen([context['isql_path'], dsn], stdout=PIPE, stdin=PIPE, stderr=subprocess.STDOUT )
|
||||
# sqlres = p.communicate(input=sqltxt)[0]
|
||||
# print(sqlres)
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# f_fblog_after=open( os.path.join(context['temp_directory'],'tmp_5075_fblog_after.txt'), 'w')
|
||||
# svc_get_fb_log( f_fblog_after )
|
||||
# flush_and_close( f_fblog_after )
|
||||
#
|
||||
#
|
||||
# time.sleep(1)
|
||||
#
|
||||
#
|
||||
# oldfb=open(f_fblog_before.name, 'r')
|
||||
# newfb=open(f_fblog_after.name, 'r')
|
||||
#
|
||||
#
|
||||
# difftext = ''.join(difflib.unified_diff(
|
||||
# oldfb.readlines(),
|
||||
# oldfb.readlines(),
|
||||
# newfb.readlines()
|
||||
# ))
|
||||
# oldfb.close()
|
||||
# newfb.close()
|
||||
#
|
||||
#
|
||||
# f_diff_txt=open( os.path.join(context['temp_directory'],'tmp_5075_diff.txt'), 'w')
|
||||
# f_diff_txt.write(difftext)
|
||||
# flush_and_close( f_diff_txt )
|
||||
#
|
||||
#
|
||||
# with open( f_diff_txt.name,'r') as f:
|
||||
# for line in f:
|
||||
# print('new messages in firebird.log: '+line)
|
||||
#
|
||||
#
|
||||
# # Cleanup.
|
||||
# ##########
|
||||
# # do NOT remove this pause otherwise some of logs will not be available for deletion and test will finish with
|
||||
# # do NOT remove this pause otherwise some of logs will not be available for deletion and test will finish with
|
||||
# # Exception raised while executing Python test script. exception: WindowsError: 32
|
||||
# time.sleep(1)
|
||||
# cleanup((f_sql_init,f_fblog_before,f_fblog_after,f_diff_txt))
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
expected_stdout_1 = """
|
||||
EVENT_ID 1
|
||||
@ -236,11 +238,81 @@ expected_stdout_1 = """
|
||||
EVENT_ID 1
|
||||
EVENT_NAME connect
|
||||
Records affected: 1
|
||||
"""
|
||||
"""
|
||||
|
||||
init_script = """
|
||||
set term ^;
|
||||
create or alter trigger trg_connect active on connect position 0 as
|
||||
begin
|
||||
end
|
||||
^
|
||||
|
||||
create or alter trigger trg_disc active on disconnect position 0 as
|
||||
begin
|
||||
end
|
||||
^
|
||||
set term ;^
|
||||
commit;
|
||||
|
||||
recreate sequence g;
|
||||
recreate table log(
|
||||
event_id int generated by default as identity constraint pk_log primary key,
|
||||
event_name varchar(20),
|
||||
when_it_was timestamp default 'now'
|
||||
);
|
||||
commit;
|
||||
|
||||
set term ^;
|
||||
execute block as
|
||||
begin
|
||||
rdb$set_context('USER_SESSION','INITIAL_DDL','1');
|
||||
end
|
||||
^
|
||||
|
||||
create or alter trigger trg_connect active on connect position 0 as
|
||||
begin
|
||||
execute statement 'insert into log(event_name) values(''connect'')'
|
||||
with autonomous transaction;
|
||||
end
|
||||
^
|
||||
|
||||
create or alter trigger trg_disc active on disconnect position 0 as
|
||||
begin
|
||||
if ( rdb$get_context('USER_SESSION','INITIAL_DDL') is null ) then
|
||||
execute statement 'insert into log(event_name) values(''disconnect'')'
|
||||
with autonomous transaction;
|
||||
end
|
||||
^
|
||||
set term ;^
|
||||
commit;
|
||||
"""
|
||||
|
||||
@pytest.mark.version('>=3.0')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
|
||||
|
||||
def test_1(act_1: Action, capsys):
|
||||
with act_1.connect_server() as srv:
|
||||
srv.info.get_log()
|
||||
log_before = srv.readlines()
|
||||
#
|
||||
act_1.isql(switches=['-nod'], input=init_script)
|
||||
# Tests 3x
|
||||
test_cmd = 'set list on;set count on; select event_id, event_name from log;'
|
||||
for step in range(3):
|
||||
act_1.reset()
|
||||
act_1.isql(switches=[], input=test_cmd)
|
||||
print(act_1.stdout)
|
||||
# Init again
|
||||
act_1.reset()
|
||||
act_1.isql(switches=['-nod'], input=init_script)
|
||||
# Test again
|
||||
act_1.reset()
|
||||
act_1.isql(switches=[], input=test_cmd)
|
||||
print(act_1.stdout)
|
||||
# Get log again
|
||||
with act_1.connect_server() as srv:
|
||||
srv.info.get_log()
|
||||
log_after = srv.readlines()
|
||||
#
|
||||
act_1.expected_stdout = expected_stdout_1
|
||||
act_1.stdout = capsys.readouterr().out
|
||||
assert act_1.clean_stdout == act_1.clean_expected_stdout
|
||||
assert list(unified_diff(log_before, log_after)) == []
|
||||
|
@ -2,30 +2,30 @@
|
||||
#
|
||||
# id: bugs.core_5077
|
||||
# title: ISQL 'SHOW DATABASE' command does not show encryption status of database
|
||||
# decription:
|
||||
# decription:
|
||||
# We create new database ('tmp_core_5077.fdb') and try to encrypt it using IBSurgeon Demo Encryption package
|
||||
# ( https://ib-aid.com/download-demo-firebird-encryption-plugin/ ; https://ib-aid.com/download/crypt/CryptTest.zip )
|
||||
# License file plugins\\dbcrypt.conf with unlimited expiration was provided by IBSurgeon to Firebird Foundation (FF).
|
||||
# This file was preliminary stored in FF Test machine.
|
||||
# Test assumes that this file and all necessary libraries have already been stored into FB_HOME and %FB_HOME%\\plugins.
|
||||
#
|
||||
#
|
||||
# After test database will be created, we try to encrypt it using 'alter database encrypt with <plugin_name> ...' command
|
||||
# (where <plugin_name> = dbcrypt - name of .dll in FB_HOME\\plugins\\ folder that implements encryption).
|
||||
# Then we allow engine to complete this job - take delay about 1..2 seconds BEFORE detach from database.
|
||||
#
|
||||
#
|
||||
# After this we run ISQL with 'SHOW DATABASE' command. Its output has to contain string 'Database encrypted'.
|
||||
#
|
||||
#
|
||||
# Finally, we change this temp DB state to full shutdown in order to have 100% ability to drop this file.
|
||||
#
|
||||
#
|
||||
# Checked on: 4.0.0.1629: OK, 6.264s; 3.0.5.33179: OK, 4.586s.
|
||||
#
|
||||
#
|
||||
# 13.04.2021. Adapted for run both on Windows and Linux. Checked on:
|
||||
# Windows: 4.0.0.2416
|
||||
# Linux: 4.0.0.2416
|
||||
# Note: different names for encryption plugin and key holder are used for Windows vs Linux:
|
||||
# PLUGIN_NAME = 'dbcrypt' if os.name == 'nt' else '"fbSampleDbCrypt"'
|
||||
# KHOLDER_NAME = 'KeyHolder' if os.name == 'nt' else "fbSampleKeyHolder"
|
||||
#
|
||||
#
|
||||
# tracker_id: CORE-5077
|
||||
# min_versions: ['3.0.0']
|
||||
# versions: 3.0
|
||||
@ -45,36 +45,36 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
|
||||
# test_script_1
|
||||
#---
|
||||
#
|
||||
#
|
||||
# import os
|
||||
# import time
|
||||
# import subprocess
|
||||
# import re
|
||||
# import fdb
|
||||
# from fdb import services
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
# engine = db_conn.engine_version
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def flush_and_close( file_handle ):
|
||||
# # https://docs.python.org/2/library/os.html#os.fsync
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
|
||||
# global os
|
||||
#
|
||||
#
|
||||
# file_handle.flush()
|
||||
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
|
||||
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
|
||||
# os.fsync(file_handle.fileno())
|
||||
# file_handle.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def cleanup( f_names_list ):
|
||||
# global os
|
||||
# for i in range(len( f_names_list )):
|
||||
@ -86,18 +86,18 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
|
||||
# print('type(f_names_list[i])=',type(f_names_list[i]))
|
||||
# del_name = None
|
||||
#
|
||||
#
|
||||
# if del_name and os.path.isfile( del_name ):
|
||||
# os.remove( del_name )
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# tmpfdb='$(DATABASE_LOCATION)'+'tmp_core_5077.fdb'
|
||||
#
|
||||
#
|
||||
# cleanup( (tmpfdb,) )
|
||||
#
|
||||
#
|
||||
# con = fdb.create_database( dsn = 'localhost:'+tmpfdb )
|
||||
#
|
||||
#
|
||||
# # 14.04.2021.
|
||||
# # Name of encryption plugin depends on OS:
|
||||
# # * for Windows we (currently) use plugin by IBSurgeon, its name is 'dbcrypt';
|
||||
@ -107,7 +107,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# #
|
||||
# PLUGIN_NAME = 'dbcrypt' if os.name == 'nt' else ( '"fbSampleDbCrypt"' if engine >= 4.0 else '"DbCrypt_example"')
|
||||
# KHOLDER_NAME = 'KeyHolder' if os.name == 'nt' else "fbSampleKeyHolder"
|
||||
#
|
||||
#
|
||||
# cur = con.cursor()
|
||||
# cur.execute('alter database encrypt with %(PLUGIN_NAME)s key Red' % locals())
|
||||
# ### DOES NOT WORK ON LINUX! ISSUES 'TOKEN UNKNOWN' !! >>> con.execute_immediate('alter database encrypt with %(PLUGIN_NAME)s key Red' % locals()) // sent letter to Alex and dimitr, 14.04.2021
|
||||
@ -115,66 +115,64 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# time.sleep(2)
|
||||
# # ^
|
||||
# # +-------- !! ALLOW BACKGROUND ENCRYPTION PROCESS TO COMPLETE ITS JOB !!
|
||||
#
|
||||
#
|
||||
# con.close()
|
||||
#
|
||||
#
|
||||
# ########################################
|
||||
# # run ISQL with 'SHOW DATABASE' command:
|
||||
# ########################################
|
||||
# f_isql_cmd=open( os.path.join(context['temp_directory'],'tmp_5077.sql'), 'w')
|
||||
# f_isql_cmd.write('show database;')
|
||||
# flush_and_close( f_isql_cmd )
|
||||
#
|
||||
#
|
||||
# f_isql_log=open( os.path.join(context['temp_directory'], 'tmp_5077.log'), 'w')
|
||||
# f_isql_err=open( os.path.join(context['temp_directory'], 'tmp_5077.err'), 'w')
|
||||
# subprocess.call( [context['isql_path'], 'localhost:' + tmpfdb, '-q', '-n', '-i', f_isql_cmd.name ], stdout=f_isql_log, stderr = f_isql_err)
|
||||
# flush_and_close( f_isql_log )
|
||||
# flush_and_close( f_isql_err )
|
||||
#
|
||||
#
|
||||
# #---------------------------- shutdown temp DB --------------------
|
||||
#
|
||||
#
|
||||
# f_dbshut_log = open( os.path.join(context['temp_directory'],'tmp_dbshut_5077.log'), 'w')
|
||||
# subprocess.call( [ context['gfix_path'], 'localhost:'+tmpfdb, "-shut", "full", "-force", "0" ],
|
||||
# stdout = f_dbshut_log,
|
||||
# stderr = subprocess.STDOUT
|
||||
# )
|
||||
# flush_and_close( f_dbshut_log )
|
||||
#
|
||||
#
|
||||
# allowed_patterns = (
|
||||
# re.compile( 'Database(\\s+not){0,1}\\s+encrypted\\.*', re.IGNORECASE),
|
||||
# )
|
||||
#
|
||||
#
|
||||
# with open( f_isql_log.name,'r') as f:
|
||||
# for line in f:
|
||||
# match2some = filter( None, [ p.search(line) for p in allowed_patterns ] )
|
||||
# if match2some:
|
||||
# print( (' '.join( line.split()).upper() ) )
|
||||
#
|
||||
#
|
||||
# with open( f_isql_err.name,'r') as f:
|
||||
# for line in f:
|
||||
# print("Unexpected error when doing 'SHOW DATABASE': "+line)
|
||||
#
|
||||
#
|
||||
# with open( f_dbshut_log.name,'r') as f:
|
||||
# for line in f:
|
||||
# print("Unexpected error on SHUTDOWN temp database: "+line)
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# # CLEANUP
|
||||
# #########
|
||||
# time.sleep(1)
|
||||
#
|
||||
#
|
||||
# cleanup( ( f_isql_log, f_isql_err, f_isql_cmd, f_dbshut_log,tmpfdb ) )
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
expected_stdout_1 = """
|
||||
DATABASE ENCRYPTED
|
||||
"""
|
||||
"""
|
||||
|
||||
@pytest.mark.version('>=3.0')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
|
||||
|
||||
pytest.skip("Test depends on 3rd party encryption plugin")
|
||||
#pytest.fail("Test not IMPLEMENTED")
|
||||
|
@ -2,7 +2,7 @@
|
||||
#
|
||||
# id: bugs.core_5078
|
||||
# title: "Invalid BLOB ID" error
|
||||
# decription:
|
||||
# decription:
|
||||
# Confirmed, got exception during selecting data on Classic WI-V2.5.5.26952, x64.
|
||||
# STDERR:
|
||||
# Statement failed, SQLSTATE = 42000
|
||||
@ -15,14 +15,16 @@
|
||||
# SUBS 2806
|
||||
# MSGTYPE 2524
|
||||
# NOTIFYPARAMS 1482
|
||||
#
|
||||
#
|
||||
# tracker_id: CORE-5078
|
||||
# min_versions: ['2.5.6']
|
||||
# versions: 2.5.6
|
||||
# qmid: None
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
from pathlib import Path
|
||||
import zipfile
|
||||
from firebird.qa import db_factory, python_act, Action, temp_file
|
||||
|
||||
# version: 2.5.6
|
||||
# resources: None
|
||||
@ -35,36 +37,36 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
|
||||
# test_script_1
|
||||
#---
|
||||
#
|
||||
#
|
||||
# import os
|
||||
# import sys
|
||||
# import subprocess
|
||||
# from subprocess import Popen
|
||||
# import zipfile
|
||||
# import time
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
#
|
||||
#
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def flush_and_close(file_handle):
|
||||
# # https://docs.python.org/2/library/os.html#os.fsync
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
|
||||
# global os
|
||||
#
|
||||
#
|
||||
# file_handle.flush()
|
||||
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
|
||||
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
|
||||
# os.fsync(file_handle.fileno())
|
||||
# file_handle.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def cleanup( f_names_list ):
|
||||
# global os
|
||||
# for i in range(len( f_names_list )):
|
||||
@ -75,58 +77,66 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# else:
|
||||
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
|
||||
# del_name = None
|
||||
#
|
||||
#
|
||||
# if del_name and os.path.isfile( del_name ):
|
||||
# os.remove( del_name )
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# zf = zipfile.ZipFile( os.path.join(context['files_location'],'core_5078.zip') )
|
||||
# zf.extractall( context['temp_directory'] )
|
||||
# zf.close()
|
||||
#
|
||||
#
|
||||
# # Result: file tmp_core_5078.fbk is extracted into context['temp_directory']
|
||||
#
|
||||
#
|
||||
# tmp_fbk=os.path.join(context['temp_directory'],'tmp_core_5078.fbk')
|
||||
# tmp_fdb=os.path.join(context['temp_directory'],'tmp_core_5078.fdb')
|
||||
#
|
||||
#
|
||||
# cleanup( (tmp_fdb,) )
|
||||
#
|
||||
#
|
||||
# # Restoring from .fbk:
|
||||
# runProgram( context['fbsvcmgr_path'],['localhost:service_mgr','action_restore','dbname',tmp_fdb,'bkp_file',tmp_fbk])
|
||||
#
|
||||
#
|
||||
# f_sql=open( os.path.join(context['temp_directory'],'tmp_isql_5078.sql'), 'w')
|
||||
# f_sql.write('set list on; select * from do_changeTxStatus;')
|
||||
# flush_and_close( f_sql )
|
||||
#
|
||||
#
|
||||
# f_log = open(os.devnull, 'w')
|
||||
# f_err = open( os.path.join(context['temp_directory'],'tmp_isql_5078.err'), 'w')
|
||||
#
|
||||
#
|
||||
# subprocess.call( [context['isql_path'], 'localhost:'+tmp_fdb, "-i", f_sql.name],stdout=f_log,stderr=f_err)
|
||||
#
|
||||
#
|
||||
# flush_and_close( f_log )
|
||||
# flush_and_close( f_err )
|
||||
#
|
||||
#
|
||||
# time.sleep(1)
|
||||
#
|
||||
#
|
||||
# # This file should be EMPTY:
|
||||
# ###########################
|
||||
# with open(f_err.name) as f:
|
||||
# print( f.read() )
|
||||
#
|
||||
#
|
||||
# # CLEANUP
|
||||
# ##########
|
||||
# time.sleep(1)
|
||||
# cleanup( (tmp_fbk, tmp_fdb, f_err, f_sql) )
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
fbk_file_1 = temp_file('tmp_core_5078.fbk')
|
||||
fdb_file_1 = temp_file('tmp_core_5078.fdb')
|
||||
|
||||
@pytest.mark.version('>=2.5.6')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
|
||||
|
||||
def test_1(act_1: Action, fbk_file_1: Path, fdb_file_1: Path):
|
||||
script_file = zipfile.Path(act_1.vars['files'] / 'core_5078.zip',
|
||||
at='tmp_core_5078.fbk')
|
||||
fbk_file_1.write_bytes(script_file.read_bytes())
|
||||
with act_1.connect_server() as srv:
|
||||
srv.database.restore(database=str(fdb_file_1), backup=str(fbk_file_1))
|
||||
srv.wait()
|
||||
# This should execute without errors
|
||||
act_1.isql(switches=[str(fdb_file_1)], input='set list on; select * from do_changeTxStatus;',
|
||||
connect_db=False)
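Design note, an assumption about the framework rather than documented behaviour: act_1.isql() is expected to fail the test on its own if ISQL exits with an error, which is exactly where the original 'invalid BLOB ID' message would surface, so no explicit assertion on the output is required here.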
|
||||
|
@ -2,16 +2,16 @@
|
||||
#
|
||||
# id: bugs.core_5085
|
||||
# title: Allow to fixup (nbackup) database via Services API
|
||||
# decription:
|
||||
# decription:
|
||||
# Checked on 4.0.0.2119: OK.
|
||||
#
|
||||
#
|
||||
# tracker_id: CORE-5085
|
||||
# min_versions: ['4.0']
|
||||
# versions: 4.0
|
||||
# qmid:
|
||||
# qmid:
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
from firebird.qa import db_factory, python_act, Action
|
||||
|
||||
# version: 4.0
|
||||
# resources: None
|
||||
@ -24,38 +24,38 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
|
||||
# test_script_1
|
||||
#---
|
||||
#
|
||||
#
|
||||
# import os
|
||||
# import time
|
||||
# import subprocess
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
#
|
||||
#
|
||||
# db_source = db_conn.database_name
|
||||
# db_delta = db_source +'.delta'
|
||||
# nbk_level_0 = os.path.splitext(db_source)[0] + '.nbk00'
|
||||
# #'$(DATABASE_LOCATION)tmp_core_5085.nbk_00'
|
||||
#
|
||||
#
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def flush_and_close(file_handle):
|
||||
# # https://docs.python.org/2/library/os.html#os.fsync
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
|
||||
# global os
|
||||
#
|
||||
#
|
||||
# file_handle.flush()
|
||||
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
|
||||
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
|
||||
# os.fsync(file_handle.fileno())
|
||||
# file_handle.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def cleanup( f_names_list ):
|
||||
# global os
|
||||
# for i in range(len( f_names_list )):
|
||||
@ -66,25 +66,25 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# else:
|
||||
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
|
||||
# del_name = None
|
||||
#
|
||||
#
|
||||
# if del_name and os.path.isfile( del_name ):
|
||||
# os.remove( del_name )
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# cleanup( ( db_delta, nbk_level_0, ) )
|
||||
#
|
||||
#
|
||||
# # 1. Create standby copy: make clone of source DB using nbackup -b 0:
|
||||
# ########################
|
||||
# f_nbk0_log=open( os.path.join(context['temp_directory'],'tmp_nbk0_5085.log'), 'w')
|
||||
# f_nbk0_err=open( os.path.join(context['temp_directory'],'tmp_nbk0_5085.err'), 'w')
|
||||
#
|
||||
#
|
||||
# subprocess.call( [context['nbackup_path'], '-L', db_source], stdout=f_nbk0_log, stderr=f_nbk0_err )
|
||||
# subprocess.call( [context['fbsvcmgr_path'], 'service_mgr', 'action_nfix', 'dbname', db_source], stdout=f_nbk0_log, stderr=f_nbk0_err )
|
||||
#
|
||||
#
|
||||
# flush_and_close( f_nbk0_log )
|
||||
# flush_and_close( f_nbk0_err )
|
||||
#
|
||||
#
|
||||
# # test connect to ensure that all OK after fixup:
|
||||
# ##############
|
||||
# con=fdb.connect(dsn = dsn)
|
||||
@ -94,7 +94,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# print(r[0])
|
||||
# cur.close()
|
||||
# con.close()
|
||||
#
|
||||
#
|
||||
# # Check. All of these files must be empty:
|
||||
# ###################################
|
||||
# f_list=(f_nbk0_log, f_nbk0_err)
|
||||
@ -103,23 +103,32 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# for line in f:
|
||||
# if line.split():
|
||||
# print( 'UNEXPECTED output in file '+f_list[i].name+': '+line.upper() )
|
||||
#
|
||||
#
|
||||
# # Cleanup.
|
||||
# ##########
|
||||
# time.sleep(1)
|
||||
# cleanup( (f_nbk0_log,f_nbk0_err,db_delta, nbk_level_0) )
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
expected_stdout_1 = """
|
||||
0
|
||||
"""
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
#expected_stdout_1 = """
|
||||
#0
|
||||
#"""
|
||||
|
||||
@pytest.mark.version('>=4.0')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
|
||||
|
||||
def test_1(act_1: Action):
|
||||
act_1.nbackup(switches=['-l', str(act_1.db.db_path)])
|
||||
#with act_1.connect_server() as srv:
|
||||
# This raises error in new FB OO API while calling spb.insert_string(SPBItem.DBNAME, database):
|
||||
# "Internal error when using clumplet API: attempt to store data in dataless clumplet"
|
||||
#srv.database.nfix_database(database=str(act_1.db.db_path))
|
||||
# So we have to use svcmgr...
|
||||
act_1.reset()
|
||||
act_1.svcmgr(switches=['action_nfix', 'dbname', str(act_1.db.db_path)])
|
||||
with act_1.db.connect() as con:
|
||||
c = con.cursor()
|
||||
result = c.execute('select mon$backup_state from mon$database').fetchall()
|
||||
assert result == [(0, )]
|
||||
|