6
0
mirror of https://github.com/FirebirdSQL/firebird-qa.git synced 2025-01-22 13:33:07 +01:00

Test with trace refactoring and more Python tests

This commit is contained in:
Pavel Císař 2021-12-07 20:09:36 +01:00
parent c13d40082f
commit 76b49666fa
32 changed files with 1338 additions and 1478 deletions

View File

@ -40,7 +40,6 @@
# qmid: None
import pytest
from threading import Thread, Barrier
from firebird.qa import db_factory, python_act, Action
# version: 2.5
@ -264,41 +263,21 @@ test_script_1 = """
set term ;^
"""
def trace_session(act: Action, b: Barrier):
cfg30 = ['# Trace config, format for 3.0. Generated auto, do not edit!',
f'database=%[\\\\/]{act.db.db_path.name}',
'{',
' enabled = true',
' #include_filter',
' exclude_filter = %no_trace%',
' log_connections = true',
' log_transactions = true',
' log_statement_finish = true',
' print_plan = true',
' print_perf = true',
' time_threshold = 0',
'}']
with act.connect_server() as srv:
srv.trace.start(config='\n'.join(cfg30))
b.wait()
for line in srv:
print(line)
trace_1 = ['exclude_filter = %no_trace%',
'log_connections = true',
'log_transactions = true',
'log_statement_finish = true',
'print_plan = true',
'print_perf = true',
'time_threshold = 0'
]
@pytest.mark.version('>=3.0')
def test_1(act_1: Action, capsys):
b = Barrier(2)
trace_thread = Thread(target=trace_session, args=[act_1, b])
trace_thread.start()
b.wait()
act_1.isql(switches=['-n'], input=test_script_1)
with act_1.connect_server() as srv:
for session in list(srv.trace.sessions.keys()):
srv.trace.stop(session_id=session)
trace_thread.join(1.0)
if trace_thread.is_alive():
pytest.fail('Trace thread still alive')
def test_1(act_1: Action):
with act_1.trace(db_events=trace_1):
act_1.isql(switches=['-n'], input=test_script_1)
act_1.expected_stdout = expected_stdout_1
act_1.stdout = capsys.readouterr().out
act_1.trace_to_stdout()
assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@ -16,7 +16,6 @@
# qmid: None
import pytest
from threading import Thread, Barrier
from difflib import unified_diff
from firebird.qa import db_factory, python_act, Action
@ -482,65 +481,34 @@ select '*Лев Николаевич Толстой *
' from rdb$database;
"""
def trace_session(act: Action, b: Barrier):
cfg30 = ['# Trace config, format for 3.0. Generated auto, do not edit!',
f'database=%[\\\\/]{act.db.db_path.name}',
'{',
' enabled = true',
' include_filter = %(SELECT|INSERT|UPDATE|DELETE)%',
' exclude_filter = %no_trace%',
' log_connections = true',
' log_transactions = true',
' log_statement_prepare = true',
' log_statement_free = true',
' log_statement_start = true',
' log_statement_finish = true',
' log_trigger_start = true',
' log_trigger_finish = true',
' log_context = true',
' print_plan = true',
' print_perf = true',
' time_threshold = 0',
' max_sql_length = 5000',
' max_blr_length = 500',
' max_dyn_length = 500',
' max_arg_length = 80',
' max_arg_count = 30',
'}',
'services {',
' enabled = false',
' log_services = false',
' log_service_query = false',
'}']
with act.connect_server() as srv:
srv.encoding = 'utf8'
srv.trace.start(config='\n'.join(cfg30))
b.wait()
for line in srv:
pass # we are not interested in trace output
trace_1 = ['include_filter = %(SELECT|INSERT|UPDATE|DELETE)%',
'exclude_filter = %no_trace%',
'log_connections = true',
'log_transactions = true',
'log_statement_prepare = true',
'log_statement_free = true',
'log_statement_start = true',
'log_statement_finish = true',
'log_trigger_start = true',
'log_trigger_finish = true',
'log_context = true',
'print_plan = true',
'print_perf = true',
'time_threshold = 0',
'max_sql_length = 5000',
'max_blr_length = 500',
'max_dyn_length = 500',
'max_arg_length = 80',
'max_arg_count = 30',
]
@pytest.mark.version('>=2.5.1')
def test_1(act_1: Action):
b = Barrier(2)
# Get content of firebird.log BEFORE test
with act_1.connect_server() as srv:
srv.info.get_log()
log_before = srv.readlines()
trace_thread = Thread(target=trace_session, args=[act_1, b])
trace_thread.start()
b.wait()
# RUN QUERY WITH NON-ASCII CHARACTERS
act_1.isql(switches=['-n', '-q'], input=test_script_1)
with act_1.connect_server() as srv:
for session in list(srv.trace.sessions.keys()):
srv.trace.stop(session_id=session)
trace_thread.join(1.0)
if trace_thread.is_alive():
pytest.fail('Trace thread still alive')
log_before = act_1.get_firebird_log()
with act_1.trace(db_events=trace_1, keep_log=False, encoding='utf8'):
act_1.isql(switches=['-n', '-q'], input=test_script_1)
# Get content of firebird.log AFTER test
with act_1.connect_server() as srv:
srv.info.get_log()
log_after = srv.readlines()
log_after = act_1.get_firebird_log()
assert '\n'.join(unified_diff(log_before, log_after)) == ''

View File

@ -20,7 +20,6 @@
# qmid: None
import pytest
from threading import Thread, Barrier
from firebird.qa import db_factory, python_act, Action
# version: 2.5
@ -188,37 +187,17 @@ expected_stdout_1 = """
SYSDBA:NONE, ISO88591, TCP
"""
def trace_session(act: Action, b: Barrier):
cfg30 = ['# Trace config, format for 3.0. Generated auto, do not edit!',
f'database=%[\\\\/]{act.db.db_path.name}',
'{',
' enabled = true',
' log_connections = true',
' time_threshold = 0',
'}']
with act.connect_server() as srv:
srv.trace.start(config='\n'.join(cfg30))
b.wait()
for line in srv:
print(line.upper())
trace_1 = ['log_connections = true',
'time_threshold = 0',
]
@pytest.mark.version('>=3.0')
def test_1(act_1: Action, capsys):
b = Barrier(2)
trace_thread = Thread(target=trace_session, args=[act_1, b])
trace_thread.start()
b.wait()
# make two connections with different charset
with act_1.db.connect(charset='utf8'):
pass
with act_1.db.connect(charset='iso8859_1'):
pass
with act_1.connect_server() as srv:
for session in list(srv.trace.sessions.keys()):
srv.trace.stop(session_id=session)
trace_thread.join(1.0)
if trace_thread.is_alive():
pytest.fail('Trace thread still alive')
def test_1(act_1: Action):
with act_1.trace(db_events=trace_1):
with act_1.db.connect(charset='utf8'):
pass
with act_1.db.connect(charset='iso8859_1'):
pass
act_1.expected_stdout = expected_stdout_1
act_1.stdout = capsys.readouterr().out
act_1.trace_to_stdout(upper=True)
assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@ -33,8 +33,6 @@
# qmid: None
import pytest
import time
from threading import Thread, Barrier
from io import BytesIO
from firebird.qa import db_factory, python_act, Action, temp_file
from firebird.driver import SrvStatFlag
@ -267,43 +265,20 @@ expected_stdout_1 = """
"BACKUP DATABASE"
"""
def trace_session(act: Action, barrier: Barrier):
cfg30 = ['services',
'{',
' enabled = true',
' log_services = true',
' exclude_filter = "Database Stats"',
'}']
with act.connect_server() as srv:
srv.trace.start(config='\n'.join(cfg30))
barrier.wait()
for line in srv:
print(line.upper())
trace_1 = ['log_services = true',
'exclude_filter = "Database Stats"',
]
temp_file_1 = temp_file('test-file')
@pytest.mark.version('>=3.0')
def test_1(act_1: Action, capsys, temp_file_1):
b = Barrier(2)
trace_thread = Thread(target=trace_session, args=[act_1, b])
trace_thread.start()
with act_1.connect_server() as srv:
# Make some service requests
b.wait()
def test_1(act_1: Action, temp_file_1):
with act_1.trace(svc_events=trace_1), act_1.connect_server() as srv:
srv.database.set_sweep_interval(database=act_1.db.db_path, interval=1234321)
srv.database.get_statistics(database=act_1.db.db_path, flags=SrvStatFlag.HDR_PAGES)
srv.wait()
srv.database.backup(database=act_1.db.db_path, backup=temp_file_1)
srv.wait()
#
time.sleep(2)
for session in list(srv.trace.sessions.keys()):
srv.trace.stop(session_id=session)
trace_thread.join(2.0)
if trace_thread.is_alive():
pytest.fail('Trace thread still alive')
act_1.expected_stdout = expected_stdout_1
act_1.stdout = capsys.readouterr().out
act_1.trace_to_stdout(upper=True)
assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@ -20,8 +20,6 @@
# qmid: None
import pytest
import time
from threading import Thread, Barrier
from firebird.qa import db_factory, python_act, Action
# version: 2.5.1
@ -197,35 +195,14 @@ expected_stdout_1 = """
ELEMENT "LOG_STATEMENT_FINISH": "FOO" IS NOT A VALID
"""
def trace_session(act: Action, b: Barrier):
cfg30 = ['# Trace config, format for 3.0. Generated auto, do not edit!',
f'database=%[\\\\/]{act.db.db_path.name}',
'{',
' enabled = true',
' time_threshold = 0',
' log_statement_finish = foo',
'}']
with act.connect_server() as srv:
srv.trace.start(config='\n'.join(cfg30))
b.wait()
for line in srv:
print(line.upper())
trace_1 = ['time_threshold = 0',
'log_statement_finish = foo'
]
@pytest.mark.version('>=3.0')
def test_1(act_1: Action, capsys):
b = Barrier(2)
trace_thread = Thread(target=trace_session, args=[act_1, b])
trace_thread.start()
b.wait()
act_1.isql(switches=['-n'], input='select 1 as c from rdb$database;')
time.sleep(2)
with act_1.connect_server() as srv:
for session in list(srv.trace.sessions.keys()):
srv.trace.stop(session_id=session)
trace_thread.join(1.0)
if trace_thread.is_alive():
pytest.fail('Trace thread still alive')
def test_1(act_1: Action):
with act_1.trace(db_events=trace_1):
act_1.isql(switches=['-n'], input='select 1 as c from rdb$database;')
act_1.expected_stdout = expected_stdout_1
act_1.stdout = capsys.readouterr().out
act_1.trace_to_stdout(upper=True)
assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@ -41,8 +41,6 @@
# qmid: None
import pytest
import time
from threading import Thread, Barrier
from firebird.qa import db_factory, python_act, Action
from firebird.driver import DbWriteMode
@ -451,23 +449,10 @@ expected_stdout_1 = """
Check ratio_marks_to_row_count_for_GTT_PRESERVE_ROWS: OK
"""
def trace_session(act: Action, b: Barrier):
cfg30 = ['# Trace config, format for 3.0. Generated auto, do not edit!',
f'database=%[\\\\/]{act.db.db_path.name}',
'{',
' enabled = true',
' log_transactions = true',
' print_perf = true',
#' log_connections = true',
#' log_procedure_start = true',
#' log_procedure_finish = true',
' log_initfini = false',
'}']
with act.connect_server() as srv:
srv.trace.start(config='\n'.join(cfg30))
b.wait()
for line in srv:
print(line)
trace_1 = ['log_transactions = true',
'print_perf = true',
'log_initfini = false',
]
@pytest.mark.version('>=2.5.2')
def test_1(act_1: Action, capsys):
@ -482,29 +467,15 @@ def test_1(act_1: Action, capsys):
c.call_procedure('sp_fill_fix_tab', [NUM_ROWS_TO_BE_ADDED])
con.commit()
#
b = Barrier(2)
trace_thread = Thread(target=trace_session, args=[act_1, b])
trace_thread.start()
b.wait()
#
with act_1.db.connect() as con1:
c = con1.cursor()
c.call_procedure('sp_fill_gtt_sav_rows', [NUM_ROWS_TO_BE_ADDED])
con1.rollback()
with act_1.db.connect() as con2:
c = con2.cursor()
c.call_procedure('sp_fill_gtt_del_rows', [NUM_ROWS_TO_BE_ADDED])
con2.rollback()
# Somehow sleep is necessary otherwise "sp_fill_gtt_del_rows" will not show up in trace log
time.sleep(3)
with act_1.connect_server() as srv:
for session in list(srv.trace.sessions.keys()):
srv.trace.stop(session_id=session)
trace_thread.join(3.0)
if trace_thread.is_alive():
pytest.fail('Trace thread still alive')
trace_output = capsys.readouterr().out
with act_1.trace(db_events=trace_1):
with act_1.db.connect() as con1:
c = con1.cursor()
c.call_procedure('sp_fill_gtt_sav_rows', [NUM_ROWS_TO_BE_ADDED])
con1.rollback()
with act_1.db.connect() as con2:
c = con2.cursor()
c.call_procedure('sp_fill_gtt_del_rows', [NUM_ROWS_TO_BE_ADDED])
con2.rollback()
# Obtain statistics for table T_FIX_TAB in order to estimate numberof data pages
dp_cnt = 0
act_1.gstat(switches=['-a','-t', 'T_FIX_TAB', '-u', act_1.db.user, '-p', act_1.db.password])
@ -519,7 +490,7 @@ def test_1(act_1: Action, capsys):
gtt_del_marks = -1
gtt_del_trace = ''
gtt_sav_trace = ''
for line in trace_output.splitlines():
for line in act_1.trace_log:
if 'fetch' in line:
# 2.5.7:
# ['370', 'ms,', '1100', 'read(s),', '1358', 'write(s),', '410489', 'fetch(es),', '93294', 'mark(s)']
@ -568,7 +539,7 @@ def test_1(act_1: Action, capsys):
if failed_flag:
print('Trace for GTT PRESERVE rows: ' + gtt_sav_trace)
print('Trace for GTT DELETE rows: ' + gtt_del_trace)
#
# Check
act_1.expected_stdout = expected_stdout_1
act_1.stdout = capsys.readouterr().out
assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@ -28,8 +28,6 @@
# qmid: None
import pytest
import time
from threading import Thread, Barrier
from firebird.qa import db_factory, python_act, Action
# version: 3.0
@ -277,54 +275,26 @@ expected_stdout_1 = """
Statement statistics detected for ROLLBACK
"""
def trace_session(act: Action, b: Barrier):
cfg30 = ['# Trace config, format for 3.0. Generated auto, do not edit!',
f'database=%[\\\\/]{act.db.db_path.name}',
'{',
' enabled = true',
' log_transactions = true',
' print_perf = true',
#' log_connections = true',
#' log_procedure_start = true',
#' log_procedure_finish = true',
' log_initfini = false',
'}']
with act.connect_server() as srv:
srv.trace.start(config='\n'.join(cfg30))
b.wait()
for line in srv:
print(line)
trace_1 = ['log_transactions = true',
'print_perf = true',
'log_initfini = false',
]
@pytest.mark.version('>=3.0')
def test_1(act_1: Action, capsys):
b = Barrier(2)
trace_thread = Thread(target=trace_session, args=[act_1, b])
trace_thread.start()
b.wait()
#
act_1.isql(switches=[], input=test_script_1)
# do NOT remove this otherwise trace log can contain only message about its start before being closed!
time.sleep(3)
with act_1.connect_server() as srv:
for session in list(srv.trace.sessions.keys()):
srv.trace.stop(session_id=session)
trace_thread.join(3.0)
if trace_thread.is_alive():
pytest.fail('Trace thread still alive')
trace_output = capsys.readouterr().out
with act_1.trace(db_events=trace_1):
act_1.isql(switches=[], input=test_script_1)
# Output log of trace session, with filtering only interested info:
# Performance header text (all excessive spaces will be removed before comparison - see below):
perf_header='Table Natural Index Update Insert Delete Backout Purge Expunge'
checked_events= {') COMMIT_TRANSACTION': 'commit',
') ROLLBACK_TRANSACTION': 'rollback',
') EXECUTE_STATEMENT': 'execute_statement',
') START_TRANSACTION': 'start_transaction'
}
i, k = 0, 0
watched_event = ''
for line in trace_output.splitlines():
for line in act_1.trace_log:
k += 1
e = ''.join([v.upper() for x, v in checked_events.items() if x in line])
watched_event = e if e else watched_event
@ -336,6 +306,7 @@ def test_1(act_1: Action, capsys):
print('Found performance block header')
if line.startswith('TFIX') or line.startswith('GTT_SSN') or line.startswith('GTT_TRA'):
print(f'Found table statistics for {line.split()[0]}')
# Check
act_1.expected_stdout = expected_stdout_1
act_1.stdout = capsys.readouterr().out
assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@ -11,9 +11,7 @@
# qmid: None
import pytest
from threading import Thread, Barrier
from firebird.qa import db_factory, python_act, Action
from firebird.driver import DatabaseError
# version: 2.5
# resources: None
@ -193,39 +191,17 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
def trace_session(act: Action, b: Barrier):
cfg30 = ['# Trace config, format for 3.0. Generated auto, do not edit!',
f'database=%[\\\\/]{act.db.db_path.name}',
'{',
' enabled = true',
' time_threshold = 0',
' log_statement_finish = true',
'}']
with act.connect_server() as srv:
srv.trace.start(config='\n'.join(cfg30))
b.wait()
for line in srv:
pass # We are not interested in trace output
trace_1 = ['time_threshold = 0',
'log_statement_finish = true',
]
@pytest.mark.version('>=2.5')
def test_1(act_1: Action, capsys):
b = Barrier(2)
trace_thread = Thread(target=trace_session, args=[act_1, b])
trace_thread.start()
b.wait()
# empty query
with act_1.db.connect() as con:
c = con.cursor()
try:
c.execute('') # This may crash the server
except Exception as exc:
pass
#
with act_1.connect_server() as srv:
for session in list(srv.trace.sessions.keys()):
srv.trace.stop(session_id=session)
trace_thread.join(1.0)
if trace_thread.is_alive():
pytest.fail('Trace thread still alive')
def test_1(act_1: Action):
with act_1.trace(db_events=trace_1, keep_log=False):
with act_1.db.connect() as con:
c = con.cursor()
try:
c.execute('') # This may crash the server
except Exception:
pass
# If we got here, the server lives so test passed

View File

@ -21,8 +21,6 @@
import pytest
import re
import time
from threading import Thread, Barrier
from firebird.qa import db_factory, python_act, Action
# version: 2.5.2
@ -303,54 +301,30 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
def trace_session(act: Action, b: Barrier, log_sweep: bool):
cfg30 = ['# Trace config, format for 3.0. Generated auto, do not edit!',
f'database=%[\\\\/]{act.db.db_path.name}',
'{',
' enabled = true',
' time_threshold = 0',
' log_connections = true',
f' log_sweep = {"true" if log_sweep else "false"}',
' log_initfini = false',
'}']
with act.connect_server() as srv:
srv.trace.start(config='\n'.join(cfg30))
b.wait()
for line in srv:
print(line.upper())
def sweep_present(text: str) -> bool:
def sweep_present(trace_log) -> bool:
pattern = re.compile('\\s+sweep_(start|progress|finish)(\\s+|$)', re.IGNORECASE)
present = False
for line in text.splitlines():
for line in trace_log:
if pattern.search(line):
present = True
break
return present
def check_sweep(act_1: Action, log_sweep: bool):
b = Barrier(2)
trace_thread = Thread(target=trace_session, args=[act_1, b, log_sweep])
trace_thread.start()
b.wait()
with act_1.connect_server() as srv:
# Run sweep
cfg = ['time_threshold = 0',
'log_connections = true',
f'log_sweep = {"true" if log_sweep else "false"}',
'log_initfini = false',
]
with act_1.trace(db_events=cfg), act_1.connect_server() as srv:
srv.database.sweep(database=act_1.db.db_path)
# Stop trace
time.sleep(2)
for session in list(srv.trace.sessions.keys()):
srv.trace.stop(session_id=session)
trace_thread.join(1.0)
if trace_thread.is_alive():
pytest.fail('Trace thread still alive')
@pytest.mark.version('>=3.0')
def test_1(act_1: Action, capsys):
def test_1(act_1: Action):
# Case 1 - sweep logged
check_sweep(act_1, True)
trace_log = capsys.readouterr().out
assert sweep_present(trace_log)
assert sweep_present(act_1.trace_log)
# Case 2 - sweep not logged
act_1.trace_log.clear()
check_sweep(act_1, False)
trace_log = capsys.readouterr().out
assert not sweep_present(trace_log)
assert not sweep_present(act_1.trace_log)

View File

@ -33,8 +33,6 @@
import pytest
import re
import time
from threading import Thread, Barrier
from firebird.qa import db_factory, python_act, Action
# version: 2.5.3
@ -262,44 +260,24 @@ expected_stdout_1 = """
param3 = smallint, "4"
"""
def trace_session(act: Action, b: Barrier):
cfg30 = ['# Trace config, format for 3.0. Generated auto, do not edit!',
f'database=%[\\\\/]{act.db.db_path.name}',
'{',
' enabled = true',
' time_threshold = 0',
' log_statement_start = true',
'}']
with act.connect_server() as srv:
srv.trace.start(config='\n'.join(cfg30))
b.wait()
for line in srv:
print(line)
trace_1 = ['time_threshold = 0',
'log_statement_start = true',
]
@pytest.mark.version('>=2.5.3')
def test_1(act_1: Action, capsys):
b = Barrier(2)
trace_thread = Thread(target=trace_session, args=[act_1, b])
trace_thread.start()
b.wait()
act_1.isql(switches=['-n', '-q'], input='execute procedure sp_test(1, 2, 3, 4);')
time.sleep(2)
with act_1.connect_server() as srv:
for session in list(srv.trace.sessions.keys()):
srv.trace.stop(session_id=session)
trace_thread.join(1.0)
if trace_thread.is_alive():
pytest.fail('Trace thread still alive')
#
trace_log = capsys.readouterr().out
with act_1.trace(db_events=trace_1):
act_1.isql(switches=['-n', '-q'], input='execute procedure sp_test(1, 2, 3, 4);')
# process trace
spcall_pattern = re.compile("execute procedure ")
params_pattern = re.compile("param[0-9]{1} = ")
flag = False
for line in trace_log.splitlines():
for line in act_1.trace_log:
if spcall_pattern.match(line):
flag = True
if flag and params_pattern.match(line):
print(line)
# Check
act_1.expected_stdout = expected_stdout_1
act_1.stdout = capsys.readouterr().out
assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@ -55,7 +55,6 @@ import time
import re
import subprocess
from datetime import datetime
from threading import Thread, Barrier
from firebird.qa import db_factory, python_act, Action
from firebird.driver import DbWriteMode, ShutdownMethod, ShutdownMode
@ -522,23 +521,13 @@ expected_stdout_1_b = """
Trace log parsing. Found triggers before sweep finish: EXPECTED (equals to planned).
"""
def trace_session(act: Action, b: Barrier):
cfg30 = ['# Trace config, format for 3.0. Generated auto, do not edit!',
f'database=%[\\\\/]{act.db.db_path.name}',
'{',
' enabled = true',
' time_threshold = 0',
' log_statement_start = true',
' log_initfini = false',
' log_errors = true',
' log_sweep = true',
' log_trigger_finish = true',
'}']
with act.connect_server() as srv:
srv.trace.start(config='\n'.join(cfg30))
b.wait()
for line in srv:
print(line)
trace_1 = ['time_threshold = 0',
'log_statement_start = true',
'log_initfini = false',
'log_errors = true',
'log_sweep = true',
'log_trigger_finish = true',
]
@pytest.mark.version('>=3.0')
def test_1(act_1: Action, capsys):
@ -619,57 +608,46 @@ def test_1(act_1: Action, capsys):
with act_1.connect_server() as srv:
srv.database.set_write_mode(database=act_1.db.db_path, mode=DbWriteMode.SYNC)
# Trace
b_trace = Barrier(2)
trace_thread = Thread(target=trace_session, args=[act_1, b_trace])
trace_thread.start()
b_trace.wait()
# Traced action
# Now we run SWEEP in child thread (asynchronous) and while it will work - try to establish several attachments.
sweeper = subprocess.Popen([act_1.vars['gfix'], '-sweep', '-user', act_1.db.user,
'-password', act_1.db.password, act_1.db.dsn],
stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL
)
with act_1.trace(db_events=trace_1):
# Traced action
# Now we run SWEEP in child thread (asynchronous) and while it will work - try to establish several attachments.
sweeper = subprocess.Popen([act_1.vars['gfix'], '-sweep', '-user', act_1.db.user,
'-password', act_1.db.password, act_1.db.dsn],
stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL
)
#
try:
time.sleep(WAIT_BEFORE_RUN_1ST_ATT)
# Save current timestamp: this is point BEFORE we try to establish attachmentas using several ISQL sessions:
DTS_BEG_FOR_ATTACHMENTS = datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S')
# Try to establish several attachments to database while sweep is in work:
check_sql = """
set heading off;
set term ^;
execute block returns(att bigint) as
begin
att = current_connection;
suspend;
end
^
set term ;^
commit;
"""
for i in range(PLANNED_ATTACH_CNT):
act_1.reset()
act_1.isql(switches=['-n'], input=check_sql)
time.sleep(WAIT_FOR_ALL_ATT_DONE)
# Save current timestamp: we assume that now ALL isql sessions already FINISHED to
# establish attachment (or the whole job and quited)
DTS_END_FOR_ATTACHMENTS = datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S')
# Move database to shutdown in order to stop sweep
with act_1.connect_server() as srv:
srv.database.shutdown(database=act_1.db.db_path, mode=ShutdownMode.FULL,
method=ShutdownMethod.FORCED, timeout=0)
finally:
# Kill sweep
sweeper.terminate()
#
try:
time.sleep(WAIT_BEFORE_RUN_1ST_ATT)
# Save current timestamp: this is point BEFORE we try to establish attachmentas using several ISQL sessions:
DTS_BEG_FOR_ATTACHMENTS = datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S')
# Try to establish several attachments to database while sweep is in work:
check_sql = """
set heading off;
set term ^;
execute block returns(att bigint) as
begin
att = current_connection;
suspend;
end
^
set term ;^
commit;
"""
for i in range(PLANNED_ATTACH_CNT):
act_1.reset()
act_1.isql(switches=['-n'], input=check_sql)
time.sleep(WAIT_FOR_ALL_ATT_DONE)
# Save current timestamp: we assume that now ALL isql sessions already FINISHED to
# establish attachment (or the whole job and quited)
DTS_END_FOR_ATTACHMENTS = datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S')
# Move database to shutdown in order to stop sweep
with act_1.connect_server() as srv:
srv.database.shutdown(database=act_1.db.db_path, mode=ShutdownMode.FULL,
method=ShutdownMethod.FORCED, timeout=0)
finally:
# Kill sweep
sweeper.terminate()
# stop trace
with act_1.connect_server() as srv:
for session in list(srv.trace.sessions.keys()):
srv.trace.stop(session_id=session)
trace_thread.join(1.0)
if trace_thread.is_alive():
pytest.fail('Trace thread still alive')
#
trace_log = capsys.readouterr().out
# Return database online in order to check number of attachments that were established
# while sweep was in work
with act_1.connect_server() as srv:
@ -716,7 +694,7 @@ def test_1(act_1: Action, capsys):
found_swp_finish = False
triggers_count_before_swp_start = 0
triggers_count_before_swp_finish = 0
for line in trace_log.splitlines():
for line in act_1.trace_log:
for p in allowed_patterns:
if result:= p.search(line):
what_found = result.group(0)
@ -728,9 +706,6 @@ def test_1(act_1: Action, capsys):
triggers_count_before_swp_start += (1 if not found_swp_start else 0)
triggers_count_before_swp_finish += (1 if found_swp_start and not found_swp_finish else 0)
#time.sleep(1)
#print('Trace log parsing. Found triggers before sweep start:', triggers_count_before_swp_start )
#print('Trace log parsing. Found triggers before sweep finish:', triggers_count_before_swp_finish )
print('Trace log parsing. Found triggers before sweep start:',
'EXPECTED (no triggers found).' if triggers_count_before_swp_start == 0
else f'UNEXPECTED: {triggers_count_before_swp_start} instead of 0.')

View File

@ -22,8 +22,6 @@
# qmid: None
import pytest
import time
from threading import Thread, Barrier
from firebird.qa import db_factory, python_act, Action
# version: 2.5.3
@ -174,39 +172,17 @@ Command error: show database
Cannot get server version without database connection
"""
def trace_session(act: Action, b: Barrier):
cfg30 = ['# Trace config, format for 3.0. Generated auto, do not edit!',
f'database=%[\\\\/]{act.db.db_path.name}',
'{',
' enabled = true',
' time_threshold = 0',
' log_errors = true',
' connection_id = 1234',
' log_connections = true',
'}']
with act.connect_server() as srv:
srv.trace.start(config='\n'.join(cfg30))
b.wait()
for line in srv:
print(line.upper())
trace_1 = ['time_threshold = 0',
'log_errors = true',
'connection_id = 1234',
'log_connections = true',
]
@pytest.mark.version('>=3.0')
def test_1(act_1: Action, capsys):
b = Barrier(2)
trace_thread = Thread(target=trace_session, args=[act_1, b])
trace_thread.start()
b.wait()
try:
def test_1(act_1: Action):
with act_1.trace(db_events=trace_1):
act_1.expected_stderr = expected_stderr_1
act_1.isql(switches=['-n'],
input="connect 'localhost:some_non_existent' user 'SYSDBA' password 'masterkey'; show database; show version;")
time.sleep(2)
finally:
with act_1.connect_server() as srv:
for session in list(srv.trace.sessions.keys()):
srv.trace.stop(session_id=session)
trace_thread.join(1.0)
if trace_thread.is_alive():
pytest.fail('Trace thread still alive')
# check that we are still kicking and got expected result from isql
# check that we are still kicking (via trace exit) and got expected result from isql
assert act_1.clean_stderr == act_1.clean_expected_stderr

View File

@ -50,7 +50,6 @@ import re
import time
import subprocess
from difflib import unified_diff
from threading import Thread, Barrier
from pathlib import Path
from firebird.qa import db_factory, python_act, Action, temp_file
from firebird.driver import DbWriteMode
@ -613,21 +612,11 @@ sweep_killer_out_1 = temp_file('killer.out')
sweep_killer_err_1 = temp_file('killer.err')
sweep_out_1 = temp_file('sweep.out')
def trace_session(act: Action, b: Barrier):
cfg30 = ['# Trace config, format for 3.0. Generated auto, do not edit!',
f'database=%[\\\\/]{act.db.db_path.name}',
'{',
' enabled = true',
' time_threshold = 0',
' log_errors = true',
' log_sweep = true',
' log_connections = true',
'}']
with act.connect_server() as srv:
srv.trace.start(config='\n'.join(cfg30))
b.wait()
for line in srv:
print(line.upper())
trace_1 = ['time_threshold = 0',
'log_errors = true',
'log_sweep = true',
'log_connections = true',
]
@pytest.mark.version('>=3.0')
def test_1(act_1: Action, capsys, sweep_killer_script_1: Path, sweep_killer_out_1: Path,
@ -681,11 +670,7 @@ def test_1(act_1: Action, capsys, sweep_killer_script_1: Path, sweep_killer_out_
# Change FW to ON (in order to make sweep life harder :))
srv.database.set_write_mode(database=act_1.db.db_path, mode=DbWriteMode.SYNC)
# Start trace
b = Barrier(2)
trace_thread = Thread(target=trace_session, args=[act_1, b])
trace_thread.start()
b.wait()
try:
with act_1.trace(db_events=trace_1):
# Launch (async.) ISQL which will make small delay and then kill GFIX attachment
with open(sweep_killer_out_1, 'w') as killer_out, \
open(sweep_killer_err_1, 'w') as killer_err:
@ -704,17 +689,7 @@ def test_1(act_1: Action, capsys, sweep_killer_script_1: Path, sweep_killer_out_
gfix_err = act_1.stderr
finally:
p_killer.terminate()
time.sleep(2)
finally:
# stop trace
with act_1.connect_server() as srv:
for session in list(srv.trace.sessions.keys()):
srv.trace.stop(session_id=session)
trace_thread.join(1.0)
if trace_thread.is_alive():
pytest.fail('Trace thread still alive')
#
trace_log = capsys.readouterr().out
# get firebird log after action
with act_1.connect_server() as srv:
srv.info.get_log()
@ -731,7 +706,7 @@ def test_1(act_1: Action, capsys, sweep_killer_script_1: Path, sweep_killer_out_
print('ISQL ERR:', line.upper())
# Trace log
found_sweep_failed = 0
for line in trace_log.splitlines():
for line in act_1.trace_log:
if 'SWEEP_FAILED' in line:
print('TRACE_LOG:' + (' '.join(line.split()).upper()))
found_sweep_failed = 1

View File

@ -23,9 +23,7 @@
# qmid: None
import pytest
import time
import re
from threading import Thread, Barrier
from firebird.qa import db_factory, python_act, Action
# version: 3.0
@ -350,28 +348,16 @@ expected_stdout_1 = """
LOG_FUNC_ENABLED PARAM0 = BIGINT, "207936"
"""
def trace_session(act: Action, b: Barrier, logfunc: bool):
cfg30 = ['# Trace config, format for 3.0. Generated auto, do not edit!',
f'database=%[\\\\/]{act.db.db_path.name}',
'{',
' enabled = true',
' time_threshold = 0',
' log_errors = true',
' log_connections = true',
' log_transactions = true',
]
if logfunc:
cfg30.append(' log_function_start = true')
cfg30.append(' log_function_finish = true')
cfg30.append('}')
with act.connect_server() as srv:
srv.trace.start(config='\n'.join(cfg30))
b.wait()
for line in srv:
print(line.upper())
trace_1 = ['time_threshold = 0',
'log_errors = true',
'log_connections = true',
'log_transactions = true',
'log_function_start = true',
'log_function_finish = true'
]
@pytest.mark.version('>=3.0')
def test_1(act_1: Action, capsys):
def test_1(act_1: Action):
output = []
trace_timestamp_prefix = '[.*\\s+]*20[0-9]{2}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{3,4}\\s+\\(.+\\)'
func_start_ptn = re.compile(trace_timestamp_prefix + '\\s+(FAILED){0,1}\\s*EXECUTE_FUNCTION_START$', re.IGNORECASE)
@ -395,52 +381,27 @@ def test_1(act_1: Action, capsys):
commit;
"""
# Case 1: Trace functions enabled
b = Barrier(2)
trace_thread = Thread(target=trace_session, args=[act_1, b, True])
trace_thread.start()
b.wait()
try:
with act_1.trace(db_events=trace_1):
act_1.isql(switches=['-n', '-q'], input=func_script % (123, 456))
time.sleep(2)
finally:
with act_1.connect_server() as srv:
for session in list(srv.trace.sessions.keys()):
srv.trace.stop(session_id=session)
trace_thread.join(1.0)
if trace_thread.is_alive():
pytest.fail('Trace thread still alive')
#
trace_log = capsys.readouterr().out
for line in trace_log.splitlines():
for line in act_1.trace_log:
if (func_start_ptn.search(line)
or func_finish_ptn.search(line)
or func_name_ptn.search(line)
or func_param_prn.search(line) ):
output.append('LOG_FUNC_ENABLED ' + line.upper())
# Case 1: Trace functions disabled
b = Barrier(2)
trace_thread = Thread(target=trace_session, args=[act_1, b, False])
trace_thread.start()
b.wait()
try:
# Case 2: Trace functions disabled
act_1.trace_log.clear()
with act_1.trace(db_events=trace_1[:-2]):
act_1.isql(switches=['-n', '-q'], input=func_script % (789, 987))
time.sleep(2)
finally:
with act_1.connect_server() as srv:
for session in list(srv.trace.sessions.keys()):
srv.trace.stop(session_id=session)
trace_thread.join(1.0)
if trace_thread.is_alive():
pytest.fail('Trace thread still alive')
#
trace_log += capsys.readouterr().out
for line in trace_log.splitlines():
for line in act_1.trace_log:
if (func_start_ptn.search(line)
or func_finish_ptn.search(line)
or func_name_ptn.search(line)
or func_param_prn.search(line) ):
print('LOG_FUNC_DISABLED ' + line.upper())
# Test
output.append('LOG_FUNC_DISABLED ' + line.upper())
# Check
act_1.reset()
act_1.expected_stdout = expected_stdout_1
act_1.stdout = '\n'.join(output)

View File

@ -15,8 +15,6 @@
# qmid: None
import pytest
import time
from threading import Thread, Barrier
from firebird.qa import db_factory, python_act, Action
# version: 3.0
@ -205,46 +203,25 @@ expected_stdout_1 = """
-> Table "TEST" Full Scan
"""
def trace_session(act: Action, b: Barrier):
cfg30 = ['# Trace config, format for 3.0. Generated auto, do not edit!',
f'database=%[\\\\/]{act.db.db_path.name}',
'{',
' enabled = true',
' time_threshold = 0',
' log_initfini = false',
' print_plan = true',
' explain_plan = true',
' log_statement_prepare = true',
' include_filter=%(from|join)[[:whitespace:]]test%',
'}']
with act.connect_server() as srv:
srv.trace.start(config='\n'.join(cfg30))
b.wait()
for line in srv:
print(line)
trace_1 = ['time_threshold = 0',
'log_initfini = false',
'print_plan = true',
'explain_plan = true',
'log_statement_prepare = true',
'include_filter=%(from|join)[[:whitespace:]]test%',
]
@pytest.mark.version('>=3.0')
def test_1(act_1: Action, capsys):
b = Barrier(2)
trace_thread = Thread(target=trace_session, args=[act_1, b])
trace_thread.start()
b.wait()
# Trace ready, run tests
act_1.isql(switches=[], input='select count(*) from test;')
# Stop trace
time.sleep(2)
with act_1.connect_server() as srv:
for session in list(srv.trace.sessions.keys()):
srv.trace.stop(session_id=session)
trace_thread.join(1.0)
if trace_thread.is_alive():
pytest.fail('Trace thread still alive')
# Check
with act_1.trace(db_events=trace_1):
act_1.isql(switches=[], input='select count(*) from test;')
# Process trace
show_line = 0
for line in capsys.readouterr().out.splitlines():
for line in act_1.trace_log:
show_line = (show_line + 1 if ('^' * 79) in line or show_line>0 else show_line)
if show_line > 1:
print(line)
# Check
act_1.expected_stdout = expected_stdout_1
act_1.stdout = capsys.readouterr().out
assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@ -21,8 +21,6 @@
# qmid: None
import pytest
import time
from threading import Thread, Barrier
from firebird.qa import db_factory, python_act, Action
# version: 3.0
@ -258,54 +256,32 @@ expected_stdout_1 = """
Records affected: 1
"""
def trace_session(act: Action, b: Barrier):
cfg30 = ['# Trace config, format for 3.0. Generated auto, do not edit!',
f'database=%[\\\\/]{act.db.db_path.name}',
'{',
' enabled = true',
' time_threshold = 0',
' log_initfini = false',
' log_errors = true',
' log_statement_finish = true',
'}']
with act.connect_server() as srv:
srv.trace.start(config='\n'.join(cfg30))
b.wait()
for line in srv:
print(line)
trace_1 = ['time_threshold = 0',
'log_initfini = false',
'log_errors = true',
'log_statement_finish = true',
]
@pytest.mark.version('>=3.0')
def test_1(act_1: Action, capsys):
b = Barrier(2)
trace_thread = Thread(target=trace_session, args=[act_1, b])
trace_thread.start()
b.wait()
isq_script = """
set list on;
set count on;
select
iif(a.mon$remote_protocol is null, 'internal', 'remote') as connection_protocol,
iif(a.mon$remote_process is null, 'internal', 'remote') as connection_process,
iif(a.mon$remote_pid is null, 'internal', 'remote') as connection_remote_pid,
a.mon$auth_method as auth_method -- should be: 'User name in DPB'
from rdb$database r
left join mon$attachments a on a.mon$attachment_id = current_connection and a.mon$system_flag = 0;
commit;
"""
act_1.isql(switches=['-n', '-user', 'tmp$no$such$user$4889', str(act_1.db.db_path)],
connect_db=False, input=isq_script)
output = act_1.stdout
with act_1.connect_server() as srv:
for session in list(srv.trace.sessions.keys()):
srv.trace.stop(session_id=session)
trace_thread.join(1.0)
if trace_thread.is_alive():
pytest.fail('Trace thread still alive')
trace_log = capsys.readouterr().out
#
# Process logs
with act_1.trace(db_events=trace_1):
isq_script = """
set list on;
set count on;
select
iif(a.mon$remote_protocol is null, 'internal', 'remote') as connection_protocol,
iif(a.mon$remote_process is null, 'internal', 'remote') as connection_process,
iif(a.mon$remote_pid is null, 'internal', 'remote') as connection_remote_pid,
a.mon$auth_method as auth_method -- should be: 'User name in DPB'
from rdb$database r
left join mon$attachments a on a.mon$attachment_id = current_connection and a.mon$system_flag = 0;
commit;
"""
act_1.isql(switches=['-n', '-user', 'tmp$no$such$user$4889', str(act_1.db.db_path)],
connect_db=False, credentials=False, input=isq_script)
# Process trace log
i = 0
for line in trace_log.splitlines():
for line in act_1.trace_log:
if ') EXECUTE_STATEMENT_FINISH' in line:
i = 1
if i == 1 and '1 records fetched' in line:
@ -313,8 +289,8 @@ def test_1(act_1: Action, capsys):
print("OK: found text in trace related to EMBEDDED connect.")
break
if not i == 2:
print("FAILED to found text in trace related to EMBEDDED connect.")
print(output if output else "FAILED to print log from EMBEDDED connect: log is EMPTY.")
print("FAILED to find text in trace related to EMBEDDED connect.")
print(act_1.stdout if act_1.stdout else "FAILED to print log from EMBEDDED connect: log is EMPTY.")
# Check
act_1.expected_stdout = expected_stdout_1
act_1.stdout = capsys.readouterr().out

View File

@ -34,8 +34,6 @@
# qmid: None
import pytest
import time
from threading import Thread, Barrier
from firebird.qa import db_factory, python_act, Action
# version: 2.5.5
@ -280,39 +278,17 @@ expected_stdout_1 = """
DETAIL_2100
"""
def trace_session(act: Action, b: Barrier):
cfg30 = ['# Trace config, format for 3.0. Generated auto, do not edit!',
f'database=%[\\\\/]{act.db.db_path.name}',
'{',
' enabled = true',
' time_threshold = 0',
' log_initfini = false',
' log_statement_finish = true',
' print_perf = true',
'}']
with act.connect_server() as srv:
srv.trace.start(config='\n'.join(cfg30))
b.wait()
for line in srv:
print(line)
trace_1 = ['time_threshold = 0',
'log_initfini = false',
'log_statement_finish = true',
'print_perf = true',
]
@pytest.mark.version('>=3.0')
def test_1(act_1: Action, capsys):
b = Barrier(2)
trace_thread = Thread(target=trace_session, args=[act_1, b])
trace_thread.start()
b.wait()
act_1.isql(switches=[], input='set list on; select result from sp_test;')
time.sleep(2)
with act_1.connect_server() as srv:
for session in list(srv.trace.sessions.keys()):
srv.trace.stop(session_id=session)
trace_thread.join(1.0)
if trace_thread.is_alive():
pytest.fail('Trace thread still alive')
with act_1.trace(db_events=trace_1):
act_1.isql(switches=[], input='set list on; select result from sp_test;')
# Check
act_1.expected_stdout = expected_stdout_1
act_1.stdout = capsys.readouterr().out
act_1.trace_to_stdout()
assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@ -17,8 +17,6 @@
# qmid: None
import pytest
import time
from threading import Thread, Barrier
from firebird.qa import db_factory, python_act, Action, user_factory, User
# version: 4.0
@ -26,9 +24,9 @@ from firebird.qa import db_factory, python_act, Action, user_factory, User
substitutions_1 = [('\t+', ' '),
('^((?!ROLE_|PREPARE_STATEMENT|EXECUTE_STATEMENT_START|EXECUTE_STATEMENT_FINISH).)*$', ''),
('.*PREPARE_STATEMENT', 'TRACE_LOG: PREPARE_STATEMENT'),
('.*EXECUTE_STATEMENT_START', 'TRACE_LOG: EXECUTE_STATEMENT_START'),
('.*EXECUTE_STATEMENT_FINISH', 'TRACE_LOG: EXECUTE_STATEMENT_FINISH')]
('.*PREPARE_STATEMENT', 'PREPARE_STATEMENT'),
('.*EXECUTE_STATEMENT_START', 'EXECUTE_STATEMENT_START'),
('.*EXECUTE_STATEMENT_FINISH', 'EXECUTE_STATEMENT_FINISH')]
init_script_1 = """"""
@ -214,29 +212,18 @@ expected_stdout_1_a = """
"""
expected_stdout_1_b = """
TRACE LOG: 2016-08-06T11:51:38.9360 (2536:01FD0CC8) PREPARE_STATEMENT
TRACE LOG: 2016-08-06T11:51:38.9360 (2536:01FD0CC8) EXECUTE_STATEMENT_START
TRACE LOG: 2016-08-06T11:51:38.9360 (2536:01FD0CC8) EXECUTE_STATEMENT_FINISH
2016-08-06T11:51:38.9360 (2536:01FD0CC8) PREPARE_STATEMENT
2016-08-06T11:51:38.9360 (2536:01FD0CC8) EXECUTE_STATEMENT_START
2016-08-06T11:51:38.9360 (2536:01FD0CC8) EXECUTE_STATEMENT_FINISH
"""
def trace_session(act: Action, b: Barrier):
cfg30 = ['# Trace config, format for 3.0. Generated auto, do not edit!',
f'database=%[\\\\/]{act.db.db_path.name}',
'{',
' enabled = true',
' time_threshold = 0',
' log_initfini = false',
' log_statement_start = true',
' log_statement_finish = true',
' max_sql_length = 5000',
' log_statement_prepare = true',
'}']
with act.connect_server(user='TMP$C5269_2', password='456',
role='role_for_trace_any_attachment') as srv:
srv.trace.start(config='\n'.join(cfg30))
b.wait()
for line in srv:
print(line)
trace_1 = ['time_threshold = 0',
'log_initfini = false',
'log_statement_start = true',
'log_statement_finish = true',
'max_sql_length = 5000',
'log_statement_prepare = true',
]
user_1_a = user_factory(name='TMP$C5269_1', password='123')
user_1_b = user_factory(name='TMP$C5269_2', password='456')
@ -250,7 +237,7 @@ where p.rdb$user = upper('TMP$C5269_2');
"""
@pytest.mark.version('>=4.0')
def test_1(act_1: Action, user_1_a: User, user_1_b: User, capsys):
def test_1(act_1: Action, user_1_a: User, user_1_b: User):
with act_1.test_role('role_for_trace_any_attachment'):
with act_1.db.connect() as con:
con.execute_immediate('alter role role_for_trace_any_attachment set system privileges to TRACE_ANY_ATTACHMENT')
@ -261,25 +248,11 @@ def test_1(act_1: Action, user_1_a: User, user_1_b: User, capsys):
act_1.isql(switches=[], input=test_script_1_a)
assert act_1.clean_stdout == act_1.clean_expected_stdout
# Run trace
b = Barrier(2)
trace_thread = Thread(target=trace_session, args=[act_1, b])
trace_thread.start()
b.wait()
# Test
with act_1.db.connect(user='TMP$C5269_1', password='123') as con:
with act_1.trace(db_events=trace_1), act_1.db.connect(user='TMP$C5269_1', password='123') as con:
c = con.cursor()
c.execute('select current_user from rdb$database')
time.sleep(2)
# End trace
with act_1.connect_server() as srv:
for session in list(srv.trace.sessions.keys()):
srv.trace.stop(session_id=session)
trace_thread.join(1.0)
if trace_thread.is_alive():
pytest.fail('Trace thread still alive')
# Check
act_1.reset()
act_1.expected_stdout = expected_stdout_1_b
act_1.stdout = capsys.readouterr().out
act_1.trace_to_stdout()
assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@ -34,8 +34,6 @@
# qmid: None
import pytest
import time
from threading import Thread, Barrier
from difflib import unified_diff
from pathlib import Path
from firebird.qa import db_factory, python_act, Action, temp_file
@ -291,37 +289,20 @@ act_1 = python_act('db_1', substitutions=substitutions_1)
temp_db_1 = temp_file('tmp_5273.fdb')
def trace_session(act: Action, b: Barrier):
cfg30 = ['# Trace config, format for 3.0. Generated auto, do not edit!',
'database=%[\\\\/]tmp_5273.fdb',
'{',
' enabled = true',
' time_threshold = 0',
' log_sweep = true',
' log_errors = true',
' log_connections = true',
' log_statement_prepare = true',
' log_statement_start = true',
' log_statement_finish = true',
' log_statement_free = true',
' log_trigger_start = true',
' log_trigger_finish = true',
' print_perf = true',
' max_sql_length = 16384',
' max_log_size = 5000000',
'}',
'services',
'{',
' enabled = false',
' log_services = true',
' log_service_query = true',
' log_errors = true',
'}']
with act.connect_server() as srv:
srv.trace.start(config='\n'.join(cfg30))
b.wait()
for line in srv:
print(line)
trace_1 = ['time_threshold = 0',
'log_sweep = true',
'log_errors = true',
'log_connections = true',
'log_statement_prepare = true',
'log_statement_start = true',
'log_statement_finish = true',
'log_statement_free = true',
'log_trigger_start = true',
'log_trigger_finish = true',
'print_perf = true',
'max_sql_length = 16384',
'max_log_size = 5000000',
]
@pytest.mark.version('>=4.0')
def test_1(act_1: Action, temp_db_1: Path):
@ -337,20 +318,8 @@ def test_1(act_1: Action, temp_db_1: Path):
# Get content of firebird.log BEFORE test
log_before = act_1.get_firebird_log()
# Start trace
b = Barrier(2)
trace_thread = Thread(target=trace_session, args=[act_1, b])
trace_thread.start()
b.wait()
# Test
act_1.isql(switches=[], input=sql_ddl)
# Stop trace
time.sleep(2)
with act_1.connect_server() as srv:
for session in list(srv.trace.sessions.keys()):
srv.trace.stop(session_id=session)
trace_thread.join(1.0)
if trace_thread.is_alive():
pytest.fail('Trace thread still alive')
with act_1.trace(db_events=trace_1, keep_log=False, database=temp_db_1.name):
act_1.isql(switches=[], input=sql_ddl)
# Get content of firebird.log AFTER test
log_after = act_1.get_firebird_log()
# Check

View File

@ -16,8 +16,6 @@
# qmid: None
import pytest
import time
from threading import Thread, Barrier
from firebird.qa import db_factory, python_act, Action
# version: 4.0
@ -266,38 +264,19 @@ expected_stdout_1 = """
Y 2
"""
def trace_session(act: Action, b: Barrier):
cfg30 = ['# Trace config, format for 3.0. Generated auto, do not edit!',
f'database=',
'{',
' enabled = true',
'}']
with act.connect_server() as srv:
srv.trace.start(config='\n'.join(cfg30))
b.wait()
for line in srv:
pass
trace_1 = ['database=',
'{',
'enabled = true',
'}']
@pytest.mark.version('>=4.0')
def test_1(act_1: Action, capsys):
b = Barrier(2)
trace_thread = Thread(target=trace_session, args=[act_1, b])
trace_thread.start()
b.wait()
# Trace ready, run tests
for i in range(3, 0, -1):
act_1.reset()
act_1.isql(switches=[], input=test_script_1)
print(act_1.stdout)
# Stop trace
time.sleep(2)
with act_1.connect_server() as srv:
for session in list(srv.trace.sessions.keys()):
srv.trace.stop(session_id=session)
trace_thread.join(1.0)
if trace_thread.is_alive():
pytest.fail('Trace thread still alive')
with act_1.trace(config=trace_1, keep_log=False):
for i in range(3, 0, -1):
act_1.reset()
act_1.isql(switches=[], input=test_script_1)
print(act_1.stdout)
# Check
act_1.reset()
act_1.expected_stdout = expected_stdout_1

View File

@ -2,13 +2,16 @@
#
# id: bugs.core_5418
# title: Inconsistent output when retrieving the server log via Services API
# decription:
# decription:
# Test gets FB home directory and copies firebird.log to *.tmp.
# Then it makes firebird.log empty and retrieves it via services API. Output should be empty.
# Finally, it opens firebird.log and adds to it several empty newlines.
# Finally, it opens firebird.log and adds to it several empty newlines.
# Repeat retrieveing content - it also should not contain any characters except newline.
# Checked on 2.5.7.27030, 4.0.0.465
#
#
# [pcisar] 7.12.2021
# Test requires direct changes to firebird.log which could require elevated privileges
#
# tracker_id: CORE-5418
# min_versions: ['2.5.7']
# versions: 2.5.7
@ -28,7 +31,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1
#---
#
#
# import os
# from fdb import services
# import subprocess
@ -36,34 +39,34 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# import shutil
# import difflib
# import re
#
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
#
#
# engine = str(db_conn.engine_version) # convert to text because 'float' object has no attribute 'startswith'
# db_conn.close()
#
#
# svc = services.connect(host='localhost')
# fb_home=svc.get_home_directory()
# svc.close()
#
#
# #--------------------------------------------
#
#
# def flush_and_close( file_handle ):
# # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os
#
#
# file_handle.flush()
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno())
# file_handle.close()
#
#
# #--------------------------------------------
#
#
# def cleanup( f_names_list ):
# global os
# for i in range(len( f_names_list )):
@ -74,69 +77,68 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# else:
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
# del_name = None
#
#
# if del_name and os.path.isfile( del_name ):
# os.remove( del_name )
#
#
# #--------------------------------------------
#
#
# if engine.startswith('2.5'):
# get_firebird_log_key='action_get_ib_log'
# else:
# get_firebird_log_key='action_get_fb_log'
#
#
# fb_log = os.path.join( fb_home, 'firebird.log' )
# fb_bak = os.path.join( fb_home, 'firebird.tmp' )
# shutil.copy2( fb_log, fb_bak )
# open(fb_log, 'w').close()
#
#
# f_init_log = open( os.path.join(context['temp_directory'],'tmp_5418_old.log'), 'w')
# subprocess.call( [ context['fbsvcmgr_path'],"localhost:service_mgr", get_firebird_log_key ], stdout = f_init_log, stderr = subprocess.STDOUT)
# flush_and_close( f_init_log )
#
#
# f = open(fb_log, 'w')
# for i in range(0,10):
# f.write(os.linesep)
# flush_and_close( f )
#
#
# f_curr_log = open( os.path.join(context['temp_directory'],'tmp_5418_new.log'), 'w')
# subprocess.call( [ context['fbsvcmgr_path'],"localhost:service_mgr", get_firebird_log_key ], stdout = f_curr_log, stderr = subprocess.STDOUT )
# flush_and_close( f_curr_log )
#
#
# shutil.move( fb_bak, fb_log )
#
#
# f_init_log=open(f_init_log.name, 'r')
# f_curr_log=open(f_curr_log.name, 'r')
# difftext = ''.join(difflib.unified_diff(
# f_init_log.readlines(),
# f_init_log.readlines(),
# f_curr_log.readlines()
# ))
# f_init_log.close()
# f_curr_log.close()
#
#
# f_diff_txt=open( os.path.join(context['temp_directory'],'tmp_5418_diff.txt'), 'w')
# f_diff_txt.write(difftext)
# flush_and_close( f_diff_txt )
#
#
# p = re.compile('\\+\\s?\\S+')
# with open( f_diff_txt.name,'r') as f:
# for line in f:
# if line.startswith('+') and line.strip() != '+++' and p.search(line):
# print( 'UNEXPECTED content in firebird.log: %(line)s' % locals() )
#
#
# # Cleanup.
# ##########
# time.sleep(1)
# cleanup( (f_init_log,f_curr_log,f_diff_txt) )
#
#
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
@pytest.mark.version('>=2.5.7')
@pytest.mark.xfail
def test_1(db_1):
pytest.fail("Test not IMPLEMENTED")
pytest.skip("Requires direct changes to firebird.log")

View File

@ -2,27 +2,27 @@
#
# id: bugs.core_5433
# title: Minor performance optimization - avoid additional database attachment from security objects mapping code
# decription:
# decription:
# After discuss with Alex (letters 08-10 mar 2017) it was decided to estimate effect of optimization
# by evaluating difference of attachment_id between two subsequent connections to DB.
# NB: Alex said that there was no way to see special service attachment because is was made with turned off
# NB: Alex said that there was no way to see special service attachment because is was made with turned off
# ability to trace it (see letter 09-mar-2017 16:16).
#
#
# Checked on:
# 4.0.0.477, 29-dec-2016: ClassicServer diff=2 - one of these two attachments should be removed
# 4.0.0.494, 10-jan-2017: ClassicServer diff=1 - Ok, expected.
# ::: NB :::
# ::: NB :::
# SuperServer will have diff=3 (THREE) attachment_id because of CacheWriter and GarbageCollector.
# For that reason we detect FB architecture here and SKIP checking SS results by substitution of
# dummy "OK" instead.
#
#
# tracker_id: CORE-5433
# min_versions: ['4.0']
# versions: 4.0
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
from firebird.qa import db_factory, python_act, Action
# version: 4.0
# resources: None
@ -35,34 +35,34 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1
#---
#
#
# import os
# import fdb
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
#
#
# db_conn.close()
#
#
# def get_fb_arch(a_dsn):
# try:
# con1 = fdb.connect(dsn = a_dsn)
# con2 = fdb.connect(dsn = a_dsn)
#
#
# cur1 = con1.cursor()
#
#
# sql=(
# "select count(distinct a.mon$server_pid), min(a.mon$remote_protocol), max(iif(a.mon$remote_protocol is null,1,0))"
# +" from mon$attachments a"
# +" where a.mon$attachment_id in (%s, %s) or upper(a.mon$user) = upper('%s')"
# % (con1.attachment_id, con2.attachment_id, 'cache writer')
# )
#
#
# cur1.execute(sql)
# for r in cur1.fetchall():
# server_cnt=r[0]
# server_pro=r[1]
# cache_wrtr=r[2]
#
#
# if server_pro == None:
# fba='Embedded'
# elif cache_wrtr == 1:
@ -70,52 +70,51 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# elif server_cnt == 2:
# fba='CS'
# else:
#
#
# f1=con1.db_info(fdb.isc_info_fetches)
#
#
# cur2=con2.cursor()
# cur2.execute('select 1 from rdb$database')
# for r in cur2.fetchall():
# pass
#
#
# f2=con1.db_info(fdb.isc_info_fetches)
#
#
# fba = 'SC' if f1 ==f2 else 'SS'
#
#
# #print(fba, con1.engine_version, con1.version)
# return fba
#
#
# finally:
# con1.close()
# con2.close()
#
#
# fb_arch= get_fb_arch(dsn)
#
#
# if fb_arch=='CS' or fb_arch=='CS':
# con1=fdb.connect(dsn=dsn)
# att1=con1.attachment_id
# con1.close()
#
#
# con2=fdb.connect(dsn=dsn)
# att2=con2.attachment_id
# con2.close()
#
#
# print( 'OK' if att2-att1<=1 else ('BAD: attachment_id diff=%d -- too big.' % (att2-att1) ) )
#
#
# else:
# print('OK')
#
#
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
OK
"""
act_1 = python_act('db_1', substitutions=substitutions_1)
@pytest.mark.version('>=4.0')
@pytest.mark.xfail
def test_1(db_1):
pytest.fail("Test not IMPLEMENTED")
def test_1(act_1: Action):
if act_1.get_server_architecture() in ['CS', 'SC']:
with act_1.db.connect() as con:
att1 = con.info.id
with act_1.db.connect() as con:
att2 = con.info.id
assert att2 - att1 == 1

View File

@ -2,30 +2,32 @@
#
# id: bugs.core_5434
# title: Read-only transactions in SuperServer could avoid immediate write of Header and TIP pages after change
# decription:
# decription:
# If current FB arch is SuperServer then we:
# 1. We make 'snapshot' of mon$io_stats.mon$page_writes value before test and then launch plently transactions (e.g, 50)
# 1. We make 'snapshot' of mon$io_stats.mon$page_writes value before test and then launch plently transactions (e.g, 50)
# in READ-ONLY mode. All of them then are immediately committed, w/o any actions.
# 2. After this we take 2nd 'snapshot' of mon$io_stats.mon$page_writes and compare it with 1st one.
# 3. Difference of 'mon$page_writes' values should be 1 (One).
# Otherwise (SC/CS) we defer checking because improvement currently not implemented for these modes.
#
#
# Checked on:
# 1) WI-T4.0.0.463, WI-V3.0.2.32670 - room for improvement DOES exist: page_diff = 102 pages
# 2) WI-T4.0.0.511, WI-V3.0.2.32676 - all OK, page_diff = 1 (One).
#
#
# tracker_id: CORE-5434
# min_versions: ['3.0.2']
# versions: 3.0.2
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
from firebird.qa import db_factory, python_act, Action
from firebird.driver import TraAccessMode, TPB
# version: 3.0.2
# resources: None
substitutions_1 = [('Deferred: fb_arch=.*', 'Acceptable')]
#substitutions_1 = [('Deferred: fb_arch=.*', 'Acceptable')]
substitutions_1 = []
init_script_1 = """"""
@ -33,32 +35,32 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1
#---
#
#
# import os
# import sys
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
#
#
# def get_fb_arch(a_dsn):
# try:
# con1 = fdb.connect(dsn = a_dsn)
# con2 = fdb.connect(dsn = a_dsn)
#
#
# cur1 = con1.cursor()
#
#
# sql=(
# "select count(distinct a.mon$server_pid), min(a.mon$remote_protocol), max(iif(a.mon$remote_protocol is null,1,0))"
# +" from mon$attachments a"
# +" where a.mon$attachment_id in (%s, %s) or upper(a.mon$user) = upper('%s')"
# % (con1.attachment_id, con2.attachment_id, 'cache writer')
# )
#
#
# cur1.execute(sql)
# for r in cur1.fetchall():
# server_cnt=r[0]
# server_pro=r[1]
# cache_wrtr=r[2]
#
#
# if server_pro == None:
# fba='Embedded'
# elif cache_wrtr == 1:
@ -66,76 +68,91 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# elif server_cnt == 2:
# fba='CS'
# else:
#
#
# f1=con1.db_info(fdb.isc_info_fetches)
#
#
# cur2=con2.cursor()
# cur2.execute('select 1 from rdb$database')
# for r in cur2.fetchall():
# pass
#
#
# f2=con1.db_info(fdb.isc_info_fetches)
#
#
# fba = 'SC' if f1 ==f2 else 'SS'
#
#
# #print(fba, con1.engine_version, con1.version)
# return fba
#
#
# finally:
# con1.close()
# con2.close()
#
#
# fb_arch=get_fb_arch(dsn)
#
#
# if fb_arch == 'SS':
#
#
# txParams = ( [ fdb.isc_tpb_read ] )
# c2 = fdb.connect(dsn=dsn)
#
#
# sql='select mon$page_writes from mon$io_stats where mon$stat_group=0'
# cur=db_conn.cursor()
#
#
# cur.execute(sql)
# for r in cur:
# page_writes_before_test = r[0]
# db_conn.commit()
#
#
# ta=[]
# for i in range(0, 50):
# ta.append( c2.trans( default_tpb = txParams ) )
# ta[i].begin()
#
#
# for i in range(0,len(ta)):
# ta[i].rollback()
#
#
#
#
# cur.execute(sql)
# for r in cur:
# page_writes_after_test = r[0]
#
#
# pw_diff = page_writes_after_test - page_writes_before_test
#
#
# db_conn.commit()
# c2.close()
#
#
# msg = 'Acceptable' if pw_diff == 1 else ('Too big value of page_writes diff: %s' % ( pw_diff ))
# else:
# # FB works NOT in SuperServer. This currently must be SKIPPED from checking (see notes in the ticket).
# msg = 'Deferred: fb_arch=%s' % fb_arch
#
#
# print(msg)
#
#
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
Acceptable
"""
act_1 = python_act('db_1', substitutions=substitutions_1)
@pytest.mark.version('>=3.0.2')
@pytest.mark.xfail
def test_1(db_1):
pytest.fail("Test not IMPLEMENTED")
def test_1(act_1: Action):
tpb = TPB(access_mode=TraAccessMode.READ).get_buffer()
sql = 'select mon$page_writes from mon$io_stats where mon$stat_group=0'
if act_1.get_server_architecture() == 'SS':
with act_1.db.connect() as con:
with act_1.db.connect() as con2:
cur = con.cursor()
page_writes_before_test = cur.execute(sql).fetchone()[0]
con.commit()
#
ta = []
for i in range(50):
tra = con2.transaction_manager(default_tpb=tpb)
tra.begin()
ta.append(tra)
for tra in ta:
tra.rollback()
#
cur = con.cursor()
page_writes_after_test = cur.execute(sql).fetchone()[0]
con.commit()
#
assert page_writes_after_test - page_writes_before_test == 1

View File

@ -2,33 +2,33 @@
#
# id: bugs.core_5435
# title: Badly selective index could be used for extra filtering even if selective index is used for sorting
# decription:
# decription:
# Test creates table and fills it with data like it was specified in the source ticket,
# but query has been CHANGED after discuss with dimitr (see letter 19-jan-2017 08:43):
# instead of 'f01 is null' we use 'f01 = 1' because actually problem that was fixed
# instead of 'f01 is null' we use 'f01 = 1' because actually problem that was fixed
# is NOT related with NULL handling.
# Usage of NULL will hide effect of improvement in optimizer because there is old bug
# in FB from early years which prevent engine from fast navigate on index (i.e. PLAN ORDER)
# when expression is like 'is NULL'.
# ----------------------------------
#
#
# Implementation details: we start trace and run ISQL with query, then stop trace, open its log
# and parse it with seeking lines with 'plan (' and 'fetch(es)'.
# Expected plan:
# PLAN (TEST ORDER TEST_F01_ID) -- confirmed on WI-T4.0.0.503
# and parse it with seeking lines with 'plan (' and 'fetch(es)'.
# Expected plan:
# PLAN (TEST ORDER TEST_F01_ID) -- confirmed on WI-T4.0.0.503
# 0 ms, 3 read(s), 44 fetch(es)
# WRONG (ineffective) plan:
# PLAN (TEST ORDER TEST_F01_ID INDEX (TEST_F02_ONLY)) -- detected on WI-T4.0.0.463
# 21 ms, 115 read(s), 157 fetch(es)
# Value of fetches is compared with threshold (currently = 80) which was received after several runs.
#
#
# tracker_id: CORE-5435
# min_versions: ['3.0.2']
# versions: 3.0.2
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
from firebird.qa import db_factory, python_act, Action
# version: 3.0.2
# resources: None
@ -41,40 +41,40 @@ db_1 = db_factory(page_size=8192, sql_dialect=3, init=init_script_1)
# test_script_1
#---
#
#
# import os
# import subprocess
# import time
# from fdb import services
# from subprocess import Popen
#
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
# db_file = db_conn.database_name
# # Obtain engine version:
# engine = str(db_conn.engine_version)
#
#
# db_conn.close()
#
#
# FETCHES_THRESHOLD = 80
#
#
# #--------------------------------------------
#
#
# def flush_and_close( file_handle ):
# # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os
#
#
# file_handle.flush()
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno())
# file_handle.close()
#
#
# #--------------------------------------------
#
#
# def cleanup( f_names_list ):
# global os
# for i in range(len( f_names_list )):
@ -85,28 +85,28 @@ db_1 = db_factory(page_size=8192, sql_dialect=3, init=init_script_1)
# else:
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
# del_name = None
#
#
# if del_name and os.path.isfile( del_name ):
# os.remove( del_name )
#
#
# #--------------------------------------------
#
#
# # Change FW to OFF in order to speed up initial data filling:
# ##################
# fn_nul = open(os.devnull, 'w')
#
#
# subprocess.call([ context['fbsvcmgr_path'], "localhost:service_mgr",
# "action_properties", "prp_write_mode", "prp_wm_async",
# "dbname", db_file ],
# stdout = fn_nul,
# stderr = subprocess.STDOUT
# )
#
#
# fn_nul.close()
#
#
# #####################
# # Prepare table: add data.
#
#
# sql_init='''
# recreate table test
# (
@ -114,7 +114,7 @@ db_1 = db_factory(page_size=8192, sql_dialect=3, init=init_script_1)
# f01 int,
# f02 int
# );
#
#
# set term ^;
# create or alter procedure sp_add_init_data(a_rows_to_add int)
# as
@ -131,29 +131,29 @@ db_1 = db_factory(page_size=8192, sql_dialect=3, init=init_script_1)
# ^
# set term ^;
# commit;
#
#
# execute procedure sp_add_init_data( 300000 );
# commit;
#
#
# create index test_f01_id on test(f01, id);
# create index test_f02_only on test(f02);
# create index test_f02_only on test(f02);
# commit;
# '''
#
#
# sql_cmd=open(os.path.join(context['temp_directory'],'tmp_core_5435.sql'), 'w')
# sql_cmd.write(sql_init)
# flush_and_close( sql_cmd )
#
#
# sql_log=open(os.path.join(context['temp_directory'],'tmp_core_5435.log'),'w')
# sql_err=open(os.path.join(context['temp_directory'],'tmp_core_5435.err'),'w')
#
#
# subprocess.call([ context['isql_path'], dsn, "-i", sql_cmd.name],stdout=sql_log, stderr=sql_err)
#
#
# flush_and_close( sql_log )
# flush_and_close( sql_err )
#
#
# #########################
#
#
# txt = '''# Generated auto, do not edit!
# database=%[\\\\\\\\/]security?.fdb
# {
@ -168,60 +168,60 @@ db_1 = db_factory(page_size=8192, sql_dialect=3, init=init_script_1)
# print_perf = true
# }
# '''
#
#
# f_trc_cfg=open( os.path.join(context['temp_directory'],'tmp_trace_5435.cfg'), 'w')
# f_trc_cfg.write(txt)
# flush_and_close( f_trc_cfg )
#
#
# # ##############################################################
# # S T A R T T R A C E i n S E P A R A T E P R O C E S S
# # ##############################################################
#
#
# f_trc_log=open( os.path.join(context['temp_directory'],'tmp_trace_5435.log'), "w")
# f_trc_err=open( os.path.join(context['temp_directory'],'tmp_trace_5435.err'), "w")
#
#
# p_trace = Popen( [ context['fbsvcmgr_path'], 'localhost:service_mgr', 'action_trace_start' , 'trc_cfg', f_trc_cfg.name],stdout=f_trc_log,stderr=f_trc_err)
#
#
#
#
# sql_run='''
# set list on;
# select count(*) cnt_check
# select count(*) cnt_check
# from (
# select *
# from test -- ############################################################################
# where f01 = 1 and f02=0 -- <<< ::: NB ::: we check here 'f01 = 1' rather than 'f01 is NULL' <<< !!! <<<
# order by f01, id -- ############################################################################
# from test -- ############################################################################
# where f01 = 1 and f02 = 0 -- <<< ::: NB ::: we check here 'f01 = 1' rather than 'f01 is NULL' <<< !!! <<<
# order by f01, id -- ############################################################################
# )
# ;
# '''
#
#
# sql_cmd=open(os.path.join(context['temp_directory'],'tmp_core_5435.sql'), 'w')
#
#
# sql_cmd.write(sql_run)
# flush_and_close( sql_cmd )
#
#
# sql_log=open(os.path.join(context['temp_directory'],'tmp_core_5435.log'),'w')
# sql_err=open(os.path.join(context['temp_directory'],'tmp_core_5435.err'),'w')
#
#
# subprocess.call([ context['isql_path'], dsn, "-i", sql_cmd.name],stdout=sql_log, stderr=sql_err)
#
#
# flush_and_close( sql_log )
# flush_and_close( sql_err )
#
#
# # 01-mar-2021: do NOT remove delay from here.
# # It must be at least 2 seconds, otherwise trace log will not be fulfilled
# # when run on Linux!
# time.sleep(2)
#
#
# # ####################################################
# # G E T A C T I V E T R A C E S E S S I O N I D
# # ####################################################
# # Save active trace session info into file for further parsing it and obtain session_id back (for stop):
#
#
# f_trc_lst = open( os.path.join(context['temp_directory'],'tmp_trace_5435.lst'), 'w')
# subprocess.call([context['fbsvcmgr_path'], 'localhost:service_mgr', 'action_trace_list'], stdout=f_trc_lst)
# flush_and_close( f_trc_lst )
#
#
# trcssn=0
# with open( f_trc_lst.name,'r') as f:
# for line in f:
@ -232,9 +232,9 @@ db_1 = db_factory(page_size=8192, sql_dialect=3, init=init_script_1)
# trcssn=word
# i=i+1
# break
#
#
# # Result: `trcssn` is ID of active trace session. Now we have to terminate it:
#
#
# # ####################################################
# # S E N D R E Q U E S T T R A C E T O S T O P
# # ####################################################
@ -244,14 +244,14 @@ db_1 = db_factory(page_size=8192, sql_dialect=3, init=init_script_1)
# fn_nul.close()
# else:
# print('Trace session was finished forcedly: could not find its trc_id.')
#
#
# p_trace.terminate()
# flush_and_close( f_trc_log )
# flush_and_close( f_trc_err )
#
#
# run_with_plan=''
# num_of_fetches=-1
#
#
# with open( f_trc_log.name,'r') as f:
# for line in f:
# if line.lower().startswith('plan ('):
@ -261,7 +261,7 @@ db_1 = db_factory(page_size=8192, sql_dialect=3, init=init_script_1)
# for k in range(len(words)):
# if words[k].startswith('fetch'):
# num_of_fetches = int( words[k-1] )
#
#
# if run_with_plan and num_of_fetches>0:
# print(run_with_plan)
# print('Number of fetches: acceptable.' if num_of_fetches < FETCHES_THRESHOLD else 'Too much fetches %(num_of_fetches)s: more than threshold = %(FETCHES_THRESHOLD)s' % locals())
@ -274,23 +274,93 @@ db_1 = db_factory(page_size=8192, sql_dialect=3, init=init_script_1)
# print(r)
# f.close()
# print('=' * 21)
#
#
# # CLEANUP
# #########
# time.sleep(1)
# cleanup( (f_trc_cfg, f_trc_lst, f_trc_log, f_trc_err, sql_log, sql_err, sql_cmd) )
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
FETCHES_THRESHOLD = 80
expected_stdout_1 = """
PLAN (TEST ORDER TEST_F01_ID)
Number of fetches: acceptable.
"""
"""
async_init_script_1 = """
recreate table test
(
id int not null,
f01 int,
f02 int
);
set term ^;
create or alter procedure sp_add_init_data(a_rows_to_add int)
as
declare n int;
declare i int = 0;
begin
n = a_rows_to_add;
while ( i < n ) do
begin
insert into test(id, f01, f02) values( :i, nullif(mod(:i, :n/20), 0), iif( mod(:i,3)<2, 0, 1) )
returning :i+1 into i;
end
end
^
set term ^;
commit;
execute procedure sp_add_init_data( 300000 );
commit;
create index test_f01_id on test(f01, id);
create index test_f02_only on test(f02);
commit;
"""
test_script_1 = """
set list on;
select count(*) cnt_check
from (
select *
from test -- ############################################################################
where f01 = 1 and f02 = 0 -- <<< ::: NB ::: we check here 'f01 = 1' rather than 'f01 is NULL' <<< !!! <<<
order by f01, id -- ############################################################################
);
"""
# Trace session configuration for core_5435: log every statement finish
# (time_threshold = 0 disables the duration filter) together with its
# plan and performance counters, so the fetch count can be parsed later.
trace_1 = ['time_threshold = 0',
           'log_initfini = false',
           'print_plan = true',
           'print_perf = true',
           'log_statement_finish = true',
           ]
@pytest.mark.version('>=3.0.2')
@pytest.mark.xfail
def test_1(db_1):
pytest.fail("Test not IMPLEMENTED")
def test_1(act_1: Action):
    """Check CORE-5435: query on partially-NULL composite index.

    Fills the table (with forced writes off to speed up the bulk insert),
    runs the checked query under a trace session, then parses the trace log
    for the execution plan and the reported number of fetches.  The query
    must use TEST_F01_ID and stay below FETCHES_THRESHOLD fetches.
    """
    # Async writes make the 300k-row init script noticeably faster.
    act_1.db.set_async_write()
    act_1.isql(switches=[], input=async_init_script_1)
    # Run the checked statement while the trace session is active.
    with act_1.trace(db_events=trace_1):
        act_1.reset()
        act_1.isql(switches=[], input=test_script_1)
    # Extract the plan line and the fetch counter from the trace output.
    plan_line = ''
    fetch_count = -1
    for trace_line in act_1.trace_log:
        if trace_line.lower().startswith('plan ('):
            plan_line = trace_line.strip()
        if 'fetch(es)' in trace_line:
            tokens = trace_line.split()
            for pos, token in enumerate(tokens):
                if token.startswith('fetch'):
                    # The amount precedes the 'fetch(es)' word.
                    fetch_count = int(tokens[pos - 1])
                    break
    # The composite index must be used and the fetch count must be small.
    assert plan_line == 'PLAN (TEST ORDER TEST_F01_ID)'
    assert fetch_count < FETCHES_THRESHOLD

View File

@ -2,34 +2,34 @@
#
# id: bugs.core_5441
# title: Cache physical numbers of often used data pages to reduce number of fetches of pointer pages
# decription:
# decription:
# We create table with single field, add several rows and create index.
# Number of these rows must be enough to fit all of them in the single data page.
# Than we do loop and query on every iteration one row, using PLAN INDEX.
# Only _first_ iteration should lead to reading PP (and this requires 1 fetch),
# Only _first_ iteration should lead to reading PP (and this requires 1 fetch),
# but all subseq. must require to read only DP. This should refect in trace as:
# * 4 fetches for 1st statement;
# * 3 fetches for statements starting with 2nd.
# Distinct number of fetches are accumulated in Python dict, and is displayed finally.
# We should have TWO distinct elements in this dict, and numbers in their values must
# differ at (N-1), where N = number of rows in the table.
#
#
# Checked on:
# * WI-T4.0.0.454 (22-nov-2016) -- number of fethes on every iteration is the same and equal to 4;
# * WI-T4.0.0.460 (02-dec-2016) -- only at FIRST iter. fetches=4; for all subsequent loops fetches = 3.
# This proves that lot of PP scans are avoided and necessary data are taken from cache.
#
#
# Also checked on:
# WI-V3.0.2.32677 (SS), WI-V3.0.2.32643 (SC/CS), WI-T4.0.0.519 (SS) -- all fine.
#
#
#
#
# tracker_id: CORE-5441
# min_versions: ['3.0.2']
# versions: 3.0.2
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
from firebird.qa import db_factory, python_act, Action
# version: 3.0.2
# resources: None
@ -40,62 +40,62 @@ init_script_1 = """
recreate table test(x int primary key);
commit;
insert into test(x) select r from (
select row_number()over() r
select row_number()over() r
from rdb$types a,rdb$types b
rows 10
)
)
order by rand();
commit;
set term ^;
set term ^;
create procedure sp_test as
declare n int;
declare c int;
begin
declare n int;
declare c int;
begin
n = 10;
while( n > 0 ) do
while( n > 0 ) do
begin
execute statement ( 'select 1 from test where x = ? rows 1' ) ( :n ) into c;
n = n - 1;
end
end^
n = n - 1;
end
end^
set term ;^
commit;
"""
"""
db_1 = db_factory(page_size=8192, sql_dialect=3, init=init_script_1)
# test_script_1
#---
#
#
# import os
# import subprocess
# import time
# from fdb import services
# from subprocess import Popen
#
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
#
#
# db_conn.close()
#
#
# #--------------------------------------------
#
#
# def flush_and_close( file_handle ):
# # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os
#
#
# file_handle.flush()
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno())
# file_handle.close()
#
#
# #--------------------------------------------
#
#
# def cleanup( f_names_list ):
# global os
# for i in range(len( f_names_list )):
@ -106,12 +106,12 @@ db_1 = db_factory(page_size=8192, sql_dialect=3, init=init_script_1)
# else:
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
# del_name = None
#
#
# if del_name and os.path.isfile( del_name ):
# os.remove( del_name )
#
#
# #--------------------------------------------
#
#
# txt = '''# Generated auto, do not edit!
# database=%[\\\\\\\\/]security?.fdb
# {
@ -123,37 +123,37 @@ db_1 = db_factory(page_size=8192, sql_dialect=3, init=init_script_1)
# time_threshold = 0
# include_filter = "%(select % from test where x = ?)%"
# log_statement_finish = true
#
#
# }
# '''
#
#
# f_trc_cfg=open( os.path.join(context['temp_directory'],'tmp_trace_5441.cfg'), 'w')
# f_trc_cfg.write(txt)
# flush_and_close( f_trc_cfg )
#
#
# # ##############################################################
# # S T A R T T R A C E i n S E P A R A T E P R O C E S S
# # ##############################################################
#
#
# f_trc_log=open( os.path.join(context['temp_directory'],'tmp_trace_5441.log'), "w")
# f_trc_err=open( os.path.join(context['temp_directory'],'tmp_trace_5441.err'), "w")
#
#
# p_trace = Popen( [ context['fbsvcmgr_path'], 'localhost:service_mgr', 'action_trace_start' , 'trc_cfg', f_trc_cfg.name],stdout=f_trc_log,stderr=f_trc_err)
#
#
# time.sleep(1)
#
#
# # ####################################################
# # G E T A C T I V E T R A C E S E S S I O N I D
# # ####################################################
# # Save active trace session info into file for further parsing it and obtain session_id back (for stop):
#
#
# f_trc_lst = open( os.path.join(context['temp_directory'],'tmp_trace_5441.lst'), 'w')
# subprocess.call([context['fbsvcmgr_path'], 'localhost:service_mgr', 'action_trace_list'], stdout=f_trc_lst)
# flush_and_close( f_trc_lst )
#
#
# # !!! DO NOT REMOVE THIS LINE !!!
# # time.sleep(1)
#
#
# trcssn=0
# with open( f_trc_lst.name,'r') as f:
# for line in f:
@ -164,21 +164,21 @@ db_1 = db_factory(page_size=8192, sql_dialect=3, init=init_script_1)
# trcssn=word
# i=i+1
# break
#
#
# # Result: `trcssn` is ID of active trace session. Now we have to terminate it:
#
#
# con1 = fdb.connect(dsn=dsn)
# cur1=con1.cursor()
#
#
# #############################
# # R U N T E S T P R O C
# #############################
# cur1.callproc('sp_test')
# con1.close()
#
#
# # Let trace log to be entirely written on disk:
# time.sleep(1)
#
#
# # ####################################################
# # S E N D R E Q U E S T T R A C E T O S T O P
# # ####################################################
@ -186,18 +186,18 @@ db_1 = db_factory(page_size=8192, sql_dialect=3, init=init_script_1)
# fn_nul = open(os.devnull, 'w')
# subprocess.call([context['fbsvcmgr_path'], 'localhost:service_mgr', 'action_trace_stop','trc_id', trcssn], stdout=fn_nul)
# fn_nul.close()
#
#
# # 23.02.2021. DELAY FOR AT LEAST 1 SECOND REQUIRED HERE!
# # Otherwise trace log can remain empty.
# time.sleep(2)
#
#
# p_trace.terminate()
# flush_and_close( f_trc_log )
# flush_and_close( f_trc_err )
#
#
# # Following file should be EMPTY:
# ################
#
#
# f_list=(f_trc_err,)
# for i in range(len(f_list)):
# f_name=f_list[i].name
@ -205,9 +205,9 @@ db_1 = db_factory(page_size=8192, sql_dialect=3, init=init_script_1)
# with open( f_name,'r') as f:
# for line in f:
# print("Unexpected STDERR, file "+f_name+": "+line)
#
#
# fetches_distinct_amounts={}
#
#
# with open( f_trc_log.name,'r') as f:
# for line in f:
# if 'fetch(es)' in line:
@ -218,26 +218,50 @@ db_1 = db_factory(page_size=8192, sql_dialect=3, init=init_script_1)
# fetches_distinct_amounts[ words[k-1] ] = 1
# else:
# fetches_distinct_amounts[ words[k-1] ] += 1
#
#
# for k, v in sorted( fetches_distinct_amounts.items() ):
# print( ''.join( ('fetches=',k) ), 'occured', v, 'times' )
#
#
# # CLEANUP
# #########
# time.sleep(1)
# cleanup( (f_trc_cfg, f_trc_lst, f_trc_log, f_trc_err) )
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
expected_stdout_1 = """
fetches=3 occured 9 times
fetches=4 occured 1 times
"""
"""
# Trace session configuration for core_5441: log every statement finish
# (time_threshold = 0), restricted by include_filter to the statements
# issued by sp_test's EXECUTE STATEMENT, so only their fetch counts appear.
trace_1 = ['time_threshold = 0',
           'log_initfini = false',
           'log_statement_finish = true',
           'include_filter = "%(select % from test where x = ?)%"',
           ]
@pytest.mark.version('>=3.0.2')
@pytest.mark.xfail
def test_1(db_1):
pytest.fail("Test not IMPLEMENTED")
def test_1(act_1: Action, capsys):
    """Check CORE-5441: caching of physical data-page numbers.

    Calls sp_test (10 indexed lookups) under trace, then tallies how many
    times each distinct "N fetch(es)" amount occurs in the trace log.  Only
    the first lookup should also read the pointer page (4 fetches); all
    subsequent ones should need 3, which is what expected_stdout_1 pins.
    """
    with act_1.trace(db_events=trace_1), act_1.db.connect() as con:
        c = con.cursor()
        c.call_procedure('sp_test')
    # Tally the distinct fetch amounts reported in the trace log.
    fetches_distinct_amounts = {}
    for line in act_1.trace_log:
        if 'fetch(es)' in line:
            words = line.split()
            for k in range(len(words)):
                if words[k].startswith('fetch'):
                    # The amount is the word right before 'fetch(es)'.
                    # dict.get() replaces the non-idiomatic
                    # `if not amount in ...` membership/initialization dance.
                    amount = words[k - 1]
                    fetches_distinct_amounts[amount] = fetches_distinct_amounts.get(amount, 0) + 1
    for k, v in sorted(fetches_distinct_amounts.items()):
        # NOTE: the 'occured' spelling must match expected_stdout_1 exactly.
        print(f'fetches={k} occured {v} times')
    # Check captured output against the expected distribution.
    act_1.expected_stdout = expected_stdout_1
    act_1.stdout = capsys.readouterr().out
    assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@ -2,23 +2,23 @@
#
# id: bugs.core_5447
# title: EXECUTE STATEMENT <e> when <e> starts with '--' issues -Unexpected ... column <NNN>, value <NNN> is invalid and can change randomly
# decription:
# decription:
# We run EB that is show in the ticket three times, with redirection STDOUT and STDERR to separate files.
# Then we open file of STDERR and parse it: search for lines which contain "-Unexpected end of command" text.
# Extract nineth word from this and add it to Python structure of type = SET.
# Finally, we check that this set:
# 1) all columns are positive integers;
# 2) contains only one element (i.e. all columns have the same value).
#
#
# Checked on 3.0.02.32684, 4.0.0.531 - all fine.
#
#
# tracker_id: CORE-5447
# min_versions: ['3.0.2']
# versions: 3.0.2
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
from firebird.qa import db_factory, python_act, Action
# version: 3.0.2
# resources: None
@ -31,47 +31,47 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1
#---
#
#
# import os
# import sys
#
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
#
#
# so=sys.stdout
# se=sys.stderr
#
#
#
#
# f_sql_log=os.path.join(context['temp_directory'],'tmp_c5447.log')
# f_sql_err=os.path.join(context['temp_directory'],'tmp_c5447.err')
#
#
# sys.stdout = open( f_sql_log, 'w')
# sys.stderr = open( f_sql_err, 'w')
#
#
# es_sql='''
# set term ^;
# execute block as
# begin
# execute statement '-- table ''test'' has no trigger, DROP TRIGGER is skipped.';
# execute statement '-- table ''test'' has no trigger, DROP TRIGGER is skipped.';
# end
# ^
# execute block as
# begin
# execute statement '-- table ''test'' has no trigger, DROP TRIGGER is skipped.';
# execute statement '-- table ''test'' has no trigger, DROP TRIGGER is skipped.';
# end
# ^
# execute block as
# begin
# execute statement '-- table ''test'' has no trigger, DROP TRIGGER is skipped.';
# execute statement '-- table ''test'' has no trigger, DROP TRIGGER is skipped.';
# end
# ^
# set term ;^
# '''
# runProgram('isql',[dsn], es_sql)
#
#
# sys.stdout = so
# sys.stderr = se
#
#
# col_set=set()
# with open( f_sql_err, 'r') as f:
# for line in f:
@ -81,25 +81,46 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# col_number=line.split()[8]
# print( 'OK: column is integer > 0' if col_number.isdigit() and str(col_number) > 0 else 'FAIL: column is ZERO, NEGATIVE or NaN' )
# col_set.add( line.split()[8] )
#
#
# print( 'OK: all columns are the same' if len(col_set)==1 else 'FAIL: columns differ or empty set()' )
#
#
# os.remove(f_sql_log)
# os.remove(f_sql_err)
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
OK: column is integer > 0
OK: column is integer > 0
OK: column is integer > 0
OK: all columns are the same
"""
act_1 = python_act('db_1', substitutions=substitutions_1)
test_script_1 = """
set term ^;
execute block as
begin
execute statement '-- table ''test'' has no trigger, DROP TRIGGER is skipped.';
end
^
execute block as
begin
execute statement '-- table ''test'' has no trigger, DROP TRIGGER is skipped.';
end
^
execute block as
begin
execute statement '-- table ''test'' has no trigger, DROP TRIGGER is skipped.';
end
^
set term ;^
"""
@pytest.mark.version('>=3.0.2')
@pytest.mark.xfail
def test_1(db_1):
pytest.fail("Test not IMPLEMENTED")
def test_1(act_1: Action):
    """Check CORE-5447: EXECUTE STATEMENT of a '--' comment-only string.

    Runs three identical execute blocks and inspects isql's stderr: every
    '-Unexpected end of command' diagnostic must report the same positive
    integer column (not a random/negative value as on affected builds).
    """
    act_1.expected_stderr = "We expect errors"
    act_1.isql(switches=[], input=test_script_1)
    columns_seen = set()
    for line in act_1.stderr.splitlines():
        if '-Unexpected end of command' not in line:
            continue
        # -Unexpected end of command - line 0, column -45949567
        # 0           1  2  3       4 5    6  7      8
        token = line.split()[8]
        assert token.isdigit() and int(token) > 0, "column is ZERO, NEGATIVE or NaN"
        columns_seen.add(token)
    # All three diagnostics must agree on the column number.
    assert len(columns_seen) == 1, "columns differ or empty set()"

View File

@ -2,7 +2,7 @@
#
# id: bugs.core_5457
# title: Bugcheck 167 (invalid SEND request)
# decription:
# decription:
# Reproduced on: WI-V3.0.1.32609 - got in firebird.log:
# "internal Firebird consistency check (invalid SEND request (167), file: JrdStatement.cpp line: 325)"
# On client side got:
@ -12,7 +12,7 @@
# - internal Firebird consistency check (can't continue after bugcheck)
# -902
# 335544333
#
#
# Test extracts content of firebird.log, then runs scenario which earlier led to "invalid SEND request (167)"
# and then again get firebird.log for comparing with its previous content.
# The only new record in firebird.log must be:
@ -22,15 +22,18 @@
# FB30SS, build 3.0.4.32988: OK, 3.047s.
# FB40CS, build 4.0.0.955: OK, 4.531s.
# FB40SS, build 4.0.0.1008: OK, 3.141s.
#
#
#
#
# tracker_id: CORE-5457
# min_versions: ['3.0.2']
# versions: 3.0.2
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
import time
import re
from difflib import unified_diff
from firebird.qa import db_factory, python_act, Action
# version: 3.0.2
# resources: None
@ -43,7 +46,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1
#---
#
#
# import os
# import sys
# import subprocess
@ -51,28 +54,28 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# import re
# import time
# from fdb import services
#
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
# db_conn.close()
#
#
# #--------------------------------------------
#
#
# def flush_and_close( file_handle ):
# # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os
#
#
# file_handle.flush()
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno())
# file_handle.close()
#
#
# #--------------------------------------------
#
#
# def cleanup( f_names_list ):
# global os
# for i in range(len( f_names_list )):
@ -83,16 +86,16 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# else:
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
# del_name = None
#
#
# if del_name and os.path.isfile( del_name ):
# os.remove( del_name )
#
#
# #--------------------------------------------
#
#
# def svc_get_fb_log( f_fb_log ):
#
#
# global subprocess
#
#
# subprocess.call( [ context['fbsvcmgr_path'],
# "localhost:service_mgr",
# "action_get_fb_log"
@ -100,15 +103,15 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# stdout=f_fb_log, stderr=subprocess.STDOUT
# )
# return
#
#
# ###########################################################################################
#
#
# f_fblog_before=open( os.path.join(context['temp_directory'],'tmp_5457_fblog_before.txt'), 'w')
# svc_get_fb_log( f_fblog_before )
# flush_and_close( f_fblog_before )
#
#
# con1=fdb.connect(dsn = dsn)
#
#
# sp_test_ddl='''
# create procedure sp_calc_val(a_id int) returns(val int) as
# begin
@ -116,11 +119,11 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# suspend;
# end
# '''
#
#
# test_table_ddl='''
# create table test(
# id int primary key,
# txt varchar(80),
# id int primary key,
# txt varchar(80),
# calc_val computed by ( (select val from sp_calc_val(test.id) ) )
# )
# '''
@ -128,82 +131,132 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# con1.commit()
# con1.execute_immediate(test_table_ddl)
# con1.commit()
#
#
# cur1=con1.cursor()
# cur1.execute('insert into test select row_number()over(), ascii_char( 96+row_number()over() ) from rdb$types rows 7')
# con1.commit()
#
#
# cur1.execute('select count(*), sum(calc_val) from test')
# for r in cur1:
# pass;
#
#
#
#
# sp_alter_ddl='''
# alter procedure sp_calc_val (a_id int) returns (val int) as
# begin
# val = a_id * 7;
# suspend;
# alter procedure sp_calc_val (a_id int) returns (val int) as
# begin
# val = a_id * 7;
# suspend;
# end
# '''
#
#
# con1.execute_immediate(sp_alter_ddl)
#
#
# cur1.execute('select count(*), sum(calc_val) from test')
# for r in cur1:
# pass;
# con1.commit()
#
#
# cur1.execute('select count(*), sum(calc_val) from test')
# for r in cur1:
# pass;
#
#
# time.sleep(1)
#
#
# f_fblog_after=open( os.path.join(context['temp_directory'],'tmp_5457_fblog_after.txt'), 'w')
# svc_get_fb_log( f_fblog_after )
# flush_and_close( f_fblog_after )
#
#
#
#
# # Compare firebird.log versions BEFORE and AFTER this test:
# ######################
#
#
# oldfb=open(f_fblog_before.name, 'r')
# newfb=open(f_fblog_after.name, 'r')
#
#
# difftext = ''.join(difflib.unified_diff(
# oldfb.readlines(),
# oldfb.readlines(),
# newfb.readlines()
# ))
# oldfb.close()
# newfb.close()
#
#
# f_diff_txt=open( os.path.join(context['temp_directory'],'tmp_5457_diff.txt'), 'w')
# f_diff_txt.write(difftext)
# flush_and_close( f_diff_txt )
#
#
# unexpected_patterns =(
# re.compile('\\s+internal\\s+Firebird\\s+consistency\\s+check', re.IGNORECASE),
# )
#
#
# with open( f_diff_txt.name,'r') as f:
# for line in f:
# if line.startswith('+'):
# match2some = filter( None, [ p.search(line) for p in unexpected_patterns ] )
# if match2some:
# print( 'UNEXPECTED TEXT IN FIREBIRD.LOG: ' + (' '.join(line.split()).upper()) )
#
#
# # Cleanup:
# ##########
# time.sleep(1)
# cleanup( (f_diff_txt,f_fblog_before,f_fblog_after) )
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
@pytest.mark.version('>=3.0.2')
@pytest.mark.xfail
def test_1(db_1):
pytest.fail("Test not IMPLEMENTED")
def test_1(act_1: Action, capsys):
    """Check CORE-5457: no bugcheck 167 ('invalid SEND request').

    Scenario: create a selectable procedure, a table with a COMPUTED BY
    column that calls it, query the table, ALTER the procedure while the
    compiled statement may still be cached, then query again.  On affected
    builds this left an 'internal Firebird consistency check' record in
    firebird.log.  The test diffs firebird.log snapshots taken before and
    after the scenario and fails if any such record was added.
    """
    # Snapshot of firebird.log before running the scenario.
    log_before = act_1.get_firebird_log()
    #
    with act_1.db.connect() as con:
        sp_test_ddl = """
        create procedure sp_calc_val(a_id int) returns(val int) as
        begin
            val = a_id * 10;
            suspend;
        end
        """
        con.execute_immediate(sp_test_ddl)
        con.commit()
        # Table with a computed column that depends on the procedure above.
        test_table_ddl = """
        create table test(
            id int primary key,
            txt varchar(80),
            calc_val computed by ((select val from sp_calc_val(test.id)))
        )
        """
        con.execute_immediate(test_table_ddl)
        con.commit()
        #
        c = con.cursor()
        c.execute('insert into test select row_number()over(), ascii_char(96 + row_number()over()) from rdb$types rows 7')
        con.commit()
        #
        # First execution compiles (and caches) the statement.
        c.execute('select count(*), sum(calc_val) from test').fetchall()
        #
        # Alter the procedure the computed column depends on.
        sp_alter_ddl = """
        alter procedure sp_calc_val (a_id int) returns (val int) as
        begin
            val = a_id * 7;
            suspend;
        end
        """
        con.execute_immediate(sp_alter_ddl)
        # Re-run the query before and after commit; this exact sequence
        # triggered the bugcheck on affected builds — do not reorder.
        c.execute('select count(*), sum(calc_val) from test').fetchall()
        con.commit()
        c.execute('select count(*), sum(calc_val) from test').fetchall()
    #
    # Give the server a moment to flush firebird.log before re-reading it.
    time.sleep(1)
    #
    log_after = act_1.get_firebird_log()
    # Any added 'internal Firebird consistency check' line means the bug is back.
    unexpected_patterns = [re.compile('\\s+internal\\s+Firebird\\s+consistency\\s+check', re.IGNORECASE)]
    for line in unified_diff(log_before, log_after):
        if line.startswith('+'):  # only lines added to the log during the test
            match2some = list(filter(None, [p.search(line) for p in unexpected_patterns]))
            if match2some:
                print(f'UNEXPECTED: {line}')
    #
    # Nothing must have been printed above.
    act_1.expected_stdout = ''
    act_1.stdout = capsys.readouterr().out
    assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@ -2,11 +2,11 @@
#
# id: bugs.core_5464
# title: AV in fbclient when reading blob stored in incompatible encoding
# decription:
# decription:
# Reproduced crash of isql on WI-T4.0.0.463
# (standard message appeared with text about program that is to be closed).
# Checked on 3.0.2.32677, 4.0.0.519 - works fine.
#
#
# tracker_id: CORE-5464
# min_versions: ['3.0.2']
# versions: 3.0.2
@ -24,8 +24,8 @@ init_script_1 = """
create domain d_int int;
comment on domain d_int is
'*Лев Николаевич Толстой * *Анна Каренина * /Мне отмщение, и аз воздам/ *ЧАСТЬ ПЕРВАЯ* *I *
Все счастливые семьи похожи друг на друга, каждая несчастливая
семья несчастлива по-своему.
Все счастливые семьи похожи друг на друга, каждая несчастливая
семья несчастлива по-своему.
Все смешалось в доме Облонских. Жена узнала, что муж был в связи
с бывшею в их доме француженкою-гувернанткой, и объявила мужу, что
не может жить с ним в одном доме. Положение это продолжалось уже
@ -34,24 +34,24 @@ init_script_1 = """
чувствовали, что нет смысла в их сожительстве и что на каждом
п1
';
commit;
"""
commit;
"""
db_1 = db_factory(charset='WIN1251', sql_dialect=3, init=init_script_1)
# test_script_1
#---
#
#
# import os
# import sys
#
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
# db_conn.close()
#
#
# so=sys.stdout
# se=sys.stderr
#
#
# fn_log = os.path.join(context['temp_directory'],'tmp_c5464.log')
# fn_err = os.path.join(context['temp_directory'],'tmp_c5464.err')
# sys.stdout = open( fn_log, 'w')
@ -61,49 +61,67 @@ db_1 = db_factory(charset='WIN1251', sql_dialect=3, init=init_script_1)
# connect '%s' user '%s' password '%s';
# set blob all;
# set list on;
#
#
# select c.rdb$character_set_name as connection_cset, r.rdb$character_set_name as db_default_cset
# from mon$attachments a
# join rdb$character_sets c on a.mon$character_set_id = c.rdb$character_set_id
# cross join rdb$database r where a.mon$attachment_id=current_connection;
#
# select rdb$field_name, rdb$system_flag, rdb$description
# from rdb$fields where rdb$description is not null;
#
# select rdb$field_name, rdb$system_flag, rdb$description
# from rdb$fields where rdb$description is not null;
# ''' % ( dsn, user_name, user_password )
#
#
# runProgram('isql',['-q'],sql)
#
#
# sys.stdout = so
# sys.stderr = se
#
#
# with open( fn_log,'r') as f:
# for line in f:
# line=line.replace('SQL> ', '').replace('CON> ', '').rstrip()
# if line.split():
# print(line)
#
#
# with open( fn_err,'r') as f:
# for line in f:
# if line.split():
# print(line)
#
#
# os.remove(fn_log)
# os.remove(fn_err)
#
#
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
test_script_1 = """
set blob all;
set list on;
select c.rdb$character_set_name as connection_cset, r.rdb$character_set_name as db_default_cset
from mon$attachments a
join rdb$character_sets c on a.mon$character_set_id = c.rdb$character_set_id
cross join rdb$database r where a.mon$attachment_id=current_connection;
select rdb$field_name, rdb$system_flag, rdb$description
from rdb$fields where rdb$description is not null;
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
CONNECTION_CSET WIN1250
DB_DEFAULT_CSET WIN1251
"""
expected_stderr_1 = """
Statement failed, SQLSTATE = 22018
Cannot transliterate character between character sets
"""
Cannot transliterate character between character sets
"""
@pytest.mark.version('>=3.0.2')
@pytest.mark.xfail
def test_1(db_1):
pytest.fail("Test not IMPLEMENTED")
def test_1(act_1: Action):
    """Check CORE-5464: reading a blob stored in an incompatible encoding.

    Connects with charset WIN1250 to a WIN1251 database and selects a blob
    description; isql must report SQLSTATE 22018 (cannot transliterate)
    instead of crashing the client, while still printing the charset info.
    """
    # Both streams are verified: partial stdout plus the expected error.
    act_1.expected_stdout = expected_stdout_1
    act_1.expected_stderr = expected_stderr_1
    act_1.isql(switches=['-q'], input=test_script_1, charset='WIN1250')
    assert act_1.clean_stderr == act_1.clean_expected_stderr
    assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@ -2,21 +2,21 @@
#
# id: bugs.core_5470
# title: Trace INCLUDE_FILTER with [[:WHITESPACE:]]+ does not work when statement contains newline is issued
# decription:
# We create a list of several DDLs which all contain NEWLINE character(s) between jeyword and name of DB object.
# decription:
# We create a list of several DDLs which all contain NEWLINE character(s) between keyword and name of DB object.
# Then we launch trace session and execute all these DDLs.
# Finally we check whether trace log contains every DDL or not.
# Expected result: text of every DDL should be FOUND in the trace log.
#
#
# Checked on 2.5.7.27048, 3.0.2.32685, 4.0.0.531 - all fine.
#
#
# tracker_id: CORE-5470
# min_versions: ['2.5.7']
# versions: 2.5.7
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
from firebird.qa import db_factory, python_act, Action
# version: 2.5.7
# resources: None
@ -29,38 +29,38 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1
#---
#
#
# import os
# import subprocess
# import time
# from fdb import services
# from subprocess import Popen
#
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
#
#
# # Obtain engine version:
# engine = str(db_conn.engine_version) # convert to text because 'float' object has no attribute 'startswith'
#
#
# db_conn.close()
#
#
# #--------------------------------------------
#
#
# def flush_and_close( file_handle ):
# # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os
#
#
# file_handle.flush()
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno())
# file_handle.close()
#
#
# #--------------------------------------------
#
#
# def cleanup( f_names_list ):
# global os
# for i in range(len( f_names_list )):
@ -71,19 +71,19 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# else:
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
# del_name = None
#
#
# if del_name and os.path.isfile( del_name ):
# os.remove( del_name )
#
#
# #--------------------------------------------
#
#
# if engine.startswith('2.5'):
# fb_home = fb_home + 'bin'+os.sep
# txt = '''# Generated auto, do not edit!
# <database %[\\\\\\\\/]security?.fdb>
# enabled false
# </database>
#
#
# <database %[\\\\\\\\/]bugs.core_5470.fdb>
# enabled true
# time_threshold 0
@ -108,31 +108,31 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# include_filter = "%(recreate|create|alter|drop|comment on)[[:WHITESPACE:]]+(domain|generator|sequence|exception|procedure|function|table|index|view|trigger|role|filter|external function)%"
# }
# '''
#
#
# f_trc_cfg=open( os.path.join(context['temp_directory'],'tmp_trc_5470.cfg'), 'w')
# f_trc_cfg.write(txt)
# flush_and_close( f_trc_cfg )
#
#
# # ##############################################################
# # S T A R T T R A C E i n S E P A R A T E P R O C E S S
# # ##############################################################
#
#
# f_trc_log=open( os.path.join(context['temp_directory'],'tmp_trc_5470.log'), "w")
# f_trc_err=open( os.path.join(context['temp_directory'],'tmp_trc_5470.err'), "w")
#
#
# p_trace = Popen( [ context['fbsvcmgr_path'], 'localhost:service_mgr', 'action_trace_start' , 'trc_cfg', f_trc_cfg.name],stdout=f_trc_log,stderr=f_trc_err)
#
#
# time.sleep(1)
#
#
# # ####################################################
# # G E T A C T I V E T R A C E S E S S I O N I D
# # ####################################################
# # Save active trace session info into file for further parsing it and obtain session_id back (for stop):
#
#
# f_trc_lst = open( os.path.join(context['temp_directory'],'tmp_trace_5470.lst'), 'w')
# subprocess.call([context['fbsvcmgr_path'], 'localhost:service_mgr', 'action_trace_list'], stdout=f_trc_lst)
# flush_and_close( f_trc_lst )
#
#
# trcssn=0
# with open( f_trc_lst.name,'r') as f:
# for line in f:
@ -144,52 +144,52 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# i=i+1
# break
# # Result: `trcssn` is ID of active trace session. Now we have to terminate it:
#
#
# ddl_lst=(
# '''recreate
#
# table
#
#
#
# t_test(x int)
# '''recreate
#
# table
#
#
#
# t_test(x int)
# ''',
# '''comment on
# table
#
#
# t_test is
# '''comment on
# table
#
#
# t_test is
# 'foo
# bar'
# bar'
# ''',
# '''
#
#
# create
# or
#
#
# alter
# view
#
#
# v_rio
#
#
# as
# select *
# from
#
#
# rdb$database
# '''
# )
#
#
# con1 = fdb.connect(dsn=dsn)
# for s in ddl_lst:
# con1.execute_immediate(s)
#
#
# con1.commit()
# con1.close()
#
#
# # Let trace log to be entirely written on disk:
# time.sleep(2)
#
#
# # ####################################################
# # S E N D R E Q U E S T T R A C E T O S T O P
# # ####################################################
@ -199,14 +199,14 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# fn_nul.close()
# # # DO NOT REMOVE THIS LINE:
# # time.sleep(2)
#
#
# p_trace.terminate()
# flush_and_close( f_trc_log )
# flush_and_close( f_trc_err )
#
#
# # Following file should be EMPTY:
# ################
#
#
# f_list=(f_trc_err,)
# for i in range(len(f_list)):
# f_name=f_list[i].name
@ -214,29 +214,68 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# with open( f_name,'r') as f:
# for line in f:
# print("Unexpected STDERR, file "+f_name+": "+line)
#
#
# with open( f_trc_log.name,'r') as f:
# lines=f.read()
# for s in ddl_lst:
# print( 'FOUND' if lines.find(s) > 0 else 'NOT found' )
#
#
# # CLEANUP
# #########
# time.sleep(1)
# cleanup( (f_trc_cfg, f_trc_lst, f_trc_log, f_trc_err) )
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
# Expected output of the legacy (commented-out) implementation: one 'FOUND'
# line per DDL statement located in the trace log.
# NOTE(review): expected_stdout_1 appears unused by the new test_1, which
# asserts on the trace text directly — kept for reference; confirm before removal.
expected_stdout_1 = """
FOUND
FOUND
FOUND
"""
act_1 = python_act('db_1', substitutions=substitutions_1)
# DDL statements that deliberately contain NEWLINE characters between the
# SQL keyword and the object name: the trace include_filter (see trace_1)
# uses [[:WHITESPACE:]]+ and must still match every one of them (CORE-5470).
ddl_lst = ["""recreate
table
t_test(x int)
""",
"""comment on
table
t_test is
'foo
bar'
""",
"""
create
or
alter
view
v_rio
as
select *
from
rdb$database
"""]
# Trace session configuration: log every finished statement (time_threshold = 0)
# and keep only DDL statements, matching keyword and object name separated by
# one or more whitespace characters (including newlines).
trace_1 = ['time_threshold = 0',
'log_initfini = false',
'log_errors = true',
'log_statement_finish = true',
'include_filter = "%(recreate|create|alter|drop|comment on)[[:WHITESPACE:]]+(domain|generator|sequence|exception|procedure|function|table|index|view|trigger|role|filter|external function)%"',
]
@pytest.mark.version('>=2.5.7')
@pytest.mark.xfail
def test_1(db_1):
pytest.fail("Test not IMPLEMENTED")
def test_1(act_1: Action):
    """Execute DDLs containing embedded newlines under an active trace session.

    Every statement text from ddl_lst must appear verbatim in the trace log,
    proving that include_filter with [[:WHITESPACE:]]+ matched statements whose
    keyword and object name are separated by newlines (CORE-5470).
    """
    with act_1.trace(db_events=trace_1), act_1.db.connect() as con:
        for cmd in ddl_lst:
            con.execute_immediate(cmd)
        con.commit()
    # Move the captured trace log into act_1.stdout for inspection.
    act_1.trace_to_stdout()
    for cmd in ddl_lst:
        # Membership test instead of the original `find(cmd) > 0`: find() returns
        # the match offset, so a statement appearing at offset 0 of the log would
        # have spuriously failed the old assertion.
        assert cmd in act_1.stdout

View File

@ -2,18 +2,18 @@
#
# id: bugs.core_5475
# title: Provide ability to filter out info and warnings from trace log
# decription:
# decription:
# For FB 2.5 we verify that one may to suppress messages like TRACE_INIT/FINI ("log_initfini false")
# Also, by default no more warnings should appear in the trace.
#
#
# For FB 3.0 and above we check that trace log contains only those gdscodes which we expect.
# Also, trace should NOT contain gdscodes which we explicitly prohibit or which we do not expect
# Also, trace should NOT contain gdscodes which we explicitly prohibit or which we do not expect
# because not empty list in 'include_gds_codes' parameter.
# Also, we verify that no more TRACE_INIT / TRACE_FINI in trace because log_initfini=false.
#
#
# Checked on 2.5.7.27048, 3.0.2.32685, 4.0.0.531 -- all fine.
#
# 01.04.2020. Up to 4.0.0.1820 (~29.03.2020, including) cursor.execute() with SELECT
#
# 01.04.2020. Up to 4.0.0.1820 (~29.03.2020, including) cursor.execute() with SELECT
# statement did NOT actually fetch record until we explicitly do further actions, e.g:
# for r in cur:
# print(r[0])
@ -23,9 +23,9 @@
# that we specify in the trace config (i.e. include_gdscodes or exclude_gdscodes).
# It was decided to replace selectable cursor with DML and use value that will violates limits
# of Firebird implementation (timestamp field, assign value = '01.01.0000' - it can not be used in FB).
#
#
# See letter from Vlad, 31-mar-2020 14:29.
#
#
# Checked after refactoring (01.04.2020) on:
# 4.0.0.1850 SS: 9.750s.
# 4.0.0.1820 SC: 9.703s.
@ -33,14 +33,15 @@
# 3.0.6.33273 SS: 8.235s.
# 3.0.6.33240 SC: 8.016s.
# 3.0.6.33247 CS: 10.563s.
#
#
# tracker_id: CORE-5475
# min_versions: ['2.5.7']
# versions: 3.0
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
from firebird.qa import db_factory, python_act, Action
from firebird.driver import DatabaseError, FirebirdWarning
# version: 3.0
# resources: None
@ -50,50 +51,50 @@ substitutions_1 = []
init_script_1 = """
recreate table test(
id int generated by default as identity constraint test_pk primary key using index test_id,
tiny_num smallint,
tiny_num smallint,
dts timestamp,
s varchar(4) -- added 01.04.2020
);
commit;
insert into test(id, tiny_num, dts) values(1,32767, null);
commit;
"""
"""
db_1 = db_factory(sql_dialect=1, init=init_script_1)
# test_script_1
#---
#
#
# import os
# import subprocess
# import time
# from fdb import services
# from subprocess import Popen
#
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
#
#
# # Obtain engine version:
# engine = str(db_conn.engine_version) # convert to text because 'float' object has no attribute 'startswith'
# db_conn.close()
#
#
# #--------------------------------------------
#
#
# def flush_and_close( file_handle ):
# # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os
#
#
# file_handle.flush()
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno())
# file_handle.close()
#
#
# #--------------------------------------------
#
#
# def cleanup( f_names_list ):
# global os
# for i in range(len( f_names_list )):
@ -104,13 +105,13 @@ db_1 = db_factory(sql_dialect=1, init=init_script_1)
# else:
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
# del_name = None
#
#
# if del_name and os.path.isfile( del_name ):
# os.remove( del_name )
#
#
# #--------------------------------------------
#
#
#
#
# txt = '''# Generated auto, do not edit!
# database=%[\\\\\\\\/]security?.fdb
# {
@ -124,53 +125,53 @@ db_1 = db_factory(sql_dialect=1, init=init_script_1)
# log_initfini = false
# log_errors = true
# log_warnings = true
#
#
# log_statement_prepare = true
# log_statement_finish = true
#
# # 1. We EXPLICITLY ALLOW to be logged (and EXPECT because of current test scenario):
# # 1.1) any kind of "arithmetic exc, num ovf, or string trunc"
#
# # 1. We EXPLICITLY ALLOW to be logged (and EXPECT because of current test scenario):
# # 1.1) any kind of "arithmetic exc, num ovf, or string trunc"
# # 1.2) PK/UK violation
# # 1.3) (warning) 336068855 : Warning: ALL on ALL is not granted to <...>.
# include_gds_codes = arith_except,unique_key_violation,336068855
#
#
# # We EXPLICITLY PREVENT from logging:
# # 2.1 "FP divide by zero",
# # 2.2 "Integer divide by zero" and
# # 2.2 "Integer divide by zero" and
# # 2.3 (warning) "DATE data type is now called TIMESTAMP" (warning)
# exclude_gds_codes = 335544772,335544778,335544808
#
#
# # We do not prevent (explicitly) but DO NOT expect see in trace following errors:
# # 1) 335544347 : validation error for column ..., value "*** null ***"
# # 2) 335544913 : value exceeds the range for valid timestamps
#
#
# }
# '''
#
#
# f_trc_cfg=open( os.path.join(context['temp_directory'],'tmp_trace_5475.cfg'), 'w')
# f_trc_cfg.write(txt)
# flush_and_close( f_trc_cfg )
#
#
# # ##############################################################
# # S T A R T T R A C E i n S E P A R A T E P R O C E S S
# # ##############################################################
#
#
# f_trc_log=open( os.path.join(context['temp_directory'],'tmp_trace_5475.log'), "w")
# f_trc_err=open( os.path.join(context['temp_directory'],'tmp_trace_5475.err'), "w")
#
#
# p_trace = Popen( [ context['fbsvcmgr_path'], 'localhost:service_mgr', 'action_trace_start' , 'trc_cfg', f_trc_cfg.name],stdout=f_trc_log,stderr=f_trc_err)
#
#
# time.sleep(1)
#
#
# # ####################################################
# # G E T A C T I V E T R A C E S E S S I O N I D
# # ####################################################
# # Save active trace session info into file for further parsing it and obtain session_id back (for stop):
#
#
# f_trc_lst = open( os.path.join(context['temp_directory'],'tmp_trace_5475.lst'), 'w')
# subprocess.call([context['fbsvcmgr_path'], 'localhost:service_mgr', 'action_trace_list'], stdout=f_trc_lst)
# flush_and_close( f_trc_lst )
#
#
# trcssn=0
# with open( f_trc_lst.name,'r') as f:
# for line in f:
@ -182,8 +183,8 @@ db_1 = db_factory(sql_dialect=1, init=init_script_1)
# i=i+1
# break
# # Result: `trcssn` is ID of active trace session. Now we have to terminate it:
#
#
#
#
# con1 = fdb.connect(dsn=dsn)
# cur1 = con1.cursor()
# try:
@ -194,8 +195,8 @@ db_1 = db_factory(sql_dialect=1, init=init_script_1)
# except Exception as e:
# print(e[0])
# con1.close()
#
#
#
#
# con2 = fdb.connect(dsn=dsn)
# cur2 = con2.cursor()
# try:
@ -206,8 +207,8 @@ db_1 = db_factory(sql_dialect=1, init=init_script_1)
# except Exception as e:
# print(e[0])
# con2.close()
#
#
#
#
# con3 = fdb.connect(dsn=dsn)
# cur3 = con3.cursor()
# try:
@ -219,15 +220,15 @@ db_1 = db_factory(sql_dialect=1, init=init_script_1)
# except Exception as e:
# print(e[0])
# con3.close()
#
#
#
#
# con4 = fdb.connect(dsn=dsn)
# cur4 = con4.cursor()
# try:
# #############################################################
# ### NB ### CHANGED 01.04.2020 BECAUSE OF NEW BEHAVIOUR IN 4.0
# #############################################################
#
#
# # Up to 4.0.0.1820 cursor.execute() with SELECT statement did NOT fetch record until
# # until we explicitly do futher actions, i.e.: for r in cur4: / print(r[0]) etc
# # Trace log did NOT contain any of 335544914 / 335545033 exactly because of ***THIS***
@ -235,29 +236,29 @@ db_1 = db_factory(sql_dialect=1, init=init_script_1)
# # See letter from Vlad, 31-mar-2020 14:29.
# ##########################################
# # !! USELESS !! DO NOT USE IT >>> cur4.execute( "select cast('qwerty' as varchar(5)) from rdb$database" )
#
#
# # Decided to try to append into table value with timestamp that is out of available range: 01.01.0000.
# # This action raises gdscode = 335544913 which belongs neither to include* nor to exclude* lists.
# # For this reason such gdscode should NOT appear in the trace - it will be checked at the end of this test.
# cur4.execute( "insert into test( dts ) values( '01.01.0000' )" )
#
# cur4.execute( "insert into test( dts ) values( '01.01.0000' )" )
#
# except Exception as e:
# print(e[0])
# con4.close()
#
#
#
#
# con5 = fdb.connect(dsn=dsn, sql_dialect=1)
# cur5 = con5.cursor()
# try:
# # Leads to WARNING in trace:
# # 335544807 : SQL warning code = 301
# # 335544808 : DATE data type is now called TIMESTAMP
# cur5.execute( "update test set dts = cast('now' as date)" )
# cur5.execute( "update test set dts = cast('now' as date)" )
# except Exception as e:
# print(e[0])
# con5.close()
#
#
#
#
# con6 = fdb.connect(dsn=dsn)
# cur6 = con6.cursor()
# try:
@ -269,32 +270,32 @@ db_1 = db_factory(sql_dialect=1, init=init_script_1)
# except Exception as e:
# print(e[0])
# con6.close()
#
#
#
#
# con7 = fdb.connect(dsn=dsn)
# # Should raise:
# # (warning) 336068855 : Warning: ALL on ALL is not granted to <...>.
# con7.execute_immediate('revoke all on all from tmp$c5475')
# con7.close()
#
#
# # 1. We ALLOW to be logged:
# # 1.1) any kind of "arithmetic exc, num ovf, or string trunc"
#
#
# # 1. We ALLOW to be logged:
# # 1.1) any kind of "arithmetic exc, num ovf, or string trunc"
# # 1.2) PK/UK violation
# # 1.3) (warning) 336068855 : Warning: ALL on ALL is not granted to <...>.
# #include_gds_codes = arith_except,no_dup,336068855
#
#
# # We PREVENT from logging:
# # 2.1 "FP divide by zero",
# # 2.2 "Integer divide by zero" and
# # 2.2 "Integer divide by zero" and
# # 2.3 (warning) "DATE data type is now called TIMESTAMP" (warning)
# #exclude_gds_codes = 335544772,335544778,335544808
#
#
#
#
# # Let trace log to be entirely written on disk:
# time.sleep(1)
#
#
#
#
# # ####################################################
# # S E N D R E Q U E S T T R A C E T O S T O P
# # ####################################################
@ -304,14 +305,14 @@ db_1 = db_factory(sql_dialect=1, init=init_script_1)
# fn_nul.close()
# # DO NOT REMOVE THIS LINE:
# time.sleep(1)
#
#
# p_trace.terminate()
# flush_and_close( f_trc_log )
# flush_and_close( f_trc_err )
#
#
# # Following file should be EMPTY:
# ################
#
#
# f_list=(f_trc_err,)
# for i in range(len(f_list)):
# f_name=f_list[i].name
@ -319,25 +320,25 @@ db_1 = db_factory(sql_dialect=1, init=init_script_1)
# for line in f:
# if line.split():
# print("Unexpected STDERR, file "+f_name+": "+line)
#
#
# # 1. We ALLOW to be logged (and EXPECT because of current test scenario):
#
#
# # 1. We ALLOW to be logged (and EXPECT because of current test scenario):
# # 1.1) "arithmetic exc" caused by numeric overflow // 335544321, 335544916
# # 1.2) PK/UK violation // 335544665
# # 1.3) ALL on ALL is not granted to <...> // 336068855 - warning
# #include_gds_codes = arith_except,unique_key_violation,336068855
#
#
# # We PREVENT from logging:
# # 2.1 "FP divide by zero" // 335544772 - occurs when dialect=1
# # 2.2 "Integer divide by zero" // 335544778 - occurs when dialect=3
# # 2.3 (warning) "DATE data type is now called TIMESTAMP" // 335544808 - warning
# #exclude_gds_codes = 335544772,335544778,335544808
#
#
# # we do not prevent but do not expect to be logged:
# # 335544347 : validation error for column ..., value "*** null ***"
# # 335544914 : string right truncation (plus 335545033 : expected length 5, actual 6)
#
#
#
#
# # Following gdscodes MUST BE found in the trace log:
# # ~~~~~~~~~~~~~~~~~~~~~~~~~~
# missed_expected_token = 'ERROR: missed expected token which MUST be in the trace log'
@ -347,7 +348,7 @@ db_1 = db_factory(sql_dialect=1, init=init_script_1)
# ,'335544665' : missed_expected_token
# ,'336068855' : missed_expected_token
# }
#
#
# # Following gdscodes must NOT present on trace log because we explicitly PROHIBIT them:
# # ~~~~~~~~~~~~~~~~~~~~~~~~~~~
# not_found_explicitly_prohibited = 'SUCCESS: no tokens which are explicitly prohibited in exclude_gds_codes list'
@ -358,7 +359,7 @@ db_1 = db_factory(sql_dialect=1, init=init_script_1)
# ,'TRACE_INIT' : not_found_explicitly_prohibited
# ,'TRACE_FINI' : not_found_explicitly_prohibited
# }
#
#
# # Following gdscodes are UNEXPECTED, i.e. we did not prohibit them explicitly
# # but none of them are in include_gds_codes ==> they should NOT appear in the trace:
# not_found_implicitly_prohibited = 'SUCCESS: no tokens which are not in include_gds_codes list.'
@ -366,8 +367,8 @@ db_1 = db_factory(sql_dialect=1, init=init_script_1)
# '335544347' : not_found_implicitly_prohibited
# ,'335544913' : not_found_implicitly_prohibited
# }
#
#
#
#
# with open( f_trc_log.name,'r') as f:
# for line in f:
# if line.split():
@ -378,49 +379,51 @@ db_1 = db_factory(sql_dialect=1, init=init_script_1)
# prohibit_set[ starting_word ] = 'ERROR: found token which is EXPLICITLY prohibited in exclude_gds_codes list.'
# elif starting_word in unexpect_set:
# unexpect_set[ starting_word ] = 'ERROR: found token NOT from include_gdscodes list (implicitly prohibited).'
#
#
# for k,v in sorted(expected_set.items()):
# print('expected_token:', k, '; trace search result:', v)
#
#
# for k,v in sorted(prohibit_set.items()):
# print('prohibit_token:', k, '; trace search result:', v)
#
#
# for k,v in sorted(unexpect_set.items()):
# print('unexpect_token:', k, '; trace search result:', v)
#
#
#
#
# # CLEANUP
# #########
# time.sleep(1)
# cleanup( (f_trc_cfg, f_trc_lst, f_trc_log, f_trc_err) )
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
# The three dictionaries below map token -> verdict string. test_1 overwrites
# the values in place while scanning the trace log, then prints them sorted,
# so the initial value is the verdict reported when the token is NOT found.
# Following gdscodes MUST BE found in the trace log:
MISSED_EXPECTED_TOKEN = 'ERROR: missed expected token which MUST be in the trace log'
EXPECTED_SET = {'335544321' : MISSED_EXPECTED_TOKEN,
'335544916' : MISSED_EXPECTED_TOKEN,
'335544665' : MISSED_EXPECTED_TOKEN,
'336068855' : MISSED_EXPECTED_TOKEN
}
# Following gdscodes must NOT be present in the trace log because we explicitly PROHIBIT them:
NOT_FOUND_EXPLICITLY_PROHIBITED = 'SUCCESS: no tokens which are explicitly prohibited in exclude_gds_codes list'
PROHIBIT_SET = {'335544772' : NOT_FOUND_EXPLICITLY_PROHIBITED,
'335544778' : NOT_FOUND_EXPLICITLY_PROHIBITED,
'335544808' : NOT_FOUND_EXPLICITLY_PROHIBITED,
'TRACE_INIT' : NOT_FOUND_EXPLICITLY_PROHIBITED,
'TRACE_FINI' : NOT_FOUND_EXPLICITLY_PROHIBITED
}
# Following gdscodes are UNEXPECTED, i.e. we did not prohibit them explicitly
# but none of them are in include_gds_codes ==> they should NOT appear in the trace:
NOT_FOUND_IMPLICITLY_PROHIBITED = 'SUCCESS: no tokens which are not in include_gds_codes list.'
UNEXPECT_SET = {'335544347' : NOT_FOUND_IMPLICITLY_PROHIBITED,
'335544913' : NOT_FOUND_IMPLICITLY_PROHIBITED
}
expected_stdout_1 = """
Error while executing SQL statement:
- SQLCODE: -802
- arithmetic exception, numeric overflow, or string truncation
- Integer divide by zero. The code attempted to divide an integer value by an integer divisor of zero.
Error while executing SQL statement:
- SQLCODE: -803
- violation of PRIMARY or UNIQUE KEY constraint "TEST_PK" on table "TEST"
- Problematic key value is ("ID" = 1)
Error while executing SQL statement:
- SQLCODE: -802
- arithmetic exception, numeric overflow, or string truncation
- numeric value is out of range
Error while executing SQL statement:
- SQLCODE: -833
- value exceeds the range for valid timestamps
Error while executing SQL statement:
- SQLCODE: -625
- validation error for column "TEST"."ID", value "*** null ***"
expected_token: 335544321 ; trace search result: SUCCESS: found expected token which MUST be in the trace log.
expected_token: 335544665 ; trace search result: SUCCESS: found expected token which MUST be in the trace log.
expected_token: 335544916 ; trace search result: SUCCESS: found expected token which MUST be in the trace log.
@ -432,11 +435,88 @@ expected_stdout_1 = """
prohibit_token: TRACE_INIT ; trace search result: SUCCESS: no tokens which are explicitly prohibited in exclude_gds_codes list
unexpect_token: 335544347 ; trace search result: SUCCESS: no tokens which are not in include_gds_codes list.
unexpect_token: 335544913 ; trace search result: SUCCESS: no tokens which are not in include_gds_codes list.
"""
"""
# Trace session configuration (CORE-5475): log everything with no time
# threshold, then narrow the gdscodes via include_gds_codes / exclude_gds_codes.
trace_1 = ['time_threshold = 0',
'log_initfini = false',
'log_connections = true',
'log_errors = true',
'log_warnings = true',
'log_statement_prepare = true',
'log_statement_finish = true',
# 1. We EXPLICITLY ALLOW to be logged (and EXPECT because of current test scenario):
# 1.1) any kind of "arithmetic exc, num ovf, or string trunc"
# 1.2) PK/UK violation
# 1.3) (warning) 336068855 : Warning: ALL on ALL is not granted to <...>.
'include_gds_codes = arith_except,unique_key_violation,336068855',
# We EXPLICITLY PREVENT from logging:
# 2.1 "FP divide by zero",
# 2.2 "Integer divide by zero" and
# 2.3 (warning) "DATE data type is now called TIMESTAMP" (warning)
'exclude_gds_codes = 335544772,335544778,335544808',
# We do not prevent (explicitly) but DO NOT expect to see in the trace following errors:
# 1) 335544347 : validation error for column ..., value "*** null ***"
# 2) 335544913 : value exceeds the range for valid timestamps
]
@pytest.mark.version('>=3.0')
@pytest.mark.xfail
def test_1(db_1):
pytest.fail("Test not IMPLEMENTED")
def test_1(act_1: Action, capsys):
    """Provoke a fixed set of errors/warnings under trace and verify that only
    the gdscodes allowed by include_gds_codes (and not excluded) reach the log.

    Each scenario opens its own dialect-1 connection so every error/warning is
    tied to a separate attachment in the trace output.
    """
    with act_1.trace(db_events=trace_1):
        with act_1.db.connect(sql_dialect=1) as con:
            cur = con.cursor()
            with pytest.raises(DatabaseError, match='.*Floating-point divide by zero.*'):
                cur.execute('insert into test(tiny_num) values(1/0)')
        with act_1.db.connect(sql_dialect=1) as con:
            cur = con.cursor()
            with pytest.raises(DatabaseError, match='.*violation of PRIMARY or UNIQUE KEY constraint "TEST_PK" on table "TEST".*'):
                cur.execute('insert into test(id,tiny_num) values(?, ?)', [1, 1])
        with act_1.db.connect(sql_dialect=1) as con:
            cur = con.cursor()
            with pytest.raises(DatabaseError, match='.*numeric value is out of range.*'):
                cur.execute('update test set tiny_num = tiny_num + 1')
        with act_1.db.connect(sql_dialect=1) as con:
            cur = con.cursor()
            with pytest.raises(DatabaseError, match='.*value exceeds the range for valid timestamps.*'):
                cur.execute("insert into test(dts) values('01.01.0000')" )
        with act_1.db.connect(sql_dialect=1) as con:
            cur = con.cursor()
            # Leads to WARNING in trace:
            # 335544807 : SQL warning code = 301
            # 335544808 : DATE data type is now called TIMESTAMP
            with pytest.warns(FirebirdWarning, match='.*DATE data type is now called TIMESTAMP.*'):
                cur.execute("update test set dts = cast('now' as date)")
        with act_1.db.connect(sql_dialect=1) as con:
            cur = con.cursor()
            with pytest.raises(DatabaseError, match='.*validation error for column.*'):
                cur.execute('insert into test(id) values(null)')
        with act_1.db.connect(sql_dialect=1) as con:
            # (warning) 336068855 : Warning: ALL on ALL is not granted to <...>.
            with pytest.warns(FirebirdWarning, match='.*ALL on ALL is not granted.*'):
                con.execute_immediate('revoke all on all from tmp$c5475')
    # Classify the leading token of every non-blank trace line into one of the
    # three expectation dictionaries (their values are rewritten in place).
    for trace_line in act_1.trace_log:
        words = trace_line.split()
        if words:
            token = words[0]
            if token in EXPECTED_SET:
                EXPECTED_SET[token] = 'SUCCESS: found expected token which MUST be in the trace log.'
            elif token in PROHIBIT_SET:
                PROHIBIT_SET[token] = 'ERROR: found token which is EXPLICITLY prohibited in exclude_gds_codes list.'
            elif token in UNEXPECT_SET:
                UNEXPECT_SET[token] = 'ERROR: found token NOT from include_gdscodes list (implicitly prohibited).'
    # Report verdicts in deterministic (sorted) order for comparison.
    for name, result in sorted(EXPECTED_SET.items()):
        print(f'expected_token: {name} ; trace search result: {result}')
    for name, result in sorted(PROHIBIT_SET.items()):
        print(f'prohibit_token: {name} ; trace search result: {result}')
    for name, result in sorted(UNEXPECT_SET.items()):
        print(f'unexpect_token: {name} ; trace search result: {result}')
    # Check captured output against the expected listing.
    act_1.expected_stdout = expected_stdout_1
    act_1.stdout = capsys.readouterr().out
    assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@ -2,44 +2,47 @@
#
# id: bugs.core_5488_session_idle
# title: Timeout for IDLE connection (SET SESSION IDLE TIMEOUT <N>)
# decription:
# We create .sql script with
# decription:
# We create .sql script with
# 1) SET SESSION IDLE TIMEOUT with small delay N (in seconds);
# 2) two trivial statements that are separated by artificial delay with T > N.
# Both statements are trivial: select <literal> from rdb$database.
#
#
# This delay is done by isql 'SHELL' command and its form depends on OS:
# * shell ping 127.0.0.1 -- for Windows
# * shell sleep -- for Linux.
#
#
# Before .sql we launch trace with logging events for parsing them at final stage of test.
#
#
# When this .sql script is launched and falls into delay, session timeout must be triggered
# and second statement should raise exception.
# We redirect ISQL output to separate logs and expect that:
# * log of STDOUT contains all except result of 2nd statement (which must fail);
# * log of STDERR contains exception SQLSTATE = 08003 / connection shutdown / -Idle timeout expired
#
#
# Trace log should contain only following events:
# attach to DB, start Tx, first statement finish, rollback Tx and detach DB.
#
#
# ::: NB:::
# No events related to SECOND statement should be in the trace log.
#
#
# Checked on WI-4.0.0.550 -- works fine.
#
#
# tracker_id: CORE-5488
# min_versions: ['4.0']
# versions: 4.0
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
import os
import re
from firebird.qa import db_factory, python_act, Action
# version: 4.0
# resources: None
substitutions_1 = [('timeout .* second', 'timeout second'), ('.*After line [\\d]+.*', ''), ('.*shell.*', '')]
substitutions_1 = [('timeout .* second', 'timeout second'),
('.*After line [\\d]+.*', ''), ('.*shell.*', '')]
init_script_1 = """"""
@ -47,40 +50,40 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1
#---
#
#
# import os
# import re
# import subprocess
# import time
# from fdb import services
# from subprocess import Popen
#
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
#
#
# fb_home = services.connect(host='localhost', user= user_name, password= user_password).get_home_directory()
#
#
# fbv=db_conn.firebird_version # 'WI-...' ==> Windows; 'LI-' ==> Linux
# db_conn.close()
#
#
#
#
# #--------------------------------------------
#
#
# def flush_and_close( file_handle ):
# # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os
#
#
# file_handle.flush()
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno())
# file_handle.close()
#
#
# #--------------------------------------------
#
#
# def cleanup( f_names_list ):
# global os
# for i in range(len( f_names_list )):
@ -91,40 +94,40 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# else:
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
# del_name = None
#
#
# if del_name and os.path.isfile( del_name ):
# os.remove( del_name )
#
#
# #--------------------------------------------
#
#
#
#
# # 2017-03-08T17:36:33.6870 (3720:00D22BD0) ATTACH_DATABASE | START_TRANSACTION | EXECUTE_STATEMENT_FINISH | ...
# trace_dts_pattern=re.compile('.*(ATTACH_DATABASE|START_TRANSACTION|EXECUTE_STATEMENT_FINISH|ROLLBACK_TRANSACTION|DETACH_DATABASE)')
#
#
# session_idle_timeout=1
# shell_sleep_sec=session_idle_timeout + 2
# if os.name == 'nt':
# shell_sleep_cmd = 'shell ping -n %d 127.0.0.1 > %s' % (shell_sleep_sec, os.devnull)
# else:
# shell_sleep_cmd = 'shell sleep %d > %s' % (shell_sleep_sec, os.devnull)
#
#
# sql='''
# set list on;
# set bail on;
# set echo on;
#
#
# set session idle timeout %(session_idle_timeout)s second;
#
#
# select 1 as point_1 from rdb$database;
# %(shell_sleep_cmd)s;
# select 2 as point_2 from rdb$database;
# quit;
# ''' % locals()
#
#
# f_sql_cmd=open( os.path.join(context['temp_directory'],'tmp_c5488_run_session_idle.sql'), 'w')
# f_sql_cmd.write(sql)
# flush_and_close( f_sql_cmd )
#
#
# txt = '''# Generated auto, do not edit!
# database=%[\\\\\\\\/]security?.fdb
# {
@ -142,28 +145,28 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# log_statement_finish = true
# }
# '''
#
#
# f_trc_cfg=open( os.path.join(context['temp_directory'],'tmp_c5488_trc_session_idle.cfg'), 'w', buffering = 0)
# f_trc_cfg.write(txt)
# flush_and_close( f_trc_cfg )
#
#
# # ##############################################################
# # S T A R T T R A C E i n S E P A R A T E P R O C E S S
# # ##############################################################
#
#
# f_trc_log=open( os.path.join(context['temp_directory'],'tmp_c5488_trc_session_idle.log'), "w", buffering = 0)
# f_trc_err=open( os.path.join(context['temp_directory'],'tmp_c5488_trc_session_idle.err'), "w", buffering = 0)
#
#
# p_trace = Popen( [ context['fbsvcmgr_path'], 'localhost:service_mgr', 'action_trace_start' , 'trc_cfg', f_trc_cfg.name],
# stdout=f_trc_log,
# stderr=f_trc_err
# )
#
#
# time.sleep(1)
#
#
# f_isql_log=open( os.path.join(context['temp_directory'],'tmp_c5488_run_session_idle.log'), 'w', buffering = 0)
# f_isql_err=open( os.path.join(context['temp_directory'],'tmp_c5488_run_session_idle.err'), 'w', buffering = 0)
#
#
# ######################
# # S T A R T I S Q L
# ######################
@ -173,17 +176,17 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# )
# flush_and_close( f_isql_log )
# flush_and_close( f_isql_err )
#
#
#
#
# # ####################################################
# # G E T A C T I V E T R A C E S E S S I O N I D
# # ####################################################
# # Save active trace session info into file for further parsing it and obtain session_id back (for stop):
#
#
# f_trc_lst = open( os.path.join(context['temp_directory'],'tmp_c5488_trc_session_idle.lst'), 'w', buffering = 0)
# subprocess.call([context['fbsvcmgr_path'], 'localhost:service_mgr', 'action_trace_list'], stdout=f_trc_lst)
# flush_and_close( f_trc_lst )
#
#
# trcssn=0
# with open( f_trc_lst.name,'r') as f:
# for line in f:
@ -194,11 +197,11 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# trcssn=word
# i=i+1
# break
#
#
# # Result: `trcssn` is ID of active trace session. Now we have to terminate it:
#
#
#
#
#
#
# # ####################################################
# # S E N D R E Q U E S T T R A C E T O S T O P
# # ####################################################
@ -208,11 +211,11 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# fn_nul.close()
# # DO NOT REMOVE THIS LINE:
# time.sleep(1)
#
#
# p_trace.terminate()
# flush_and_close( f_trc_log )
# flush_and_close( f_trc_err )
#
#
# # Output of TRACE results:
# ##########################
# with open(f_trc_log.name, 'r') as f:
@ -223,7 +226,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# # Since fixed CORE-6469 ("Provide ability to see in the trace log actions related to session management"), 20.01.2021:
# # https://github.com/FirebirdSQL/firebird/commit/a65f19f8b36384d59a55fbb6e0a43a6b84cf4978
# print( ''.join( ( 'TRCLOG: ', line ) ) )
#
#
# # Following file should be EMPTY:
# ################
# f_list=(f_trc_err,)
@ -233,51 +236,100 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# with open( f_name,'r') as f:
# for line in f:
# print("Unexpected TRCERR, file "+f_name+": "+line)
#
#
#
#
# # Output of ISQL results
# ################
# with open(f_isql_log.name, 'r') as f:
# for line in f:
# if line.split():
# print( ''.join( ( 'SQLLOG: ', line.strip() ) ) )
#
#
# with open(f_isql_err.name, 'r') as f:
# for line in f:
# if line.split():
# print( ''.join( ( 'SQLERR: ', line.strip() ) ) )
#
#
#
#
# # CLEANUP
# #########
# time.sleep(1)
# cleanup( (f_trc_cfg, f_trc_lst, f_trc_log, f_trc_err, f_sql_cmd, f_isql_log, f_isql_err ) )
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
# No canned test script: the test drives isql and the trace session manually.
act_1 = python_act('db_1', substitutions=substitutions_1)

# Trace events expected in the trace log. The verbatim
# 'set session idle timeout second' line is visible in the trace since
# CORE-6469 made session-management statements traceable.
expected_trace_1 = """
ATTACH_DATABASE
START_TRANSACTION
EXECUTE_STATEMENT_FINISH
set session idle timeout second
EXECUTE_STATEMENT_FINISH
ROLLBACK_TRANSACTION
DETACH_DATABASE
"""
# Expected isql output: the script echoes its statements, the first SELECT
# succeeds, then the session sleeps past the 1-second idle timeout, so the
# second SELECT never produces a row (its failure goes to expected_stderr_1).
expected_stdout_1 = """
set session idle timeout second;
select 1 as point_1 from rdb$database;
POINT_1 1
select 2 as point_2 from rdb$database;
"""

# Expected isql error: the server shuts the connection down once the
# session idle timeout has expired.
expected_stderr_1 = """
Statement failed, SQLSTATE = 08003
connection shutdown
-Idle timeout expired.
"""
# Trace session configuration: capture every statement (no time threshold),
# skip init/fini noise, and log errors, connections, transactions and
# finished statements.
trace_1 = [f'{option} = {value}' for option, value in (
    ('time_threshold', '0'),
    ('log_initfini', 'false'),
    ('log_errors', 'true'),
    ('log_connections', 'true'),
    ('log_transactions', 'true'),
    ('log_statement_finish', 'true'),
)]
@pytest.mark.version('>=4.0')
def test_1(act_1: Action, capsys):
    """Check that an expired session idle timeout shuts the connection down
    and that the related events appear in the trace log.

    The isql script sets a 1-second idle timeout, runs one SELECT, sleeps
    past the timeout via a shell command, then tries a second SELECT which
    must fail with SQLSTATE 08003. The test then verifies:
      * isql stdout/stderr against expected_stdout_1 / expected_stderr_1;
      * the trace-log event sequence against expected_trace_1.
    """
    trace_dts_pattern = re.compile('.*(ATTACH_DATABASE|START_TRANSACTION|EXECUTE_STATEMENT_FINISH|ROLLBACK_TRANSACTION|DETACH_DATABASE)')
    session_idle_timeout = 1
    # Sleep longer than the timeout so the server terminates the connection.
    shell_sleep_sec = session_idle_timeout + 2
    if os.name == 'nt':
        shell_sleep_cmd = f'shell ping -n {shell_sleep_sec} 127.0.0.1 > {os.devnull}'
    else:
        shell_sleep_cmd = f'shell sleep {shell_sleep_sec} > {os.devnull}'
    sql = f"""
    set list on;
    set bail on;
    set echo on;
    set session idle timeout {session_idle_timeout} second;
    select 1 as point_1 from rdb$database;
    {shell_sleep_cmd};
    select 2 as point_2 from rdb$database;
    quit;
    """
    # Run isql while a trace session is active
    with act_1.trace(db_events=trace_1):
        act_1.expected_stderr = expected_stderr_1
        act_1.expected_stdout = expected_stdout_1
        act_1.isql(switches=['-q', '-n'], input=sql)
    # Print the event names of interest from the trace log (captured by capsys)
    for line in act_1.trace_log:
        if trace_dts_pattern.match(line):
            print(line.strip().split()[-1])
        if 'set session idle' in line:
            # Since fixed CORE-6469 ("Provide ability to see in the trace log
            # actions related to session management"), 20.01.2021:
            # https://github.com/FirebirdSQL/firebird/commit/a65f19f8b36384d59a55fbb6e0a43a6b84cf4978
            print(line)
    assert act_1.clean_stdout == act_1.clean_expected_stdout
    assert act_1.clean_stderr == act_1.clean_expected_stderr
    # Re-use act_1 to compare the captured trace events with expected_trace_1
    act_1.reset()
    act_1.expected_stdout = expected_trace_1
    act_1.stdout = capsys.readouterr().out
    assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@ -2,10 +2,10 @@
#
# id: bugs.core_5489
# title: Bad performance for NULLs filtering inside a navigational index scan
# description:
# description:
# See prototype and explanations for this test in CORE_5435.fbt
# Confirmed improvement:
#
#
# 3.0.2.32643, 4.0.0.563:
# **********
# PLAN (TEST ORDER TEST_F01_ID)
@ -13,20 +13,20 @@
# 1143 ms, 2375 read(s), 602376 fetch(es) ---------------- poor :(
# Table Natural Index
# ****************************************************
# TEST 300000
# TEST 300000
# **********
#
#
#
#
# 3.0.2.32708, 4.0.0.572:
# **********
# PLAN (TEST ORDER TEST_F01_ID)
# 0 ms, 22 read(s), 63 fetch(es) --------------------------- cool :)
# Table Natural Index
# ****************************************************
# TEST 20
# TEST 20
# **********
#
#
#
#
# tracker_id: CORE-5489
# min_versions: ['3.0.2']
# versions: 3.0.2
@ -46,38 +46,38 @@ db_1 = db_factory(page_size=8192, sql_dialect=3, init=init_script_1)
# test_script_1
#---
#
#
# import os
# import subprocess
# import time
# from fdb import services
# from subprocess import Popen
#
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
#
#
# # Obtain engine version:
# engine = str(db_conn.engine_version) # convert to text because 'float' object has no attribute 'startswith'
# db_file = db_conn.database_name
# db_conn.close()
#
#
# #--------------------------------------------
#
#
# def flush_and_close( file_handle ):
# # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os
#
#
# file_handle.flush()
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno())
# file_handle.close()
#
#
# #--------------------------------------------
#
#
# def cleanup( f_names_list ):
# global os
# for i in range(len( f_names_list )):
@ -88,31 +88,31 @@ db_1 = db_factory(page_size=8192, sql_dialect=3, init=init_script_1)
# else:
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
# del_name = None
#
#
# if del_name and os.path.isfile( del_name ):
# os.remove( del_name )
#
#
# #--------------------------------------------
#
#
# FETCHES_THRESHOLD = 80
#
#
# # Change FW to OFF in order to speed up initial data filling:
# ##################
#
#
# fn_nul = open(os.devnull, 'w')
#
#
# subprocess.call([ context['fbsvcmgr_path'], "localhost:service_mgr",
# "action_properties", "prp_write_mode", "prp_wm_async",
# "dbname", db_file ],
# stdout = fn_nul,
# stderr = subprocess.STDOUT
# )
#
#
# fn_nul.close()
#
#
# #####################
# # Prepare table: add data.
#
#
# sql_init='''
# recreate table test
# (
@ -120,7 +120,7 @@ db_1 = db_factory(page_size=8192, sql_dialect=3, init=init_script_1)
# f01 int,
# f02 int
# );
#
#
# set term ^;
# create or alter procedure sp_add_init_data(a_rows_to_add int)
# as
@ -137,29 +137,29 @@ db_1 = db_factory(page_size=8192, sql_dialect=3, init=init_script_1)
# ^
# set term ^;
# commit;
#
#
# execute procedure sp_add_init_data( 300000 );
# commit;
#
#
# create index test_f01_id on test(f01, id);
# create index test_f02_only on test(f02);
# create index test_f02_only on test(f02);
# commit;
# '''
#
#
# sql_cmd=open(os.path.join(context['temp_directory'],'tmp_core_5489.sql'), 'w')
# sql_cmd.write(sql_init)
# flush_and_close( sql_cmd )
#
#
# sql_log=open(os.path.join(context['temp_directory'],'tmp_core_5489.log'),'w')
# sql_err=open(os.path.join(context['temp_directory'],'tmp_core_5489.err'),'w')
#
#
# subprocess.call([context['isql_path'], dsn, "-i", sql_cmd.name],stdout=sql_log, stderr=sql_err)
#
#
# flush_and_close( sql_log )
# flush_and_close( sql_err )
#
#
# #########################
#
#
# # ::: NB ::: Trace config file format in 3.0 differs from 2.5 one:
# # 1) header section must be enclosed in "[" and "]",
# # 2) parameter-value pairs must be separated with '=' sign:
@ -167,13 +167,13 @@ db_1 = db_factory(page_size=8192, sql_dialect=3, init=init_script_1)
# # {
# # parameter = value
# # }
#
#
# if engine.startswith('2.5'):
# txt = '''# Generated auto, do not edit!
# <database %[\\\\\\\\/]security?.fdb>
# enabled false
# </database>
#
#
# <database %[\\\\\\\\/]bugs.core_5489.fdb>
# enabled true
# time_threshold 0
@ -197,29 +197,29 @@ db_1 = db_factory(page_size=8192, sql_dialect=3, init=init_script_1)
# print_perf = true
# }
# '''
#
#
# f_trc_cfg=open( os.path.join(context['temp_directory'],'tmp_trace_5489.cfg'), 'w')
# f_trc_cfg.write(txt)
# flush_and_close( f_trc_cfg )
#
#
# # ##############################################################
# # S T A R T T R A C E i n S E P A R A T E P R O C E S S
# # ##############################################################
#
#
# f_trc_log=open( os.path.join(context['temp_directory'],'tmp_trace_5489.log'), "w")
# f_trc_err=open( os.path.join(context['temp_directory'],'tmp_trace_5489.err'), "w")
#
#
# p_trace = Popen( [ context['fbsvcmgr_path'], 'localhost:service_mgr', 'action_trace_start' , 'trc_cfg', f_trc_cfg.name],stdout=f_trc_log,stderr=f_trc_err)
#
#
# time.sleep(1)
#
#
# sql_run='''
# set list on;
# --show version;
# select count(*) cnt_check
# select count(*) cnt_check
# from (
# select *
# from test
# from test
# where f01 -- ###################################################################
# IS NULL -- <<< ::: NB ::: we check here 'f01 is NULL', exactly as ticket says.
# and f02=0 -- ###################################################################
@ -227,31 +227,31 @@ db_1 = db_factory(page_size=8192, sql_dialect=3, init=init_script_1)
# )
# ;
# '''
#
#
# sql_cmd=open(os.path.join(context['temp_directory'],'tmp_core_5489.sql'), 'w')
# sql_cmd.write(sql_run)
# flush_and_close( sql_cmd )
#
#
# sql_log=open(os.path.join(context['temp_directory'],'tmp_core_5489.log'),'w')
# sql_err=open(os.path.join(context['temp_directory'],'tmp_core_5489.err'),'w')
#
#
# subprocess.call([context['isql_path'], dsn, "-i", sql_cmd.name],stdout=sql_log, stderr=sql_err)
#
#
# flush_and_close( sql_log )
# flush_and_close( sql_err )
#
#
# # ####################################################
# # G E T A C T I V E T R A C E S E S S I O N I D
# # ####################################################
# # Save active trace session info into file for further parsing it and obtain session_id back (for stop):
#
#
# f_trc_lst = open( os.path.join(context['temp_directory'],'tmp_trace_5489.lst'), 'w')
# subprocess.call([context['fbsvcmgr_path'], 'localhost:service_mgr', 'action_trace_list'], stdout=f_trc_lst)
# flush_and_close( f_trc_lst )
#
#
# # !!! DO NOT REMOVE THIS LINE !!!
# time.sleep(1)
#
#
# trcssn=0
# with open( f_trc_lst.name,'r') as f:
# for line in f:
@ -263,7 +263,7 @@ db_1 = db_factory(page_size=8192, sql_dialect=3, init=init_script_1)
# i=i+1
# break
# # Result: `trcssn` is ID of active trace session. Now we have to terminate it:
#
#
# # ####################################################
# # S E N D R E Q U E S T T R A C E T O S T O P
# # ####################################################
@ -273,15 +273,15 @@ db_1 = db_factory(page_size=8192, sql_dialect=3, init=init_script_1)
# fn_nul.close()
# # DO NOT REMOVE THIS LINE:
# time.sleep(1)
#
#
# p_trace.terminate()
# flush_and_close( f_trc_log )
# flush_and_close( f_trc_err )
#
#
#
#
# run_with_plan=''
# num_of_fetches=99999999
#
#
# with open( f_trc_log.name,'r') as f:
# for line in f:
# if line.lower().startswith('plan ('):
@ -291,18 +291,18 @@ db_1 = db_factory(page_size=8192, sql_dialect=3, init=init_script_1)
# for k in range(len(words)):
# if words[k].startswith('fetch'):
# num_of_fetches = int( words[k-1] )
#
#
# print(run_with_plan)
# print( 'Number of fetches: acceptable.'
# if num_of_fetches < FETCHES_THRESHOLD else
# print( 'Number of fetches: acceptable.'
# if num_of_fetches < FETCHES_THRESHOLD else
# 'Too much fetches %(num_of_fetches)s -- more than threshold = %(FETCHES_THRESHOLD)s' % locals()
# )
#
#
# # CLEANUP
# #########
# time.sleep(1)
# cleanup( (f_trc_cfg, f_trc_lst, f_trc_log, f_trc_err, sql_log, sql_err, sql_cmd) )
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)