6
0
mirror of https://github.com/FirebirdSQL/firebird-qa.git synced 2025-01-22 13:33:07 +01:00

More python tests

This commit is contained in:
Pavel Císař 2021-11-19 20:16:48 +01:00
parent 1ae09a95b9
commit dd67d1fcba
6 changed files with 972 additions and 405 deletions

View File

@ -36,8 +36,16 @@
# versions: 3.0
# qmid: None
from __future__ import annotations
from typing import List
import pytest
from firebird.qa import db_factory, isql_act, Action
from subprocess import run, CompletedProcess, STDOUT, PIPE
import re
import time
from threading import Thread, Barrier, Lock
from difflib import unified_diff
from firebird.qa import db_factory, python_act, Action
from firebird.driver import ShutdownMode, ShutdownMethod, SrvRepairFlag
# version: 3.0
# resources: None
@ -266,7 +274,8 @@ db_1 = db_factory(from_backup='core4236.fbk', init=init_script_1)
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
expected_stdout_1 = """
DIFF IN FIREBIRD.LOG: + VALIDATION FINISHED: 0 ERRORS, 0 WARNINGS, 0 FIXED
@ -276,9 +285,112 @@ expected_stdout_1 = """
Result: OK, launched = actual
"""
def isql_job(act: Action, b: Barrier, lock: Lock, result_list: List[str]):
    """Worker-thread body: run a heavy DML script through a child ISQL process.

    Waits on the shared barrier so that all workers (and the main thread)
    start at the same moment, then captures ISQL's combined stdout/stderr and
    appends it to *result_list* under *lock*.
    """
    dml_script = """
set bail on;
set list on;
set term ^;
execute block returns(ins_progress int) as
declare n int = 100000;
begin
ins_progress=0;
while (n > 0) do
begin
insert into test(s) values(rpad('', 500, uuid_to_char(gen_uuid())));
ins_progress = ins_progress + 1;
if (mod(ins_progress, 100) = 0) then suspend;
n = n - 1;
end
end
^ set term ;^
quit;
"""
    isql_cmd = [act.vars['isql'],
                '-user', act.db.user,
                '-password', act.db.password,
                act.db.dsn]
    # Rendezvous with the other workers before doing any real work.
    b.wait()
    completed: CompletedProcess = run(isql_cmd, input=dml_script, encoding='utf8',
                                      stdout=PIPE, stderr=STDOUT)
    with lock:
        result_list.append(completed.stdout)
@pytest.mark.version('>=3.0')
def test_1(act_1: Action, capsys):
    """Run heavy DML in many concurrent ISQL attachments, force the database
    into single-user shutdown, then check that an exclusive ("silent")
    validation succeeds and that every DML attachment was cut off with
    SQLSTATE = HY000.

    NOTE(review): the auto-generated 'Test not IMPLEMENTED' stub that used to
    shadow this test was removed; its markers now apply to the real test.
    """
    PLANNED_DML_ATTACHMENTS = 20
    WAIT_FOR_ALL_CONNECTIONS_START_JOB = 10
    lock = Lock()
    threads = []
    result_list = []
    # Start multiple ISQL instances with heavy DML job
    b = Barrier(PLANNED_DML_ATTACHMENTS + 1)
    for i in range(PLANNED_DML_ATTACHMENTS):
        isql_thread = Thread(target=isql_job, args=[act_1, b, lock, result_list])
        threads.append(isql_thread)
        isql_thread.start()
    b.wait()
    time.sleep(WAIT_FOR_ALL_CONNECTIONS_START_JOB)
    with act_1.connect_server() as srv:
        # Move database to shutdown with ability to run validation after it (prp_sm_single)
        srv.database.shutdown(database=str(act_1.db.db_path), mode=ShutdownMode.SINGLE,
                              method=ShutdownMethod.FORCED, timeout=0)
        # get firebird.log _before_ validation
        srv.info.get_log()
        log_before = srv.readlines()
        # At this point no further I/O should happen inside the database, including
        # internal engine actions that relate to backouts. This means we *must* be
        # able to run DB validation in _exclusive_ mode, like gfix -v -full does.
        #
        # Run validation that requires exclusive database access.
        # This process normally should produce NO output at all, it is "silent".
        # If the database is currently in use by the engine or some attachments then
        # it should fail with message "database <db_file> shutdown."
        try:
            srv.database.repair(database=str(act_1.db.db_path),
                                flags=SrvRepairFlag.FULL | SrvRepairFlag.VALIDATE_DB)
        except Exception as exc:
            print(f'Database repair failed with: {exc}')
        # get firebird.log _after_ validation
        srv.info.get_log()
        log_after = srv.readlines()
        # bring database online
        srv.database.bring_online(database=str(act_1.db.db_path))
    # At this point, threads should be dead
    for thread in threads:
        thread.join(1)
        if thread.is_alive():
            print(f'Thread {thread.ident} is still alive')
    # Compare logs; we are interested only in added lines that contain the
    # result of validation:
    log_diff = list(unified_diff(log_before, log_after))
    p = re.compile('validation[ ]+finished(:){0,1}[ ]+\\d+[ ]errors', re.IGNORECASE)
    for line in log_diff:
        if line.startswith('+') and p.search(line):
            print('DIFF IN FIREBIRD.LOG: ' + (' '.join(line.split()).upper()))
    #
    actual_dml_attachments = 0
    logged_shutdown_count = 0
    for sqllog in result_list:
        if 'INS_PROGRESS' in sqllog:
            actual_dml_attachments += 1
        if 'SQLSTATE = HY000' in sqllog:
            logged_shutdown_count += 1
    #
    print("Check-1: how many DML attachments really could do their job ?")
    if PLANNED_DML_ATTACHMENTS == actual_dml_attachments:
        print("Result: OK, launched = actual")
    else:
        print("Result: BAD, launched<>actual")
    #
    print("Check-2: how many sessions got SQLSTATE = HY000 on shutdown ?")
    if PLANNED_DML_ATTACHMENTS == logged_shutdown_count:
        print("Result: OK, launched = actual")
    else:
        print("Result: BAD, launched<>actual")
    # Check
    act_1.expected_stdout = expected_stdout_1
    act_1.stdout = capsys.readouterr().out
    assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@ -27,7 +27,8 @@
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
from pathlib import Path
from firebird.qa import db_factory, python_act, Action, temp_file
# version: 2.5.5
# resources: None
@ -140,7 +141,8 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
expected_stdout_1 = """
ĄČĘĢÆĖŠŚÖÜØ£
@ -159,9 +161,35 @@ expected_stdout_1 = """
atstarotās un filtrē zaļā krāsā). Dzīvnieki ar zaļo krāsu izmantošanu maskēties fona augiem.
"""
test_script_1 = temp_file('test-script.sql')
@pytest.mark.version('>=2.5.5')
def test_1(act_1: Action, test_script_1: Path):
    """Check round-trip of DOS775 (Baltic) identifiers and blob content:
    the script is written to disk in cp775 encoding and executed by ISQL
    connected with charset DOS775.

    NOTE(review): the auto-generated 'Test not IMPLEMENTED' stub that used to
    shadow this test was removed; its version marker now applies here.
    """
    # The SQL text below deliberately uses non-ASCII object names / data.
    test_script_1.write_text("""
recreate table "ĄČĘĢÆĖŠŚÖÜØ£"(
"ąčęėįšųūž" varchar(50) character set dos775
,"Õisu ja kariste järved" blob sub_type 1 character set dos775
);
commit;
show table;
show table "ĄČĘĢÆĖŠŚÖÜØ£";
insert into "ĄČĘĢÆĖŠŚÖÜØ£"("ąčęėįšųūž", "Õisu ja kariste järved")
values(
'ÓßŌŃõÕµńĶķĻļņĒŅ',
'Green - viens no trim primārās krāsas, zaļā tiek uzskatīts diapazontsvetov spektrs ar viļņa
garumu aptuveni 500-565 nanometri. Sistēma CMYK druka zaļā iegūst, sajaucot dzelteno un
zilganzaļi (cyan).Dabā, Catalpa - zaļa augs.
Krāsu zaļie augi ir dabiski, ka cilvēks etalonomzeleni.
Zaļā koku varde.
Ir plaši izplatīti dabā. Lielākā daļa augu ir zaļā krāsā, jo tie satur pigmentu fotosintēzes -
hlorofilu (hlorofils absorbē lielu daļu no sarkano stariem saules spektra, atstājot uztveri
atstarotās un filtrē zaļā krāsā). Dzīvnieki ar zaļo krāsu izmantošanu maskēties fona augiem.'
);
set list on;
set blob all;
select "ąčęėįšųūž", "Õisu ja kariste järved" as blob_content
from "ĄČĘĢÆĖŠŚÖÜØ£";
""", encoding='cp775')
    act_1.expected_stdout = expected_stdout_1
    act_1.isql(switches=['-q', '-b'], input_file=test_script_1, charset='DOS775')
    assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@ -18,7 +18,10 @@
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
import subprocess
import time
from pathlib import Path
from firebird.qa import db_factory, python_act, Action, temp_file
# version: 3.0
# resources: None
@ -288,23 +291,202 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
KILLER LOG: Found worker ? 1
KILLER LOG: Records affected: 1
KILLER LOG: DELETED_MON_ATT_ID 1
KILLER LOG: DELETED_MON_USER SYSDBA
KILLER LOG: DELETED_MON_PROTOCOL tcp
KILLER LOG: DELETED_MON_PROCESS isql
KILLER LOG: DELETED_MON_SYS_FLAG 0
WORKER LOG: Statement failed, SQLSTATE = 08003
WORKER LOG: connection shutdown
act_1 = python_act('db_1', substitutions=substitutions_1)
expected_stdout_1_killer = """
Found worker ? 1
Records affected: 1
DELETED_MON_ATT_ID 1
DELETED_MON_USER SYSDBA
DELETED_MON_PROTOCOL tcp
DELETED_MON_PROCESS isql
DELETED_MON_SYS_FLAG 0
"""
expected_stdout_1_worker = """
Statement failed, SQLSTATE = 08003
connection shutdown
"""
heavy_script_1 = temp_file('heavy_script.sql')
heavy_output_1 = temp_file('heavy_script.out')
@pytest.mark.version('>=3.0')
def test_1(act_1: Action, heavy_script_1: Path, heavy_output_1: Path):
    """Start a long-running ISQL 'worker', kill its attachment by deleting it
    from mon$attachments, and verify both the killer's output and that the
    worker got SQLSTATE = 08003 / connection shutdown.

    NOTE(review): the auto-generated 'Test not IMPLEMENTED' stub that used to
    shadow this test was removed; its version marker now applies here.
    """
    killer_sql = """
set list on;
set count on;
/*
select a.*, s.*
from mon$attachments a
left join mon$statements s on a.mon$attachment_id = s.mon$attachment_id
where
a.mon$attachment_id <> current_connection
and a.mon$system_flag is distinct from 1
;
*/
select count(*) as "Found worker ?"
from mon$attachments a
where
a.mon$attachment_id <> current_connection
and a.mon$system_flag is distinct from 1
;
delete from mon$attachments
where
mon$attachment_id <> current_connection
and mon$system_flag is distinct from 1
returning
sign(mon$attachment_id) as deleted_mon_att_id,
mon$user as deleted_mon_user,
iif(mon$remote_protocol containing 'tcp', 'tcp', null) as deleted_mon_protocol,
iif(mon$remote_process containing 'isql', 'isql', null) as deleted_mon_process,
mon$system_flag as deleted_mon_sys_flag
;
"""
    # Deeply nested windowed query over system tables: deliberately slow so
    # the worker attachment is still alive when the killer runs.
    heavy_script_1.write_text("""
set bail on;
set list on;
set count on;
with a as (
select rf.rdb$field_id fid, rr.rdb$relation_id rid, rr.rdb$relation_name rnm
from rdb$relation_fields rf join rdb$relations rr on rf.rdb$relation_name=rr.rdb$relation_name
)
,b as (
select fid, rnm, rid, iif(rid is null, 1, r) r
from (--f
select fid, rnm, rid,
iif(lag(fid) over(partition by rid order by fid) is not null
and lag(r) over(partition by rid order by fid) >= r
, r + 1, r) r
from (--e
select fid, rnm, rid,
iif(lag(fid) over(partition by rid order by fid) is not null
and lag(r) over(partition by rid order by fid) >= r
, r + 1, r) r
from (--d
select fid, rnm, rid,
iif(lag(fid) over(partition by rid order by fid) is not null
and lag(r) over(partition by rid order by fid) >= r
, r + 1, r) r
from (--c
select fid, rnm, rid,
iif(lag(fid) over(partition by rid order by fid) is not null
and lag(r) over(partition by rid order by fid) >= r
,r + 1, r) r
from (--b
select fid, rnm, rid,
iif( lag(fid) over(partition by rid order by fid) is not null
and lag(r) over(partition by rid order by fid) >= r
,r + 1, r) r
from (
select a.*, 1 r
from a
) a
) b
) c
) d
) e
) f
)
-- select max(r) r from b group by fid having max(r) < 6; -- ok
,c
as (
select fid, rnm, rid, iif(rid is null, 1, r) r
from (--f
select fid, rnm, rid,
iif(lag(fid) over(partition by rid order by fid) is not null
and lag(r) over(partition by rid order by fid) >= r
, r + 1, r) r
from (--e
select fid, rnm, rid,
iif(lag(fid) over(partition by rid order by fid) is not null
and lag(r) over(partition by rid order by fid) >= r
, r + 1, r) r
from (--d
select fid, rnm, rid,
iif(lag(fid) over(partition by rid order by fid) is not null
and lag(r) over(partition by rid order by fid) >= r
, r + 1, r) r
from (--c
select fid, rnm, rid,
iif(lag(fid) over(partition by rid order by fid) is not null
and lag(r) over(partition by rid order by fid) >= r
,r + 1, r) r
from (--b
select fid, rnm, rid,
iif( lag(fid) over(partition by rid order by fid) is not null
and lag(r) over(partition by rid order by fid) >= r
,r + 1, r) r
from (
select fid, rnm, rid, max(r) over(partition by fid) r from b
) a
) b
) c
) d
) e
) f
)
-- select * from c -- ok
,d
as (
select fid, rnm, rid, iif(rid is null, 1, r) r
from (--f
select fid, rnm, rid,
iif( lag(fid) over(partition by rid order by fid) is not null
and lag(r) over(partition by rid order by fid) >= r
, r + 1, r) r
from (--e
select fid, rnm, rid,
iif( lag(fid) over(partition by rid order by fid) is not null
and lag(r) over(partition by rid order by fid) >= r
, r + 1, r) r
from (--d
select fid, rnm, rid,
iif( lag(fid) over(partition by rid order by fid) is not null
and lag(r) over(partition by rid order by fid) >= r
, r + 1, r) r
from (--c
select fid, rnm, rid,
iif( lag(fid) over(partition by rid order by fid) is not null
and lag(r) over(partition by rid order by fid) >= r
,r + 1, r) r
from (--b
select fid, rnm, rid,
iif( lag(fid) over(partition by rid order by fid) is not null
and lag(r) over(partition by rid order by fid) >= r
,r + 1, r) r
from (
select fid, rnm, rid, max(r) over(partition by fid) r from c
) a
) b
) c
) d
) e
) f
)
select * from d rows 0;
set count off;
select 'WORKER FINISHED TOO FAST! DELAY IN TEST MUST BE REDUCED!' msg from rdb$database;
""")
    # Launch the worker ISQL asynchronously, its output goes to heavy_output_1.
    with open(heavy_output_1, mode='w') as heavy_out:
        p_heavy_sql = subprocess.Popen([act_1.vars['isql'], '-i', str(heavy_script_1),
                                        '-user', act_1.db.user,
                                        '-password', act_1.db.password, act_1.db.dsn],
                                       stdout=heavy_out, stderr=subprocess.STDOUT)
        try:
            # Give the worker time to attach and start its query, then kill it.
            time.sleep(4)
            act_1.expected_stdout = expected_stdout_1_killer
            act_1.isql(switches=[], input=killer_sql)
        finally:
            p_heavy_sql.terminate()
    #
    assert act_1.clean_stdout == act_1.clean_expected_stdout
    # And worker...
    act_1.reset()
    act_1.expected_stdout = expected_stdout_1_worker
    act_1.stdout = heavy_output_1.read_text()
    assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@ -38,12 +38,14 @@
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
from firebird.qa import db_factory, python_act, Action
from firebird.driver import SrvStatFlag
# version: 2.5.2
# resources: None
substitutions_1 = [('Average version length: [\\d]+.[\\d]+, total versions: 5, max versions: 1', 'total versions: 5, max versions: 1')]
substitutions_1 = [('Average version length: [\\d]+.[\\d]+, total versions: 5, max versions: 1',
'total versions: 5, max versions: 1')]
init_script_1 = """
recreate table test(id int, x int);
@ -237,15 +239,22 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
expected_stdout_1 = """
Average version length: 9.00, total versions: 5, max versions: 1
"""
@pytest.mark.version('>=2.5.2')
def test_1(act_1: Action):
    """Create one back-version for every row of TEST, then check that the
    services-API database statistics (sts_record_versions) report them.

    NOTE(review): the auto-generated 'Test not IMPLEMENTED' stub that used to
    shadow this test was removed; its version marker now applies here.
    """
    # Produce record versions: every row gets updated once.
    with act_1.db.connect() as con:
        c = con.cursor()
        c.execute('update test set x = -x')
        con.commit()
    # Gather statistics with record-version details via the services manager.
    act_1.svcmgr(switches=['localhost:service_mgr', 'user', act_1.db.user,
                           'password', act_1.db.password, 'action_db_stats', 'dbname',
                           str(act_1.db.db_path), 'sts_record_versions'])
    # Keep only lines with version statistics before comparing.
    act_1.stdout = '\n'.join([line for line in act_1.stdout.splitlines()
                              if 'versions:' in line.lower()])
    act_1.expected_stdout = expected_stdout_1
    assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@ -22,12 +22,14 @@
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
import time
from threading import Thread, Barrier
from firebird.qa import db_factory, python_act, Action
# version: 2.5.3
# resources: None
substitutions_1 = [('TRACE SESSION ID.*', 'TRACE SESSION ID')]
substitutions_1 = []
init_script_1 = """"""
@ -153,16 +155,58 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
TRACE SESSION ID STARTED
STATEMENT FAILED, SQLSTATE = 08001
act_1 = python_act('db_1', substitutions=substitutions_1)
#expected_stdout_1 = """
#TRACE SESSION ID STARTED
#STATEMENT FAILED, SQLSTATE = 08001
#"""
expected_stderr_1 = """
Rolling back work.
Statement failed, SQLSTATE = 08001
I/O error during "open" operation for file "some_non_existent"
-Error while trying to open file
-No such file or directory
Use CONNECT or CREATE DATABASE to specify a database
Command error: show database
Cannot get server version without database connection
"""
# NOTE(review): a leftover auto-generated stub
#     @pytest.mark.version('>=2.5.3')
#     @pytest.mark.xfail
#     def test_1(db_1):
#         pytest.fail("Test not IMPLEMENTED")
# was removed here. It was immediately shadowed by the real test_1 defined
# below (which carries its own @pytest.mark.version('>=3.0') marker), so the
# stub and its markers never took effect at collection time.
def trace_session(act: Action, b: Barrier):
    """Background-thread body: start a trace session for the test database and
    echo every trace line (upper-cased) to stdout until the session is stopped
    from outside.
    """
    config_lines = [
        '# Trace config, format for 3.0. Generated auto, do not edit!',
        f'database=%[\\\\/]{act.db.db_path.name}',
        '{',
        ' enabled = true',
        ' time_threshold = 0',
        ' log_errors = true',
        ' connection_id = 1234',
        ' log_connections = true',
        '}',
    ]
    trace_config = '\n'.join(config_lines)
    with act.connect_server() as srv:
        srv.trace.start(config=trace_config)
        # Let the main thread proceed only after tracing has started.
        b.wait()
        for trace_line in srv:
            print(trace_line.upper())
@pytest.mark.version('>=3.0')
def test_1(act_1: Action, capsys):
    """Run a trace session while ISQL fails to connect to a non-existent
    database; the expected errors must appear on ISQL's stderr and the trace
    session must still be stoppable afterwards.
    """
    sync = Barrier(2)
    tracer = Thread(target=trace_session, args=[act_1, sync])
    tracer.start()
    # Do not run ISQL until the trace session is listening.
    sync.wait()
    try:
        act_1.expected_stderr = expected_stderr_1
        act_1.isql(switches=['-n'],
                   input="connect 'localhost:some_non_existent' user 'SYSDBA' password 'masterkey'; show database; show version;")
        time.sleep(2)
    finally:
        # Stop every active trace session so the tracer thread can finish.
        with act_1.connect_server() as srv:
            for session in list(srv.trace.sessions.keys()):
                srv.trace.stop(session_id=session)
        tracer.join(1.0)
        if tracer.is_alive():
            pytest.fail('Trace thread still alive')
    # check that we are still kicking and got the expected result from isql
    assert act_1.clean_stderr == act_1.clean_expected_stderr

View File

@ -34,18 +34,40 @@
# 40CS, build 4.0.0.748: OK, 31.313s.
# 40SS, build 4.0.0.748: OK, 22.578s.
#
# [pcisar] 19.11.2021
# Small difference in reimplementation of sweep killer script
# On v4.0.0.2496 COMMIT after delete from mon$attachments fails with:
# STATEMENT FAILED, SQLSTATE = HY008, OPERATION WAS CANCELLED
# Without commit this test PASSES, i.e. sweep is terminated with all outputs as expected
#
# tracker_id: CORE-4337
# min_versions: ['3.0']
# versions: 3.0
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
import re
import time
import subprocess
from difflib import unified_diff
from threading import Thread, Barrier
from pathlib import Path
from firebird.qa import db_factory, python_act, Action, temp_file
from firebird.driver import DbWriteMode
# version: 3.0
# resources: None
substitutions_1 = [('[ ]+', ' '), ('TRACE_LOG:.* SWEEP_START', 'TRACE_LOG: SWEEP_START'), ('TRACE_LOG:.* SWEEP_FAILED', 'TRACE_LOG: SWEEP_FAILED'), ('TRACE_LOG:.* ERROR AT JPROVIDER::ATTACHDATABASE', 'TRACE_LOG: ERROR AT JPROVIDER::ATTACHDATABASE'), ('.*KILLED BY DATABASE ADMINISTRATOR.*', ''), ('TRACE_LOG:.*GFIX.EXE.*', 'TRACE_LOG: GFIX.EXE'), ('OIT [0-9]+', 'OIT'), ('OAT [0-9]+', 'OAT'), ('OST [0-9]+', 'OST'), ('NEXT [0-9]+', 'NEXT'), ('FIREBIRD.LOG:.* ERROR DURING SWEEP OF .*BUGS.CORE_4337.FDB.*', 'ERROR DURING SWEEP OF BUGS.CORE_4337.FDB')]
substitutions_1 = [('[ ]+', ' '), ('TRACE_LOG:.* SWEEP_START', 'TRACE_LOG: SWEEP_START'),
('TRACE_LOG:.* SWEEP_FAILED', 'TRACE_LOG: SWEEP_FAILED'),
('TRACE_LOG:.* ERROR AT JPROVIDER::ATTACHDATABASE',
'TRACE_LOG: ERROR AT JPROVIDER::ATTACHDATABASE'),
('.*KILLED BY DATABASE ADMINISTRATOR.*', ''),
('TRACE_LOG:.*GFIX.EXE.*', 'TRACE_LOG: GFIX.EXE'),
('OIT [0-9]+', 'OIT'), ('OAT [0-9]+', 'OAT'), ('OST [0-9]+', 'OST'),
('NEXT [0-9]+', 'NEXT'),
('FIREBIRD.LOG:.* ERROR DURING SWEEP OF .*TEST.FDB.*',
'FIREBIRD.LOG: + ERROR DURING SWEEP OF TEST.FDB')]
init_script_1 = """"""
@ -535,12 +557,46 @@ db_1 = db_factory(page_size=16384, sql_dialect=3, init=init_script_1)
# cleanup( [i.name for i in (f_work_sql,f_work_log,f_work_err,f_fblog_before,f_trccfg,f_trclog,f_trcerr,f_wait_sql,f_wait_log,f_wait_err,f_gfix_log,f_trclst,f_db_reset_log,f_fblog_after,f_diff_txt) ] )
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
make_garbage_1 = """
set list on;
select current_time from rdb$database;
recreate table t(s01 varchar(4000));
commit;
set term ^;
execute block as
declare n int = 20000;
declare w int;
begin
select f.rdb$field_length
from rdb$relation_fields rf
join rdb$fields f on rf.rdb$field_source=f.rdb$field_name
where rf.rdb$relation_name=upper('t')
into w;
while (n>0) do
insert into t(s01) values( rpad('', :w, uuid_to_char(gen_uuid())) ) returning :n-1 into n;
end^
set term ;^
commit;
select count(*) check_total_cnt, min(char_length(s01)) check_min_length from t;
create index t_s01 on t(s01);
commit;
delete from t;
commit;
-- overall time for data filling , create index and delete all rows: ~ 3 seconds.
-- This database requires about 4 seconds to be swept (checked on P-IV 3.0 GHz).
select current_time from rdb$database;
--show database;
quit;
"""
expected_stdout_1 = """
POINT BEFORE GFIX -SWEEP.
CONNECTION SHUTDOWN
POINT AFTER GFIX -SWEEP.
ISQL LOG: WAITING FOR GFIX START SWEEP
ISQL LOG: STARTING TO DELETE GFIX PROCESS FROM MON$ATTACHMENTS
ISQL LOG: RECORDS AFFECTED: 1
@ -548,13 +604,149 @@ expected_stdout_1 = """
TRACE_LOG: SWEEP_FAILED
FIREBIRD.LOG: + SWEEP IS STARTED BY SYSDBA
FIREBIRD.LOG: + OIT, OAT, OST, NEXT
FIREBIRD.LOG: + ERROR DURING SWEEP OF BUGS.CORE_4337.FDB
FIREBIRD.LOG: + ERROR DURING SWEEP OF TEST.FDB
FIREBIRD.LOG: + CONNECTION SHUTDOWN
"""
sweep_killer_script_1 = temp_file('killer.sql')
sweep_killer_out_1 = temp_file('killer.out')
sweep_killer_err_1 = temp_file('killer.err')
sweep_out_1 = temp_file('sweep.out')
def trace_session(act: Action, b: Barrier):
    """Background-thread body: run a trace session with sweep logging enabled
    and echo every trace line (upper-cased) until the session is stopped.
    """
    config_lines = [
        '# Trace config, format for 3.0. Generated auto, do not edit!',
        f'database=%[\\\\/]{act.db.db_path.name}',
        '{',
        ' enabled = true',
        ' time_threshold = 0',
        ' log_errors = true',
        ' log_sweep = true',
        ' log_connections = true',
        '}',
    ]
    with act.connect_server() as srv:
        srv.trace.start(config='\n'.join(config_lines))
        # Let the main thread proceed only after tracing has started.
        b.wait()
        for trace_line in srv:
            print(trace_line.upper())
@pytest.mark.version('>=3.0')
def test_1(act_1: Action, capsys, sweep_killer_script_1: Path, sweep_killer_out_1: Path,
           sweep_killer_err_1: Path, sweep_out_1: Path):
    """Kill a running gfix -sweep from another attachment and verify the
    outcome in gfix output, the killer ISQL log, the trace log and the
    firebird.log diff.

    NOTE(review): the auto-generated 'Test not IMPLEMENTED' stub that used to
    shadow this test was removed; its version marker now applies here.
    """
    sweep_killer_script_1.write_text("""
set list on;
recreate table tmp4wait(id int);
commit;
insert into tmp4wait(id) values(1);
commit;
set transaction lock timeout 2; ------------------ D E L A Y
update tmp4wait set id=id;
select 'Waiting for GFIX start SWEEP' as " " from rdb$database;
set term ^;
execute block as
begin
in autonomous transaction do
begin
update tmp4wait set id=id;
when any do
begin
-- NOP --
end
end
end
^
set term ;^
commit;
--select MON$PAGE_BUFFERS from mon$database;
select 'Starting to delete GFIX process from mon$attachments' as " " from rdb$database;
set count on;
delete from mon$attachments where mon$remote_process containing 'gfix';
-- On v4.0.0.2496 COMMIT fails with: STATEMENT FAILED, SQLSTATE = HY008, OPERATION WAS CANCELLED
-- Without commit this test PASSES, i.e. sweep is terminated with all outputs as expected
-- commit;
set count off;
select 'Finished deleting GFIX process from mon$attachments' as " " from rdb$database;
""")
    with act_1.connect_server() as srv:
        # get firebird log before action
        srv.info.get_log()
        log_before = srv.readlines()
        # Change FW to OFF in order to speed up initial data filling
        srv.database.set_write_mode(database=str(act_1.db.db_path), mode=DbWriteMode.ASYNC)
        # make garbage
        act_1.isql(switches=[], input=make_garbage_1)
        # REDUCE number of cache buffers in DB header so sweep works as long as possible
        srv.database.set_default_cache_size(database=str(act_1.db.db_path), size=100)
        # Change FW to ON (in order to make sweep life harder :))
        srv.database.set_write_mode(database=str(act_1.db.db_path), mode=DbWriteMode.SYNC)
    # Start trace
    b = Barrier(2)
    trace_thread = Thread(target=trace_session, args=[act_1, b])
    trace_thread.start()
    b.wait()
    try:
        # Launch (async.) ISQL which will make a small delay and then kill the GFIX attachment
        with open(sweep_killer_out_1, 'w') as killer_out, \
                open(sweep_killer_err_1, 'w') as killer_err:
            p_killer = subprocess.Popen([act_1.vars['isql'],
                                         '-i', str(sweep_killer_script_1),
                                         '-user', act_1.db.user,
                                         '-password', act_1.db.password, act_1.db.dsn],
                                        stdout=killer_out, stderr=killer_err)
            try:
                time.sleep(2)
                # Launch GFIX -SWEEP (sync.). It should be killed by the ISQL we
                # launched previously, after the delay in its script expires:
                act_1.expected_stderr = 'We expect errors'
                act_1.gfix(switches=['-sweep', act_1.db.dsn])
                gfix_out = act_1.stdout
                gfix_err = act_1.stderr
            finally:
                p_killer.terminate()
                time.sleep(2)
    finally:
        # stop trace
        with act_1.connect_server() as srv:
            for session in list(srv.trace.sessions.keys()):
                srv.trace.stop(session_id=session)
        trace_thread.join(1.0)
        if trace_thread.is_alive():
            pytest.fail('Trace thread still alive')
    #
    trace_log = capsys.readouterr().out
    # get firebird log after action
    with act_1.connect_server() as srv:
        srv.info.get_log()
        log_after = srv.readlines()
    # construct final stdout for checks
    print(gfix_out.upper())
    print(gfix_err.upper())
    # sweep killer output
    for line in sweep_killer_out_1.read_text().splitlines():
        if line:
            print('ISQL LOG:', line.upper())
    for line in sweep_killer_err_1.read_text().splitlines():
        if line:
            print('ISQL ERR:', line.upper())
    # Trace log
    found_sweep_failed = 0
    for line in trace_log.splitlines():
        if 'SWEEP_FAILED' in line:
            print('TRACE_LOG:' + (' '.join(line.split()).upper()))
            found_sweep_failed = 1
        if found_sweep_failed == 1 and ('ATTACH_DATABASE' in line):
            print('TRACE: ATTACH DETECTED AFTER SWEEP FAILED! ')
            print('TRACE_LOG:' + (' '.join(line.split()).upper()))
    # firebird.log diff: keep only added lines about sweep / connection / TIP counters
    pattern = re.compile("\\+[\\s]+OIT[ ]+[0-9]+,[\\s]*OAT[\\s]+[0-9]+,[\\s]*OST[\\s]+[0-9]+,[\\s]*NEXT[\\s]+[0-9]+")
    for line in unified_diff(log_before, log_after):
        if line.startswith('+'):
            line = line.upper()
            if 'SWEEP' in line or 'CONNECTION' in line or pattern.match(line):
                print('FIREBIRD.LOG: ' + (' '.join(line.split())))
    # check
    act_1.expected_stdout = expected_stdout_1
    act_1.stdout = capsys.readouterr().out
    assert act_1.clean_stdout == act_1.clean_expected_stdout