mirror of https://github.com/FirebirdSQL/firebird-qa.git synced 2025-02-02 02:40:42 +01:00

Tests updated to Path support in driver 1.3.5

Pavel Císař 2021-12-07 13:36:20 +01:00
parent 16fc93cc83
commit 264ed4d657
47 changed files with 112 additions and 114 deletions
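
Every hunk below applies the same mechanical change: firebird-driver 1.3.5 accepts pathlib.Path objects for the database and backup-file arguments of the services API, so the tests no longer wrap act_1.db.db_path and temp_file fixtures in str(). A minimal before/after sketch of the pattern (assuming the usual db_1/act_1 fixture pair from firebird.qa; fixture arguments are elided):

    from io import BytesIO
    from firebird.qa import db_factory, python_act, Action

    db_1 = db_factory()
    act_1 = python_act('db_1')

    def test_1(act_1: Action):
        backup = BytesIO()
        with act_1.connect_server() as srv:
            # Before (driver < 1.3.5): Path values had to be stringified:
            #   srv.database.local_backup(database=str(act_1.db.db_path), backup_stream=backup)
            # After (driver >= 1.3.5): a pathlib.Path is accepted directly:
            srv.database.local_backup(database=act_1.db.db_path, backup_stream=backup)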

View File

@@ -58,10 +58,10 @@ act_1 = python_act('db_1', substitutions=substitutions_1)
 @pytest.mark.version('>=2.0.2')
 def test_1(act_1: Action):
     with act_1.connect_server() as srv, act_1.db.connect() as con:
-        srv.database.shutdown(database=str(act_1.db.db_path), mode=ShutdownMode.FULL,
+        srv.database.shutdown(database=act_1.db.db_path, mode=ShutdownMode.FULL,
                               method=ShutdownMethod.FORCED, timeout=0)
         c = con.cursor()
         with pytest.raises(DatabaseError, match='.*shutdown'):
             c.execute('select 1 from rdb$database')
         #
-        srv.database.bring_online(database=str(act_1.db.db_path))
+        srv.database.bring_online(database=act_1.db.db_path)

View File

@@ -562,14 +562,14 @@ def test_1(act_1: Action):
     # backup + restore _WITHOUT_ building indices:
     backup = BytesIO()
     with act_1.connect_server() as srv:
-        srv.database.local_backup(database=str(act_1.db.db_path), backup_stream=backup)
+        srv.database.local_backup(database=act_1.db.db_path, backup_stream=backup)
         backup.seek(0)
-        srv.database.local_restore(backup_stream=backup, database=str(act_1.db.db_path),
+        srv.database.local_restore(backup_stream=backup, database=act_1.db.db_path,
                                    flags=SrvRestoreFlag.DEACTIVATE_IDX | SrvRestoreFlag.REPLACE)
         # Get FB log before validation, run validation and get FB log after it:
         srv.info.get_log()
         log_before = srv.readlines()
-        srv.database.repair(database=str(act_1.db.db_path), flags=SrvRepairFlag.CORRUPTION_CHECK)
+        srv.database.repair(database=act_1.db.db_path, flags=SrvRepairFlag.CORRUPTION_CHECK)
         srv.info.get_log()
         log_after = srv.readlines()
     # Extract metadata from restored DB
@@ -580,7 +580,7 @@ def test_1(act_1: Action):
     # to drop will fail in test teardown as connect trigger refers to index that was not activated
     with act_1.connect_server() as srv:
         backup.seek(0)
-        srv.database.local_restore(backup_stream=backup, database=str(act_1.db.db_path),
+        srv.database.local_restore(backup_stream=backup, database=act_1.db.db_path,
                                    flags=SrvRestoreFlag.REPLACE)
     #
     diff_meta = ''.join(unified_diff(meta_1.splitlines(), meta_2.splitlines()))

View File

@@ -91,9 +91,9 @@ act_1 = python_act('db_1', substitutions=substitutions_1)
 def test_1(act_1: Action):
     backup = BytesIO()
     with act_1.connect_server() as srv:
-        srv.database.local_backup(database=str(act_1.db.db_path), backup_stream=backup)
+        srv.database.local_backup(database=act_1.db.db_path, backup_stream=backup)
         backup.seek(0)
         # test fails if restore raises an exception
-        srv.database.local_restore(backup_stream=backup, database=str(act_1.db.db_path),
+        srv.database.local_restore(backup_stream=backup, database=act_1.db.db_path,
                                    flags=SrvRestoreFlag.ONE_AT_A_TIME | SrvRestoreFlag.REPLACE)

View File

@@ -218,11 +218,11 @@ file_1 = temp_file('pytest-run.fbk')
 def test_1(act_1: Action, file_1):
     src_backup = act_1.vars['backups'] / 'core1999_30.fbk'
     with act_1.connect_server() as srv:
-        srv.database.restore(database=str(act_1.db.db_path), backup=str(src_backup),
+        srv.database.restore(database=act_1.db.db_path, backup=src_backup,
                              flags=SrvRestoreFlag.REPLACE,
                              verbose=True, stats='TDWR')
         restore_log = srv.readlines()
-        srv.database.backup(database=str(act_1.db.db_path), backup=str(file_1),
+        srv.database.backup(database=act_1.db.db_path, backup=file_1,
                             verbose=True, stats='TDWR')
         backup_log = srv.readlines()
         #

View File

@@ -87,7 +87,7 @@ def test_1(act_1: Action):
     assert act_1.clean_stdout == act_1.clean_expected_stdout
     #
     with act_1.connect_server() as srv:
-        srv.database.set_access_mode(database=str(act_1.db.db_path), mode=DbAccessMode.READ_ONLY)
+        srv.database.set_access_mode(database=act_1.db.db_path, mode=DbAccessMode.READ_ONLY)
     #
     act_1.reset()
     act_1.expected_stdout = expected_stdout_1_b

View File

@@ -522,7 +522,7 @@ act_1 = python_act('db_1', substitutions=substitutions_1)
 def test_1(act_1: Action):
     # CHANGE FW to OFF
     with act_1.connect_server() as srv:
-        srv.database.set_write_mode(database=str(act_1.db.db_path), mode=DbWriteMode.ASYNC)
+        srv.database.set_write_mode(database=act_1.db.db_path, mode=DbWriteMode.ASYNC)
     # 1. FIRST RUN DML_TEST
     act_1.script = test_script_1
     act_1.execute()
@@ -535,14 +535,14 @@ def test_1(act_1: Action):
     # [pcisar] I don't understand the point of validation as the original test does not check
     # that validation passed
     with act_1.connect_server() as srv:
-        srv.database.validate(database=str(act_1.db.db_path))
+        srv.database.validate(database=act_1.db.db_path)
         validate_log_1 = srv.readlines()
     # 4. TRY TO BACKUP AND RESTORE
     with act_1.connect_server() as srv:
         backup = BytesIO()
-        srv.database.local_backup(database=str(act_1.db.db_path), backup_stream=backup)
+        srv.database.local_backup(database=act_1.db.db_path, backup_stream=backup)
         backup.seek(0)
-        srv.database.local_restore(backup_stream=backup, database=str(act_1.db.db_path),
+        srv.database.local_restore(backup_stream=backup, database=act_1.db.db_path,
                                    flags=SrvRestoreFlag.REPLACE)
         backup.close()
     # 5. EXTRACT METADATA-2
@@ -556,7 +556,7 @@ def test_1(act_1: Action):
     run_dml_log_2 = act_1.stdout
     # 7. VALIDATE DATABASE-2
     with act_1.connect_server() as srv:
-        srv.database.validate(database=str(act_1.db.db_path))
+        srv.database.validate(database=act_1.db.db_path)
         validate_log_2 = srv.readlines()
     # 8. CHECKS
     # 1) STDERR for: create DB, backup, restore, validation-1 and validation-2 - they all must be EMPTY.

View File

@@ -155,14 +155,14 @@ def test_1(act_1: Action):
     act_1.execute()
     backup = BytesIO()
     with act_1.connect_server() as srv:
-        srv.database.local_backup(database=str(act_1.db.db_path), backup_stream=backup,
+        srv.database.local_backup(database=act_1.db.db_path, backup_stream=backup,
                                   flags=SrvBackupFlag.NO_TRIGGERS)
         backup.seek(0)
         act_1.reset()
         act_1.expected_stdout = expected_stdout_1
         act_1.isql(switches=['-nod'], input=check_sql)
         assert act_1.clean_stdout == act_1.clean_expected_stdout
-        srv.database.local_restore(backup_stream=backup, database=str(act_1.db.db_path),
+        srv.database.local_restore(backup_stream=backup, database=act_1.db.db_path,
                                    flags=SrvRestoreFlag.REPLACE)
         backup.close()
     act_1.reset()

View File

@@ -281,31 +281,27 @@ def test_1(act_1: Action, file_1: Path):
     with act_1.connect_server() as srv:
         backup = BytesIO()
         # Run-1: try to skip BACKUP of data for tables 'test_0a' and 'test_0b'.
-        srv.database.local_backup(database=str(act_1.db.db_path), backup_stream=backup,
+        srv.database.local_backup(database=act_1.db.db_path, backup_stream=backup,
                                   skip_data='test_0[[:alpha:]]')
         backup.seek(0)
-        srv.database.local_restore(backup_stream=backup, database=str(file_1))
+        srv.database.local_restore(backup_stream=backup, database=file_1)
         # check
         act_1.expected_stdout = expected_stdout_1_a
-        act_1.isql(switches=['-user', act_1.db.user,
-                             '-password', act_1.db.password,
-                             str(file_1)], input=check_script, connect_db=False)
+        act_1.isql(switches=[str(file_1)], input=check_script, connect_db=False)
         assert act_1.clean_stdout == act_1.clean_expected_stdout
         # Run-2: try to skip RESTORE of data for tables 'test_01' and 'test_02'.
         if file_1.is_file():
             file_1.unlink()
         backup.close()
         backup = BytesIO()
-        srv.database.local_backup(database=str(act_1.db.db_path), backup_stream=backup)
+        srv.database.local_backup(database=act_1.db.db_path, backup_stream=backup)
         backup.seek(0)
-        srv.database.local_restore(backup_stream=backup, database=str(file_1),
+        srv.database.local_restore(backup_stream=backup, database=file_1,
                                    skip_data='test_0[[:digit:]]')
         # check
         act_1.reset()
         act_1.expected_stdout = expected_stdout_1_b
-        act_1.isql(switches=['-user', act_1.db.user,
-                             '-password', act_1.db.password,
-                             str(file_1)], input=check_script, connect_db=False)
+        act_1.isql(switches=[str(file_1)], input=check_script, connect_db=False)
         assert act_1.clean_stdout == act_1.clean_expected_stdout
         # Run-3: try to skip BACKUP of data for table "опечатка".
         srv.encoding = 'utf8'
@@ -313,13 +309,11 @@ def test_1(act_1: Action, file_1: Path):
             file_1.unlink()
         backup.close()
         backup = BytesIO()
-        srv.database.local_backup(database=str(act_1.db.db_path), backup_stream=backup,
+        srv.database.local_backup(database=act_1.db.db_path, backup_stream=backup,
                                   skip_data='(о|а)(п|ч)(е|и)(п|ч)(а|я)(т|д)(к|г)(о|а)')
         backup.seek(0)
-        srv.database.local_restore(backup_stream=backup, database=str(file_1))
+        srv.database.local_restore(backup_stream=backup, database=file_1)
         # check
         act_1.reset()
         act_1.expected_stdout = expected_stdout_1_c
-        act_1.isql(switches=['-user', act_1.db.user,
-                             '-password', act_1.db.password,
-                             str(file_1)], input=check_script, connect_db=False)
+        act_1.isql(switches=[str(file_1)], input=check_script, connect_db=False)

View File

@@ -186,7 +186,7 @@ expected_stdout_1_b = """
 def test_1(act_1: Action, nbak_file_base: Path, nbak_file_add: Path):
     with act_1.connect_server() as srv, act_1.db.connect() as con:
         # Backup base database
-        srv.database.nbackup(database=str(act_1.db.db_path), backup=str(nbak_file_base),
+        srv.database.nbackup(database=act_1.db.db_path, backup=nbak_file_base,
                              level=0)
         c = con.cursor()
         # get db GUID
@@ -196,19 +196,19 @@ def test_1(act_1: Action, nbak_file_base: Path, nbak_file_add: Path):
         c.execute("insert into test(id,s,t,b) values(1, 'qwerty', '11.12.2013 14:15:16.178', 'foo-rio-bar')")
         con.commit()
         # Backup changes
-        srv.database.nbackup(database=str(act_1.db.db_path), backup=str(nbak_file_add),
+        srv.database.nbackup(database=act_1.db.db_path, backup=nbak_file_add,
                              guid=db_guid)
         # Restore inplace
-        srv.database.nrestore(flags=SrvNBackupFlag.IN_PLACE, database=str(nbak_file_base),
+        srv.database.nrestore(flags=SrvNBackupFlag.IN_PLACE, database=nbak_file_base,
                               backups=[str(nbak_file_add)])
         # Check restored database
         act_1.expected_stdout = expected_stdout_1_a
-        act_1.isql(switches=['-user', act_1.db.user, '-password', act_1.db.password, str(nbak_file_base)],
+        act_1.isql(switches=[str(nbak_file_base)],
                    connect_db=False,
                    input="set list on;set count on;set blob all;select id,s,t,b as blob_id from test;")
         assert act_1.clean_stdout == act_1.clean_expected_stdout
         # Validate restored database
-        srv.database.validate(database=str(nbak_file_base))
+        srv.database.validate(database=nbak_file_base)
         act_1.reset()
         act_1.expected_stdout = expected_stdout_1_b
         act_1.stdout = '\n'.join(srv.readlines())

View File

@@ -173,7 +173,7 @@ act_1 = python_act('db_1', substitutions=substitutions_1)
 def test_1(act_1: Action):
     # Change FW to OFF in order to speed up initial data filling:
     with act_1.connect_server() as srv:
-        srv.database.set_write_mode(database=str(act_1.db.db_path), mode=DbWriteMode.ASYNC)
+        srv.database.set_write_mode(database=act_1.db.db_path, mode=DbWriteMode.ASYNC)
     # prepare DB for testing: create a lot of tables:
     num_of_tables = 1000
     sql_ddl = f'''

View File

@@ -81,6 +81,6 @@ Validation finished
 def test_1(act_1: Action):
     act_1.expected_stdout = expected_stdout_1
     with act_1.connect_server() as srv:
-        srv.database.validate(database=str(act_1.db.db_path))
+        srv.database.validate(database=act_1.db.db_path)
         act_1.stdout = '\n'.join(srv.readlines())
     assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@@ -309,19 +309,19 @@ def test_1(act_1: Action, work_script_1: Path):
     select '-- shutdown me now --' from rdb$database;
     ''')
     with act_1.connect_server() as srv:
-        srv.database.set_sweep_interval(database=str(act_1.db.db_path), interval=100)
-        srv.database.set_write_mode(database=str(act_1.db.db_path), mode=DbWriteMode.ASYNC)
+        srv.database.set_sweep_interval(database=act_1.db.db_path, interval=100)
+        srv.database.set_write_mode(database=act_1.db.db_path, mode=DbWriteMode.ASYNC)
         p_work_sql = subprocess.Popen([act_1.vars['isql'], '-i', str(work_script_1),
                                        '-user', act_1.db.user,
                                        '-password', act_1.db.password, act_1.db.dsn],
                                       stderr = subprocess.STDOUT)
         time.sleep(3)
         try:
-            srv.database.shutdown(database=str(act_1.db.db_path), mode=ShutdownMode.FULL,
+            srv.database.shutdown(database=act_1.db.db_path, mode=ShutdownMode.FULL,
                                   method=ShutdownMethod.FORCED, timeout=0)
         finally:
             p_work_sql.terminate()
-        srv.database.bring_online(database=str(act_1.db.db_path))
+        srv.database.bring_online(database=act_1.db.db_path)
         srv.info.get_log()
         fblog_before = srv.readlines()
     with act_1.db.connect() as con_for_sweep_start:

View File

@@ -253,7 +253,7 @@ def test_1(act_1: Action, isql_script: Path, capsys):
                                   stderr=subprocess.STDOUT)
         time.sleep(2)
         # LAUNCH SWEEP while ISQL is working
-        srv.database.sweep(database=str(act_1.db.db_path))
+        srv.database.sweep(database=act_1.db.db_path)
         p_isql.terminate()
         # Get content of firebird.log AFTER test
         srv.info.get_log()

View File

@@ -33,6 +33,7 @@
 # qmid: None
 import pytest
+import time
 from threading import Thread, Barrier
 from io import BytesIO
 from firebird.qa import db_factory, python_act, Action, temp_file
@@ -289,12 +290,13 @@ def test_1(act_1: Action, capsys, temp_file_1):
     with act_1.connect_server() as srv:
         # Make some service requests
         b.wait()
-        srv.database.set_sweep_interval(database=str(act_1.db.db_path), interval=1234321)
-        srv.database.get_statistics(database=str(act_1.db.db_path), flags=SrvStatFlag.HDR_PAGES)
+        srv.database.set_sweep_interval(database=act_1.db.db_path, interval=1234321)
+        srv.database.get_statistics(database=act_1.db.db_path, flags=SrvStatFlag.HDR_PAGES)
         srv.wait()
-        srv.database.backup(database=str(act_1.db.db_path), backup=str(temp_file_1))
+        srv.database.backup(database=act_1.db.db_path, backup=temp_file_1)
         srv.wait()
         #
+        time.sleep(2)
         for session in list(srv.trace.sessions.keys()):
             srv.trace.stop(session_id=session)
     trace_thread.join(2.0)

View File

@@ -309,9 +309,9 @@ def test_1(act_1: Action):
         work_thread.start()
         time.sleep(2)
         #
-        srv.database.shutdown(database=str(act_1.db.db_path), mode=ShutdownMode.FULL,
+        srv.database.shutdown(database=act_1.db.db_path, mode=ShutdownMode.FULL,
                               method=ShutdownMethod.FORCED, timeout=0)
-        srv.database.bring_online(database=str(act_1.db.db_path))
+        srv.database.bring_online(database=act_1.db.db_path)
         #
         srv.info.get_log()
         log_after = srv.readlines()

View File

@@ -62,9 +62,9 @@ expected_stdout_1 = """
 def test_1(act_1: Action):
     with act_1.connect_server() as srv:
         backup = BytesIO()
-        srv.database.local_backup(database=str(act_1.db.db_path), backup_stream=backup)
+        srv.database.local_backup(database=act_1.db.db_path, backup_stream=backup)
         backup.seek(0)
-        srv.database.local_restore(backup_stream=backup, database=str(act_1.db.db_path),
+        srv.database.local_restore(backup_stream=backup, database=act_1.db.db_path,
                                    flags=SrvRestoreFlag.REPLACE)
     act_1.expected_stdout = expected_stdout_1
     act_1.isql(switches=[], input="show sequ g1; show sequ g2;")
@@ -111,9 +111,9 @@ expected_stdout_2 = """
 def test_2(act_2: Action):
     with act_2.connect_server() as srv:
         backup = BytesIO()
-        srv.database.local_backup(database=str(act_2.db.db_path), backup_stream=backup)
+        srv.database.local_backup(database=act_2.db.db_path, backup_stream=backup)
         backup.seek(0)
-        srv.database.local_restore(backup_stream=backup, database=str(act_2.db.db_path),
+        srv.database.local_restore(backup_stream=backup, database=act_2.db.db_path,
                                    flags=SrvRestoreFlag.REPLACE)
     act_2.expected_stdout = expected_stdout_2
     act_2.isql(switches=[], input="show sequ g1; show sequ g2;")

View File

@@ -474,7 +474,7 @@ def test_1(act_1: Action, capsys):
     NUM_ROWS_TO_BE_ADDED = 45000
     # Change FW to OFF in order to speed up initial data filling
     with act_1.connect_server() as srv:
-        srv.database.set_write_mode(database=str(act_1.db.db_path), mode=DbWriteMode.ASYNC)
+        srv.database.set_write_mode(database=act_1.db.db_path, mode=DbWriteMode.ASYNC)
     # Make initial data filling into PERMANENT table for retrieving later number of data pages
     # (it should be the same for any kind of tables, including GTTs):
     with act_1.db.connect() as con:

View File

@@ -40,7 +40,7 @@ def test_1(act_1: Action):
     act_1.gfix(switches=['-user', act_1.db.user, '-password', act_1.db.password,
                          '-shut', 'full', '-force', '0', str(act_1.db.db_path)])
     with act_1.connect_server() as srv:
-        srv.database.get_statistics(database=str(act_1.db.db_path), flags=SrvStatFlag.HDR_PAGES)
+        srv.database.get_statistics(database=act_1.db.db_path, flags=SrvStatFlag.HDR_PAGES)
         stats = srv.readlines()
     act_1.gfix(switches=['-user', act_1.db.user, '-password', act_1.db.password,
                          '-online', str(act_1.db.db_path)])

View File

@@ -21,6 +21,7 @@
 import pytest
 import re
+import time
 from threading import Thread, Barrier
 from firebird.qa import db_factory, python_act, Action
@@ -334,8 +335,9 @@ def check_sweep(act_1: Action, log_sweep: bool):
     b.wait()
     with act_1.connect_server() as srv:
         # Run sweep
-        srv.database.sweep(database=str(act_1.db.db_path))
+        srv.database.sweep(database=act_1.db.db_path)
         # Stop trace
+        time.sleep(2)
         for session in list(srv.trace.sessions.keys()):
             srv.trace.stop(session_id=session)
     trace_thread.join(1.0)

View File

@@ -51,12 +51,12 @@ fbk_file_1 = temp_file('test.fbk')
 @pytest.mark.version('>=2.5')
 def test_1(act_1: Action, fbk_file_1: Path):
     with act_1.connect_server() as srv:
-        srv.database.backup(database=str(act_1.db.db_path), backup=str(fbk_file_1))
+        srv.database.backup(database=act_1.db.db_path, backup=fbk_file_1)
         srv.wait()
         # Try to overwrite the existing database file
         with pytest.raises(DatabaseError,
                            match='atabase .* already exists. To replace it, use the -REP switch'):
-            srv.database.restore(database=str(act_1.db.db_path), backup=str(fbk_file_1))
+            srv.database.restore(database=act_1.db.db_path, backup=fbk_file_1)
             srv.wait()

View File

@@ -325,9 +325,9 @@ def test_1(act_1: Action):
     #
     backup = BytesIO()
     with act_1.connect_server() as srv:
-        srv.database.local_backup(database=str(act_1.db.db_path), backup_stream=backup)
+        srv.database.local_backup(database=act_1.db.db_path, backup_stream=backup)
         backup.seek(0)
-        srv.database.local_restore(database=str(act_1.db.db_path), backup_stream=backup,
+        srv.database.local_restore(database=act_1.db.db_path, backup_stream=backup,
                                    flags=SrvRestoreFlag.REPLACE)
     # gather metadata and test script result after backup & restore
     act_1.reset()

View File

@@ -584,7 +584,7 @@ def test_1(act_1: Action, capsys):
     act_1.isql(switches=[], input=sql_ddl)
     # Temporarily change FW to OFF in order to make DML faster:
     with act_1.connect_server() as srv:
-        srv.database.set_write_mode(database=str(act_1.db.db_path), mode=DbWriteMode.ASYNC)
+        srv.database.set_write_mode(database=act_1.db.db_path, mode=DbWriteMode.ASYNC)
     #
     sql_data = f"""
     set term ^;
@@ -617,7 +617,7 @@ def test_1(act_1: Action, capsys):
     act_1.isql(switches=['-nod'], input=sql_data)
     # Restore FW to ON (make sweep do its work "harder"):
     with act_1.connect_server() as srv:
-        srv.database.set_write_mode(database=str(act_1.db.db_path), mode=DbWriteMode.SYNC)
+        srv.database.set_write_mode(database=act_1.db.db_path, mode=DbWriteMode.SYNC)
     # Trace
     b_trace = Barrier(2)
     trace_thread = Thread(target=trace_session, args=[act_1, b_trace])
@@ -656,7 +656,7 @@ def test_1(act_1: Action, capsys):
         DTS_END_FOR_ATTACHMENTS = datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S')
         # Move database to shutdown in order to stop sweep
         with act_1.connect_server() as srv:
-            srv.database.shutdown(database=str(act_1.db.db_path), mode=ShutdownMode.FULL,
+            srv.database.shutdown(database=act_1.db.db_path, mode=ShutdownMode.FULL,
                                   method=ShutdownMethod.FORCED, timeout=0)
     finally:
         # Kill sweep
@@ -673,7 +673,7 @@ def test_1(act_1: Action, capsys):
     # Return database online in order to check number of attachments that were established
     # while sweep was running
     with act_1.connect_server() as srv:
-        srv.database.bring_online(database=str(act_1.db.db_path))
+        srv.database.bring_online(database=act_1.db.db_path)
     # Check: number of ISQL attachments between DTS_BEG_FOR_ATTACHMENTS and
    # DTS_END_FOR_ATTACHMENTS must be equal to 'PLANNED_ATTACH_CNT'
     #

View File

@@ -143,9 +143,9 @@ def test_1(act_1: Action):
     src_timestamp1 = os.path.getmtime(act_1.db.db_path)
     backup = BytesIO()
     with act_1.connect_server() as srv:
-        srv.database.local_backup(database=str(act_1.db.db_path), backup_stream=backup)
+        srv.database.local_backup(database=act_1.db.db_path, backup_stream=backup)
         backup.seek(0)
-        srv.database.local_restore(database=str(act_1.db.db_path), backup_stream=backup,
+        srv.database.local_restore(database=act_1.db.db_path, backup_stream=backup,
                                    flags=SrvRestoreFlag.REPLACE)
     src_timestamp2 = os.path.getmtime(act_1.db.db_path)
     assert src_timestamp2 - src_timestamp1 > 0

View File

@@ -334,7 +334,7 @@ def test_1(act_1: Action, capsys):
     time.sleep(WAIT_FOR_ALL_CONNECTIONS_START_JOB)
     with act_1.connect_server() as srv:
         # Move database to shutdown, with the ability to run validation afterwards (prp_sm_single)
-        srv.database.shutdown(database=str(act_1.db.db_path), mode=ShutdownMode.SINGLE,
+        srv.database.shutdown(database=act_1.db.db_path, mode=ShutdownMode.SINGLE,
                               method=ShutdownMethod.FORCED, timeout=0)
         # get firebird.log _before_ validation
         srv.info.get_log()
@@ -348,7 +348,7 @@ def test_1(act_1: Action, capsys):
         # If the database is currently in use by the engine or some attachments then it should fail
         # with message "database <db_file> shutdown."
         try:
-            srv.database.repair(database=str(act_1.db.db_path),
+            srv.database.repair(database=act_1.db.db_path,
                                 flags=SrvRepairFlag.FULL | SrvRepairFlag.VALIDATE_DB)
         except Exception as exc:
             print(f'Database repair failed with: {exc}')
@@ -357,7 +357,7 @@ def test_1(act_1: Action, capsys):
         srv.info.get_log()
         log_after = srv.readlines()
         # bring database online
-        srv.database.bring_online(database=str(act_1.db.db_path))
+        srv.database.bring_online(database=act_1.db.db_path)
     # At this point, threads should be dead
     for thread in threads:
         thread.join(1)

View File

@@ -673,13 +673,13 @@ def test_1(act_1: Action, capsys, sweep_killer_script_1: Path, sweep_killer_out_
         srv.info.get_log()
         log_before = srv.readlines()
         # Change FW to OFF in order to speed up initial data filling
-        srv.database.set_write_mode(database=str(act_1.db.db_path), mode=DbWriteMode.ASYNC)
+        srv.database.set_write_mode(database=act_1.db.db_path, mode=DbWriteMode.ASYNC)
         # make garbage
         act_1.isql(switches=[], input=make_garbage_1)
        # REDUCE number of cache buffers in DB header in order to make sweep work as long as possible
-        srv.database.set_default_cache_size(database=str(act_1.db.db_path), size=100)
+        srv.database.set_default_cache_size(database=act_1.db.db_path, size=100)
         # Change FW to ON (in order to make sweep life harder :))
-        srv.database.set_write_mode(database=str(act_1.db.db_path), mode=DbWriteMode.SYNC)
+        srv.database.set_write_mode(database=act_1.db.db_path, mode=DbWriteMode.SYNC)
         # Start trace
         b = Barrier(2)
         trace_thread = Thread(target=trace_session, args=[act_1, b])

View File

@@ -297,9 +297,9 @@ fbk_file_1 = temp_file('test.fbk')
 @pytest.mark.version('>=3.0')
 def test_1(act_1: Action, fbk_file_1: Path):
     with act_1.connect_server() as srv:
-        srv.database.backup(database=str(act_1.db.db_path), backup=str(fbk_file_1))
+        srv.database.backup(database=act_1.db.db_path, backup=fbk_file_1)
         srv.wait()
-        srv.database.restore(backup=str(fbk_file_1), database=str(act_1.db.db_path),
+        srv.database.restore(backup=fbk_file_1, database=act_1.db.db_path,
                              flags=SrvRestoreFlag.REPLACE)
         srv.wait()
     act_1.expected_stdout = expected_stdout_1

View File

@@ -138,15 +138,15 @@ def test_1(act_1: Action, capsys):
     print (':::MSG::: ISQL setting new value for linger finished.')
     print (':::MSG::: Starting GFIX setting new value for page buffers...')
     #with act_1.connect_server() as srv:
-        #srv.database.set_default_cache_size(database=str(act_1.db.db_path), size=3791)
-        #srv.database.set_write_mode(database=str(act_1.db.db_path), mode=DbWriteMode.ASYNC)
-        #srv.database.set_access_mode(database=str(act_1.db.db_path), mode=DbAccessMode.READ_ONLY)
-        #srv.database.shutdown(database=str(act_1.db.db_path), mode=ShutdownMode.SINGLE,
+        #srv.database.set_default_cache_size(database=act_1.db.db_path, size=3791)
+        #srv.database.set_write_mode(database=act_1.db.db_path, mode=DbWriteMode.ASYNC)
+        #srv.database.set_access_mode(database=act_1.db.db_path, mode=DbAccessMode.READ_ONLY)
+        #srv.database.shutdown(database=act_1.db.db_path, mode=ShutdownMode.SINGLE,
                               #method=ShutdownMethod.DENNY_ATTACHMENTS, timeout=20)
-        #srv.database.get_statistics(database=str(act_1.db.db_path), flags=SrvStatFlag.HDR_PAGES,
+        #srv.database.get_statistics(database=act_1.db.db_path, flags=SrvStatFlag.HDR_PAGES,
                                     #callback=print)
-        #srv.database.bring_online(database=str(act_1.db.db_path))
-        #srv.database.set_access_mode(database=str(act_1.db.db_path), mode=DbAccessMode.READ_WRITE)
+        #srv.database.bring_online(database=act_1.db.db_path)
+        #srv.database.set_access_mode(database=act_1.db.db_path, mode=DbAccessMode.READ_WRITE)
     act_1.reset()
     act_1.gfix(switches=['-buffers', '3791', act_1.db.dsn])
     act_1.reset()

View File

@@ -60,7 +60,7 @@ act_1 = python_act('db_1', substitutions=substitutions_1)
 @pytest.mark.version('>=2.5.6')
 def test_1(act_1: Action):
     with act_1.connect_server() as srv:
-        srv.database.set_access_mode(database=str(act_1.db.db_path), mode=DbAccessMode.READ_ONLY)
+        srv.database.set_access_mode(database=act_1.db.db_path, mode=DbAccessMode.READ_ONLY)
     script = """
     commit;
     set transaction read committed;

View File

@@ -105,12 +105,12 @@ def test_1(act_1: Action, user_1: User, temp_db_1: Path, capsys):
     print ('Starting backup...')
     backup = BytesIO()
     with act_1.connect_server() as srv:
-        srv.database.local_backup(database=str(act_1.db.db_path), backup_stream=backup)
+        srv.database.local_backup(database=act_1.db.db_path, backup_stream=backup)
     print ('Backup finished.')
     backup.seek(0)
     with act_1.connect_server(user=user_1.name, password=user_1.password) as srv:
         print ('Starting restore using NON sysdba user account...')
-        srv.database.local_restore(database=str(temp_db_1), backup_stream=backup,
+        srv.database.local_restore(database=temp_db_1, backup_stream=backup,
                                    flags=SrvRestoreFlag.REPLACE)
         print ('Restore using NON sysdba user account finished.')
     #
@@ -127,7 +127,7 @@ def test_1(act_1: Action, user_1: User, temp_db_1: Path, capsys):
     """
     print ('Starting ISQL using NON sysdba user account...')
     act_1.isql(switches=['-q', '-user', 'tmp$c4648', '-pas', '123', f'localhost:{temp_db_1}'],
-               connect_db=False, input=script)
+               connect_db=False, input=script, credentials=False)
     print(act_1.stdout)
     print ('ISQL using NON sysdba user account finished.')
     act_1.reset()

View File

@@ -302,9 +302,9 @@ def test_1(act_1: Action, dba_privileged_user: User, non_privileged_user: User,
     act_1.isql(switches=['-q'], input=prep_script)
     #
     with act_1.connect_server() as srv:
-        srv.database.shutdown(database=str(act_1.db.db_path), mode=ShutdownMode.FULL,
+        srv.database.shutdown(database=act_1.db.db_path, mode=ShutdownMode.FULL,
                               method=ShutdownMethod.FORCED, timeout=0)
-        srv.database.bring_online(database=str(act_1.db.db_path))
+        srv.database.bring_online(database=act_1.db.db_path)
     #
     test_script = f"""
     -- ###################################################################################

View File

@@ -255,7 +255,7 @@ heavy_output_1 = temp_file('heavy_script.out')
 def test_1(act_1: Action, heavy_script_1: Path, heavy_output_1: Path, capsys):
     # Change database FW to OFF in order to increase speed of insertions and output its header info
     with act_1.connect_server() as srv:
-        srv.database.set_write_mode(database=str(act_1.db.db_path), mode=DbWriteMode.ASYNC)
+        srv.database.set_write_mode(database=act_1.db.db_path, mode=DbWriteMode.ASYNC)
     # Preparing script for ISQL that will do 'heavy DML'
     heavy_script_1.write_text("""
     recreate sequence g;
@@ -288,10 +288,10 @@ def test_1(act_1: Action, heavy_script_1: Path, heavy_output_1: Path, capsys):
     # Run validation twice
     with act_1.connect_server() as srv:
         print('Iteration #1:')
-        srv.database.validate(database=str(act_1.db.db_path), lock_timeout=1,
+        srv.database.validate(database=act_1.db.db_path, lock_timeout=1,
                               callback=print)
         print('Iteration #2:')
-        srv.database.validate(database=str(act_1.db.db_path), lock_timeout=1,
+        srv.database.validate(database=act_1.db.db_path, lock_timeout=1,
                               callback=print)
     # Stopping ISQL that is now doing 'heavy DML' (bulk-inserts):
     act_1.isql(switches=[], input='insert into stop(id) values(1); commit;')

View File

@@ -120,7 +120,7 @@ expected_stdout_1 = """
 @pytest.mark.version('>=3.0')
 def test_1(act_1: Action):
     with act_1.connect_server() as srv:
-        srv.database.set_write_mode(database=str(act_1.db.db_path), mode=DbWriteMode.ASYNC)
+        srv.database.set_write_mode(database=act_1.db.db_path, mode=DbWriteMode.ASYNC)
     # Read FNC scripts from zip file and execute it
     script_file = Path(act_1.vars['files'] / 'core_4880.zip',
                        at='core_4880_fnc.tmp')

View File

@@ -174,7 +174,7 @@ expected_stdout_1 = """
 def test_1(act_1: Action, capsys):
     # Move database to FW = OFF in order to increase speed of insertions and output its header info:
     with act_1.connect_server() as srv:
-        srv.database.set_write_mode(database=str(act_1.db.db_path), mode=DbWriteMode.ASYNC)
+        srv.database.set_write_mode(database=act_1.db.db_path, mode=DbWriteMode.ASYNC)
         # Preparing script for ISQL that will do inserts with long keys:
         long_keys_cmd = """
         recreate table test(s varchar(1015)); -- with THIS length of field following EB will get exception very fast.
@@ -197,7 +197,7 @@ def test_1(act_1: Action, capsys):
         print(act_1.stdout)
         print(act_1.stderr)
         # Run validation after ISQL finishes (with a runtime exception due to exceeding an implementation limit):
-        srv.database.validate(database=str(act_1.db.db_path), lock_timeout=1, callback=print)
+        srv.database.validate(database=act_1.db.db_path, lock_timeout=1, callback=print)
         # Check
         act_1.expected_stdout = expected_stdout_1
         act_1.stdout = capsys.readouterr().out

View File

@@ -135,7 +135,7 @@ def test_1(act_1: Action, fbk_file_1: Path, fdb_file_1: Path):
                             at='tmp_core_5078.fbk')
     fbk_file_1.write_bytes(zipped_fbk_file.read_bytes())
     with act_1.connect_server() as srv:
-        srv.database.restore(database=str(fdb_file_1), backup=str(fbk_file_1))
+        srv.database.restore(database=fdb_file_1, backup=fbk_file_1)
         srv.wait()
     # This should execute without errors
     act_1.isql(switches=[str(fdb_file_1)], input='set list on; select * from do_changeTxStatus;',

View File

@@ -124,7 +124,7 @@ def test_1(act_1: Action):
     #with act_1.connect_server() as srv:
         # This raises an error in the new FB OO API while calling spb.insert_string(SPBItem.DBNAME, database):
         # "Internal error when using clumplet API: attempt to store data in dataless clumplet"
-        #srv.database.nfix_database(database=str(act_1.db.db_path))
+        #srv.database.nfix_database(database=act_1.db.db_path)
     # So we have to use svcmgr...
     act_1.reset()
     act_1.svcmgr(switches=['action_nfix', 'dbname', str(act_1.db.db_path)])

View File

@@ -641,7 +641,7 @@ def test_1(act_1: Action, db_dml_sessions_1: Database, dml_logs_1: List[Path],
     att_chk.commit()
     # Shutdown database
     with act_1.connect_server() as srv:
-        srv.database.shutdown(database=str(db_dml_sessions_1.db_path),
+        srv.database.shutdown(database=db_dml_sessions_1.db_path,
                               mode=ShutdownMode.FULL, method=ShutdownMethod.FORCED,
                               timeout=0)
     cur_chk.execute("select 'check_point_3: after shutdown <db_dml_sessions>' as msg from rdb$database")
@@ -683,7 +683,7 @@ def test_1(act_1: Action, db_dml_sessions_1: Database, dml_logs_1: List[Path],
         f_check_log.write(f'Found crash messages in DML worker logs: {crashes_in_worker_logs}\n')  # must be 0.
         f_check_log.write('Point before bring DML database online.\n')
         with act_1.connect_server() as srv:
-            srv.database.bring_online(database=str(db_dml_sessions_1.db_path))
+            srv.database.bring_online(database=db_dml_sessions_1.db_path)
         f_check_log.write('Point after bring DML database online.\n')
        chk_script = f"""
        set list on;

View File

@@ -86,7 +86,7 @@ expected_stdout_1 = """
 @pytest.mark.version('>=2.5.6')
 def test_1(act_1: Action):
     with act_1.connect_server() as srv:
-        srv.database.set_write_mode(database=str(act_1.db.db_path), mode=DbWriteMode.ASYNC)
+        srv.database.set_write_mode(database=act_1.db.db_path, mode=DbWriteMode.ASYNC)
     #
     custom_tpb = TPB(isolation=Isolation.CONCURRENCY).get_buffer()
     with act_1.db.connect(no_gc=True) as con:

View File

@@ -183,9 +183,9 @@ expected_stdout_1 = """
 def test_1(act_1: Action):
     with act_1.connect_server() as srv:
         backup = BytesIO()
-        srv.database.local_backup(database=str(act_1.db.db_path), backup_stream=backup)
+        srv.database.local_backup(database=act_1.db.db_path, backup_stream=backup)
         backup.seek(0)
-        srv.database.local_restore(database=str(act_1.db.db_path), backup_stream=backup,
+        srv.database.local_restore(database=act_1.db.db_path, backup_stream=backup,
                                    flags=SrvRestoreFlag.REPLACE)
     act_1.expected_stdout = expected_stdout_1
     act_1.isql(switches=['-q'],

View File

@@ -167,10 +167,10 @@ fbk_file = temp_file('core_5143.fbk')
 @pytest.mark.version('>=3.0')
 def test_1(act_1: Action, fbk_file: Path):
     with act_1.connect_server() as srv:
-        srv.database.backup(database=str(act_1.db.db_path), backup=str(fbk_file),
+        srv.database.backup(database=act_1.db.db_path, backup=fbk_file,
                             verbose=True)
         srv.wait()
-        srv.database.restore(database=str(act_1.db.db_path), backup=str(fbk_file),
+        srv.database.restore(database=act_1.db.db_path, backup=fbk_file,
                              flags=SrvRestoreFlag.REPLACE | SrvRestoreFlag.ONE_AT_A_TIME,
                              verbose=True)
         restore_log = srv.readlines()

View File

@@ -157,7 +157,7 @@ tmp_db_file = temp_file('tmp_core_5201.fdb')
 @pytest.mark.version('>=3.0')
 def test_1(act_1: Action, fbk_file: Path, tmp_db_file: Path):
     with act_1.connect_server() as srv:
-        srv.database.backup(database=str(act_1.db.db_path), backup=str(fbk_file))
+        srv.database.backup(database=act_1.db.db_path, backup=fbk_file)
         assert srv.readlines() == []
     #
     act_1.expected_stderr = 'We expect error'

View File

@@ -181,7 +181,7 @@ def test_1(act_1: Action, fbk_file_1: Path, fdb_file_1: Path, capsys):
                             at='core_5207.fbk')
     fbk_file_1.write_bytes(zipped_fbk_file.read_bytes())
     with act_1.connect_server() as srv:
-        srv.database.restore(database=str(fdb_file_1), backup=str(fbk_file_1))
+        srv.database.restore(database=fdb_file_1, backup=fbk_file_1)
         srv.wait()
     act_1.isql(switches=['-x', str(fdb_file_1)], connect_db=False)
     metadata = act_1.stdout

View File

@@ -127,7 +127,7 @@ test_sript_1 = """
 @pytest.mark.version('>=4.0')
 def test_1(act_1: Action):
     with act_1.connect_server() as srv:
-        srv.database.set_write_mode(database=str(act_1.db.db_path), mode=DbWriteMode.ASYNC)
+        srv.database.set_write_mode(database=act_1.db.db_path, mode=DbWriteMode.ASYNC)
     act_1.expected_stdout = expected_stdout_1
     act_1.isql(switches=[], input=test_sript_1)
     assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@@ -618,7 +618,7 @@ def test_1(act_1: Action, bulk_insert_script_1: Path, bulk_insert_output_1: Path
     log_after = act_1.get_firebird_log()
     # Run database validation
     with act_1.connect_server() as srv:
-        srv.database.validate(database=str(act_1.db.db_path), callback=print_validation)
+        srv.database.validate(database=act_1.db.db_path, callback=print_validation)
     # Check
     act_1.reset()
     act_1.expected_stdout = expected_stdout_1

View File

@@ -119,10 +119,10 @@ fdb_file_2 = temp_file('tmp_core_5295-2.db1')
 @pytest.mark.version('>=2.5.6')
 def test_1(act_1: Action, fbk_file: Path, fdb_file_1: Path, fdb_file_2: Path):
     with act_1.connect_server() as srv:
-        srv.database.backup(database=str(act_1.db.db_path), backup=str(fbk_file))
+        srv.database.backup(database=act_1.db.db_path, backup=fbk_file)
         srv.wait()
-        srv.database.restore(backup=str(fbk_file),
-                             database=[str(fdb_file_1), str(fdb_file_2)],
+        srv.database.restore(backup=fbk_file,
+                             database=[fdb_file_1, fdb_file_2],
                              db_file_pages=[100000])
         srv.wait()
     # Only 'gfix -v' raised error. Online validation works fine:

View File

@@ -79,9 +79,9 @@ fdb_file = temp_file('tmp_core_5304.fdb')
 @pytest.mark.version('>=4.0')
 def test_1(act_1: Action, fbk_file: Path, fdb_file: Path):
     with act_1.connect_server() as srv:
-        srv.database.backup(database=str(act_1.db.db_path), backup=str(fbk_file))
+        srv.database.backup(database=act_1.db.db_path, backup=fbk_file)
         srv.wait()
-        srv.database.restore(backup=str(fbk_file), database=str(fdb_file))
+        srv.database.restore(backup=fbk_file, database=fdb_file)
         srv.wait()

View File

@@ -126,7 +126,7 @@ expected_stderr_1 = """
 @pytest.mark.version('>=3.0.6')
 def test_1(act_1: Action):
     with act_1.connect_server() as srv:
-        srv.database.shutdown(database=str(act_1.db.db_path), mode=ShutdownMode.SINGLE,
+        srv.database.shutdown(database=act_1.db.db_path, mode=ShutdownMode.SINGLE,
                               method=ShutdownMethod.FORCED, timeout=0)
     with act_1.db.connect() as con:
         c = con.cursor()

View File

@@ -268,7 +268,7 @@ def test_1(act_1: Action):
     if act_1.get_server_architecture() == 'SS':
         # Bugcheck is reproduced on 2.5.7.27030 only when FW = OFF
         with act_1.connect_server() as srv:
-            srv.database.set_write_mode(database=str(act_1.db.db_path), mode=DbWriteMode.ASYNC)
+            srv.database.set_write_mode(database=act_1.db.db_path, mode=DbWriteMode.ASYNC)
     # Test
     act_1.expected_stdout = expected_stdout_1
     act_1.isql(switches=[], input=test_script_1)