Mirror of https://github.com/FirebirdSQL/firebird-qa.git (synced 2025-01-22 13:33:07 +01:00)
Commit 560873a3f0: More python tests
Parent commit: f6294a3afe
BIN
files/core_5965.zip
Normal file
Binary file not shown.
BIN
files/core_6023-ods-11_2-fdb.zip
Normal file
Binary file not shown.
BIN
files/core_6028_25.zip
Normal file
Binary file not shown.
386
files/core_6078.sql
Normal file
@ -0,0 +1,386 @@
-- connect '%(dsn)s' user %(user_name)s password '%(user_password)s';
set list on;
set count on;

set term ^;
execute block as
begin
    begin
        execute statement 'drop domain dm_test';
        when any do begin end
    end
end
^
set term ;^
commit;

-- ALTER statement will try to change its NOT_NULL state:
create domain dm_test as int;
commit;

recreate table test( uid char(16) character set octets, x int, y int, constraint test_unq unique(uid) );
commit;

comment on table test is 'This is table TEST. And no one allowed to alter it!.';
commit;

create descending index test_uid on test(uid); -- ALTER statement will try to change its state to INACTIVE
commit;

set term ^;
create or alter trigger test_bi for test active before insert position 0 as
begin
    new.uid = gen_uuid();
end
^
set term ;^
commit;

alter user tmp$c6078_0 using plugin Srp revoke admin role; -- Unprivileged user
-- create or alter user tmp$c6078_0 password '123' using plugin Srp revoke admin role; -- Unprivileged user
-- create or alter user tmp$c6078_1 password '123' using plugin Srp;
-- create or alter user tmp$c6078_2 password '456' using plugin Srp;
-- commit;

create or alter mapping local_map_c6078 using plugin srp from user tmp$c6078_1 to user ltost;
create or alter global mapping global_map_c6078 using plugin srp from user tmp$c6078_2 to user gtost;
commit;

connect '%(dsn)s' user tmp$c6078_0 password '123';
commit;

-- ######################################################################################################
-- ###   F o l l o w i n g   i s   d o n e   b y   n o n - p r i v i l e g e d   u s e r   ###
-- ######################################################################################################


-- 29.01.2020. Attempt to alter another non-privileged USER.
-- Expected:
-- Statement failed, SQLSTATE = 28000
-- modify record error
-- -no permission for UPDATE access to COLUMN PLG$SRP_VIEW.PLG$ACTIVE
-- (FB 4.0.0 only): -Effective user is TMP$C6078_0
alter user tmp$c6078_1 inactive using plugin Srp;
commit;

-- 29.01.2020. Attempt to alter THE WHOLE DATABASE.
-- Expected:
-- Statement failed, SQLSTATE = 28000
-- unsuccessful metadata update
-- -ALTER DATABASE failed
-- -no permission for ALTER access to DATABASE
alter database set linger to 31;
commit;

----------------------------------------------------------------

-- 29.01.2020. Attempt to alter DOMAIN.
-- Expected:
-- Statement failed, SQLSTATE = 28000
-- unsuccessful metadata update
-- -ALTER DOMAIN DM_TEST failed
-- -no permission for ALTER access to DOMAIN DM_TEST
-- (FB 4.0.0 only): -Effective user is TMP$C6078_0
alter domain dm_test set not null;
commit;

----------------------------------------------------------------

-- 29.01.2020. Attempt to alter table DROP constraint.
-- Expected:
-- Statement failed, SQLSTATE = 28000
-- unsuccessful metadata update
-- -ALTER TABLE TEST failed
-- -no permission for ALTER access to TABLE TEST
-- (FB 4.0.0 only): -Effective user is TMP$C6078_0
alter table test drop constraint test_unq;

----------------------------------------------------------------

-- 29.01.2020. Attempt to alter table alter column.
-- Expected:
-- Statement failed, SQLSTATE = 28000
-- unsuccessful metadata update
-- -ALTER TABLE TEST failed
-- -no permission for ALTER access to TABLE TEST
-- (FB 4.0.0 only): -Effective user is TMP$C6078_0
alter table test alter x type bigint;

----------------------------------------------------------------

-- 29.01.2020. Attempt to alter INDEX: make it inactive.
-- Statement failed, SQLSTATE = 28000
-- unsuccessful metadata update
-- -ALTER INDEX TEST_UID failed
-- -no permission for ALTER access to TABLE TEST
-- -Effective user is TMP$C6078_0
alter index test_uid inactive;

----------------------------------------------------------------

-- 29.01.2020. Attempt to change existing COMMENT to the table TEST (make it NULL).
-- Expected:
-- Statement failed, SQLSTATE = 28000
-- unsuccessful metadata update
-- -COMMENT ON TEST failed
-- -no permission for ALTER access to TABLE TEST
-- (FB 4.0.0): -Effective user is TMP$C6078_0
comment on table test is null;

----------------------------------------------------------------

-- Attempt to alter TRIGGER on existing table (CREATED BY SYSDBA)
-- Expected:
-- Statement failed, SQLSTATE = 28000
-- unsuccessful metadata update
-- -CREATE OR ALTER TRIGGER TEST_BI failed
-- -no permission for ALTER access to TABLE TEST
-- (FB 4.0.0 only): -Effective user is TMP$C6078_0
set term ^;
create or alter trigger test_bi for test active before insert position 0 as
begin
    new.uid = 'QWE';
end
^
set term ;^
commit;

----------------------------------------------------------------

-- Attempt to create/alter TRIGGER on DB-level event:
-- Expected:
-- Statement failed, SQLSTATE = 28000
-- unsuccessful metadata update
-- -CREATE OR ALTER TRIGGER TRG$START failed
-- -no permission for ALTER access to DATABASE
set term ^;
create or alter trigger trg$start
    inactive on transaction start position 0
as
begin
    rdb$set_context('USER_SESSION', 'TRANS_ID', current_transaction);
end
^
set term ;^

----------------------------------------------------------------

-- Attempt to alter TRIGGER for DDL event:
-- Expected:
-- Statement failed, SQLSTATE = 28000
-- unsuccessful metadata update
-- -CREATE OR ALTER TRIGGER TRIG_DDL_SP failed
-- -no permission for ALTER access to DATABASE
set term ^;
create or alter trigger trig_ddl_sp before create procedure as
begin
end
^
set term ;^


-- Check that there is still ONE trigger that was created at the start of this script (by SYSDBA) and it has unchanged body:
-- Expected:
-- RDB$TRIGGER_NAME TEST_BI
-- RDB$TRIGGER_SOURCE c:3cc
-- as
-- begin
-- new.uid = gen_uuid();
-- end
-- Records affected: 1

select t.rdb$trigger_name altered_trigger_name, t.rdb$trigger_source altered_trigger_source
from rdb$database r
left join rdb$triggers t on t.rdb$system_flag is distinct from 1;

---------------------------------------------------------------

-- Attempt to alter PACKAGE header.
-- Expected:
-- Statement failed, SQLSTATE = 42000
-- unsuccessful metadata update
-- -CREATE OR ALTER PACKAGE PKG_TEST failed
-- -No permission for CREATE PACKAGE operation
set term ^ ;
create or alter package pkg_test -- error was raised, but the package still WAS created.
as
begin
    function f_test_inside_pkg
    returns smallint;
end
^
set term ;^

---------------------------------------------------------------

-- Attempt to alter PACKAGE body.
-- Expected:
-- Statement failed, SQLSTATE = 42000
-- unsuccessful metadata update
-- -RECREATE PACKAGE BODY PKG_TEST failed
-- -No permission for CREATE PACKAGE operation
set term ^;
recreate package body PKG_TEST
as
begin
    function f_test_inside_pkg
    returns smallint
    as
    begin
        return 1;
    end
end
^
set term ;^

commit;

-- Check that no packages appeared in the database.
-- Expected:
-- RDB$PACKAGE_NAME <null>
-- Records affected: 1
select p.rdb$package_name as altered_pkg_name
from rdb$database r
left join rdb$packages p on p.rdb$system_flag is distinct from 1;
commit;


---------------------------------------------------------------

-- Attempt to alter standalone PSQL function
set term ^;
create or alter function fn_c6078 returns int as -- error was raised, but the function still WAS created.
begin
    return 123987;
end
^
set term ;^
commit;

-- Expected:
-- RDB$FUNCTION_NAME <null>
-- Records affected: 1
select f.rdb$function_name as altered_standalone_func from rdb$database r left join rdb$functions f on f.rdb$system_flag is distinct from 1 and f.RDB$PACKAGE_NAME is null;
commit;

---------------------------------------------------------------

-- Attempt to alter standalone procedure
set term ^;
create or alter procedure sp_c6078 returns(whoami varchar(32)) as
begin
    whoami = current_user;
    suspend;
end
^
set term ;^
commit;

-- Expected:
-- RDB$PROCEDURE_NAME <null>
-- Records affected: 1
select p.rdb$procedure_name as altered_standalone_proc from rdb$database r left join rdb$procedures p on p.rdb$system_flag is distinct from 1;

---------------------------------------------------------------

-- Attempt to alter view
create or alter view v_c6078 as select * from rdb$database; -- NO error at all, view WAS created.
commit;

-- Expected
-- RDB$RELATION_NAME <null>
-- Records affected: 1
select v.rdb$relation_name as altered_view_name from rdb$database r left join rdb$relations v on v.rdb$system_flag is distinct from 1 and v.rdb$relation_name = upper('v_c6078');
commit;

---------------------------------------------------------------
-- Attempt to alter sequence

create or alter sequence sq_c6078 start with 192837465;
commit;

-- Expected:
-- RDB$GENERATOR_NAME <null>
-- Records affected: 1
select g.rdb$generator_name as altered_sequence_name from rdb$database r left join rdb$generators g on g.rdb$system_flag is distinct from 1;
commit;

---------------------------------------------------------------
-- Attempt to alter exception

--create or alter exception ex_c6078 'Something wrong.'; -- here no error, even for 1st run of this statement!
create or alter exception ex_c6078 'Something wrong.';
commit;

-- Expected
-- RDB$EXCEPTION_NAME <null>
-- Records affected: 1
select x.rdb$exception_name as altered_exception_name from rdb$database r left join rdb$exceptions x on x.rdb$system_flag is distinct from 1;
commit;

---------------------------------------------------------------

-- Attempt to alter UDR-based function:
-- before fix there was NO error here and the UDR-based function WAS created
create or alter function wait_event (
    event_name varchar(31) character set utf8 not null
) returns integer not null
external name 'udrcpp_example!wait_event'
engine udr;

commit;

-- Expected:
-- RDB$FUNCTION_NAME <null>
-- Records affected: 1
select f.rdb$function_name as altered_UDR_based_func from rdb$database r left join rdb$functions f on f.rdb$system_flag is distinct from 1 and f.rdb$engine_name = upper('udr');
commit;

---------------------------------------------------------------

-- 29.01.2020. Attempt to alter character set.
-- Expected:
-- Statement failed, SQLSTATE = 28000
-- unsuccessful metadata update
-- -ALTER CHARACTER SET UTF8 failed
-- -no permission for ALTER access to CHARACTER SET UTF8
-- (FB 4.0.0 only): -Effective user is TMP$C6078_0
ALTER CHARACTER SET UTF8 SET DEFAULT COLLATION UNICODE_CI_AI;

---------------------------------------------------------------

-- 29.01.2020. Attempt to alter LOCAL MAPPING.
-- Expected:
-- Statement failed, SQLSTATE = 28000
-- unsuccessful metadata update
-- -ALTER MAPPING LOCAL_MAP_C6078 failed
-- -Unable to perform operation
-- 4.0.0: -System privilege CHANGE_MAPPING_RULES is missing
-- 3.0.x: -Unable to perform operation. You must be either SYSDBA or owner of the database
alter mapping local_map_c6078 using plugin srp from user tmp$c6078_1 to user ltost_2;

---------------------------------------------------------------

-- 29.01.2020. Attempt to alter GLOBAL MAPPING.
-- Expected:
-- Statement failed, SQLSTATE = 28000
-- unsuccessful metadata update
-- -ALTER MAPPING GLOBAL_MAP_C6078 failed
-- -Unable to perform operation
-- (FB 4.0.0): -System privilege CHANGE_MAPPING_RULES is missing
-- (FB 3.0.x): -Unable to perform operation. You must be either SYSDBA or owner of the database
alter global mapping global_map_c6078 using plugin srp from user tmp$c6078_2 to user gtost_2;
commit;

-- cleanup:
-- ########
connect '%(dsn)s' user %(user_name)s password '%(user_password)s';

drop global mapping global_map_c6078;
drop mapping local_map_c6078;
commit;

-- drop user tmp$c6078_0 using plugin Srp;
-- drop user tmp$c6078_1 using plugin Srp;
-- drop user tmp$c6078_2 using plugin Srp;
-- commit;
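A minimal sketch (not part of this commit) of how a script such as files/core_6078.sql is usually driven from the pytest harness used by the tests below. The fixture names, the placeholder expected output and the way the script is read are assumptions; only Action.isql and the fixture factories shown elsewhere in this changeset are taken as given.

import pytest
from pathlib import Path
from firebird.qa import db_factory, python_act, Action

db_1 = db_factory(sql_dialect=3)
act_1 = python_act('db_1')

@pytest.mark.version('>=3.0.5')
def test_core_6078_sketch(act_1: Action):
    # read the SQL script shipped in files/ (the exact path is an assumption for this sketch)
    script = Path('files', 'core_6078.sql').read_text()
    # NB: the %(dsn)s style placeholders in the script come from the old fbtest framework
    # and would have to be substituted before running it.
    act_1.expected_stdout = '...'   # the real test would list the expected isql output here
    act_1.isql(switches=['-q'], input=script)
    assert act_1.clean_stdout == act_1.clean_expected_stdout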
@ -11,7 +11,7 @@
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, python_act, Action
|
||||
from firebird.driver import TPB, Isolation
|
||||
from firebird.driver import tpb, Isolation
|
||||
|
||||
# version: 2.5
|
||||
# resources: None
|
||||
@ -86,20 +86,20 @@ def test_1(act_1: Action):
|
||||
exit;
|
||||
end
|
||||
'''
|
||||
tpb = TPB(isolation=Isolation.CONCURRENCY).get_buffer()
|
||||
custom_tpb = tpb(isolation=Isolation.CONCURRENCY)
|
||||
with act_1.db.connect() as con1, act_1.db.connect() as con2:
|
||||
con1.begin(tpb)
|
||||
con1.begin(custom_tpb)
|
||||
cur1 = con1.cursor()
|
||||
cur2 = con2.cursor()
|
||||
|
||||
cur1.execute(stm1)
|
||||
con1.commit()
|
||||
|
||||
con2.begin(tpb)
|
||||
con2.begin(custom_tpb)
|
||||
cur2.execute(stm2)
|
||||
con2.commit()
|
||||
|
||||
con1.begin(tpb)
|
||||
con1.begin(custom_tpb)
|
||||
cur1.execute(stm1)
|
||||
con1.commit()
|
||||
|
||||
|
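The recurring change in the Python tests of this commit is visible in the hunk above: the old TPB class, whose raw buffer had to be extracted with get_buffer(), is replaced by the tpb() helper, whose result is passed straight to begin() or transaction_manager(). A small side-by-side sketch, using only calls that already appear in these diffs:

from firebird.driver import TPB, tpb, Isolation

# old style: build a TPB object, then hand its raw buffer to the connection
old_buffer = TPB(isolation=Isolation.CONCURRENCY).get_buffer()

# new style: tpb() builds the transaction parameter buffer directly
custom_tpb = tpb(isolation=Isolation.CONCURRENCY)

# either value is then used the same way, e.g.:
#   con.begin(custom_tpb)
#   tx = con.transaction_manager(custom_tpb)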
@ -20,7 +20,7 @@
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, python_act, Action
|
||||
from firebird.driver import TPB, TraAccessMode, Isolation
|
||||
from firebird.driver import tpb, TraAccessMode, Isolation
|
||||
|
||||
# version: 2.5.7
|
||||
# resources: None
|
||||
@ -156,9 +156,9 @@ act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
@pytest.mark.version('>=2.5.7')
|
||||
def test_1(act_1: Action):
|
||||
with act_1.db.connect() as con:
|
||||
txparam_read = TPB(isolation=Isolation.READ_COMMITTED_RECORD_VERSION, lock_timeout=0,
|
||||
access_mode=TraAccessMode.READ).get_buffer()
|
||||
txparam_write = TPB(isolation=Isolation.READ_COMMITTED_RECORD_VERSION, lock_timeout=0).get_buffer()
|
||||
txparam_read = tpb(isolation=Isolation.READ_COMMITTED_RECORD_VERSION, lock_timeout=0,
|
||||
access_mode=TraAccessMode.READ)
|
||||
txparam_write = tpb(isolation=Isolation.READ_COMMITTED_RECORD_VERSION, lock_timeout=0)
|
||||
|
||||
tx_read = con.transaction_manager(txparam_read)
|
||||
cur_read = tx_read.cursor()
|
||||
|
@ -24,6 +24,7 @@
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, python_act, Action
|
||||
from firebird.driver import tpb, Isolation
|
||||
|
||||
# version: 3.0
|
||||
# resources: None
|
||||
@ -92,10 +93,11 @@ act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
@pytest.mark.version('>=3.0')
|
||||
def test_1(act_1: Action):
|
||||
with act_1.db.connect() as con_1:
|
||||
c_1 = con_1.cursor()
|
||||
c_1.execute('select * from sec$users')
|
||||
with act_1.db.connect() as con_2:
|
||||
custom_tpb = tpb(isolation=Isolation.READ_COMMITTED_RECORD_VERSION, lock_timeout=0)
|
||||
#
|
||||
with act_1.db.connect() as con1:
|
||||
trn1 = con1.transaction_manager(custom_tpb)
|
||||
cur1 = trn1.cursor()
|
||||
cur1.execute('select sec$user_name from sec$users')
|
||||
with act_1.db.connect() as con2:
|
||||
pass # Connect should not raise an exception
|
||||
|
||||
|
||||
|
@ -12,7 +12,7 @@
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, python_act, Action
|
||||
from firebird.driver import TPB, Isolation
|
||||
from firebird.driver import tpb, Isolation
|
||||
|
||||
# version: 3.0.6
|
||||
# resources: None
|
||||
@ -227,7 +227,7 @@ def test_1(act_1: Action, capsys):
|
||||
"""
|
||||
act_1.isql(switches=[], input=ddl_script)
|
||||
#
|
||||
tpb = TPB(isolation=Isolation.READ_COMMITTED_NO_RECORD_VERSION, lock_timeout=0).get_buffer()
|
||||
custom_tpb = tpb(isolation=Isolation.READ_COMMITTED_NO_RECORD_VERSION, lock_timeout=0)
|
||||
with act_1.db.connect() as con:
|
||||
cur1 = con.cursor()
|
||||
cur1.execute('select x from sp_test(21)').fetchall()
|
||||
@ -239,7 +239,7 @@ def test_1(act_1: Action, capsys):
|
||||
'drop index test2_id_x_desc']
|
||||
for cmd in drop_commands:
|
||||
with act_1.db.connect() as con2:
|
||||
tx = con2.transaction_manager(default_tpb=tpb)
|
||||
tx = con2.transaction_manager(custom_tpb)
|
||||
tx.begin()
|
||||
cur2 = tx.cursor()
|
||||
try:
|
||||
|
@ -21,7 +21,7 @@
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, python_act, Action
|
||||
from firebird.driver import TPB, Isolation, TraAccessMode, DatabaseError
|
||||
from firebird.driver import tpb, Isolation, TraAccessMode, DatabaseError
|
||||
|
||||
# version: 2.5.5
|
||||
# resources: None
|
||||
@ -108,8 +108,8 @@ lock conflict on no wait transaction
|
||||
|
||||
@pytest.mark.version('>=2.5.5')
|
||||
def test_1(act_1: Action, capsys):
|
||||
custom_tpb = TPB(isolation=Isolation.READ_COMMITTED_RECORD_VERSION,
|
||||
access_mode=TraAccessMode.WRITE, lock_timeout=0).get_buffer()
|
||||
custom_tpb = tpb(isolation=Isolation.READ_COMMITTED_RECORD_VERSION,
|
||||
access_mode=TraAccessMode.WRITE, lock_timeout=0)
|
||||
with act_1.db.connect() as con1:
|
||||
tx1a = con1.transaction_manager(custom_tpb)
|
||||
cur1a = tx1a.cursor()
|
||||
|
@ -26,7 +26,7 @@
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, python_act, Action
|
||||
from firebird.driver import TPB, Isolation
|
||||
from firebird.driver import tpb, Isolation
|
||||
|
||||
# version: 2.5.6
|
||||
# resources: None
|
||||
@ -87,7 +87,7 @@ expected_stdout_1 = """
|
||||
def test_1(act_1: Action):
|
||||
act_1.db.set_async_write()
|
||||
#
|
||||
custom_tpb = TPB(isolation=Isolation.CONCURRENCY).get_buffer()
|
||||
custom_tpb = tpb(isolation=Isolation.CONCURRENCY)
|
||||
with act_1.db.connect(no_gc=True) as con:
|
||||
tx1 = con.transaction_manager(custom_tpb)
|
||||
tx2 = con.transaction_manager(custom_tpb)
|
||||
|
@ -34,6 +34,7 @@ init_script_1 = """
|
||||
"""
|
||||
|
||||
db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
db_1_repl = db_factory(sql_dialect=3, init=init_script_1, filename='tmp_5645_repl.fd')
|
||||
|
||||
# test_script_1
|
||||
#---
|
||||
@ -167,13 +168,8 @@ expected_stdout_1 = """
|
||||
Records affected: 2
|
||||
"""
|
||||
|
||||
|
||||
db_1_repl = db_factory(sql_dialect=3, init=init_script_1, filename='tmp_5645_repl.fd')
|
||||
|
||||
|
||||
@pytest.mark.version('>=3.0.3')
|
||||
def test_1(act_1: Action, db_1_repl: Database):
|
||||
pytest.skip("Requires UDR udrcpp_example")
|
||||
ddl_for_replication = f"""
|
||||
create table replicate_config (
|
||||
name varchar(31) not null,
|
||||
@ -181,7 +177,7 @@ def test_1(act_1: Action, db_1_repl: Database):
|
||||
);
|
||||
|
||||
insert into replicate_config (name, data_source)
|
||||
values ('ds1', '{db_1_repl}');
|
||||
values ('ds1', '{db_1_repl.db_path}');
|
||||
|
||||
create trigger persons_replicate
|
||||
after insert on persons
|
||||
|
@ -2,35 +2,36 @@
|
||||
#
|
||||
# id: bugs.core_5907
|
||||
# title: Regression: can not launch trace if its 'database' section contains regexp pattern with curvy brackets to enclose quantifier
|
||||
# decription:
|
||||
# decription:
|
||||
# Database file name for check: {core_5907.97}.tmp // NB: outer curvy brackets ARE INCLUDED in this name.
|
||||
# This name should match the pattern: (\\{core_5907.[[:DIGIT:]]{2}\\}).tmp -- but we have to duplicate every "{" and "}".
|
||||
# Also, we have to duplicate '' otherwise it will be escaped by fbtest framework.
|
||||
# Checked on 4.0.0.1224: OK, 14.047s.
|
||||
#
|
||||
#
|
||||
# tracker_id: CORE-5907
|
||||
# min_versions: ['4.0']
|
||||
# versions: 4.0
|
||||
# qmid: None
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
from firebird.qa import db_factory, python_act, Action
|
||||
|
||||
# version: 4.0
|
||||
# resources: None
|
||||
|
||||
substitutions_1 = [('.*{CORE_5907.97}.TMP', '{CORE_5907.97}.TMP'), ('.*{core_5907.97}.tmp', '{CORE_5907.97}.TMP')]
|
||||
substitutions_1 = [('.*{CORE_5907.97}.FDB', '{CORE_5907.97}.FDB'),
|
||||
('.*{core_5907.97}.fdb', '{CORE_5907.97}.FDB')]
|
||||
|
||||
init_script_1 = """
|
||||
recreate table test(id int);
|
||||
commit;
|
||||
"""
|
||||
"""
|
||||
|
||||
db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
db_1 = db_factory(sql_dialect=3, init=init_script_1, filename='{core_5907.97}.fdb')
|
||||
|
||||
# test_script_1
|
||||
#---
|
||||
#
|
||||
#
|
||||
# import os
|
||||
# import re
|
||||
# import subprocess
|
||||
@ -38,32 +39,32 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# import shutil
|
||||
# from fdb import services
|
||||
# from subprocess import Popen
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
#
|
||||
#
|
||||
# this_fdb = db_conn.database_name
|
||||
# test_fdb = os.path.join( os.path.split(this_fdb)[0], "{core_5907.97}.tmp") # name of copy will be: %FBT_REPO% mp\\{core_5907.97}.tmp
|
||||
#
|
||||
# test_fdb = os.path.join( os.path.split(this_fdb)[0], "{core_5907.97}.tmp") # name of copy will be: %FBT_REPO%\\tmp\\{core_5907.97}.tmp
|
||||
#
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def flush_and_close( file_handle ):
|
||||
# # https://docs.python.org/2/library/os.html#os.fsync
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
|
||||
# global os
|
||||
#
|
||||
#
|
||||
# file_handle.flush()
|
||||
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
|
||||
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
|
||||
# os.fsync(file_handle.fileno())
|
||||
# file_handle.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def cleanup( f_names_list ):
|
||||
# global os
|
||||
# for i in range(len( f_names_list )):
|
||||
@ -74,12 +75,12 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# else:
|
||||
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
|
||||
# del_name = None
|
||||
#
|
||||
#
|
||||
# if del_name and os.path.isfile( del_name ):
|
||||
# os.remove( del_name )
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# txt30 = '''# Trace config, format for 3.0. Generated auto, do not edit!
|
||||
# database=(%[\\\\\\\\/](\\{{core_5907.[[:DIGIT:]]{{2}}\\}}).tmp)
|
||||
# {
|
||||
@ -89,48 +90,48 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# log_connections = true
|
||||
# }
|
||||
# '''
|
||||
#
|
||||
#
|
||||
# f_trc_cfg=open( os.path.join(context['temp_directory'],'tmp_trace_5907.cfg'), 'w')
|
||||
# f_trc_cfg.write(txt30)
|
||||
# flush_and_close( f_trc_cfg )
|
||||
#
|
||||
#
|
||||
# shutil.copy2( this_fdb, test_fdb )
|
||||
#
|
||||
#
|
||||
# # ##############################################################
|
||||
# # S T A R T T R A C E i n S E P A R A T E P R O C E S S
|
||||
# # ##############################################################
|
||||
#
|
||||
#
|
||||
# f_trc_log=open( os.path.join(context['temp_directory'],'tmp_trace_5907.log'), "w")
|
||||
# f_trc_err=open( os.path.join(context['temp_directory'],'tmp_trace_5907.err'), "w")
|
||||
#
|
||||
# p_trace = Popen( [ context['fbsvcmgr_path'],
|
||||
#
|
||||
# p_trace = Popen( [ context['fbsvcmgr_path'],
|
||||
# 'localhost:service_mgr',
|
||||
# 'action_trace_start',
|
||||
# 'action_trace_start',
|
||||
# 'trc_cfg', f_trc_cfg.name
|
||||
# ],
|
||||
# stdout = f_trc_log, stderr = f_trc_err
|
||||
# )
|
||||
#
|
||||
#
|
||||
# # this delay need for trace start and finish its output about invalid section in its config file:
|
||||
# time.sleep(1)
|
||||
#
|
||||
#
|
||||
# # ####################################################
|
||||
# # G E T A C T I V E T R A C E S E S S I O N I D
|
||||
# # ####################################################
|
||||
# # Save active trace session info into file for further parsing it and obtain session_id back (for stop):
|
||||
#
|
||||
#
|
||||
# f_trc_lst = open( os.path.join(context['temp_directory'],'tmp_trace_5907.lst'), 'w')
|
||||
# subprocess.call( [ context['fbsvcmgr_path'],
|
||||
# 'localhost:service_mgr',
|
||||
# subprocess.call( [ context['fbsvcmgr_path'],
|
||||
# 'localhost:service_mgr',
|
||||
# 'action_trace_list'
|
||||
# ],
|
||||
# stdout=f_trc_lst
|
||||
# )
|
||||
# flush_and_close( f_trc_lst )
|
||||
#
|
||||
#
|
||||
# # !!! DO NOT REMOVE THIS LINE !!!
|
||||
# #time.sleep(3)
|
||||
#
|
||||
#
|
||||
# trcssn=0
|
||||
# with open( f_trc_lst.name,'r') as f:
|
||||
# for line in f:
|
||||
@ -143,11 +144,11 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# break
|
||||
# # Result: `trcssn` is ID of active trace session. Now we have to terminate it:
|
||||
# #.............................................................................
|
||||
#
|
||||
#
|
||||
# #sql_cmd="insert into extdecimal(dec34_34) values (1)"
|
||||
#
|
||||
#
|
||||
# sql_cmd='select mon$database_name from mon$database'
|
||||
#
|
||||
#
|
||||
# con1=fdb.connect(dsn = 'localhost:' + test_fdb)
|
||||
# cur=con1.cursor()
|
||||
# try:
|
||||
@ -159,42 +160,42 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# print('CLIENT GOT ERROR:',i)
|
||||
# finally:
|
||||
# cur.close()
|
||||
#
|
||||
#
|
||||
# con1.close()
|
||||
# #.............................................................................
|
||||
#
|
||||
#
|
||||
# time.sleep(1)
|
||||
#
|
||||
#
|
||||
# # ####################################################
|
||||
# # S E N D R E Q U E S T T R A C E T O S T O P
|
||||
# # ####################################################
|
||||
# if trcssn>0:
|
||||
# fn_nul = open(os.devnull, 'w')
|
||||
# #f_trc_log=open( os.path.join(context['temp_directory'],'tmp_trace_5907.log'), "w")
|
||||
# subprocess.call( [ context['fbsvcmgr_path'],
|
||||
# subprocess.call( [ context['fbsvcmgr_path'],
|
||||
# 'localhost:service_mgr',
|
||||
# 'action_trace_stop','trc_id', trcssn
|
||||
# ],
|
||||
# ],
|
||||
# stdout=fn_nul
|
||||
# )
|
||||
# fn_nul.close()
|
||||
# # DO NOT REMOVE THIS LINE:
|
||||
# time.sleep(1)
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# p_trace.terminate()
|
||||
#
|
||||
#
|
||||
# flush_and_close( f_trc_log )
|
||||
# flush_and_close( f_trc_err )
|
||||
#
|
||||
#
|
||||
# # 1. Trace STDERR log should be EMPTY:
|
||||
# ######################################
|
||||
#
|
||||
#
|
||||
# # Example of STDERR when a wrong database name pattern is specified:
|
||||
# # Trace session ID 11 started
|
||||
# # Error creating trace session for database "":
|
||||
# # Passed text: illegal line <database=(%[\\/]({core_5907.[[:DIGIT:]]{2}}).tmp)>
|
||||
#
|
||||
#
|
||||
# f_list = ( f_trc_err, )
|
||||
# for i in range(len(f_list)):
|
||||
# f_name=f_list[i].name
|
||||
@ -202,7 +203,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# with open( f_name,'r') as f:
|
||||
# for line in f:
|
||||
# print("Unexpected STDERR, file "+f_name+": "+line)
|
||||
#
|
||||
#
|
||||
# # 2. Trace STDOUT log must contain one ATTACH and one DETACH events, e.g:
|
||||
# #########################################################################
|
||||
# # 2018-09-26T09:42:26.7340 (508:02122400) ATTACH_DATABASE
|
||||
@ -211,7 +212,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# # 2018-09-26T09:42:26.7500 (508:02122400) DETACH_DATABASE
|
||||
# # C:\\MIX\\FIREBIRD\\QA\\FBT-REPO\\TMP\\{CORE_5907.97}.TMP (ATT_10, SYSDBA:NONE, NONE, TCPv4:127.0.0.1/4159)
|
||||
# # C:\\Python27\\python.exe:2080
|
||||
#
|
||||
#
|
||||
# msg='Found expected '
|
||||
# with open( f_trc_log.name,'r') as f:
|
||||
# for line in f:
|
||||
@ -219,26 +220,46 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# print( msg + 'ATTACH.')
|
||||
# if 'DETACH_DATABASE' in line:
|
||||
# print( msg + 'DETACH.')
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# # CLEANUP
|
||||
# #########
|
||||
# time.sleep(1)
|
||||
# cleanup( (f_trc_cfg, f_trc_log, f_trc_err, f_trc_lst, test_fdb) )
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
expected_stdout_1 = """
|
||||
{CORE_5907.97}.TMP
|
||||
{CORE_5907.97}.FDB
|
||||
Found expected ATTACH.
|
||||
Found expected DETACH.
|
||||
"""
|
||||
"""
|
||||
|
||||
trace_conf = ['database=(%[\\\\/](\\{{core_5907.[[:DIGIT:]]{{2}}\\}}).fdb)',
|
||||
'{',
|
||||
'enabled = true',
|
||||
'time_threshold = 0',
|
||||
'log_connections = true',
|
||||
'log_initfini = false',
|
||||
'}'
|
||||
]
|
||||
|
||||
@pytest.mark.version('>=4.0')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
|
||||
|
||||
def test_1(act_1: Action, capsys):
|
||||
with act_1.trace(config=trace_conf):
|
||||
act_1.isql(switches=[],
|
||||
input='set list on;select mon$database_name from mon$database;')
|
||||
print(act_1.stdout)
|
||||
#
|
||||
for line in act_1.trace_log:
|
||||
if 'ATTACH_DATABASE' in line:
|
||||
print('Found expected ATTACH.')
|
||||
if 'DETACH_DATABASE' in line:
|
||||
print('Found expected DETACH.')
|
||||
# Check
|
||||
act_1.expected_stdout = expected_stdout_1
|
||||
act_1.stdout = capsys.readouterr().out
|
||||
assert act_1.clean_stdout == act_1.clean_expected_stdout
|
||||
|
@ -2,24 +2,26 @@
|
||||
#
|
||||
# id: bugs.core_5908
|
||||
# title: Enhance dynamic libraries loading related error messages
|
||||
# decription:
|
||||
# decription:
|
||||
# We intentionally try to load unit from non-existent UDR module with name "udrcpp_foo".
|
||||
# Message 'module not found' issued BEFORE fix - without any detalization.
|
||||
# Message 'module not found' issued BEFORE fix - without any details.
|
||||
# Current output should contain phrase: 'UDR module not loaded'.
|
||||
# Filtering is used to prevent output of the localized message about the missing UDR library.
|
||||
#
|
||||
#
|
||||
# Checked on:
|
||||
# 3.0.4.33053: OK, 13.968s.
|
||||
# 4.0.0.1210: OK, 2.375s.
|
||||
# Thanks to Alex for suggestion about test implementation.
|
||||
#
|
||||
#
|
||||
# tracker_id: CORE-5908
|
||||
# min_versions: ['3.0.4']
|
||||
# versions: 3.0.4
|
||||
# qmid: None
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
import re
|
||||
from firebird.qa import db_factory, python_act, Action
|
||||
from firebird.driver import DatabaseError
|
||||
|
||||
# version: 3.0.4
|
||||
# resources: None
|
||||
@ -33,7 +35,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# test_script_1
|
||||
#---
|
||||
# import re
|
||||
#
|
||||
#
|
||||
# udr_sp_ddl='''
|
||||
# create or alter procedure gen_foo2 (
|
||||
# start_n integer not null,
|
||||
@ -42,11 +44,11 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# external name 'udrcpp_foo!gen_rows'
|
||||
# engine udr
|
||||
# '''
|
||||
#
|
||||
#
|
||||
# allowed_patterns = (
|
||||
# re.compile('\\.*module\\s+not\\s+(found|loaded)\\.*', re.IGNORECASE),
|
||||
# )
|
||||
#
|
||||
#
|
||||
# try:
|
||||
# db_conn.execute_immediate( udr_sp_ddl )
|
||||
# db_conn.commit() # --------------------- this will fail with a message about the missing UDR library file.
|
||||
@ -61,18 +63,36 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# print( (' '.join(i.split()).upper()) )
|
||||
# finally:
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
expected_stdout_1 = """
|
||||
- UDR MODULE NOT LOADED
|
||||
"""
|
||||
UDR module not loaded
|
||||
"""
|
||||
|
||||
@pytest.mark.version('>=3.0.4')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
|
||||
|
||||
def test_1(act_1: Action, capsys):
|
||||
udr_ddl = """
|
||||
create or alter procedure gen_foo2 (
|
||||
start_n integer not null,
|
||||
end_n integer not null
|
||||
) returns( n integer not null )
|
||||
external name 'udrcpp_foo!gen_rows'
|
||||
engine udr
|
||||
"""
|
||||
pattern = re.compile('\\.*module\\s+not\\s+(found|loaded)\\.*', re.IGNORECASE)
|
||||
with act_1.db.connect() as con:
|
||||
try:
|
||||
con.execute_immediate(udr_ddl)
|
||||
con.commit()
|
||||
except DatabaseError as e:
|
||||
for line in str(e).splitlines():
|
||||
if pattern.search(line):
|
||||
print(line)
|
||||
# Check
|
||||
act_1.expected_stdout = expected_stdout_1
|
||||
act_1.stdout = capsys.readouterr().out
|
||||
assert act_1.clean_stdout == act_1.clean_expected_stdout
|
||||
|
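An equivalent, more compact way to express the same check, shown only as a sketch and not as the committed test; it relies on pytest.raises and the same DatabaseError import used above.

import re
import pytest
from firebird.driver import DatabaseError

def assert_udr_module_error(con, udr_ddl: str):
    # the old fbtest comment notes that the failure surfaces on commit, hence both calls inside the block
    with pytest.raises(DatabaseError, match=r'(?i)module\s+not\s+(found|loaded)'):
        con.execute_immediate(udr_ddl)
        con.commit()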
@ -2,31 +2,32 @@
|
||||
#
|
||||
# id: bugs.core_5926
|
||||
# title: Attempt to create mapping with non-ascii user name which is encoded in SINGLE-BYTE codepage leads to '-Malformed string'
|
||||
# decription:
|
||||
# decription:
|
||||
# Reproduced 'malformed string' error on: 3.0.4.33053, 4.0.0.1172.
|
||||
#
|
||||
#
|
||||
# 03-mar-2021. Re-implemented in order to have ability to run this test on Linux.
|
||||
# Test encodes to UTF8 all needed statements (SET NAMES; CONNECT; DDL and DML) and stores this text in .sql file.
|
||||
# NOTE: 'SET NAMES' contain character set that must be used for reproducing problem (WIN1252 in this test).
|
||||
# Then ISQL is launched in separate (child) process which performs all necessary actions (using required charset).
|
||||
# Result will be redirected to log(s) which will be opened further via codecs.open(...encoding='cp1252').
|
||||
# Finally, its content will be converted to UTF8 for showing in expected_stdout.
|
||||
#
|
||||
#
|
||||
# NB: different data are used for FB 3.x and 4.x because DDL in 4.x allows names with length up to 63 characters.
|
||||
# See variables 'mapping_name' and 'non_ascii_user_name'.
|
||||
# FB 3.x restricts max_length of DB object name with value = 31 (bytes, not character!).
|
||||
#
|
||||
#
|
||||
# Checked on:
|
||||
# * Windows: 4.0.0.2377, 3.0.8.33420
|
||||
# * Linux: 4.0.0.2377, 3.0.8.33415
|
||||
#
|
||||
#
|
||||
# tracker_id: CORE-5926
|
||||
# min_versions: ['3.0.4']
|
||||
# versions: 3.0.4
|
||||
# qmid: None
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
from pathlib import Path
|
||||
from firebird.qa import db_factory, python_act, Action, temp_file
|
||||
|
||||
# version: 3.0.4
|
||||
# resources: None
|
||||
@ -39,14 +40,14 @@ db_1 = db_factory(charset='WIN1252', sql_dialect=3, init=init_script_1)
|
||||
|
||||
# test_script_1
|
||||
#---
|
||||
#
|
||||
#
|
||||
# import os
|
||||
# import codecs
|
||||
# import subprocess
|
||||
# import time
|
||||
# engine = db_conn.engine_version
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
# # 03.03.2021 REMOVING OS-VARIABLE ISC_USER IS MANDATORY HERE!
|
||||
# # This variable could be set by other .fbts which was performed before current within batch mode (i.e. when fbt_run is called from <rundaily>)
|
||||
# # NB: os.unsetenv('ISC_USER') actually does NOT affect on content of os.environ dictionary, see: https://docs.python.org/2/library/os.html
|
||||
@ -55,25 +56,25 @@ db_1 = db_factory(charset='WIN1252', sql_dialect=3, init=init_script_1)
|
||||
# del os.environ["ISC_USER"]
|
||||
# except KeyError as e:
|
||||
# pass
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def flush_and_close( file_handle ):
|
||||
# # https://docs.python.org/2/library/os.html#os.fsync
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
|
||||
# global os
|
||||
#
|
||||
#
|
||||
# file_handle.flush()
|
||||
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
|
||||
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
|
||||
# os.fsync(file_handle.fileno())
|
||||
# file_handle.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def cleanup( f_names_list ):
|
||||
# global os
|
||||
# for i in range(len( f_names_list )):
|
||||
@ -84,12 +85,12 @@ db_1 = db_factory(charset='WIN1252', sql_dialect=3, init=init_script_1)
|
||||
# else:
|
||||
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
|
||||
# del_name = None
|
||||
#
|
||||
#
|
||||
# if del_name and os.path.isfile( del_name ):
|
||||
# os.remove( del_name )
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# if engine < 4:
|
||||
# # Maximal length of user name in FB 3.x is 31 (charset unicode_fss).
|
||||
# #mapping_name = 'áâãäåæçèéêëìíîïðñòóôõö÷øùúûüýþÿ'
|
||||
@ -103,27 +104,27 @@ db_1 = db_factory(charset='WIN1252', sql_dialect=3, init=init_script_1)
|
||||
# mapping_name = 'áâãäåæçèéêëìíîïðñòóôõö÷øùúûüýþÿáâãäåæçèéêëìíîïðñòóôõö÷øùúûüýþÿŒ'
|
||||
# non_ascii_user_name = 'ÉÊËÌÍÎÏÐÑÒÓÔÕÖ×ØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõö÷øùúûüýþÿŒœŠšŸŽžƒ'
|
||||
# ascii_only_user_name = 'ABCDEFGHIJKLMNOPQRSTUWYXYZ12345ABCDEFGHIJKLMNOPQRSTUWYXYZ123456'
|
||||
#
|
||||
#
|
||||
# # plugin_for_mapping = 'win_sspi'
|
||||
# plugin_for_mapping = 'Srp'
|
||||
#
|
||||
#
|
||||
# sql_txt=''' set bail on;
|
||||
# set names win1252;
|
||||
# connect '%(dsn)s' user '%(user_name)s' password '%(user_password)s';
|
||||
#
|
||||
# create or alter mapping "%(mapping_name)s" using plugin %(plugin_for_mapping)s from user '%(non_ascii_user_name)s' to user "%(ascii_only_user_name)s";
|
||||
#
|
||||
# create or alter mapping "%(mapping_name)s" using plugin %(plugin_for_mapping)s from user '%(non_ascii_user_name)s' to user "%(ascii_only_user_name)s";
|
||||
# commit;
|
||||
# -- show mapping;
|
||||
# set count on;
|
||||
# set list on;
|
||||
# select
|
||||
# select
|
||||
# rdb$map_using
|
||||
# ,rdb$map_db
|
||||
# ,rdb$map_from_type
|
||||
# ,rdb$map_to_type
|
||||
# -- ,rdb$map_plugin
|
||||
# -- 03.03.2021: do NOT show because it differs for FB 3.x and 4.x: ,rdb$map_from
|
||||
# -- 03.03.2021: do NOT show because it differs for FB 3.x and 4.x: ,rdb$map_to
|
||||
# -- 03.03.2021: do NOT show because it differs for FB 3.x and 4.x: ,rdb$map_to
|
||||
# from rdb$auth_mapping
|
||||
# where
|
||||
# upper(rdb$map_name) = upper('%(mapping_name)s')
|
||||
@ -133,34 +134,35 @@ db_1 = db_factory(charset='WIN1252', sql_dialect=3, init=init_script_1)
|
||||
# ;
|
||||
# commit;
|
||||
# ''' % dict(globals(), **locals())
|
||||
#
|
||||
#
|
||||
# f_run_sql = open( os.path.join(context['temp_directory'], 'tmp_5926_win1252.sql'), 'w' )
|
||||
# f_run_sql.write( sql_txt.decode('utf8').encode('cp1252') )
|
||||
# flush_and_close( f_run_sql )
|
||||
#
|
||||
#
|
||||
# # result: file tmp_5926_win1252.sql is encoded in win1252
|
||||
#
|
||||
#
|
||||
# f_run_log = open( os.path.splitext(f_run_sql.name)[0]+'.log', 'w')
|
||||
# subprocess.call( [ context['isql_path'], '-q', '-i', f_run_sql.name ],
|
||||
# stdout = f_run_log,
|
||||
# stderr = subprocess.STDOUT
|
||||
# )
|
||||
# flush_and_close( f_run_log ) # result: output will be encoded in win1252
|
||||
#
|
||||
#
|
||||
# with codecs.open(f_run_log.name, 'r', encoding='cp1252' ) as f:
|
||||
# result_in_win1252 = f.readlines()
|
||||
#
|
||||
#
|
||||
# for i in result_in_win1252:
|
||||
# print( i.encode('utf8') )
|
||||
#
|
||||
#
|
||||
# # cleanup:
|
||||
# ###########
|
||||
# cleanup( (f_run_sql, f_run_log) )
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
expected_stdout_1 = """
|
||||
RDB$MAP_USING P
|
||||
@ -169,11 +171,50 @@ expected_stdout_1 = """
|
||||
RDB$MAP_TO_TYPE 0
|
||||
|
||||
Records affected: 1
|
||||
"""
|
||||
"""
|
||||
|
||||
test_script = temp_file('test_script.sql')
|
||||
|
||||
@pytest.mark.version('>=3.0.4')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
|
||||
|
||||
def test_1(act_1: Action, test_script: Path):
|
||||
if act_1.is_version('<4'):
|
||||
# Maximal length of user name in FB 3.x is 31 (charset unicode_fss).
|
||||
#mapping_name = 'áâãäåæçèéêëìíîïðñòóôõö÷øùúûüýþÿ'
|
||||
mapping_name = 'áâãäåæçèéêëìíîï1'
|
||||
# mapping_name = 'áâãäåæçèéêëìíîïð'
|
||||
non_ascii_user_name = 'ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖ×ØÙÚÛÜÝÞ'
|
||||
ascii_only_user_name = 'ABCDEFGHIJKLMNOPQRSTUWYXYZ12345'
|
||||
else:
|
||||
# Maximal length of user name in FB 4.x is 63 (charset utf8).
|
||||
#
|
||||
mapping_name = 'áâãäåæçèéêëìíîïðñòóôõö÷øùúûüýþÿáâãäåæçèéêëìíîïðñòóôõö÷øùúûüýþÿŒ'
|
||||
non_ascii_user_name = 'ÉÊËÌÍÎÏÐÑÒÓÔÕÖ×ØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõö÷øùúûüýþÿŒœŠšŸŽžƒ'
|
||||
ascii_only_user_name = 'ABCDEFGHIJKLMNOPQRSTUWYXYZ12345ABCDEFGHIJKLMNOPQRSTUWYXYZ123456'
|
||||
#
|
||||
plugin_for_mapping = 'Srp'
|
||||
test_script.write_text(f"""
|
||||
create or alter mapping "{mapping_name}" using plugin {plugin_for_mapping} from user '{non_ascii_user_name}' to user "{ascii_only_user_name}";
|
||||
commit;
|
||||
-- show mapping;
|
||||
set count on;
|
||||
set list on;
|
||||
select
|
||||
rdb$map_using
|
||||
,rdb$map_db
|
||||
,rdb$map_from_type
|
||||
,rdb$map_to_type
|
||||
-- ,rdb$map_plugin
|
||||
-- 03.03.2021: do NOT show because it differs for FB 3.x and 4.x: ,rdb$map_from
|
||||
-- 03.03.2021: do NOT show because it differs for FB 3.x and 4.x: ,rdb$map_to
|
||||
from rdb$auth_mapping
|
||||
where
|
||||
upper(rdb$map_name) = upper('{mapping_name}')
|
||||
and rdb$map_plugin = upper('{plugin_for_mapping}')
|
||||
and rdb$map_from = '{non_ascii_user_name}'
|
||||
and rdb$map_to containing '{ascii_only_user_name}'
|
||||
;
|
||||
commit;
|
||||
""", encoding='cp1252')
|
||||
act_1.expected_stdout = expected_stdout_1
|
||||
act_1.isql(switches=['-b', '-q'], input_file=test_script, charset='WIN1252')
|
||||
assert act_1.clean_stdout == act_1.clean_expected_stdout
|
||||
|
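A quick aside on the byte limit mentioned in the description above (31 bytes in FB 3.x, not 31 characters): the encoding decides how many characters fit, which is why the test picks different names per major version. Plain Python, not part of the test:

name = 'ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖ×ØÙÚÛÜÝÞ'   # 31 characters, the value the test uses for FB 3.x
print(len(name))                           # 31 characters
print(len(name.encode('cp1252')))          # 31 bytes when encoded as WIN1252
print(len(name.encode('utf-8')))           # 62 bytes when encoded as UTF-8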
@ -2,7 +2,7 @@
|
||||
#
|
||||
# id: bugs.core_5935
|
||||
# title: Bugcheck 165 (cannot find tip page) // Classic and SuperClassic only
|
||||
# decription:
|
||||
# decription:
|
||||
# ::: NB :::
|
||||
# Bug can be reproduced only when the FIRST of the ISQL sessions is launched with the '-n' switch.
|
||||
# Second ISQL must be started *WITHOUT* this switch!
|
||||
@ -11,24 +11,25 @@
|
||||
# Because of this, we use here two transactions for the second connection and, furthermore, we use
|
||||
# the same isolation levels for them, namely: SNAPSHOT for DML and READ COMMITTED for DDL.
|
||||
# This is done by using custom TPB objects with appropriate properties - see 'dml_tpb' and 'ddl_tpb'.
|
||||
#
|
||||
#
|
||||
# Database forced writes is changed here to OFF in order to make execution faster.
|
||||
#
|
||||
#
|
||||
# Confirmed bug on 3.0.4.32972 (build date: 11-may-2018), got:
|
||||
# SQLCODE: -902 / - ... consistency check (can't continue after bugcheck) / -902 / 335544333
|
||||
# firebird.log will contain after this:
|
||||
# SQLCODE: -902 / - ... consistency check (can't continue after bugcheck) / -902 / 335544333
|
||||
# firebird.log will contain after this:
|
||||
# internal Firebird consistency check (cannot find tip page (165), file: tra.cpp line: 2331)
|
||||
#
|
||||
#
|
||||
# Checked on 3.0.5.33084 -- all OK.
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# tracker_id: CORE-5935
|
||||
# min_versions: ['3.0.5']
|
||||
# versions: 3.0.5
|
||||
# qmid: None
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
from firebird.qa import db_factory, python_act, Action
|
||||
from firebird.driver import tpb, Isolation
|
||||
|
||||
# version: 3.0.5
|
||||
# resources: None
|
||||
@ -41,41 +42,41 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
|
||||
# test_script_1
|
||||
#---
|
||||
#
|
||||
#
|
||||
# import os
|
||||
# import sys
|
||||
# from fdb import services
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
#
|
||||
#
|
||||
# svc = fdb.services.connect(host = 'localhost:service_mgr')
|
||||
# svc.set_write_mode(database = db_conn.database_name, mode = services.WRITE_BUFFERED)
|
||||
# svc.close()
|
||||
#
|
||||
#
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
# #FB_CLNT=sys.argv[1]
|
||||
# #DB_NAME='localhost:e30'
|
||||
#
|
||||
#
|
||||
# dml_tpb = fdb.TPB()
|
||||
# dml_tpb.lock_resolution = fdb.isc_tpb_wait
|
||||
# dml_tpb.isolation_level = fdb.isc_tpb_concurrency
|
||||
#
|
||||
#
|
||||
# ddl_tpb = fdb.TPB() # READ_COMMITTED | NO_REC_VERSION | WAIT | READ_WRITE)
|
||||
# ddl_tpb.lock_resolution = fdb.isc_tpb_wait
|
||||
# ddl_tpb.isolation_level = (fdb.isc_tpb_read_committed, fdb.isc_tpb_no_rec_version)
|
||||
#
|
||||
#
|
||||
# con1 = fdb.connect(dsn = dsn) # DB_NAME, fb_library_name = FB_CLNT )
|
||||
#
|
||||
#
|
||||
# con1.execute_immediate('recreate table a (id int)')
|
||||
# con1.commit()
|
||||
#
|
||||
#
|
||||
# #---------------------------------------------------------
|
||||
#
|
||||
#
|
||||
# con1.execute_immediate('create index idx_a on a(id)')
|
||||
# con1.commit()
|
||||
#
|
||||
#
|
||||
# sql='''
|
||||
# create or alter procedure p_gen_tx(n int) as
|
||||
# declare i int = 0;
|
||||
@ -85,55 +86,55 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# i = i + 1;
|
||||
# end
|
||||
# '''
|
||||
#
|
||||
#
|
||||
# con1.execute_immediate(sql)
|
||||
# con1.commit()
|
||||
# con1.close()
|
||||
#
|
||||
#
|
||||
# #----------------------------------------------------------
|
||||
#
|
||||
#
|
||||
# con1 = fdb.connect(dsn = dsn) # DB_NAME, fb_library_name = FB_CLNT )
|
||||
#
|
||||
#
|
||||
# tx1a = con1.trans( default_tpb = dml_tpb )
|
||||
# tx1a.begin()
|
||||
#
|
||||
#
|
||||
# cur1 = tx1a.cursor()
|
||||
# cur1.execute('delete from a')
|
||||
# tx1a.commit()
|
||||
#
|
||||
#
|
||||
# tx1a.begin()
|
||||
# cur1.execute("select current_transaction, rdb$get_context('SYSTEM', 'ISOLATION_LEVEL') from rdb$database")
|
||||
# for r in cur1:
|
||||
# pass
|
||||
#
|
||||
#
|
||||
# # ---------------------------------------------------------------
|
||||
#
|
||||
#
|
||||
# con2 = fdb.connect(dsn = dsn) # DB_NAME, fb_library_name = FB_CLNT )
|
||||
#
|
||||
#
|
||||
# tx2a = con2.trans( default_tpb = dml_tpb )
|
||||
# tx2b = con2.trans( default_tpb = ddl_tpb )
|
||||
#
|
||||
#
|
||||
# tx2a.begin()
|
||||
# tx2b.begin()
|
||||
#
|
||||
#
|
||||
# cur2 = tx2a.cursor()
|
||||
# cur2.callproc('p_gen_tx', (33000,) )
|
||||
# tx2a.commit()
|
||||
# tx2b.commit()
|
||||
#
|
||||
#
|
||||
# tx2a.begin()
|
||||
# tx2b.begin()
|
||||
#
|
||||
#
|
||||
# cur2.execute('insert into a(id) values(?)', (tx2a.transaction_id,) )
|
||||
# tx2a.commit()
|
||||
# tx2b.commit()
|
||||
#
|
||||
#
|
||||
# tx2a.begin()
|
||||
# tx2b.begin()
|
||||
# cur2.execute('set statistics index idx_a')
|
||||
# tx2a.commit()
|
||||
# tx2b.commit()
|
||||
#
|
||||
#
|
||||
# tx2a.begin()
|
||||
# tx2b.begin()
|
||||
# cur2.execute('select rdb$index_name, rdb$record_version from rdb$indices where rdb$relation_name = ?', ('A',) )
|
||||
@ -144,54 +145,144 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# pass
|
||||
# tx2a.commit()
|
||||
# tx2b.commit()
|
||||
#
|
||||
#
|
||||
# tx2a.begin()
|
||||
# tx2b.begin()
|
||||
# cur2.callproc('p_gen_tx', (33000,) )
|
||||
# tx2a.commit()
|
||||
# tx2b.commit()
|
||||
#
|
||||
#
|
||||
# # -----------------------------------------------------------------
|
||||
#
|
||||
#
|
||||
# tx1a.commit()
|
||||
#
|
||||
#
|
||||
# # -----------------------------------------------------------------
|
||||
#
|
||||
#
|
||||
# tx2a.begin()
|
||||
# tx2b.begin()
|
||||
# cur2.execute('select id from a where id > ?', (0,))
|
||||
# for r in cur2:
|
||||
# pass
|
||||
#
|
||||
#
|
||||
# # -----------------------------------------------------------------
|
||||
#
|
||||
#
|
||||
# tx1a.begin()
|
||||
# cur1.execute('select id from a where id > ?', (0,))
|
||||
# for r in cur1:
|
||||
# pass
|
||||
#
|
||||
#
|
||||
# cur1.close()
|
||||
# tx1a.rollback()
|
||||
# con1.close()
|
||||
#
|
||||
#
|
||||
# cur2.close()
|
||||
# tx2a.rollback()
|
||||
# tx2b.rollback()
|
||||
# con2.close()
|
||||
#
|
||||
#
|
||||
# print('Passed.')
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
expected_stdout_1 = """
|
||||
Passed.
|
||||
"""
|
||||
|
||||
@pytest.mark.version('>=3.0.5')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
|
||||
|
||||
def test_1(act_1: Action):
|
||||
act_1.db.set_async_write()
|
||||
# CONCURRENCY | WAIT | READ_WRITE
|
||||
dml_tpb = tpb(isolation=Isolation.CONCURRENCY)
|
||||
# READ_COMMITTED | NO_REC_VERSION | WAIT | READ_WRITE
|
||||
ddl_tpb = tpb(isolation=Isolation.READ_COMMITTED_NO_RECORD_VERSION)
|
||||
#
|
||||
with act_1.db.connect() as con:
|
||||
con.execute_immediate('recreate table a (id int)')
|
||||
con.commit()
|
||||
con.execute_immediate('create index idx_a on a(id)')
|
||||
con.commit()
|
||||
sql = """
|
||||
create or alter procedure p_gen_tx(n int) as
|
||||
declare i int = 0;
|
||||
begin
|
||||
while (i < n) do
|
||||
in autonomous transaction do
|
||||
i = i + 1;
|
||||
end
|
||||
"""
|
||||
con.execute_immediate(sql)
|
||||
con.commit()
|
||||
# Test
|
||||
con = act_1.db.connect()
|
||||
tx1a = con.transaction_manager(dml_tpb)
|
||||
tx1a.begin()
|
||||
cur1 = tx1a.cursor()
|
||||
cur1.execute('delete from a')
|
||||
tx1a.commit()
|
||||
#
|
||||
tx1a.begin()
|
||||
cur1.execute("select current_transaction, rdb$get_context('SYSTEM', 'ISOLATION_LEVEL') from rdb$database")
|
||||
cur1.fetchall()
|
||||
# ---
|
||||
con2 = act_1.db.connect()
|
||||
tx2a = con2.transaction_manager(dml_tpb)
|
||||
tx2b = con2.transaction_manager(ddl_tpb)
|
||||
#
|
||||
tx2a.begin()
|
||||
tx2b.begin()
|
||||
cur2 = tx2a.cursor()
|
||||
cur2.callproc('p_gen_tx', [33000])
|
||||
tx2a.commit()
|
||||
tx2b.commit()
|
||||
#
|
||||
tx2a.begin()
|
||||
tx2b.begin()
|
||||
cur2.execute('insert into a (id) values (?)', [tx2a.info.id])
|
||||
tx2a.commit()
|
||||
tx2b.commit()
|
||||
#
|
||||
tx2a.begin()
|
||||
tx2b.begin()
|
||||
cur2.execute('set statistics index idx_a')
|
||||
tx2a.commit()
|
||||
tx2b.commit()
|
||||
#
|
||||
tx2a.begin()
|
||||
tx2b.begin()
|
||||
cur2.execute('select rdb$index_name, rdb$record_version from rdb$indices where rdb$relation_name = ?', ['A'])
|
||||
cur2.fetchall()
|
||||
cur2.execute('select id from a where id > ?', [0])
|
||||
cur2.fetchall()
|
||||
tx2a.commit()
|
||||
tx2b.commit()
|
||||
#
|
||||
tx2a.begin()
|
||||
tx2b.begin()
|
||||
cur2 = tx2a.cursor()
|
||||
cur2.callproc('p_gen_tx', [33000])
|
||||
tx2a.commit()
|
||||
tx2b.commit()
|
||||
# ---
|
||||
tx1a.commit()
|
||||
# ---
|
||||
tx2a.begin()
|
||||
tx2b.begin()
|
||||
cur2.execute('select id from a where id > ?', [0])
|
||||
cur2.fetchall()
|
||||
# ---
|
||||
tx1a.begin()
|
||||
cur1.execute('select id from a where id > ?', [0])
|
||||
cur1.fetchall()
|
||||
#
|
||||
cur1.close()
|
||||
tx1a.rollback()
|
||||
con.close()
|
||||
#
|
||||
cur2.close()
|
||||
tx2a.rollback()
|
||||
tx2b.rollback()
|
||||
con2.close()
|
||||
# Passed
|
||||
|
@ -2,26 +2,26 @@
|
||||
#
|
||||
# id: bugs.core_5936
|
||||
# title: Firebird crashes, related to Bugcheck 165 (cannot find tip page)
|
||||
# decription:
|
||||
# decription:
|
||||
# NB. Ticket title: "Firebird server segfaults in the end of database backup" - has nothing to do with the actual reason for the segfault.
|
||||
# Confirmed crash on:
|
||||
# * 2.5.8.27089 SuperClassic
|
||||
# * 2.5.9.27117 Classic and SuperClassic (build date: 29-sep-2018 - is earlier than fix: 08-oct-2018)
|
||||
#
|
||||
#
|
||||
# Got in firebird.log:
|
||||
# Access violation.
|
||||
# The code attempted to access a virtual
|
||||
# address without privilege to do so.
|
||||
# Operating system call ReleaseSemaphore failed. Error code 6
|
||||
#
|
||||
#
|
||||
# NOTE-1: custom transaction TPB required for this ticket: fdb.isc_tpb_concurrency, fdb.isc_tpb_wait
|
||||
#
|
||||
#
|
||||
# NOTE-2: current title of ticket ("Firebird server segfaults in the end of database backup") has no relation to backup action.
|
||||
# I left here only first two words from it :-)
|
||||
#
|
||||
#
|
||||
# Bug was fixed by one-line change in FB source, see:
|
||||
# https://github.com/FirebirdSQL/firebird/commit/676a52625c074ef15e197e7b7538755195a66905
|
||||
#
|
||||
#
|
||||
# Checked on:
|
||||
# 2.5.9.27119 SS: OK, 0.858s.
|
||||
# 2.5.9.27129 CS/SC: OK, 15...19s
|
||||
@ -29,18 +29,19 @@
|
||||
# 3.0.2.32658: OK, 3.309s.
|
||||
# 4.0.0.1501: OK, 5.602s.
|
||||
# 4.0.0.1421: OK, 6.920s.
|
||||
#
|
||||
#
|
||||
# 15.04.2021. Adapted for run both on Windows and Linux. Checked on:
|
||||
# Windows: 4.0.0.2416
|
||||
# Linux: 4.0.0.2416
|
||||
#
|
||||
#
|
||||
# tracker_id: CORE-5936
|
||||
# min_versions: ['2.5.9']
|
||||
# versions: 2.5.9
|
||||
# qmid: None
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
from firebird.qa import db_factory, python_act, Action
|
||||
from firebird.driver import tpb, Isolation
|
||||
|
||||
# version: 2.5.9
|
||||
# resources: None
|
||||
@ -53,28 +54,28 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
|
||||
# test_script_1
|
||||
#---
|
||||
#
|
||||
#
|
||||
# import os
|
||||
# import subprocess
|
||||
# import fdb
|
||||
# from fdb import services
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
#
|
||||
#
|
||||
# GEN_ROWS=17000 # ----- minimal value needed for making FB to crash
|
||||
#
|
||||
#
|
||||
# THIS_FDB=db_conn.database_name
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# svc = services.connect(host = 'localhost', user = user_name, password = user_password)
|
||||
#
|
||||
#
|
||||
# # Disable Forced Writes:
|
||||
# ########################
|
||||
# svc.set_write_mode( THIS_FDB, services.WRITE_BUFFERED)
|
||||
# svc.close()
|
||||
#
|
||||
#
|
||||
# ddl_script='''
|
||||
# create table a (id int);
|
||||
# create index idx_a on a computed by (id);
|
||||
@ -90,85 +91,138 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# set term ;^
|
||||
# commit;
|
||||
# '''
|
||||
#
|
||||
#
|
||||
# f_ddl_script = open( os.path.join(context['temp_directory'],'tmp_5936_ddl.sql'), 'w')
|
||||
# f_ddl_script.write( ddl_script )
|
||||
# f_ddl_script.close()
|
||||
#
|
||||
#
|
||||
# subprocess.call( [context['isql_path'], dsn, '-i', f_ddl_script.name ] )
|
||||
#
|
||||
#
|
||||
# os.remove( f_ddl_script.name )
|
||||
#
|
||||
#
|
||||
# #----------------------------------------------------
|
||||
#
|
||||
#
|
||||
# con1 = fdb.connect( dsn = dsn )
|
||||
#
|
||||
#
|
||||
# custom_tpb = fdb.TPB()
|
||||
# custom_tpb.isolation_level = fdb.isc_tpb_concurrency
|
||||
# custom_tpb.lock_resolution = fdb.isc_tpb_wait
|
||||
#
|
||||
#
|
||||
# tx1 = con1.trans( default_tpb = custom_tpb )
|
||||
# tx1.begin()
|
||||
#
|
||||
#
|
||||
# cur1 = tx1.cursor()
|
||||
# cur1.execute( "select current_transaction, rdb$get_context('SYSTEM', 'ISOLATION_LEVEL') from rdb$database" )
|
||||
# for r in cur1:
|
||||
# pass
|
||||
#
|
||||
#
|
||||
# #-------------------------------------------
|
||||
#
|
||||
#
|
||||
# con2 = fdb.connect( dsn = dsn )
|
||||
# tx2 = con2.trans( default_tpb = custom_tpb )
|
||||
# tx2.begin()
|
||||
# cur2 = tx2.cursor()
|
||||
# cur2.callproc( 'p_gen_tx', (GEN_ROWS,) )
|
||||
# tx2.commit()
|
||||
#
|
||||
#
|
||||
# tx2.begin()
|
||||
# cur2.execute( 'insert into a values(current_transaction)' )
|
||||
# tx2.commit()
|
||||
#
|
||||
#
|
||||
# tx2.begin()
|
||||
# cur2.execute( 'set statistics index idx_a' )
|
||||
# tx2.commit()
|
||||
#
|
||||
#
|
||||
# tx2.begin()
|
||||
# cur2.execute( 'select * from a where id > 0')
|
||||
# for r in cur2:
|
||||
# pass
|
||||
#
|
||||
#
|
||||
# tx2.commit()
|
||||
#
|
||||
#
|
||||
# tx2.begin()
|
||||
# cur2.callproc( 'p_gen_tx', (GEN_ROWS,) )
|
||||
# tx2.commit()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# tx1.commit()
|
||||
# cur1.execute( 'select * from a where id > 0')
|
||||
# for r in cur1:
|
||||
# pass # ----------- WI-V2.5.8.27089 crashed here
|
||||
#
|
||||
#
|
||||
# print('Query completed.')
|
||||
# tx1.commit()
|
||||
#
|
||||
#
|
||||
# con1.close()
|
||||
# con2.close()
|
||||
# print('All fine.')
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
GEN_ROWS = 17000 # ----- minimal value needed for making FB to crash
|
||||
|
||||
expected_stdout_1 = """
|
||||
Query completed.
|
||||
All fine.
|
||||
"""
|
||||
"""
|
||||
|
||||
ddl_script = """
|
||||
create table a (id int);
|
||||
create index idx_a on a computed by (id);
|
||||
commit;
|
||||
set term ^;
|
||||
create procedure p_gen_tx(n int) as
|
||||
declare i int = 0;
|
||||
begin
|
||||
while (i < n) do
|
||||
in autonomous transaction do
|
||||
i = i + 1;
|
||||
end ^
|
||||
set term ;^
|
||||
commit;
|
||||
"""
|
||||
|
||||
@pytest.mark.version('>=2.5.9')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
|
||||
|
||||
def test_1(act_1: Action):
|
||||
act_1.db.set_async_write()
|
||||
act_1.isql(switches=[], input=ddl_script)
|
||||
custom_tpb = tpb(isolation=Isolation.CONCURRENCY)
|
||||
#
|
||||
with act_1.db.connect() as con1:
|
||||
tx1 = con1.transaction_manager(custom_tpb)
|
||||
tx1.begin()
|
||||
cur1 = tx1.cursor()
|
||||
cur1.execute( "select current_transaction, rdb$get_context('SYSTEM', 'ISOLATION_LEVEL') from rdb$database" )
|
||||
cur1.fetchall()
|
||||
with act_1.db.connect() as con2:
|
||||
tx2 = con2.transaction_manager(custom_tpb)
|
||||
tx2.begin()
|
||||
cur2 = tx2.cursor()
|
||||
cur2.callproc('p_gen_tx', [GEN_ROWS])
|
||||
tx2.commit()
|
||||
#
|
||||
tx2.begin()
|
||||
cur2.execute('insert into a values (current_transaction)')
|
||||
tx2.commit()
|
||||
#
|
||||
tx2.begin()
|
||||
cur2.execute('set statistics index idx_a')
|
||||
tx2.commit()
|
||||
#
|
||||
tx2.begin()
|
||||
cur2.execute('select * from a where id > 0')
|
||||
cur2.fetchall()
|
||||
tx2.commit()
|
||||
#
|
||||
tx2.begin()
|
||||
cur2.callproc('p_gen_tx', [GEN_ROWS])
|
||||
tx2.commit()
|
||||
# ---
|
||||
tx1.commit()
|
||||
cur1.execute('select * from a where id > 0')
|
||||
cur1.fetchall() # WI-V2.5.8.27089 crashed here
|
||||
tx1.commit()
|
||||
|
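# Illustration only (not part of the commit): expected_stdout_1 above is not asserted by
# test_1 itself. If the 'Query completed.' / 'All fine.' markers were wanted, the capsys
# pattern used by other tests in this commit could be applied; the helper name below is
# hypothetical.
def assert_final_markers(act_1: Action, capsys) -> None:
    # Emit the markers and compare the captured output with the expected block above.
    print('Query completed.')
    print('All fine.')
    act_1.expected_stdout = expected_stdout_1
    act_1.stdout = capsys.readouterr().out
    assert act_1.clean_stdout == act_1.clean_expected_stdout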
@ -2,7 +2,7 @@
|
||||
#
|
||||
# id: bugs.core_5939
|
||||
# title: Crash for "gbak -se -b database nul"
|
||||
# description:
|
||||
# description:
|
||||
# Bug can be reproduced on WI-V2.5.9.27117 Classic (snapshot date: 29-sep-2018).
|
||||
# All fine on WI-V2.5.9.27129.
|
||||
# Also checked on:
|
||||
@ -12,14 +12,15 @@
|
||||
# 30sS, build 3.0.5.33115
|
||||
# 30sC, build 3.0.2.32658
|
||||
# 30Cs, build 3.0.4.33054
|
||||
#
|
||||
#
|
||||
# tracker_id: CORE-5939
|
||||
# min_versions: ['2.5.9']
|
||||
# versions: 2.5.9
|
||||
# qmid: None
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
from pathlib import Path
|
||||
from firebird.qa import db_factory, python_act, Action, temp_file
|
||||
|
||||
# version: 2.5.9
|
||||
# resources: None
|
||||
@ -32,31 +33,33 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
|
||||
# test_script_1
|
||||
#---
|
||||
#
|
||||
#
|
||||
# import os
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
#
|
||||
#
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
# tmpfbk = 'tmp_core_5939.fbk'
|
||||
# tmpfbk='$(DATABASE_LOCATION)'+tmpfbk
|
||||
#
|
||||
#
|
||||
# runProgram('gbak',['-b', dsn, tmpfbk, '-se'])
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
expected_stderr_1 = """
|
||||
gbak: ERROR:service name parameter missing
|
||||
gbak:Exiting before completion due to errors
|
||||
"""
|
||||
"""
|
||||
|
||||
fbk_file = temp_file('tmp_core_5939.fbk')
|
||||
|
||||
@pytest.mark.version('>=2.5.9')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
|
||||
|
||||
def test_1(act_1: Action, fbk_file: Path):
|
||||
act_1.expected_stderr = expected_stderr_1
|
||||
act_1.gbak(switches=['-b', act_1.db.dsn, str(fbk_file), '-se'])
|
||||
assert act_1.clean_stderr == act_1.clean_expected_stderr
|
||||
|
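# For contrast (illustration only, not part of the commit): when '-se' is followed by an
# actual service name, the same backup is expected to complete without the error above.
# The switch order mirrors the gbak service calls used elsewhere in this commit; the
# service address 'localhost:service_mgr' is an assumption about the test environment.
act_1.reset()
act_1.gbak(switches=['-b', '-se', 'localhost:service_mgr', str(act_1.db.db_path), str(fbk_file)])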
@ -2,21 +2,22 @@
|
||||
#
|
||||
# id: bugs.core_5949
|
||||
# title: Bugcheck could happen when read-only database with non-zero linger is set to read-write mode
|
||||
# description:
|
||||
# description:
|
||||
# Confirmed bug on 3.0.4.33053, got message in firebird.log:
|
||||
# ===
|
||||
# Database: ...\\FPT-REPO\\TMP\\BUGS.CORE_5949.FDB
|
||||
# internal Firebird consistency check (next transaction older than oldest active transaction (266), file: cch.cpp line: 4830)
|
||||
# ===
|
||||
# ===
|
||||
# Checked on 3.0.5.33084, 4.0.0.1249, 4.0.0.1340 -- works fine.
|
||||
#
|
||||
#
|
||||
# tracker_id: CORE-5949
|
||||
# min_versions: ['3.0.5']
|
||||
# versions: 3.0.5
|
||||
# qmid: None
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
from firebird.qa import db_factory, python_act, Action
|
||||
from firebird.driver import DbAccessMode
|
||||
|
||||
# version: 3.0.5
|
||||
# resources: None
|
||||
@ -29,15 +30,15 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
|
||||
# test_script_1
|
||||
#---
|
||||
#
|
||||
#
|
||||
# import os
|
||||
# import fdb
|
||||
# from fdb import services as fbsvc
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
# DB_NAME = '$(DATABASE_LOCATION)' + 'bugs.core_5949.fdb'
|
||||
#
|
||||
#
|
||||
# def change_db_access_mode( a_host, a_db_name, a_required_access ):
|
||||
# global fbsvc
|
||||
# svc=fbsvc.connect( host = a_host )
|
||||
@ -45,15 +46,15 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# svc.close()
|
||||
# return None
|
||||
# #------------------------------------
|
||||
#
|
||||
#
|
||||
# db_conn.execute_immediate('alter database set linger to 60')
|
||||
# db_conn.commit()
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
# #------------------------------------
|
||||
#
|
||||
#
|
||||
# change_db_access_mode( 'localhost', DB_NAME, fbsvc.ACCESS_READ_ONLY )
|
||||
#
|
||||
#
|
||||
# con=fdb.connect(dsn = dsn)
|
||||
# cur=con.cursor()
|
||||
# cur.execute('select r.rdb$linger, d.mon$read_only from rdb$database r cross join mon$database d')
|
||||
@ -61,24 +62,34 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# print(r[0],r[1])
|
||||
# con.commit()
|
||||
# con.close()
|
||||
#
|
||||
#
|
||||
# #------------------------------------
|
||||
# change_db_access_mode( 'localhost', DB_NAME, fbsvc.ACCESS_READ_WRITE )
|
||||
#
|
||||
#
|
||||
# print('COMPLETED.')
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
expected_stdout_1 = """
|
||||
60 1
|
||||
COMPLETED.
|
||||
"""
|
||||
"""
|
||||
|
||||
@pytest.mark.version('>=3.0.5')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
|
||||
|
||||
def test_1(act_1: Action):
|
||||
with act_1.db.connect() as con:
|
||||
con.execute_immediate('alter database set linger to 60')
|
||||
con.commit()
|
||||
#
|
||||
with act_1.connect_server() as srv:
|
||||
srv.database.set_access_mode(database=act_1.db.db_path, mode=DbAccessMode.READ_ONLY)
|
||||
# Test
|
||||
with act_1.db.connect() as con:
|
||||
c = con.cursor()
|
||||
c.execute('select r.rdb$linger, d.mon$read_only from rdb$database r cross join mon$database d')
|
||||
result = c.fetchone()
|
||||
con.commit()
|
||||
assert result == (60, 1)
|
||||
|
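# Illustration only (not part of the commit): the test leaves the database in read-only
# mode. If a clean-up step were wanted, the access mode could be switched back with the
# same service API that is used above, just with the opposite enum member.
with act_1.connect_server() as srv:
    srv.database.set_access_mode(database=act_1.db.db_path, mode=DbAccessMode.READ_WRITE)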
@ -2,7 +2,7 @@
|
||||
#
|
||||
# id: bugs.core_5965
|
||||
# title: FB3 Optimiser chooses less efficient plan than FB2.5 optimiser
|
||||
# description:
|
||||
# description:
|
||||
# Filling the database with data from the ticket can take noticeable time.
|
||||
# Instead, it was decided to extract a .fbk from a ZIP archive and then restore it.
|
||||
# Instead of actual execution we only obtain the PLAN by querying the cursor's read-only property "plan"
|
||||
@ -16,14 +16,16 @@
|
||||
# ===
|
||||
# Confirmed wrong plan for second expr in 4.0.0.1249, 3.0.4.33053
|
||||
# Works fine in 4.0.0.1340, 3.0.5.33084
|
||||
#
|
||||
#
|
||||
# tracker_id: CORE-5965
|
||||
# min_versions: ['3.0']
|
||||
# versions: 3.0
|
||||
# qmid: None
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
import zipfile
|
||||
from pathlib import Path
|
||||
from firebird.qa import db_factory, python_act, Action, temp_file, Database
|
||||
|
||||
# version: 3.0
|
||||
# resources: None
|
||||
@ -33,39 +35,41 @@ substitutions_1 = []
|
||||
init_script_1 = """"""
|
||||
|
||||
db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
db_1_tmp = db_factory(sql_dialect=3, filename='tmp_core_5965.fdb', do_not_create=True)
|
||||
|
||||
|
||||
# test_script_1
|
||||
#---
|
||||
#
|
||||
#
|
||||
# import os
|
||||
# import fdb
|
||||
# import time
|
||||
# import zipfile
|
||||
# import difflib
|
||||
# import subprocess
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
#
|
||||
#
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def flush_and_close( file_handle ):
|
||||
# # https://docs.python.org/2/library/os.html#os.fsync
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
|
||||
# global os
|
||||
#
|
||||
#
|
||||
# file_handle.flush()
|
||||
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
|
||||
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
|
||||
# os.fsync(file_handle.fileno())
|
||||
# file_handle.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def cleanup( f_names_list ):
|
||||
# global os
|
||||
# for i in range(len( f_names_list )):
|
||||
@ -76,22 +80,22 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# else:
|
||||
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
|
||||
# del_name = None
|
||||
#
|
||||
#
|
||||
# if del_name and os.path.isfile( del_name ):
|
||||
# os.remove( del_name )
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# zf = zipfile.ZipFile( os.path.join(context['files_location'],'core_5965.zip') )
|
||||
# tmpfbk = 'core_5965.fbk'
|
||||
# zf.extract( tmpfbk, '$(DATABASE_LOCATION)')
|
||||
# zf.close()
|
||||
#
|
||||
#
|
||||
# tmpfbk='$(DATABASE_LOCATION)'+tmpfbk
|
||||
# tmpfdb='$(DATABASE_LOCATION)'+'tmp_core_5965.fdb'
|
||||
#
|
||||
#
|
||||
# f_restore_log=open( os.path.join(context['temp_directory'],'tmp_core_5965_restore.log'), 'w')
|
||||
# subprocess.check_call([context['fbsvcmgr_path'],"localhost:service_mgr",
|
||||
# "action_restore",
|
||||
@ -100,17 +104,17 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# "res_replace",
|
||||
# "verbose"
|
||||
# ],
|
||||
# stdout=f_restore_log,
|
||||
# stdout=f_restore_log,
|
||||
# stderr=subprocess.STDOUT)
|
||||
# flush_and_close( f_restore_log )
|
||||
#
|
||||
#
|
||||
# con=fdb.connect(dsn = 'localhost:'+tmpfdb)
|
||||
#
|
||||
#
|
||||
# # https://pythonhosted.org/fdb/reference.html#fdb.Cursor
|
||||
#
|
||||
#
|
||||
# cur_1=con.cursor()
|
||||
# cur_2=con.cursor()
|
||||
#
|
||||
#
|
||||
# sql_1='''
|
||||
# select 1
|
||||
# from opt_test
|
||||
@ -122,9 +126,9 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# order by order_no desc
|
||||
# ;
|
||||
# '''
|
||||
#
|
||||
#
|
||||
# sql_2='''
|
||||
# select 2
|
||||
# select 2
|
||||
# from opt_test
|
||||
# where
|
||||
# sysid = 1 and
|
||||
@ -133,42 +137,63 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# cust_id = 73
|
||||
# order by order_no desc
|
||||
# ;
|
||||
#
|
||||
#
|
||||
# '''
|
||||
#
|
||||
#
|
||||
# cur_1.execute(sql_1)
|
||||
# for r in cur_1:
|
||||
# pass
|
||||
# break
|
||||
#
|
||||
#
|
||||
# cur_2.execute(sql_2)
|
||||
# for r in cur_2:
|
||||
# pass
|
||||
# break
|
||||
#
|
||||
#
|
||||
# print( cur_1.plan )
|
||||
# print( cur_2.plan )
|
||||
#
|
||||
#
|
||||
# cur_1.close()
|
||||
# cur_2.close()
|
||||
# con.close()
|
||||
#
|
||||
#
|
||||
# # Cleanup:
|
||||
# ##########
|
||||
# time.sleep(1)
|
||||
# cleanup( (tmpfbk, tmpfdb, f_restore_log) )
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
expected_stdout_1 = """
|
||||
PLAN SORT (OPT_TEST INDEX (O_CLID_CUSTTY_CUSTID))
|
||||
PLAN SORT (OPT_TEST INDEX (O_CLID_CUSTTY_CUSTID))
|
||||
"""
|
||||
"""
|
||||
|
||||
fbk_file = temp_file('core_5965.fbk')
|
||||
|
||||
@pytest.mark.version('>=3.0')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
|
||||
|
||||
def test_1(act_1: Action, fbk_file: Path, db_1_tmp: Database, capsys):
|
||||
zipped_fbk_file = zipfile.Path(act_1.vars['files'] / 'core_5965.zip',
|
||||
at='core_5965.fbk')
|
||||
fbk_file.write_bytes(zipped_fbk_file.read_bytes())
|
||||
#
|
||||
with act_1.connect_server() as srv:
|
||||
srv.database.restore(backup=fbk_file, database=db_1_tmp.db_path)
|
||||
srv.wait()
|
||||
# Test
|
||||
with db_1_tmp.connect() as con:
|
||||
c1 = con.cursor()
|
||||
c2 = con.cursor()
|
||||
c1.execute("select 1 from opt_test where clid = 23 and cust_type = 1 and cust_id = 73 order by order_no desc")
|
||||
c1.fetchall()
|
||||
print(c1.statement.plan)
|
||||
#
|
||||
c2.execute("select 2 from opt_test where sysid = 1 and clid = 23 and cust_type = 1 and cust_id = 73 order by order_no desc")
|
||||
c2.fetchall()
|
||||
print(c2.statement.plan)
|
||||
# Check
|
||||
act_1.expected_stdout = expected_stdout_1
|
||||
act_1.stdout = capsys.readouterr().out
|
||||
assert act_1.clean_stdout == act_1.clean_expected_stdout
|
||||
|
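# A minimal sketch (illustration only), assuming the execution plan is exposed by the
# prepared statement right after execute(), so no rows have to be fetched just to read it:
with db_1_tmp.connect() as con:
    cur = con.cursor()
    cur.execute("select 1 from opt_test where clid = 23 and cust_type = 1 and cust_id = 73 order by order_no desc")
    print(cur.statement.plan)  # expected: PLAN SORT (OPT_TEST INDEX (O_CLID_CUSTTY_CUSTID))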
@ -2,14 +2,14 @@
|
||||
#
|
||||
# id: bugs.core_5972
|
||||
# title: External engine trigger crashing server if table have computed field
|
||||
# description:
|
||||
# description:
|
||||
# We use the supplied UDR example which operates with a TRIGGER for replication purposes.
|
||||
# Two databases are used here: one is 'main' (which is created by fbtest) and the second
|
||||
# is auxiliary and serves as a slave (replica).
|
||||
#
|
||||
#
|
||||
# We create table PERSONS in both databases, its DDL is taken from examples code.
|
||||
# This table will be normally replicated until we add COMPUTED BY field to it.
|
||||
#
|
||||
#
|
||||
# When such field is added and we issue INSERT command, standard exception must raise:
|
||||
# Statement failed, SQLSTATE = 42000
|
||||
# Execute statement error at isc_dsql_prepare :
|
||||
@ -19,12 +19,12 @@
|
||||
# 335544382 : COMP
|
||||
# 336397208 : At line 1, column 57
|
||||
# Statement : insert into "PERSONS" ("ID", "NAME", "ADDRESS", "INFO", "COMP") values (?, ?, ?, ?, ?)
|
||||
# Data source : Firebird::C:\\FBTESTING\\qa\\misc mprepl.fdb
|
||||
# Data source : Firebird::C:\\FBTESTING\\qa\\misc\\tmprepl.fdb
|
||||
# -At block line: ...
|
||||
# -At trigger 'PERSONS_REPLICATE'
|
||||
#
|
||||
#
|
||||
# We expect this exception to appear (see try/except block): check its class and the content of the message.
|
||||
#
|
||||
#
|
||||
# Confirmed crash on 4.0.0.1346 (built 17-dec-2018).
|
||||
# Checked on 4.0.0.1391 (built 22-jan-2019): all fine, got expected exception.
|
||||
# Checked also on:
|
||||
@ -38,47 +38,60 @@
|
||||
# qmid: None
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
from firebird.qa import db_factory, python_act, Action, Database
|
||||
from firebird.driver import DatabaseError
|
||||
|
||||
# version: 3.0.6
|
||||
# resources: None
|
||||
|
||||
substitutions_1 = [('[ \t]+', ' '), ('.* At block line.*', 'At block'), ('read-only column.*', 'read-only column'), ('Statement.*', 'Statement'), ('Data source.*', 'Data source'), ('.* At trigger.*', 'At trigger')]
|
||||
substitutions_1 = [('[ \t]+', ' '), ('.* At block line.*', 'At block'),
|
||||
('read-only column.*', 'read-only column'),
|
||||
('Statement.*', 'Statement'), ('Data source.*', 'Data source'),
|
||||
('.* At trigger.*', 'At trigger')]
|
||||
|
||||
init_script_1 = """"""
|
||||
init_script_1 = """
|
||||
create table persons (
|
||||
id integer not null,
|
||||
name varchar(60) not null,
|
||||
address varchar(60),
|
||||
info blob sub_type text,
|
||||
comp int computed by (1) -- COMPUTED_BY FIELD AS IT IS DESCRIBED IN THE TICKET
|
||||
);
|
||||
"""
|
||||
|
||||
db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
db_1_repl = db_factory(sql_dialect=3, init=init_script_1, filename='core_5972_repl.fdb')
|
||||
|
||||
# test_script_1
|
||||
#---
|
||||
#
|
||||
#
|
||||
# import os
|
||||
# import sys
|
||||
# import time
|
||||
# import subprocess
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
# this_db = db_conn.database_name
|
||||
# fb_major=db_conn.engine_version
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def flush_and_close( file_handle ):
|
||||
# # https://docs.python.org/2/library/os.html#os.fsync
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
|
||||
# global os
|
||||
#
|
||||
#
|
||||
# file_handle.flush()
|
||||
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
|
||||
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
|
||||
# os.fsync(file_handle.fileno())
|
||||
# file_handle.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def cleanup( f_names_list ):
|
||||
# global os
|
||||
# for i in range(len( f_names_list )):
|
||||
@ -89,12 +102,12 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# else:
|
||||
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
|
||||
# del_name = None
|
||||
#
|
||||
#
|
||||
# if del_name and os.path.isfile( del_name ):
|
||||
# os.remove( del_name )
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# table_ddl='''
|
||||
# create table persons (
|
||||
# id integer not null,
|
||||
@ -104,51 +117,51 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# comp int computed by (1) -- COMPUTED_BY FIELD AS IT IS DESCRIBED IN THE TICKET
|
||||
# );
|
||||
# '''
|
||||
#
|
||||
#
|
||||
# fdb_repl = os.path.join(context['temp_directory'],'tmp_5972_repl.fdb')
|
||||
# cleanup( (fdb_repl,) )
|
||||
#
|
||||
#
|
||||
# con_repl = fdb.create_database( dsn = 'localhost:%(fdb_repl)s' % locals() )
|
||||
# con_repl.execute_immediate( table_ddl )
|
||||
# con_repl.commit()
|
||||
# con_repl.close()
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# db_conn.execute_immediate( table_ddl )
|
||||
# db_conn.commit()
|
||||
#
|
||||
#
|
||||
# ddl_for_replication='''
|
||||
# create table replicate_config (
|
||||
# name varchar(31) not null,
|
||||
# data_source varchar(255) not null
|
||||
# );
|
||||
#
|
||||
#
|
||||
# insert into replicate_config (name, data_source)
|
||||
# values ('ds1', '%(fdb_repl)s');
|
||||
#
|
||||
#
|
||||
# create trigger persons_replicate
|
||||
# after insert on persons
|
||||
# external name 'udrcpp_example!replicate!ds1'
|
||||
# engine udr;
|
||||
#
|
||||
#
|
||||
# create trigger persons_replicate2
|
||||
# after insert on persons
|
||||
# external name 'udrcpp_example!replicate_persons!ds1'
|
||||
# engine udr;
|
||||
# commit;
|
||||
#
|
||||
#
|
||||
# ''' % locals()
|
||||
#
|
||||
#
|
||||
# f_apply_ddl_sql = open( os.path.join(context['temp_directory'],'tmp_5972.sql'), 'w', buffering = 0)
|
||||
# f_apply_ddl_sql.write( ddl_for_replication )
|
||||
# flush_and_close( f_apply_ddl_sql )
|
||||
#
|
||||
#
|
||||
# f_apply_ddl_log = open( '.'.join( (os.path.splitext( f_apply_ddl_sql.name )[0], 'log') ), 'w', buffering = 0)
|
||||
# subprocess.call( [ context['isql_path'], dsn, '-q', '-i', f_apply_ddl_sql.name ], stdout = f_apply_ddl_log, stderr = subprocess.STDOUT)
|
||||
# flush_and_close( f_apply_ddl_log )
|
||||
#
|
||||
#
|
||||
# #--------------------------------
|
||||
#
|
||||
#
|
||||
# cur = db_conn.cursor()
|
||||
# try:
|
||||
# cur.execute( "insert into persons values (1, 'One', 'some_address', 'some_blob_info')" )
|
||||
@ -156,35 +169,69 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# except Exception as e:
|
||||
# print('Got exception:', sys.exc_info()[0])
|
||||
# print(e[0])
|
||||
#
|
||||
#
|
||||
# finally:
|
||||
# db_conn.close()
|
||||
# cur.close()
|
||||
#
|
||||
#
|
||||
# if fb_major >= 4.0:
|
||||
# runProgram( context['isql_path'], ['-q', dsn], 'ALTER EXTERNAL CONNECTIONS POOL CLEAR ALL;' )
|
||||
#
|
||||
#
|
||||
# cleanup( (f_apply_ddl_sql.name, f_apply_ddl_log.name, fdb_repl) )
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
expected_stdout_1 = """
|
||||
Got exception: <class 'fdb.fbcore.DatabaseError'>
|
||||
Error while executing SQL statement:
|
||||
- SQLCODE: -901
|
||||
- Execute statement error at isc_dsql_prepare :
|
||||
Got exception: <class 'firebird.driver.types.DatabaseError'>
|
||||
Execute statement error at isc_dsql_prepare :
|
||||
335544359 : attempted update of read-only column PERSONS.COMP
|
||||
Statement : insert into "PERSONS" ("ID", "NAME", "ADDRESS", "INFO", "COMP") values (?, ?, ?, ?, ?)
|
||||
Data source : Firebird::C:\\FBTESTING\\qabt-repo mp mp_5972_repl.fdb
|
||||
- At block line: 9, col: 5
|
||||
- At trigger 'PERSONS_REPLICATE'
|
||||
"""
|
||||
Data source : Firebird::C:\\FBTESTING\\qa\\fbt-repo\\tmp\\tmp_5972_repl.fdb
|
||||
-At block line: 9, col: 5
|
||||
-At trigger 'PERSONS_REPLICATE'
|
||||
"""
|
||||
|
||||
@pytest.mark.version('>=3.0.6')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
def test_1(act_1: Action, db_1_repl: Database, capsys):
|
||||
ddl_for_replication = f"""
|
||||
create table replicate_config (
|
||||
name varchar(31) not null,
|
||||
data_source varchar(255) not null
|
||||
);
|
||||
|
||||
insert into replicate_config (name, data_source)
|
||||
values ('ds1', '{db_1_repl.db_path}');
|
||||
|
||||
create trigger persons_replicate
|
||||
after insert on persons
|
||||
external name 'udrcpp_example!replicate!ds1'
|
||||
engine udr;
|
||||
|
||||
create trigger persons_replicate2
|
||||
after insert on persons
|
||||
external name 'udrcpp_example!replicate_persons!ds1'
|
||||
engine udr;
|
||||
commit;
|
||||
"""
|
||||
act_1.isql(switches=['-q'], input=ddl_for_replication)
|
||||
# Test
|
||||
with act_1.db.connect() as con:
|
||||
c = con.cursor()
|
||||
try:
|
||||
c.execute("insert into persons values (1, 'One', 'some_address', 'some_blob_info')")
|
||||
con.commit()
|
||||
except DatabaseError as e:
|
||||
print(f'Got exception: {e.__class__}')
|
||||
print(e)
|
||||
#
|
||||
if act_1.is_version('>=4'):
|
||||
act_1.reset()
|
||||
act_1.isql(switches=['-q'], input='ALTER EXTERNAL CONNECTIONS POOL CLEAR ALL;')
|
||||
#
|
||||
act_1.reset()
|
||||
act_1.expected_stdout = expected_stdout_1
|
||||
act_1.stdout = capsys.readouterr().out
|
||||
assert act_1.clean_stdout == act_1.clean_expected_stdout
|
||||
|
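# An alternative check (illustration only, not part of the commit): the same failure could
# be asserted directly with pytest.raises instead of capturing printed output; the match
# string is taken from the expected error text above.
with act_1.db.connect() as con:
    c = con.cursor()
    with pytest.raises(DatabaseError, match='read-only column'):
        c.execute("insert into persons values (1, 'One', 'some_address', 'some_blob_info')")
        con.commit()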
@ -2,10 +2,10 @@
|
||||
#
|
||||
# id: bugs.core_5980
|
||||
# title: Firebird crashes due to concurrent operations with expression indices
|
||||
# description:
|
||||
# description:
|
||||
# Scenario for reproducing was given by Vlad, letter 25-02-2020 19:15.
|
||||
# Unfortunately, this crash can occur only in a developer build rather than a release one.
|
||||
#
|
||||
#
|
||||
# Although issues from ticket can NOT be reproduced, it was encountered in 2.5.0.26074
|
||||
# that statements from here lead DB to be corrupted:
|
||||
# Error while commiting transaction:
|
||||
@ -17,16 +17,16 @@
|
||||
# 335544335
|
||||
# No such problem in 2.5.1 and above.
|
||||
# Decided to add this .fbt just for check that DB will not be corrupted.
|
||||
#
|
||||
#
|
||||
# TICKET ISSUE REMAINS IRREPRODUCIBLE (checked on following SuperServer builds: 2.5.1, 2.5.6, 2.5.9, 3.0.6, 4.0.0).
|
||||
#
|
||||
#
|
||||
# tracker_id: CORE-5980
|
||||
# min_versions: ['2.5.1']
|
||||
# versions: 2.5.1
|
||||
# qmid: None
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
from firebird.qa import db_factory, python_act, Action
|
||||
|
||||
# version: 2.5.1
|
||||
# resources: None
|
||||
@ -42,44 +42,51 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# import os
|
||||
# import sys
|
||||
# from fdb import services
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
#
|
||||
#
|
||||
# fb_home = services.connect(host='localhost', user= user_name, password= user_password).get_home_directory()
|
||||
# if db_conn.engine_version < 3:
|
||||
# fb_home = os.path.join( fb_home, 'bin')
|
||||
#
|
||||
#
|
||||
# db_conn.close()
|
||||
# con1 = fdb.connect( dsn = dsn )
|
||||
# con2 = fdb.connect( dsn = dsn )
|
||||
#
|
||||
#
|
||||
# con1.execute_immediate('recreate table t1(id int)')
|
||||
# con1.execute_immediate('create index t1_idx on t1 computed by (id + 0)')
|
||||
# con1.commit()
|
||||
#
|
||||
#
|
||||
# cur1 = con1.cursor()
|
||||
# cur1.execute( 'insert into t1(id) values(?)', (1,) )
|
||||
# con1.commit()
|
||||
#
|
||||
#
|
||||
# # this lead to corruption of database in 2.5.0
|
||||
# # page 0 is of wrong type (expected 6, found 1):
|
||||
# # -----------------------
|
||||
# con2.execute_immediate('alter index t1_idx active')
|
||||
# con2.commit()
|
||||
# con2.close()
|
||||
#
|
||||
#
|
||||
# con1.close()
|
||||
# cur1.close()
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
@pytest.mark.version('>=2.5.1')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
|
||||
|
||||
def test_1(act_1: Action):
|
||||
with act_1.db.connect() as con1, act_1.db.connect() as con2:
|
||||
con1.execute_immediate('recreate table t1(id int)')
|
||||
con1.execute_immediate('create index t1_idx on t1 computed by (id + 0)')
|
||||
con1.commit()
|
||||
c = con1.cursor()
|
||||
c.execute('insert into t1(id) values(?)', [1])
|
||||
con1.commit()
|
||||
# this led to corruption of the database in 2.5.0
|
||||
# page 0 is of wrong type (expected 6, found 1)
|
||||
con2.execute_immediate('alter index t1_idx active')
|
||||
con2.commit()
|
||||
|
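# Illustration only (not part of the commit): a trivial follow-up query confirming that the
# database is still usable after the index was altered in the concurrent connection, i.e.
# the 2.5.0-era corruption described above does not occur.
with act_1.db.connect() as con3:
    cur3 = con3.cursor()
    cur3.execute('select count(*) from t1')
    assert cur3.fetchone()[0] == 1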
@ -2,44 +2,42 @@
|
||||
#
|
||||
# id: bugs.core_5991
|
||||
# title: Trace could not work correctly with quoted file names in trace configurations
|
||||
# description:
|
||||
# description:
|
||||
# Thank Vlad for suggestions.
|
||||
#
|
||||
#
|
||||
# NOTE-1. Bug will NOT appear if PATTERN is used in database-section!
|
||||
# In order to reproduce the bug one needs to create a config file for trace with the following
|
||||
# In order to reproduce the bug one needs to create a config file for trace with the following
|
||||
# _SINGLE_ file name in databases-section:
|
||||
# =====
|
||||
# database = 'C:\\FBTESTING\\qa
|
||||
# bt-repo mp mp_5991.o'clock.fdb'
|
||||
# database = 'C:\\FBTESTING\\qa\\fbt-repo\\tmp\\tmp_5991.o'clock.fdb'
|
||||
# {
|
||||
# enabled = true
|
||||
# time_threshold = 0
|
||||
# time_threshold = 0
|
||||
# log_initfini = false
|
||||
# log_connections = true
|
||||
# log_transactions = true
|
||||
# log_statement_finish = true
|
||||
# }
|
||||
# =====
|
||||
# (path 'C:\\FBTESTING\\qa
|
||||
# bt-repo mp' will be replaced with actual test DB location)
|
||||
#
|
||||
# (path 'C:\\FBTESTING\\qa\\fbt-repo\\tmp' will be replaced with actual test DB location)
|
||||
#
|
||||
# Then we start trace session.
|
||||
#
|
||||
#
|
||||
# NOTE-2: if this trace session will be forced to wait about 10 seconds, then error message will appear
|
||||
# with text "error while parsing trace configuration" but DB name will be securityN.fdb.
|
||||
# with text "error while parsing trace configuration" but DB name will be securityN.fdb.
|
||||
# Moreover, an operation with any DB which has name different than specified in database-section will
|
||||
# raise this error, and its text can be misleading that trace did not started at all or was terminated.
|
||||
# This is because another bug (not yet fixed) which Vlad mentioned privately in letter 26.02.19 23:37.
|
||||
#
|
||||
#
|
||||
# :::: NB :::::
|
||||
# We can IGNORE this error message despite it contains phrase "Error creating trace session" and go on.
|
||||
# Trace session actually *WILL* be created and we have to check this here by further actions with DB.
|
||||
# :::::::::::::
|
||||
#
|
||||
#
|
||||
# After this, we create database with the same name by calling fdb.create_database().
|
||||
# NOTE-3: we have to enclose the DB file in double quotes and - moreover - duplicate the single apostrophe,
|
||||
# otherwise fdb driver will create DB without it, i.e.: "tmp_5991.oclock.fdb".
|
||||
#
|
||||
#
|
||||
# At the second step we do trivial statement and drop this database (tmp_5991.o'clock.fdb).
|
||||
# Finally, we wait at least two seconds because trace buffer must be flushed to disk, stop trace session
|
||||
# and then - open trace log for parsing it.
|
||||
@ -52,19 +50,20 @@
|
||||
# 6. DROP_DATABASE
|
||||
# We check each line of trace for matching to patterns (based on these phrases) and put result into Python dict.
|
||||
# Resulting dict must contain 'FOUND' and value for every of its keys (patterns).
|
||||
#
|
||||
#
|
||||
# Confirmed bug on 3.0.4.33054.
|
||||
# 01-mar-2021: adapted for work on Linux.
|
||||
# Checked on 4.0.0.2377 and 3.0.8.33415 (both Windows and Linux).
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# tracker_id: CORE-5991
|
||||
# min_versions: ['3.0.5']
|
||||
# versions: 3.0.5
|
||||
# qmid: None
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
import re
|
||||
from firebird.qa import db_factory, python_act, Action
|
||||
|
||||
# version: 3.0.5
|
||||
# resources: None
|
||||
@ -73,7 +72,7 @@ substitutions_1 = [('Trying to create.*', '')]
|
||||
|
||||
init_script_1 = """"""
|
||||
|
||||
db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
db_1 = db_factory(sql_dialect=3, init=init_script_1, filename="core_5991.o'clock.fdb")
|
||||
|
||||
# test_script_1
|
||||
#---
|
||||
@ -83,30 +82,30 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# import subprocess
|
||||
# from subprocess import Popen
|
||||
# from fdb import services
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
#
|
||||
#
|
||||
# fdb_file=os.path.join( '$(DATABASE_LOCATION)', "tmp_5991.o'clock.fdb" )
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def flush_and_close( file_handle ):
|
||||
# # https://docs.python.org/2/library/os.html#os.fsync
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
|
||||
# global os
|
||||
#
|
||||
#
|
||||
# file_handle.flush()
|
||||
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
|
||||
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
|
||||
# os.fsync(file_handle.fileno())
|
||||
# file_handle.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def cleanup( f_names_list ):
|
||||
# global os
|
||||
# for i in range(len( f_names_list )):
|
||||
@ -117,54 +116,54 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# else:
|
||||
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
|
||||
# del_name = None
|
||||
#
|
||||
#
|
||||
# if del_name and os.path.isfile( del_name ):
|
||||
# os.remove( del_name )
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# cleanup( fdb_file, )
|
||||
#
|
||||
#
|
||||
# if os.name == 'nt':
|
||||
# fdb_trace = fdb_file.replace('/','\\\\')
|
||||
# else:
|
||||
# fdb_trace = fdb_file
|
||||
#
|
||||
#
|
||||
# #####################################################################
|
||||
# # Prepare config for trace session that will be launched by call of FBSVCMGR:
|
||||
#
|
||||
#
|
||||
# txt = ''' database = '%(fdb_trace)s'
|
||||
# {
|
||||
# enabled = true
|
||||
# time_threshold = 0
|
||||
# time_threshold = 0
|
||||
# log_initfini = false
|
||||
# log_connections = true
|
||||
# log_transactions = true
|
||||
# log_statement_finish = true
|
||||
# }
|
||||
# ''' % locals()
|
||||
#
|
||||
#
|
||||
# trc_cfg=open( os.path.join(context['temp_directory'],'tmp_trace_5991.cfg'), 'w')
|
||||
# trc_cfg.write(txt)
|
||||
# flush_and_close( trc_cfg )
|
||||
#
|
||||
#
|
||||
# #####################################################################
|
||||
# # Async. launch of trace session using FBSVCMGR action_trace_start:
|
||||
#
|
||||
#
|
||||
# trc_log=open( os.path.join(context['temp_directory'],'tmp_trace_5991.log'), 'w')
|
||||
# # Execute a child program in a new process, redirecting STDERR to the same target as of STDOUT:
|
||||
# p_svcmgr = Popen( [ context['fbsvcmgr_path'], "localhost:service_mgr",
|
||||
# "action_trace_start",
|
||||
# "trc_cfg", trc_cfg.name
|
||||
# ],
|
||||
# stdout=trc_log,
|
||||
# stdout=trc_log,
|
||||
# stderr=subprocess.STDOUT
|
||||
# )
|
||||
#
|
||||
#
|
||||
# # 08.01.2020. This delay is mandatory, otherwise file with trace session info can remain (sometimes)
|
||||
# # empty when we will read it at the next step:
|
||||
# time.sleep(1)
|
||||
#
|
||||
#
|
||||
# # Determine active trace session ID (for further stop):
|
||||
# ########################
|
||||
# trc_lst=open( os.path.join(context['temp_directory'],'tmp_trace_5991.lst'), 'w')
|
||||
@ -173,32 +172,32 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# stdout=trc_lst, stderr=subprocess.STDOUT
|
||||
# )
|
||||
# flush_and_close( trc_lst )
|
||||
#
|
||||
# # Session ID: 5
|
||||
# # user:
|
||||
# # date: 2015-08-27 15:24:14
|
||||
# # flags: active, trace
|
||||
#
|
||||
#
|
||||
# # Session ID: 5
|
||||
# # user:
|
||||
# # date: 2015-08-27 15:24:14
|
||||
# # flags: active, trace
|
||||
#
|
||||
# sid_pattern = re.compile('Session\\s+ID[:]{0,1}\\s+\\d+', re.IGNORECASE)
|
||||
#
|
||||
#
|
||||
# trc_ssn=0
|
||||
# with open( trc_lst.name,'r') as f:
|
||||
# for line in f:
|
||||
# if sid_pattern.search( line ) and len( line.split() ) == 3:
|
||||
# trc_ssn = line.split()[2]
|
||||
# break
|
||||
#
|
||||
# # Result: `trc_ssn` is ID of active trace session.
|
||||
#
|
||||
# # Result: `trc_ssn` is ID of active trace session.
|
||||
# # We have to terminate the trace session that is running on the server BEFORE we terminate the process `p_svcmgr`
|
||||
#
|
||||
#
|
||||
# if trc_ssn==0:
|
||||
# print("Error parsing trace session ID.")
|
||||
# flush_and_close( trc_log )
|
||||
#
|
||||
#
|
||||
# else:
|
||||
#
|
||||
#
|
||||
# ########### W O R K W I T H D A T A B A S E ########
|
||||
#
|
||||
#
|
||||
# print( 'Trying to create: "localhost:%s"' % fdb_file.replace("'","''") )
|
||||
# con = fdb.create_database( dsn = "localhost:%s" % fdb_file.replace("'","''") )
|
||||
# print( 'Database created OK.' )
|
||||
@ -206,40 +205,40 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# cur.execute( "select 'Database name contains single quote.' as result from mon$database where lower(mon$database_name) similar to '%[\\/](tmp_5991.o''clock).fdb'")
|
||||
# for r in cur:
|
||||
# print(r[0])
|
||||
#
|
||||
#
|
||||
# cur.close()
|
||||
# con.drop_database()
|
||||
# print( 'Database dropped OK.')
|
||||
# #####################################################################
|
||||
#
|
||||
# # ::: NB ::: Here we have to be idle at least 2s (two seconds) otherwise trace log will
|
||||
#
|
||||
# # ::: NB ::: Here we have to be idle at least 2s (two seconds) otherwise trace log will
|
||||
# # not contain some or all of messages about create DB, start Tx, ES, Tx and drop DB.
|
||||
# # See also discussion with hvlad, 08.01.2020 15:16
|
||||
# # (subj: "action_trace_stop does not flush trace log (fully or partially)")
|
||||
# time.sleep(2)
|
||||
#
|
||||
#
|
||||
# # Stop trace session:
|
||||
# #####################
|
||||
#
|
||||
#
|
||||
# trc_lst=open(trc_lst.name, "a")
|
||||
# trc_lst.seek(0,2)
|
||||
#
|
||||
#
|
||||
# subprocess.call( [ context['fbsvcmgr_path'], "localhost:service_mgr",
|
||||
# "action_trace_stop",
|
||||
# "trc_id",trc_ssn
|
||||
# ],
|
||||
# stdout=trc_lst,
|
||||
# stdout=trc_lst,
|
||||
# stderr=subprocess.STDOUT
|
||||
# )
|
||||
# flush_and_close( trc_lst )
|
||||
#
|
||||
#
|
||||
# # 23.02.2021. DELAY FOR AT LEAST 1 SECOND REQUIRED HERE!
|
||||
# # Otherwise trace log can remain empty.
|
||||
# time.sleep(1)
|
||||
#
|
||||
#
|
||||
# p_svcmgr.terminate()
|
||||
# flush_and_close( trc_log )
|
||||
#
|
||||
#
|
||||
# allowed_patterns = {
|
||||
# '1. TRACE_START' : re.compile('Trace\\s+session\\s+ID\\s+\\d+\\s+started\\.*', re.IGNORECASE)
|
||||
# ,'2. DB_CREATION' : re.compile('[.*]*CREATE_DATABASE\\.*', re.IGNORECASE)
|
||||
@ -248,19 +247,19 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# ,'5. TX_FINISH' : re.compile('[.*]*ROLLBACK_TRANSACTION\\.*', re.IGNORECASE)
|
||||
# ,'6. DB_REMOVAL' : re.compile('[.*]*DROP_DATABASE\\.*', re.IGNORECASE)
|
||||
# }
|
||||
#
|
||||
#
|
||||
# found_patterns={}
|
||||
#
|
||||
#
|
||||
# with open( trc_log.name,'r') as f:
|
||||
# for line in f:
|
||||
# if line.rstrip().split():
|
||||
# for k,v in allowed_patterns.items():
|
||||
# if v.search(line):
|
||||
# found_patterns[k] = 'FOUND'
|
||||
#
|
||||
#
|
||||
# for k,v in sorted( found_patterns.items() ):
|
||||
# print( 'Pattern', k, ':', v)
|
||||
#
|
||||
#
|
||||
# if len( found_patterns ) < len( allowed_patterns ):
|
||||
# print('==== INCOMPLETE TRACE LOG: ====')
|
||||
# with open( trc_log.name,'r') as f:
|
||||
@ -268,34 +267,71 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# if line.rstrip().split():
|
||||
# print(' ' + line)
|
||||
# print('=' * 31)
|
||||
#
|
||||
#
|
||||
# #< cond "if trc_ssn>0"
|
||||
#
|
||||
#
|
||||
# # cleanup:
|
||||
# ##########
|
||||
# time.sleep(1)
|
||||
# cleanup( (trc_lst, trc_cfg, trc_log) )
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
expected_stdout_1 = """
|
||||
Trying to create: "localhost:C:/FBTESTING/qa/fbt-repo/tmp/tmp_5991.o''clock.fdb"
|
||||
Database created OK.
|
||||
Database name contains single quote.
|
||||
Database dropped OK.
|
||||
Pattern 1. TRACE_START : FOUND
|
||||
Pattern 2. DB_CREATION : FOUND
|
||||
Pattern 3. TX_START : FOUND
|
||||
Pattern 4. STATEMENT_DONE : FOUND
|
||||
Pattern 5. TX_FINISH : FOUND
|
||||
Pattern 6. DB_REMOVAL : FOUND
|
||||
"""
|
||||
Pattern 1. DB_ATTACH : FOUND
|
||||
Pattern 2. TX_START : FOUND
|
||||
Pattern 3. STATEMENT_DONE : FOUND
|
||||
Pattern 4. TX_FINISH : FOUND
|
||||
Pattern 5. DB_DETACH : FOUND
|
||||
"""
|
||||
|
||||
trace_1 = ['{',
|
||||
'enabled = true',
|
||||
'log_connections = true',
|
||||
'log_transactions = true',
|
||||
'log_statement_finish = true',
|
||||
'log_initfini = false',
|
||||
'time_threshold = 0',
|
||||
'}'
|
||||
]
|
||||
|
||||
@pytest.mark.version('>=3.0.5')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
def test_1(act_1: Action, capsys):
|
||||
trace_1.insert(0, f"database = '{act_1.db.db_path}'")
|
||||
with act_1.trace(config=trace_1):
|
||||
with act_1.db.connect() as con:
|
||||
c = con.cursor()
|
||||
for row in c.execute("select 'Database name contains single quote.' as result from mon$database where lower(mon$database_name) similar to '%[\\/](core_5991.o''clock).fdb'"):
|
||||
print(row[0])
|
||||
# Process trace
|
||||
allowed_patterns = {'1. DB_ATTACH': re.compile('[.*]*ATTACH_DATABASE\\.*', re.IGNORECASE),
|
||||
'2. TX_START': re.compile('[.*]*START_TRANSACTION\\.*', re.IGNORECASE),
|
||||
'3. STATEMENT_DONE': re.compile('[.*]*EXECUTE_STATEMENT_FINISH\\.*', re.IGNORECASE),
|
||||
'4. TX_FINISH': re.compile('[.*]*ROLLBACK_TRANSACTION\\.*', re.IGNORECASE),
|
||||
'5. DB_DETACH': re.compile('[.*]*DETACH_DATABASE\\.*', re.IGNORECASE),
|
||||
}
|
||||
found_patterns = {}
|
||||
for line in act_1.trace_log:
|
||||
if line.rstrip().split():
|
||||
for key, pattern in allowed_patterns.items():
|
||||
if pattern.search(line):
|
||||
found_patterns[key] = 'FOUND'
|
||||
|
||||
for key, status in sorted(found_patterns.items()):
|
||||
print(f'Pattern {key} : {status}')
|
||||
|
||||
if len(found_patterns) < len(allowed_patterns):
|
||||
print('==== INCOMPLETE TRACE LOG: ====')
|
||||
for line in act_1.trace_log:
|
||||
if line.strip():
|
||||
print(' ' + line)
|
||||
print('=' * 31)
|
||||
# Check
|
||||
act_1.expected_stdout = expected_stdout_1
|
||||
act_1.stdout = capsys.readouterr().out
|
||||
assert act_1.clean_stdout == act_1.clean_expected_stdout
|
||||
|
||||
|
@ -2,7 +2,7 @@
|
||||
#
|
||||
# id: bugs.core_5995
|
||||
# title: Creator user name is empty in user trace sessions
|
||||
# description:
|
||||
# description:
|
||||
# We create trivial config for trace, start session and stop it.
|
||||
# Trace list must contain string: ' user: SYSDBA ' (without apostrophes).
|
||||
# We search this by string using pattern matching: such line MUST contain at least two words
|
||||
@ -11,14 +11,14 @@
|
||||
# Checked on:
|
||||
# 4.0.0.1421: OK, 5.186s.
|
||||
# 3.0.5.33106: OK, 4.070s.
|
||||
#
|
||||
#
|
||||
# tracker_id: CORE-5995
|
||||
# min_versions: ['3.0.5']
|
||||
# versions: 3.0.5
|
||||
# qmid: None
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
from firebird.qa import db_factory, python_act, Action
|
||||
|
||||
# version: 3.0.5
|
||||
# resources: None
|
||||
@ -31,37 +31,37 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
|
||||
# test_script_1
|
||||
#---
|
||||
#
|
||||
#
|
||||
# import os
|
||||
# import re
|
||||
# import time
|
||||
# import subprocess
|
||||
# from subprocess import Popen
|
||||
# from fdb import services
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
#
|
||||
#
|
||||
# fdb_file=db_conn.database_name
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def flush_and_close( file_handle ):
|
||||
# # https://docs.python.org/2/library/os.html#os.fsync
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
|
||||
# global os
|
||||
#
|
||||
#
|
||||
# file_handle.flush()
|
||||
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
|
||||
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
|
||||
# os.fsync(file_handle.fileno())
|
||||
# file_handle.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def cleanup( f_names_list ):
|
||||
# global os
|
||||
# for i in range(len( f_names_list )):
|
||||
@ -72,19 +72,19 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# else:
|
||||
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
|
||||
# del_name = None
|
||||
#
|
||||
#
|
||||
# if del_name and os.path.isfile( del_name ):
|
||||
# os.remove( del_name )
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# #####################################################################
|
||||
# # Prepare config for trace session that will be launched by call of FBSVCMGR:
|
||||
#
|
||||
#
|
||||
# txt = ''' database = %[\\\\\\\\/]bugs.core_5995.fdb
|
||||
# {
|
||||
# enabled = true
|
||||
# time_threshold = 0
|
||||
# time_threshold = 0
|
||||
# log_initfini = false
|
||||
# log_statement_finish = true
|
||||
# }
|
||||
@ -92,25 +92,25 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# trc_cfg=open( os.path.join(context['temp_directory'],'tmp_trace_5995.cfg'), 'w')
|
||||
# trc_cfg.write(txt)
|
||||
# flush_and_close( trc_cfg )
|
||||
#
|
||||
#
|
||||
# #####################################################################
|
||||
# # Async. launch of trace session using FBSVCMGR action_trace_start:
|
||||
#
|
||||
#
|
||||
# trc_log=open( os.path.join(context['temp_directory'],'tmp_trace_5995.log'), 'w')
|
||||
# # Execute a child program in a new process, redirecting STDERR to the same target as of STDOUT:
|
||||
# p_svcmgr = Popen( [ context['fbsvcmgr_path'], "localhost:service_mgr",
|
||||
# "action_trace_start",
|
||||
# "trc_cfg", trc_cfg.name
|
||||
# ],
|
||||
# stdout=trc_log,
|
||||
# stdout=trc_log,
|
||||
# stderr=subprocess.STDOUT
|
||||
# )
|
||||
#
|
||||
#
|
||||
# # Wait! Trace session is initialized not instantly!
|
||||
# time.sleep(1)
|
||||
#
|
||||
#
|
||||
# #####################################################################
|
||||
#
|
||||
#
|
||||
# # Determine active trace session ID (for further stop):
|
||||
# trc_lst=open( os.path.join(context['temp_directory'],'tmp_trace_5995.lst'), 'w')
|
||||
# subprocess.call([context['fbsvcmgr_path'], "localhost:service_mgr",
|
||||
@ -118,69 +118,71 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# stdout=trc_lst, stderr=subprocess.STDOUT
|
||||
# )
|
||||
# flush_and_close( trc_lst )
|
||||
#
|
||||
# # Session ID: 5
|
||||
# # user:
|
||||
# # date: 2015-08-27 15:24:14
|
||||
# # flags: active, trace
|
||||
#
|
||||
#
|
||||
# # Session ID: 5
|
||||
# # user:
|
||||
# # date: 2015-08-27 15:24:14
|
||||
# # flags: active, trace
|
||||
#
|
||||
# usr_pattern = re.compile('user[:]{0,1}\\s+\\S+', re.IGNORECASE)
|
||||
# sid_pattern = re.compile('Session\\s+ID[:]{0,1}\\s+\\d+', re.IGNORECASE)
|
||||
#
|
||||
#
|
||||
# trcssn=0
|
||||
# trcusr=''
|
||||
# with open( trc_lst.name,'r') as f:
|
||||
# for line in f:
|
||||
# if sid_pattern.search( line ) and len( line.split() ) == 3:
|
||||
# trcssn = line.split()[2]
|
||||
#
|
||||
#
|
||||
# if usr_pattern.search(line) and len( line.split() ) >= 2:
|
||||
# trcusr = line.split()[1]
|
||||
#
|
||||
#
|
||||
# if trcssn and trcusr:
|
||||
# break
|
||||
#
|
||||
# # Result: `trcssn` is ID of active trace session.
|
||||
#
|
||||
# # Result: `trcssn` is ID of active trace session.
|
||||
# # We have to terminate the trace session that is running on the server BEFORE we terminate the process `p_svcmgr`
|
||||
# if trcssn==0:
|
||||
# print("Error parsing trace session ID.")
|
||||
# else:
|
||||
#
|
||||
#
|
||||
# #####################################################################
|
||||
# # Stop trace session:
|
||||
# #####################################################################
|
||||
#
|
||||
#
|
||||
# trc_lst=open(trc_lst.name, "a")
|
||||
# trc_lst.seek(0,2)
|
||||
# print( 'Trace was started by: ' + trcusr )
|
||||
#
|
||||
#
|
||||
# subprocess.call( [ context['fbsvcmgr_path'], "localhost:service_mgr",
|
||||
# "action_trace_stop",
|
||||
# "trc_id",trcssn
|
||||
# ],
|
||||
# stdout=trc_lst,
|
||||
# stdout=trc_lst,
|
||||
# stderr=subprocess.STDOUT
|
||||
# )
|
||||
# flush_and_close( trc_lst )
|
||||
# p_svcmgr.terminate()
|
||||
#
|
||||
#
|
||||
# flush_and_close( trc_log )
|
||||
#
|
||||
#
|
||||
# # cleanup:
|
||||
# ##########
|
||||
# time.sleep(1)
|
||||
# cleanup( (trc_lst, trc_cfg, trc_log) )
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
expected_stdout_1 = """
|
||||
Trace was started by: SYSDBA
|
||||
"""
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
trace_1 = ['log_initfini = false',
|
||||
'time_threshold = 0',
|
||||
'log_statement_finish = true',
|
||||
]
|
||||
|
||||
@pytest.mark.version('>=3.0.5')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
|
||||
|
||||
def test_1(act_1: Action):
|
||||
with act_1.trace(db_events=trace_1), act_1.connect_server() as srv:
|
||||
assert len(srv.trace.sessions) == 1
|
||||
for session in srv.trace.sessions.values():
|
||||
assert session.user == 'SYSDBA'
|
||||
|
@ -2,28 +2,30 @@
|
||||
#
|
||||
# id: bugs.core_6000
|
||||
# title: gbak issues "Your user name and password are not defined" when command switch "-fe(tch_password) ..." is specified when run as service
|
||||
# description:
|
||||
# description:
|
||||
# ::: NOTE :::
|
||||
# Presence of the ISC_PASSWORD variable had higher priority than the '-fe password_file' command switch before this ticket was fixed.
|
||||
# This means that command "gbak -se ... -fe <file_with_invalid_password>" PASSED without errors!
|
||||
#
|
||||
#
|
||||
# Test creates two files, one with correct SYSDBA password and second with invalid (hope that such password: T0t@1lywr0ng - is not in use for SYSDBA).
|
||||
# Also, test exports default SYSDBA password ('masterkey' ) to ISC_PASSWORD variable.
|
||||
# Then we do following:
|
||||
# 1) "gbak -fe <invalid_password_file>" - this should FAIL with issuing "user name and password are not defined" in STDERR,
|
||||
# 1) "gbak -fe <invalid_password_file>" - this should FAIL with issuing "user name and password are not defined" in STDERR,
|
||||
# despite that ISC_PASSWORD is not empty and contains the valid password
|
||||
# 2) UNSET variable ISC_PASSWORD and run "gbak -fe <correct_password_file>" - this should PASS without any STDOUT or STDERR.
|
||||
#
|
||||
#
|
||||
# Confirmed wrong behaviour on: 4.0.0.1627, 3.0.5.33178
|
||||
# Works fine on: 4.0.0.1629, 3.438s; 3.0.5.33179, 2.859s.
|
||||
#
|
||||
#
|
||||
# tracker_id: CORE-6000
|
||||
# min_versions: ['3.0.5']
|
||||
# versions: 3.0.5
|
||||
# qmid: None
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
import os
|
||||
from pathlib import Path
|
||||
from firebird.qa import db_factory, python_act, Action, temp_file
|
||||
|
||||
# version: 3.0.5
|
||||
# resources: None
|
||||
@ -36,37 +38,37 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
|
||||
# test_script_1
|
||||
#---
|
||||
#
|
||||
#
|
||||
# import os
|
||||
# import sys
|
||||
# import subprocess
|
||||
# from subprocess import Popen
|
||||
# import time
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
#
|
||||
#
|
||||
# this_fdb = db_conn.database_name
|
||||
# this_fbk=os.path.join(context['temp_directory'],'tmp_core_6000.fbk')
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def flush_and_close( file_handle ):
|
||||
# # https://docs.python.org/2/library/os.html#os.fsync
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
|
||||
# global os
|
||||
#
|
||||
#
|
||||
# file_handle.flush()
|
||||
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
|
||||
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
|
||||
# os.fsync(file_handle.fileno())
|
||||
# file_handle.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def cleanup( f_names_list ):
|
||||
# global os
|
||||
# for i in range(len( f_names_list )):
|
||||
@ -77,83 +79,92 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# else:
|
||||
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
|
||||
# del_name = None
|
||||
#
|
||||
#
|
||||
# if del_name and os.path.isfile( del_name ):
|
||||
# os.remove( del_name )
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# cleanup( this_fbk, )
|
||||
#
|
||||
#
|
||||
# f_psw_correct=open( os.path.join(context['temp_directory'],'tmp_psw_6000__OK.dat'), 'w')
|
||||
# f_psw_correct.write( user_password )
|
||||
# flush_and_close( f_psw_correct )
|
||||
#
|
||||
#
|
||||
# f_psw_invalid=open( os.path.join(context['temp_directory'],'tmp_psw_6000_BAD.dat'), 'w')
|
||||
# f_psw_invalid.write( 'T0t@1lywr0ng' )
|
||||
# flush_and_close( f_psw_invalid )
|
||||
#
|
||||
#
|
||||
# #---------------------- backup with '-fe <invalid_password_file>' --------------------------
|
||||
#
|
||||
#
|
||||
# f_log_invalid = open( os.path.join(context['temp_directory'],'tmp_isql_6000_BAD.log'), 'w')
|
||||
# f_err_invalid = open( os.path.join(context['temp_directory'],'tmp_isql_6000_BAD.err'), 'w')
|
||||
#
|
||||
#
|
||||
# subprocess.call( [context['gbak_path'], '-b', '-se', 'localhost:service_mgr', '-user', user_name ,'-fe', f_psw_invalid.name, this_fdb, this_fbk ],
|
||||
# stdout=f_log_invalid,
|
||||
# stderr=f_err_invalid
|
||||
# )
|
||||
# flush_and_close( f_err_invalid )
|
||||
# flush_and_close( f_log_invalid )
|
||||
#
|
||||
#
|
||||
# #---------------------- backup with '-fe <correct_password_file>' --------------------------
|
||||
#
|
||||
#
|
||||
# del os.environ["ISC_PASSWORD"]
|
||||
#
|
||||
#
|
||||
# f_log_correct = open( os.path.join(context['temp_directory'],'tmp_isql_6000__OK.log'), 'w')
|
||||
# f_err_correct = open( os.path.join(context['temp_directory'],'tmp_isql_6000__OK.err'), 'w')
|
||||
#
|
||||
#
|
||||
# subprocess.call( [context['gbak_path'], '-b', '-se', 'localhost:service_mgr', '-user', user_name ,'-fe', f_psw_correct.name, this_fdb, this_fbk ],
|
||||
# stdout=f_log_correct,
|
||||
# stderr=f_err_correct
|
||||
# )
|
||||
# flush_and_close( f_err_correct )
|
||||
# flush_and_close( f_log_correct )
|
||||
#
|
||||
#
|
||||
# # This file should be EMPTY:
|
||||
# ###########################
|
||||
# with open(f_log_invalid.name) as f:
|
||||
# for line in f:
|
||||
# print('UNEXPECTED STDOUT FOR INVALID PASSWORD: '+line)
|
||||
#
|
||||
#
|
||||
# with open(f_err_invalid.name) as f:
|
||||
# for line in f:
|
||||
# print('EXPECTED STDERR FOR INVALID PASSWORD: '+line)
|
||||
#
|
||||
#
|
||||
# with open(f_log_correct.name) as f:
|
||||
# for line in f:
|
||||
# print('UNEXPECTED STDOUT FOR CORRECT PASSWORD: '+line)
|
||||
#
|
||||
#
|
||||
# with open(f_err_correct.name) as f:
|
||||
# for line in f:
|
||||
# print('UNEXPECTED STDERR FOR CORRECT PASSWORD: '+line)
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# # CLEANUP
|
||||
# #########
|
||||
# time.sleep(1)
|
||||
# cleanup( ( f_psw_correct, f_psw_invalid, f_log_correct, f_err_correct, f_log_invalid, f_err_invalid, this_fbk ) )
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
expected_stdout_1 = """
|
||||
EXPECTED STDERR FOR INVALID PASSWORD: gbak: ERROR:Your user name and password are not defined. Ask your database administrator to set up a Firebird login.
|
||||
EXPECTED STDERR FOR INVALID PASSWORD: gbak:Exiting before completion due to errors
|
||||
"""
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
expected_stderr_1 = """
|
||||
gbak: ERROR:Your user name and password are not defined. Ask your database administrator to set up a Firebird login.
|
||||
gbak:Exiting before completion due to errors
|
||||
"""
|
||||
|
||||
pwd_file = temp_file('pwd.dat')
|
||||
fbk_file = temp_file('tmp_core_6000.fbk')
|
||||
|
||||
@pytest.mark.version('>=3.0.5')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
|
||||
|
||||
def test_1(act_1: Action, pwd_file: Path, fbk_file: Path):
|
||||
pwd_file.write_text('T0t@1lywr0ng')
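# First run: the -fe file holds a wrong password while ISC_PASSWORD in the environment
# holds the valid one; per the ticket, the file must take precedence, so gbak is expected to fail.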
|
||||
with act_1.envar('ISC_PASSWORD', act_1.db.password):
|
||||
act_1.expected_stderr = expected_stderr_1
|
||||
act_1.gbak(switches=['-b', '-se', 'localhost:service_mgr', '-user', act_1.db.user,
|
||||
'-fe', str(pwd_file), act_1.db.dsn, str(fbk_file)], credentials=False)
|
||||
assert act_1.clean_stderr == act_1.clean_expected_stderr
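# Second run: the file now contains the correct password, so gbak should complete
# without producing any output on stdout or stderr.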
|
||||
pwd_file.write_text(act_1.db.password)
|
||||
act_1.gbak(switches=['-b', '-se', 'localhost:service_mgr', '-user', act_1.db.user,
|
||||
'-fe', str(pwd_file), act_1.db.dsn, str(fbk_file)], credentials=False)
|
||||
|
@ -2,21 +2,22 @@
|
||||
#
|
||||
# id: bugs.core_6003
|
||||
# title: RDB$GET_TRANSACTION_CN works different in Super and Classic
|
||||
# decription:
|
||||
# Confirmed bug on 4.0.0.2411 CS: got null instead of positive number.
|
||||
# Checked on intermediate build 4.0.0.2416 (08-apr-2021 09:56) - all OK.
|
||||
#
|
||||
#
|
||||
#   NB-1: the bug can be reproduced using ISQL, but it must be launched with the '-n' command switch.
|
||||
# NB-2: connection-1 (which finally asks value of rdb$get_transaction_cn(<Tx2>)) must start Tx1
|
||||
# *BEFORE* connection-2 will start his Tx2.
|
||||
#
|
||||
#
|
||||
# tracker_id: CORE-6003
|
||||
# min_versions: ['4.0']
|
||||
# versions: 4.0
|
||||
# qmid: None
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
from firebird.qa import db_factory, python_act, Action
|
||||
from firebird.driver import tpb, Isolation
|
||||
|
||||
# version: 4.0
|
||||
# resources: None
|
||||
@ -29,64 +30,72 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
|
||||
# test_script_1
|
||||
#---
|
||||
#
|
||||
#
|
||||
# import os
|
||||
# import sys
|
||||
# import fdb
|
||||
# import time
|
||||
# import subprocess
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = 'SYSDBA'
|
||||
# os.environ["ISC_PASSWORD"] = 'masterkey'
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
# #FB_CLNT=sys.argv[1]
|
||||
# DB_NAME = dsn # 'localhost:e40'
|
||||
#
|
||||
#
|
||||
# custom_tpb = fdb.TPB()
|
||||
# custom_tpb.lock_resolution = fdb.isc_tpb_wait
|
||||
# custom_tpb.isolation_level = fdb.isc_tpb_concurrency
|
||||
#
|
||||
#
|
||||
# con1 = fdb.connect(dsn=DB_NAME) # , fb_library_name = FB_CLNT )
|
||||
# tx1a = con1.trans( default_tpb = custom_tpb )
|
||||
# tx1a.begin()
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# con2 = fdb.connect(dsn=DB_NAME) # , fb_library_name = FB_CLNT )
|
||||
# tx2a = con2.trans( default_tpb = custom_tpb )
|
||||
# tx2a.begin()
|
||||
#
|
||||
#
|
||||
# cur2 = tx2a.cursor()
|
||||
# cur2.execute('select current_transaction from rdb$database')
|
||||
# for r in cur2:
|
||||
# trn2 = int(r[0])
|
||||
# tx2a.commit()
|
||||
#
|
||||
#
|
||||
# # >>> DO NOT put it here! tx1a must be started BEFORE tx2a! >>> tx1a.begin()
|
||||
#
|
||||
#
|
||||
# cur1 = tx1a.cursor()
|
||||
# cur1.execute("select 'Result is ' || iif( rdb$get_transaction_cn(%d) is null, 'INCORRECT: NULL.', 'expected: NOT null.') from rdb$database" % trn2)
|
||||
# for r in cur1:
|
||||
# print(r[0])
|
||||
# tx1a.commit()
|
||||
#
|
||||
#
|
||||
# #print(con1.firebird_version)
|
||||
# cur1.close()
|
||||
# cur2.close()
|
||||
# con1.close()
|
||||
# con2.close()
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
expected_stdout_1 = """
|
||||
Result is expected: NOT null.
|
||||
"""
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
@pytest.mark.version('>=4.0')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
|
||||
|
||||
def test_1(act_1: Action):
|
||||
custom_tpb = tpb(isolation=Isolation.CONCURRENCY)
|
||||
with act_1.db.connect() as con1:
|
||||
tx1a = con1.transaction_manager(custom_tpb)
|
||||
tx1a.begin() # tx1a must be started BEFORE tx2a!
|
||||
with act_1.db.connect() as con2:
|
||||
tx2a = con2.transaction_manager(custom_tpb)
|
||||
tx2a.begin()
|
||||
#
|
||||
cur2 = tx2a.cursor()
|
||||
trn2 = cur2.execute('select current_transaction from rdb$database').fetchone()[0]
|
||||
tx2a.commit()
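# tx1a was started before tx2a, so rdb$get_transaction_cn() for the already committed
# tx2a must return a number here; NULL would reproduce the Classic Server bug.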
|
||||
#
|
||||
cur1 = tx1a.cursor()
|
||||
cur1.execute(f"select 'Result is ' || iif( rdb$get_transaction_cn({trn2}) is null, 'INCORRECT: NULL.', 'expected: NOT null.') from rdb$database")
|
||||
assert cur1.fetchone()[0] == 'Result is expected: NOT null.'
|
||||
|
@ -2,33 +2,34 @@
|
||||
#
|
||||
# id: bugs.core_6018
|
||||
# title: Make it possible to start multiple transactions (possibly in different attachments) using the same initial transaction snapshot
|
||||
# decription:
|
||||
#   We open the first connection using FDB and set a custom transaction parameter block which is used to start a SNAPSHOT transaction.
|
||||
#   Within this first transaction (tx1a) we insert a record with value = -2 into the test table and commit this Tx.
|
||||
#   Then we start the next transaction (also SNAPSHOT; its name = 'tx1b') and obtain the value of RDB$GET_CONTEXT('SYSTEM', 'SNAPSHOT_NUMBER').
|
||||
#   Also, in this second transaction 'tx1b' we add one more record with value = -1 into the table using an autonomous transaction --> BOTH records should be seen
|
||||
# in another attachment that will be started after this moment.
|
||||
#   But if this (new) attachment starts a Tx with the requirement to use the snapshot that was in effect for Tx1a, then it must see only the FIRST record with value=-2.
|
||||
#
|
||||
#
|
||||
#   We then launch ISQL to establish another connection and make it perform two transactions:
|
||||
# 1) 'set transaction snapshot' --> must extract both records from test table
|
||||
# === vs ===
|
||||
# 2) 'set transaction snapshot at number %(snap_num)s' --> must extract only FIRST record with value = -2.
|
||||
#
|
||||
#
|
||||
#   This is done TWO times: when the base snapshot is KNOWN (i.e. tx1b is alive) and after tx1b is committed and the base is unknown.
|
||||
# Second ISQL launch must issue error:
|
||||
# Statement failed, SQLSTATE = 0B000
|
||||
# Transaction's base snapshot number does not exist
|
||||
#
|
||||
#
|
||||
# Checked on: 4.0.0.1457 (SS,CS) -- works fine.
|
||||
#
|
||||
#
|
||||
# tracker_id: CORE-6018
|
||||
# min_versions: ['4.0']
|
||||
# versions: 4.0
|
||||
# qmid: None
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
from firebird.qa import db_factory, python_act, Action
|
||||
from firebird.driver import tpb, Isolation
|
||||
|
||||
# version: 4.0
|
||||
# resources: None
|
||||
@ -36,9 +37,9 @@ from firebird.qa import db_factory, isql_act, Action
|
||||
substitutions_1 = []
|
||||
|
||||
init_script_1 = """
|
||||
recreate table tsn(sn int);
|
||||
recreate table tsn (sn int);
|
||||
commit;
|
||||
"""
|
||||
"""
|
||||
|
||||
db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
|
||||
@ -50,70 +51,71 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# import subprocess
|
||||
# from subprocess import Popen
|
||||
# from fdb import services
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
# customTPB = ( [ fdb.isc_tpb_concurrency, fdb.isc_tpb_nowait ] )
|
||||
# con1 = fdb.connect( dsn = dsn )
|
||||
#
|
||||
#
|
||||
# tx1a=con1.trans( default_tpb = customTPB )
|
||||
#
|
||||
#
|
||||
# cur1=tx1a.cursor()
|
||||
# cur1.execute('insert into tsn(sn) values( -2 )' )
|
||||
# tx1a.commit()
|
||||
#
|
||||
#
|
||||
# sql_get_sn='''
|
||||
# execute block returns(o_sn bigint) as
|
||||
# begin
|
||||
# o_sn = RDB$GET_CONTEXT('SYSTEM', 'SNAPSHOT_NUMBER');
|
||||
# suspend;
|
||||
#
|
||||
#
|
||||
# in autonomous transaction do
|
||||
# insert into tsn(sn) values( -1 );
|
||||
# end
|
||||
# '''
|
||||
#
|
||||
#
|
||||
# tx1b=con1.trans( default_tpb = customTPB )
|
||||
# cur1=tx1b.cursor()
|
||||
# cur1.execute( sql_get_sn )
|
||||
#
|
||||
#
|
||||
# snap_num = -1
|
||||
# for r in cur1:
|
||||
# snap_num = r[0]
|
||||
# # print( r[0] )
|
||||
#
|
||||
#
|
||||
# for m in ('yet exists', 'does not exists'):
|
||||
# sql_chk_sn='''
|
||||
# -- NB!! looks strange but it seems that this 'SET BAIL ON' does not work here because
|
||||
# -- both records will be extracted in any case. // todo later: check it!
|
||||
# --set bail on;
|
||||
# set count on;
|
||||
# commit;
|
||||
# set count on;
|
||||
# commit;
|
||||
# set transaction snapshot;
|
||||
# select 'Tx base snapshot: %(m)s' as msg, t.sn as set_tx_snapshot_without_num from tsn t order by sn;
|
||||
# commit;
|
||||
# set transaction snapshot at number %(snap_num)s;
|
||||
# select 'Tx base snapshot: %(m)s' as msg, t.sn as set_tx_snapshot_at_number_N from tsn t order by sn;
|
||||
# commit;
|
||||
# commit;
|
||||
# quit;
|
||||
# ''' % ( locals() )
|
||||
#
|
||||
#
|
||||
# #print(sql_chk_sn)
|
||||
#
|
||||
#
|
||||
# runProgram('isql', [ dsn, '-q' ], sql_chk_sn)
|
||||
# if tx1b:
|
||||
# tx1b.commit()
|
||||
#
|
||||
#
|
||||
# cur1.close()
|
||||
# con1.close()
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
expected_stdout_1 = """
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
expected_stdout_1_a = """
|
||||
MSG SET_TX_SNAPSHOT_WITHOUT_NUM
|
||||
============================ ===========================
|
||||
Tx base snapshot: yet exists -2
|
||||
@ -123,7 +125,9 @@ expected_stdout_1 = """
|
||||
============================ ===========================
|
||||
Tx base snapshot: yet exists -2
|
||||
Records affected: 1
|
||||
"""
|
||||
|
||||
expected_stdout_1_b = """
|
||||
MSG SET_TX_SNAPSHOT_WITHOUT_NUM
|
||||
================================= ===========================
|
||||
Tx base snapshot: does not exists -2
|
||||
@ -134,15 +138,57 @@ expected_stdout_1 = """
|
||||
Tx base snapshot: does not exists -2
|
||||
Tx base snapshot: does not exists -1
|
||||
Records affected: 2
|
||||
"""
|
||||
"""
|
||||
|
||||
expected_stderr_1 = """
|
||||
Statement failed, SQLSTATE = 0B000
|
||||
Transaction's base snapshot number does not exist
|
||||
"""
|
||||
"""
|
||||
|
||||
@pytest.mark.version('>=4.0')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
|
||||
def test_1(act_1: Action):
|
||||
custom_tpb = tpb(isolation=Isolation.CONCURRENCY, lock_timeout=0)
|
||||
with act_1.db.connect() as con1:
|
||||
tx1a = con1.transaction_manager(custom_tpb)
|
||||
tx1a.begin()
|
||||
cur1 = tx1a.cursor()
|
||||
cur1.execute('insert into tsn (sn) values( -2 )')
|
||||
tx1a.commit()
|
||||
#
|
||||
sql_get_sn = """
|
||||
execute block returns(o_sn bigint) as
|
||||
begin
|
||||
o_sn = RDB$GET_CONTEXT('SYSTEM', 'SNAPSHOT_NUMBER');
|
||||
suspend;
|
||||
|
||||
in autonomous transaction do
|
||||
insert into tsn(sn) values( -1 );
|
||||
end
|
||||
"""
|
||||
tx1b = con1.transaction_manager(custom_tpb)
|
||||
cur1 = tx1b.cursor()
|
||||
snap_num = cur1.execute(sql_get_sn).fetchone()[0]
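# Run the ISQL check twice: first while tx1b (the owner of the base snapshot) is still
# active, then after it has been committed, when 'snapshot at number' must fail.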
|
||||
#
|
||||
for msg, expect_out, expect_err in [('yet exists', expected_stdout_1_a, ''),
|
||||
('does not exists', expected_stdout_1_b, expected_stderr_1)]:
|
||||
sql_chk_sn = f"""
|
||||
-- NB!! looks strange but it seems that this 'SET BAIL ON' does not work here because
|
||||
-- both records will be extracted in any case. // todo later: check it!
|
||||
--set bail on;
|
||||
set count on;
|
||||
commit;
|
||||
set transaction snapshot;
|
||||
select 'Tx base snapshot: {msg}' as msg, t.sn as set_tx_snapshot_without_num from tsn t order by sn;
|
||||
commit;
|
||||
set transaction snapshot at number {snap_num};
|
||||
select 'Tx base snapshot: {msg}' as msg, t.sn as set_tx_snapshot_at_number_N from tsn t order by sn;
|
||||
commit;
|
||||
quit;
|
||||
"""
|
||||
act_1.reset()
|
||||
act_1.expected_stdout = expect_out
|
||||
act_1.expected_stderr = expect_err
|
||||
act_1.isql(switches=['-q'], input=sql_chk_sn)
|
||||
if tx1b.is_active():
|
||||
tx1b.commit()
|
||||
assert act_1.clean_stdout == act_1.clean_expected_stdout
|
||||
|
@ -2,7 +2,7 @@
|
||||
#
|
||||
# id: bugs.core_6023
|
||||
# title: FB4 unable to overwrite older ods database
|
||||
# decription:
|
||||
# Database (employee.fdb) with ODS 11.2 has been compressed to .zip and stored in {FBT_REPO}/files subdirectory.
|
||||
# Test unpacks this .fdb and:
|
||||
# 1) tries to print its header (using gstat -h) - it must fail with "Wrong ODS version, expected NN, encountered 11"
|
||||
@ -10,16 +10,16 @@
|
||||
# 2) makes attempt to replace this .fdb by following action:
|
||||
# <current_FB>/gbak -b <dsn> stdout | <current_FB>/gbak -rep stdin <fdb_with_ODS_11_2>
|
||||
# 3) tries to make connection to just restored DB and write result.
|
||||
#
|
||||
#
|
||||
#   If replacement was successful then a connection *must* be established and it is enough to print SIGN(current_connection).
|
||||
#   An outcome of "1" means that everything completed OK.
|
||||
#
|
||||
#
|
||||
# Confirmed bug on 4.0.0.1803: attempt to restore fails with:
|
||||
# gbak: ERROR:unsupported on-disk structure for file ...; found 11.2, support 13.0
|
||||
# gbak: ERROR: IProvider::attachDatabase failed when loading mapping cache
|
||||
# gbak: ERROR:failed to create database localhost:...
|
||||
# gbak:Exiting before completion due to errors
|
||||
#
|
||||
#
|
||||
# ::: CAUTION :::
|
||||
#   DO NOT try to run this test on any other FB build just after checking build 4.0.0.1803!
|
||||
#   One needs to STOP the 4.0.0.1803 instance before this, or wait for at least 130 seconds,
|
||||
@ -29,16 +29,18 @@
|
||||
# See letter to Vlad et al 16.02.2021 09:02
|
||||
# ("Crash of 4.0.0.2365 when attempt to get server version just after doing the same on 4.0.0.1803")
|
||||
# :::::::::::::::
|
||||
#
|
||||
#
|
||||
# Checked on 4.0.0.2365 - all OK.
|
||||
#
|
||||
#
|
||||
# tracker_id: CORE-6023
|
||||
# min_versions: ['4.0']
|
||||
# versions: 4.0
|
||||
# qmid: None
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
import zipfile
|
||||
from pathlib import Path
|
||||
from firebird.qa import db_factory, python_act, Action, temp_file
|
||||
|
||||
# version: 4.0
|
||||
# resources: None
|
||||
@ -47,89 +49,112 @@ substitutions_1 = [('[ \t]+', ' '), ('expected [\\d]+', 'expected NN')]
|
||||
|
||||
init_script_1 = """"""
|
||||
|
||||
# Database is extracted from zip
|
||||
db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
|
||||
|
||||
# test_script_1
|
||||
#---
|
||||
#
|
||||
#
|
||||
# import os
|
||||
# import sys
|
||||
# import zipfile
|
||||
# import time
|
||||
# import subprocess
|
||||
# from subprocess import PIPE
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
#
|
||||
#
|
||||
# #-----------------------------------
|
||||
#
|
||||
#
|
||||
# def flush_and_close(file_handle):
|
||||
# # https://docs.python.org/2/library/os.html#os.fsync
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
|
||||
# global os
|
||||
#
|
||||
#
|
||||
# file_handle.flush()
|
||||
# os.fsync(file_handle.fileno())
|
||||
#
|
||||
#
|
||||
# file_handle.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def cleanup( f_names_list ):
|
||||
# global os
|
||||
# for i in range(len( f_names_list )):
|
||||
# if os.path.isfile( f_names_list[i]):
|
||||
# os.remove( f_names_list[i] )
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
# #print(db_conn.firebird_version)
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
# zf = zipfile.ZipFile( os.path.join(context['files_location'],'core_6023-ods-11_2-fdb.zip') )
|
||||
# tmp_fdb_to_replace = os.path.join( '$(DATABASE_LOCATION)', 'core_6023-ods-11_2.fdb' )
|
||||
#
|
||||
#
|
||||
# zf.extract( os.path.split(tmp_fdb_to_replace)[1], '$(DATABASE_LOCATION)')
|
||||
# zf.close()
|
||||
#
|
||||
#
|
||||
# # Ensure that we really have deal with .fdb file with old ODS.
|
||||
# # Must issue: "Wrong ODS version, expected 13, encountered 11":
|
||||
# runProgram('gstat',['-h', tmp_fdb_to_replace])
|
||||
#
|
||||
#
|
||||
# f_restore_with_replace=open( os.path.join(context['temp_directory'],'tmp_6023_rep.err'), 'w')
|
||||
#
|
||||
#
|
||||
# p_sender = subprocess.Popen( [ context['gbak_path'], '-b', dsn, 'stdout' ], stdout=PIPE)
|
||||
# p_getter = subprocess.Popen( [ context['gbak_path'], '-rep', 'stdin', 'localhost:' + os.path.join( '$(DATABASE_LOCATION)', tmp_fdb_to_replace ) ], stdin = p_sender.stdout, stdout = PIPE, stderr = f_restore_with_replace)
|
||||
# p_sender.stdout.close()
|
||||
# p_getter_stdout, p_getter_stderr = p_getter.communicate()
|
||||
#
|
||||
#
|
||||
# flush_and_close(f_restore_with_replace)
|
||||
#
|
||||
#
|
||||
# with open(f_restore_with_replace.name) as f:
|
||||
# for line in f:
|
||||
# print('UNEXPECTED STDERR: '+line)
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# runProgram( 'isql',['-q', 'localhost:' + os.path.join( '$(DATABASE_LOCATION)', tmp_fdb_to_replace )], 'set list on; select sign(current_connection) as restore_with_replace_result from rdb$database;' )
|
||||
#
|
||||
#
|
||||
# cleanup( (tmp_fdb_to_replace, f_restore_with_replace.name) )
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
expected_stdout_1 = """
|
||||
RESTORE_WITH_REPLACE_RESULT 1
|
||||
"""
|
||||
"""
|
||||
|
||||
expected_stderr_1 = """
|
||||
Wrong ODS version, expected 13, encountered 11
|
||||
"""
|
||||
"""
|
||||
|
||||
fdb_112_file = temp_file('core_6023-ods-11_2.fdb')
|
||||
fbk_file = temp_file('core_6023.fbk')
|
||||
|
||||
@pytest.mark.version('>=4.0')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
|
||||
|
||||
def test_1(act_1: Action, fdb_112_file: Path, fbk_file: Path):
|
||||
zipped_fdb_file = zipfile.Path(act_1.vars['files'] / 'core_6023-ods-11_2-fdb.zip',
|
||||
at='core_6023-ods-11_2.fdb')
|
||||
fdb_112_file.write_bytes(zipped_fdb_file.read_bytes())
|
||||
# Change permissions
|
||||
fdb_112_file.chmod(16895)
|
||||
# Ensure that we really are dealing with a .fdb file with an old ODS.
|
||||
act_1.expected_stderr = expected_stderr_1
|
||||
act_1.gstat(switches=['-h', str(fdb_112_file)], connect_db=False)
|
||||
assert act_1.clean_stderr == act_1.clean_expected_stderr
|
||||
# Backup work database and restore over extracted db
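# Note: the original script piped gbak output directly ('gbak -b ... stdout | gbak -rep stdin ...');
# here an intermediate .fbk file is used instead, which should have the same effect on the target database.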
|
||||
act_1.reset()
|
||||
act_1.gbak(switches=['-b', act_1.db.dsn, str(fbk_file)])
|
||||
act_1.reset()
|
||||
act_1.gbak(switches=['-rep', str(fbk_file), f'localhost:{fdb_112_file}'])
|
||||
#
|
||||
act_1.reset()
|
||||
act_1.expected_stdout = expected_stdout_1
|
||||
act_1.isql(switches=['-q', f'localhost:{fdb_112_file}'], connect_db=False,
|
||||
input='set list on; select sign(current_connection) as restore_with_replace_result from rdb$database;')
|
||||
assert act_1.clean_stdout == act_1.clean_expected_stdout
|
||||
|
@ -2,14 +2,14 @@
|
||||
#
|
||||
# id: bugs.core_6028
|
||||
# title: Trigger on system table restored in FB3 database and can't be deleted
|
||||
# decription:
|
||||
#   Here we restore a database that was created in FB 2.5.9 and contained triggers for tables RDB$RELATION_FIELDS, MON$STATEMENTS and MON$ATTACHMENTS.
|
||||
#   Table RDB$RELATION_FIELDS had BEFORE INSERT and AFTER INSERT triggers. The monitoring tables had BEFORE DELETE and AFTER DELETE triggers.
|
||||
#   Also, table 'TLOG' is in this database; it serves as a log of actions: create/drop table, delete from mon$statements and delete from mon$attachments.
|
||||
# For DDL like 'create table test(x int)' and 'drop table test' table TLOG will contain two records which are added there by triggers on RDB$RELATION_FIELDS.
|
||||
#   Further, if we create an additional connection and run a statement which returns at least one record (like 'select ... from rdb$database'), then in 2.5 two records
|
||||
# had been added into TLOG for each of: 'DELETE FROM MON$STATEMENTS' and 'DELETE FROM MON$ATTACHMENTS'.
|
||||
#
|
||||
#
|
||||
# Finally, BEFORE fix of this ticket issue (e.g. in WI-V3.0.5.33109):
|
||||
# 1) restored database contained following triggers: TRG_MON_ATTACHMENTS*, TRG_MON_STATEMENTS* and TRG_RDB_REL_FIELDS*
|
||||
# 2) statements 'create table' and 'drop table' led to logging following records in TLOG:
|
||||
@ -17,14 +17,14 @@
|
||||
# rdb$relation_fields: record has been created
|
||||
# rdb$relation_fields: record is to be removed
|
||||
# rdb$relation_fields: record has been removed
|
||||
# 3) command 'delete from mon$statement' (when there was another non-system connection with one running or completed statement)
|
||||
# led to logging these records in TLOG:
|
||||
# mon$statements: record is to be removed
|
||||
# mon$statements: record has been removed
|
||||
# 4) command 'delete from mon$attachments' (when there was another non-system connection) led to logging these records in TLOG:
|
||||
# mon$attachments: record is to be removed
|
||||
# mon$attachments: record has been removed
|
||||
#
|
||||
#
|
||||
#   All of the above-mentioned should NOT appear in a database that is restored AFTER this ticket was fixed.
|
||||
#   Finally, we try to create three new triggers for tables rdb$relation_fields, mon$statements and mon$attachments.
|
||||
# All of these attempts must FAIL with:
|
||||
@ -35,14 +35,17 @@
|
||||
# ========
|
||||
# Ticket issue confirmed on: 3.0.5.33109
|
||||
# Checked on 3.0.5.33115: OK, 3.721s.
|
||||
#
|
||||
#
|
||||
# tracker_id: CORE-6028
|
||||
# min_versions: ['3.0.5']
|
||||
# versions: 3.0.5
|
||||
# qmid: None
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
import zipfile
|
||||
from pathlib import Path
|
||||
from firebird.qa import db_factory, python_act, Action, temp_file, Database
|
||||
from firebird.driver import SrvRestoreFlag, DatabaseError
|
||||
|
||||
# version: 3.0.5
|
||||
# resources: None
|
||||
@ -52,39 +55,40 @@ substitutions_1 = []
|
||||
init_script_1 = """"""
|
||||
|
||||
db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
db_1_tmp = db_factory(sql_dialect=3, filename='tmp_core_602.fdb', do_not_create=True)
|
||||
|
||||
# test_script_1
|
||||
#---
|
||||
#
|
||||
#
|
||||
# import os
|
||||
# import fdb
|
||||
# import time
|
||||
# import zipfile
|
||||
# import difflib
|
||||
# import subprocess
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
#
|
||||
#
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def flush_and_close( file_handle ):
|
||||
# # https://docs.python.org/2/library/os.html#os.fsync
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
|
||||
# global os
|
||||
#
|
||||
#
|
||||
# file_handle.flush()
|
||||
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
|
||||
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
|
||||
# os.fsync(file_handle.fileno())
|
||||
# file_handle.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def cleanup( f_names_list ):
|
||||
# global os
|
||||
# for i in range(len( f_names_list )):
|
||||
@ -95,20 +99,20 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# else:
|
||||
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
|
||||
# del_name = None
|
||||
#
|
||||
#
|
||||
# if del_name and os.path.isfile( del_name ):
|
||||
# os.remove( del_name )
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# zf = zipfile.ZipFile( os.path.join(context['files_location'],'core_6028_25.zip') )
|
||||
# tmpfbk = 'core_6028_25.fbk'
|
||||
# zf.extract( tmpfbk, '$(DATABASE_LOCATION)')
|
||||
# zf.close()
|
||||
#
|
||||
#
|
||||
# tmpfbk='$(DATABASE_LOCATION)'+tmpfbk
|
||||
# tmpfdb='$(DATABASE_LOCATION)'+'tmp_core_6028.fdb'
|
||||
#
|
||||
#
|
||||
# f_restore_log=open( os.path.join(context['temp_directory'],'tmp_core_6028_restore.log'), 'w')
|
||||
# subprocess.check_call([context['fbsvcmgr_path'],"localhost:service_mgr",
|
||||
# "action_restore",
|
||||
@ -117,48 +121,48 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# "res_replace",
|
||||
# "verbose"
|
||||
# ],
|
||||
# stdout=f_restore_log,
|
||||
# stdout=f_restore_log,
|
||||
# stderr=subprocess.STDOUT)
|
||||
# flush_and_close( f_restore_log )
|
||||
#
|
||||
#
|
||||
# # https://pythonhosted.org/fdb/reference.html#fdb.Cursor
|
||||
#
|
||||
#
|
||||
# con_worker=fdb.connect(dsn = 'localhost:'+tmpfdb)
|
||||
# con_worker_attachment_id = con_worker.attachment_id
|
||||
#
|
||||
#
|
||||
# con_worker.execute_immediate( 'create table test(id int)' )
|
||||
# con_worker.commit()
|
||||
# con_worker.execute_immediate( 'drop table test' )
|
||||
# con_worker.commit()
|
||||
#
|
||||
#
|
||||
# cur_worker=con_worker.cursor()
|
||||
# cur_worker.execute( "select coalesce(rt.rdb$trigger_name, 'NO USER-DEFINED TRIGGERS IN JUST RESTORED DATABASE.') from rdb$database rd left join rdb$triggers rt on rt.rdb$system_flag is distinct from 1 order by 1" )
|
||||
# for r in cur_worker:
|
||||
# print( r[0] )
|
||||
#
|
||||
#
|
||||
# con_killer=fdb.connect(dsn = 'localhost:'+tmpfdb)
|
||||
# cur_killer=con_killer.cursor()
|
||||
#
|
||||
#
|
||||
# cur_killer.execute( 'delete from mon$statements s where s.mon$attachment_id = %d' % con_worker_attachment_id )
|
||||
# con_killer.commit()
|
||||
#
|
||||
#
|
||||
# cur_killer.execute( 'delete from mon$attachments a where a.mon$attachment_id = %d' % con_worker_attachment_id )
|
||||
# con_killer.commit()
|
||||
#
|
||||
#
|
||||
# cur_killer.execute( "select coalesce(t.action, 'NO ACTIONS WAS LOGGED IN THE TABLE TLOG.') as sys_tabs_action from rdb$database rd left join tlog t on 1=1" )
|
||||
# for r in cur_killer:
|
||||
# print( r[0] )
|
||||
#
|
||||
#
|
||||
# try:
|
||||
# cur_worker.close()
|
||||
# con_worker.close()
|
||||
# except Exception,e:
|
||||
# pass
|
||||
#
|
||||
#
|
||||
# #-----------------------
|
||||
#
|
||||
#
|
||||
# ddl_probes=[]
|
||||
#
|
||||
#
|
||||
# ddl_probes.append(
|
||||
# '''
|
||||
# create or alter trigger new_trg_rdb_rel_flds_bi for rdb$relation_fields active before insert position 0 as
|
||||
@ -167,7 +171,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# end
|
||||
# '''
|
||||
# )
|
||||
#
|
||||
#
|
||||
# ddl_probes.append(
|
||||
# '''
|
||||
# create or alter trigger new_trg_mon_stm_bd for mon$statements active before delete position 0 as
|
||||
@ -176,7 +180,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# end
|
||||
# '''
|
||||
# )
|
||||
#
|
||||
#
|
||||
# ddl_probes.append(
|
||||
# '''
|
||||
# create or alter trigger new_trg_mon_att_bd for mon$attachments active before delete position 0 as
|
||||
@ -185,7 +189,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# end
|
||||
# '''
|
||||
# )
|
||||
#
|
||||
#
|
||||
# for s in ddl_probes:
|
||||
# try:
|
||||
# con_killer.execute_immediate( s )
|
||||
@ -193,39 +197,112 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# print( e[0].split('\\n')[-1] ) # Get last substring from error message: "- no permission for ALTER access to TABLE RDB$RELATION_FIELDS"
|
||||
# print( e[1] ) # SQLCODE: -607
|
||||
# print( e[2] ) # gdscode: 335544351L
|
||||
#
|
||||
#
|
||||
# #-----------------------
|
||||
#
|
||||
#
|
||||
# cur_killer.close()
|
||||
# con_killer.close()
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# # Cleanup:
|
||||
# ##########
|
||||
# time.sleep(1)
|
||||
# cleanup( (tmpfbk, tmpfdb, f_restore_log) )
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
expected_stdout_1 = """
|
||||
NO USER-DEFINED TRIGGERS IN JUST RESTORED DATABASE.
|
||||
NO ACTIONS WAS LOGGED IN THE TABLE TLOG.
|
||||
unsuccessful metadata update
|
||||
-CREATE OR ALTER TRIGGER NEW_TRG_RDB_REL_FLDS_BI failed
|
||||
-no permission for ALTER access to TABLE RDB$RELATION_FIELDS
|
||||
-607
|
||||
(335544351, 336397272, 335544352)
|
||||
unsuccessful metadata update
|
||||
-CREATE OR ALTER TRIGGER NEW_TRG_MON_STM_BD failed
|
||||
-no permission for ALTER access to TABLE MON$STATEMENTS
|
||||
-607
|
||||
(335544351, 336397272, 335544352)
|
||||
unsuccessful metadata update
|
||||
-CREATE OR ALTER TRIGGER NEW_TRG_MON_ATT_BD failed
|
||||
-no permission for ALTER access to TABLE MON$ATTACHMENTS
|
||||
-607
|
||||
(335544351, 336397272, 335544352)
|
||||
"""
|
||||
|
||||
- no permission for ALTER access to TABLE RDB$RELATION_FIELDS
|
||||
-607
|
||||
335544351
|
||||
- no permission for ALTER access to TABLE MON$STATEMENTS
|
||||
-607
|
||||
335544351
|
||||
- no permission for ALTER access to TABLE MON$ATTACHMENTS
|
||||
-607
|
||||
335544351
|
||||
"""
|
||||
fbk_file_1 = temp_file('core_6028_25.fbk')
|
||||
|
||||
ddl_probes = ["""
|
||||
create or alter trigger new_trg_rdb_rel_flds_bi for rdb$relation_fields active before insert position 0 as
|
||||
begin
|
||||
insert into tlog(id, action) values( gen_id(g, 111), 'rdb$relation_fields: record is to be created' );
|
||||
end
|
||||
""", """
|
||||
create or alter trigger new_trg_mon_stm_bd for mon$statements active before delete position 0 as
|
||||
begin
|
||||
insert into tlog(id, action) values( gen_id(g, 222), 'mon$statements: record is to be removed' );
|
||||
end
|
||||
""", """
|
||||
create or alter trigger new_trg_mon_att_bd for mon$attachments active before delete position 0 as
|
||||
begin
|
||||
insert into tlog(id, action) values( gen_id(g, 333), 'mon$attachments: record is to be removed' );
|
||||
end
|
||||
"""]
|
||||
|
||||
@pytest.mark.version('>=3.0.5')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
|
||||
|
||||
def test_1(act_1: Action, fbk_file_1: Path, db_1_tmp: Database, capsys):
|
||||
zipped_fbk_file = zipfile.Path(act_1.vars['files'] / 'core_6028_25.zip',
|
||||
at='core_6028_25.fbk')
|
||||
fbk_file_1.write_bytes(zipped_fbk_file.read_bytes())
|
||||
#
|
||||
with act_1.connect_server() as srv:
|
||||
srv.database.restore(backup=fbk_file_1, database=db_1_tmp.db_path,
|
||||
flags=SrvRestoreFlag.REPLACE)
|
||||
srv.wait()
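# Worker connection: run some DDL (create/drop table) and then list user-defined triggers;
# after the fix the restored database must not contain any of them.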
|
||||
#
|
||||
con_worker = db_1_tmp.connect()
|
||||
con_worker_attachment_id = con_worker.info.id
|
||||
con_worker.execute_immediate('create table test(id int)')
|
||||
con_worker.commit()
|
||||
con_worker.execute_immediate('drop table test')
|
||||
con_worker.commit()
|
||||
#
|
||||
cur_worker=con_worker.cursor()
|
||||
cur_worker.execute("select coalesce(rt.rdb$trigger_name, 'NO USER-DEFINED TRIGGERS IN JUST RESTORED DATABASE.') from rdb$database rd left join rdb$triggers rt on rt.rdb$system_flag is distinct from 1 order by 1")
|
||||
for r in cur_worker:
|
||||
print(r[0])
|
||||
#
|
||||
with db_1_tmp.connect() as con_killer:
|
||||
cur_killer = con_killer.cursor()
|
||||
cur_killer.execute(f'delete from mon$statements s where s.mon$attachment_id = {con_worker_attachment_id}')
|
||||
con_killer.commit()
|
||||
cur_killer.execute(f'delete from mon$attachments a where a.mon$attachment_id = {con_worker_attachment_id}')
|
||||
con_killer.commit()
|
||||
cur_killer.execute("select coalesce(t.action, 'NO ACTIONS WAS LOGGED IN THE TABLE TLOG.') as sys_tabs_action from rdb$database rd left join tlog t on 1=1")
|
||||
for r in cur_killer:
|
||||
print(r[0])
|
||||
#
|
||||
try:
|
||||
cur_worker.close()
|
||||
except Exception:
|
||||
pass
|
||||
try:
|
||||
con_worker.close()
|
||||
except Exception:
|
||||
pass
|
||||
#
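# Every attempt to create a trigger on a system or monitoring table must now be rejected
# with "no permission for ALTER access ..." (SQLCODE -607), as reflected in expected_stdout_1.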
|
||||
for cmd in ddl_probes:
|
||||
try:
|
||||
con_killer.execute_immediate(cmd)
|
||||
except DatabaseError as e:
|
||||
print(e)
|
||||
print(e.sqlcode)
|
||||
print(e.gds_codes)
|
||||
# Check
|
||||
act_1.reset()
|
||||
act_1.expected_stdout = expected_stdout_1
|
||||
act_1.stdout = capsys.readouterr().out
|
||||
assert act_1.clean_stdout == act_1.clean_expected_stdout
|
||||
|
@ -2,64 +2,69 @@
|
||||
#
|
||||
# id: bugs.core_6040
|
||||
# title: Metadata script extracted using ISQL is invalid/incorrect when table has COMPUTED BY field
|
||||
# decription:
|
||||
# Confirmed bug on 3.0.5.33118, 4.0.0.1485
|
||||
#   NB: the 'collate' clause must be present in the DDL of the computed-by field, otherwise the extracted metadata script will be correct.
|
||||
#
|
||||
#
|
||||
# Checked on:
|
||||
# 4.0.0.1487: OK, 3.674s.
|
||||
# 3.0.5.33120: OK, 2.622s.
|
||||
#
|
||||
#
|
||||
# 15.04.2021. Adapted for run both on Windows and Linux. Checked on:
|
||||
# Windows: 4.0.0.2416
|
||||
# Linux: 4.0.0.2416
|
||||
#
|
||||
#
|
||||
# tracker_id: CORE-6040
|
||||
# min_versions: ['3.0.5']
|
||||
# versions: 3.0.5
|
||||
# qmid: None
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
from firebird.qa import db_factory, python_act, Action
|
||||
|
||||
# version: 3.0.5
|
||||
# resources: None
|
||||
|
||||
substitutions_1 = []
|
||||
|
||||
init_script_1 = """"""
|
||||
init_script_1 = """
|
||||
recreate table users (
|
||||
f01 varchar(32) character set win1252 not null collate win_ptbr
|
||||
,f02 computed by ( f01 collate win_ptbr )
|
||||
);
|
||||
"""
|
||||
|
||||
db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
db_1 = db_factory(sql_dialect=3, init=init_script_1, charset='win1252')
|
||||
|
||||
# test_script_1
|
||||
#---
|
||||
#
|
||||
#
|
||||
# import os
|
||||
# import time
|
||||
# import subprocess
|
||||
# import fdb
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def flush_and_close( file_handle ):
|
||||
# # https://docs.python.org/2/library/os.html#os.fsync
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
|
||||
# global os
|
||||
#
|
||||
#
|
||||
# file_handle.flush()
|
||||
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
|
||||
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
|
||||
# os.fsync(file_handle.fileno())
|
||||
# file_handle.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def cleanup( f_names_list ):
|
||||
# global os
|
||||
# for i in range(len( f_names_list )):
|
||||
@ -71,20 +76,20 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
|
||||
# print('type(f_names_list[i])=',type(f_names_list[i]))
|
||||
# del_name = None
|
||||
#
|
||||
#
|
||||
# if del_name and os.path.isfile( del_name ):
|
||||
# os.remove( del_name )
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# tmpfdb='$(DATABASE_LOCATION)'+'tmp_core_6040.fdb'
|
||||
#
|
||||
#
|
||||
# cleanup( (tmpfdb,) )
|
||||
#
|
||||
#
|
||||
# con = fdb.create_database( dsn = 'localhost:'+tmpfdb, charset = 'win1252' )
|
||||
# con.close()
|
||||
# con=fdb.connect( dsn = 'localhost:'+tmpfdb )
|
||||
#
|
||||
#
|
||||
# sql_ddl='''
|
||||
# recreate table users (
|
||||
# f01 varchar(32) character set win1252 not null collate win_ptbr
|
||||
@ -93,58 +98,64 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# '''
|
||||
# con.execute_immediate(sql_ddl)
|
||||
# con.commit()
|
||||
#
|
||||
#
|
||||
# f_meta_sql = open( os.path.join(context['temp_directory'],'tmp_meta_6040.sql'), 'w')
|
||||
# f_meta_err = open( os.path.join(context['temp_directory'],'tmp_meta_6040.err'), 'w')
|
||||
#
|
||||
#
|
||||
# subprocess.call( [ context['isql_path'], "-x", "-ch", "win1252", 'localhost:'+tmpfdb],
|
||||
# stdout = f_meta_sql,
|
||||
# stderr = f_meta_err
|
||||
# )
|
||||
#
|
||||
#
|
||||
# flush_and_close( f_meta_sql )
|
||||
# flush_and_close( f_meta_err )
|
||||
#
|
||||
#
|
||||
# con.execute_immediate('drop table users')
|
||||
# con.commit()
|
||||
# con.close()
|
||||
#
|
||||
#
|
||||
# f_apply_log = open( os.path.join(context['temp_directory'],'tmp_apply_6040.log'), 'w')
|
||||
# f_apply_err = open( os.path.join(context['temp_directory'],'tmp_apply_6040.err'), 'w')
|
||||
#
|
||||
#
|
||||
# subprocess.call( [ context['isql_path'], "-ch", "win1252", 'localhost:'+tmpfdb, "-i", f_meta_sql.name ],
|
||||
# stdout = f_apply_log,
|
||||
# stderr = f_apply_err
|
||||
# )
|
||||
#
|
||||
#
|
||||
# flush_and_close( f_apply_log )
|
||||
# flush_and_close( f_apply_err )
|
||||
#
|
||||
#
|
||||
# with open( f_meta_err.name,'r') as f:
|
||||
# for line in f:
|
||||
# print("METADATA EXTRACTION PROBLEM, STDERR: "+line)
|
||||
#
|
||||
#
|
||||
# with open( f_apply_log.name,'r') as f:
|
||||
# for line in f:
|
||||
# print("METADATA APPLYING PROBLEM, STDOUT: "+line)
|
||||
#
|
||||
#
|
||||
# with open( f_apply_err.name,'r') as f:
|
||||
# for line in f:
|
||||
# print("METADATA APPLYING PROBLEM, STDERR: "+line)
|
||||
#
|
||||
#
|
||||
# # cleanup:
|
||||
# ##########
|
||||
# time.sleep(1)
|
||||
# cleanup( (f_meta_sql, f_meta_err, f_apply_log, f_apply_err, tmpfdb, ) )
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
|
||||
@pytest.mark.version('>=3.0.5')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
|
||||
|
||||
def test_1(act_1: Action):
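# Extract the metadata script, drop the table, then re-apply the extracted script;
# before the fix the script produced for 'computed by (... collate ...)' was invalid.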
|
||||
act_1.isql(switches=['-x'], charset='win1252')
|
||||
meta = act_1.stdout
|
||||
#
|
||||
with act_1.db.connect() as con:
|
||||
con.execute_immediate('drop table users')
|
||||
con.commit()
|
||||
#
|
||||
act_1.reset()
|
||||
act_1.isql(switches=[], charset='win1252', input=meta)
|
||||
|
@ -2,10 +2,10 @@
|
||||
#
|
||||
# id: bugs.core_6043
|
||||
# title: GTTs do not release used space
|
||||
# decription:
|
||||
# === For FB 3.x ===
|
||||
# Test obtains full path to $fb_home via FBSVCMGR info_get_env.
|
||||
#   Then it makes a copy of the 'databases.conf' file that is in the $fb_home directory, because
|
||||
# following lines will be added to that 'databases.conf':
|
||||
# ===
|
||||
# tmp_6043_keep = ...
|
||||
@ -16,35 +16,35 @@
|
||||
# We check that:
|
||||
# * COMMIT RETAIN preserves record that was inserted in the statement before this commit;
|
||||
# * ROLLBACK RETAIN does NOT delete record that was inserted before COMMIT RETAIN.
|
||||
#
|
||||
#
|
||||
# Then we check the same for ClearGTTAtRetaining = 1 (i.e. for default value) - just to ensure that it works.
|
||||
# Finally, previous databases.conf file is restored in initial state.
|
||||
#
|
||||
#
|
||||
# === For FB 4.x ===
|
||||
# It is enough just to run ISQL; databases.conf can be left unchanged.
|
||||
#
|
||||
#
|
||||
# 13.12.2019.
|
||||
# It seems that we have to DISABLE BUFFERING in any IO operation which relates to preparing scripts, configs or logs.
|
||||
# Otherwise sporadic runtime errors can occur: I/O error during "CreateFile (open)" operation for file "..."
|
||||
#
|
||||
#
|
||||
# Explanation:
|
||||
# https://docs.python.org/2/library/functions.html#open
|
||||
# https://stackoverflow.com/questions/18984092/python-2-7-write-to-file-instantly/41506739
|
||||
#
|
||||
#
|
||||
# Checked on:
|
||||
# 4.0.0.1687 SS: 1.536s.
|
||||
# 4.0.0.1685 CS: 2.026s.
|
||||
# 3.0.5.33207 SS: 1.435s.
|
||||
# 3.0.5.33152 SC: 1.243s.
|
||||
# 3.0.5.33206 CS: 2.626s.
|
||||
#
|
||||
#
|
||||
# tracker_id: CORE-6043
|
||||
# min_versions: ['3.0']
|
||||
# versions: 3.0, 4.0
|
||||
# qmid: None
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
from firebird.qa import db_factory, isql_act, python_act, Action
|
||||
|
||||
# version: 3.0
|
||||
# resources: None
|
||||
@ -62,34 +62,34 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# import time
|
||||
# import shutil
|
||||
# from fdb import services
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
#
|
||||
#
|
||||
# this_db = db_conn.database_name
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
# svc = services.connect(host='localhost', user= user_name, password= user_password)
|
||||
# fb_home = svc.get_home_directory()
|
||||
# svc.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def flush_and_close( file_handle ):
|
||||
# # https://docs.python.org/2/library/os.html#os.fsync
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
|
||||
# global os
|
||||
#
|
||||
#
|
||||
# file_handle.flush()
|
||||
# if file_handle.mode not in ('r', 'rb'):
|
||||
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
|
||||
# os.fsync(file_handle.fileno())
|
||||
# file_handle.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def cleanup( f_names_list ):
|
||||
# global os
|
||||
# for i in range(len( f_names_list )):
|
||||
@ -100,59 +100,59 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# else:
|
||||
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
|
||||
# del_name = None
|
||||
#
|
||||
#
|
||||
# if del_name and os.path.isfile( del_name ):
|
||||
# os.remove( del_name )
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# tmp_fdb_keep=os.path.join(context['temp_directory'],'tmp_6043.keep_GTT_data.fdb')
|
||||
# tmp_fdb_kill=os.path.join(context['temp_directory'],'tmp_6043.kill_GTT_data.fdb')
|
||||
#
|
||||
#
|
||||
# shutil.copy2( this_db, tmp_fdb_keep )
|
||||
# shutil.copy2( this_db, tmp_fdb_kill )
|
||||
#
|
||||
#
|
||||
# dbconf = os.path.join( fb_home, 'databases.conf')
|
||||
# dbcbak = os.path.join( fb_home, 'databases.bak')
|
||||
#
|
||||
#
|
||||
#  # Result: fb_home is the full path to the FB instance home (with trailing slash).
|
||||
# shutil.copy2( dbconf, dbcbak )
|
||||
#
|
||||
#
|
||||
# # ----------------------------------------------------------------------
|
||||
#
|
||||
#
|
||||
# isql_script='''
|
||||
# set list on;
|
||||
#
|
||||
#
|
||||
# recreate global temporary table gtt (id int) on commit delete rows;
|
||||
# commit;
|
||||
#
|
||||
#
|
||||
# set count off;
|
||||
# insert into gtt values (3);
|
||||
# commit retain;
|
||||
#
|
||||
#
|
||||
# set count on;
|
||||
# select * from gtt; -- point 1
|
||||
#
|
||||
#
|
||||
# set count off;
|
||||
# insert into gtt values (4);
|
||||
# rollback retain;
|
||||
#
|
||||
#
|
||||
# set count on;
|
||||
# select * from gtt; -- point 2
|
||||
# '''
|
||||
#
|
||||
#
|
||||
# f_isql_cmd=open( os.path.join(context['temp_directory'],'tmp_isql_6043.sql'), 'w')
|
||||
# f_isql_cmd.write( isql_script )
|
||||
# flush_and_close( f_isql_cmd )
|
||||
#
|
||||
#
|
||||
# # --------------------------------------------------------------------
|
||||
#
|
||||
#
|
||||
# f_dbconf=open( dbconf,'a')
|
||||
# f_dbconf.seek(0, 2)
|
||||
#
|
||||
#
|
||||
# alias_data='''
|
||||
#
|
||||
#
|
||||
#  # Created temporarily by fbtest, CORE-6043. Should be removed automatically.
|
||||
# # WARNING! DO NOT ADD YET ANOTHER ALIAS FOR THE SAME DATABASE!
|
||||
# # Attempt to connect to any of these aliases will fail with message:
|
||||
@ -162,7 +162,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# # =======
|
||||
# # Server log will contain:
|
||||
# # File databases.conf contains bad data: Duplicated configuration for database <file>
|
||||
#
|
||||
#
|
||||
# tmp_6043_keep = %(tmp_fdb_keep)s
|
||||
# {
|
||||
# # Value of 0 makes engine to not clear GTT data on COMMIT/ROLLBACK RETAINING and let application to see it.
|
||||
@ -171,57 +171,58 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# # be removed at Firebird 5.
|
||||
# ClearGTTAtRetaining = 0
|
||||
# }
|
||||
#
|
||||
#
|
||||
# tmp_6043_kill = %(tmp_fdb_kill)s
|
||||
# {
|
||||
# # Check that 1 really works as default value, i.e. clears GTT data on commit/rollback retaining.
|
||||
# ClearGTTAtRetaining = 1
|
||||
# }
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# ''' % locals()
|
||||
#
|
||||
#
|
||||
# f_dbconf.write(alias_data)
|
||||
# flush_and_close( f_dbconf )
|
||||
#
|
||||
#
|
||||
# # 4debug: shutil.copy2( fb_home+'databases.conf', fb_home+'databases.conf.check_it' )
|
||||
#
|
||||
#
|
||||
# # NB: buffering = 0 - we want this file be immediately on disk after closing in order to avoid excessive waiting for it
|
||||
# ###################
|
||||
# f_isql_keep_log=open( os.path.join(context['temp_directory'],'tmp_6043.keep_GTT_data.log'), 'w')
|
||||
# subprocess.call([ context['isql_path'], 'localhost:tmp_6043_keep', "-q", "-i", f_isql_cmd.name], stdout=f_isql_keep_log, stderr=subprocess.STDOUT)
|
||||
# flush_and_close( f_isql_keep_log )
|
||||
#
|
||||
#
|
||||
# with open( f_isql_keep_log.name,'r') as f:
|
||||
# for line in f:
|
||||
# if line.split():
|
||||
# print( 'When ClearGTTAtRetaining = 0: ' + line )
|
||||
#
|
||||
#
|
||||
# ####################################################################
|
||||
#
|
||||
#
|
||||
# # NB: buffering = 0 - we want this file be immediately on disk after closing in order to avoid excessive waiting for it
|
||||
# f_isql_kill_log=open( os.path.join(context['temp_directory'],'tmp_6043.kill_GTT_data.log'), 'w')
|
||||
# subprocess.call([context['isql_path'], 'localhost:tmp_6043_kill', "-q", "-i", f_isql_cmd.name], stdout=f_isql_kill_log, stderr=subprocess.STDOUT)
|
||||
# flush_and_close( f_isql_kill_log )
|
||||
#
|
||||
#
|
||||
# with open( f_isql_kill_log.name,'r') as f:
|
||||
# for line in f:
|
||||
# if line.split():
|
||||
# print( 'When ClearGTTAtRetaining = 1: ' + line )
|
||||
#
|
||||
#
|
||||
# ####################################################################
|
||||
#
|
||||
#
|
||||
# # Restore previous content:
|
||||
# shutil.move( dbcbak, dbconf )
|
||||
#
|
||||
#
|
||||
# #####################################################################
|
||||
# # Cleanup:
|
||||
# time.sleep(1)
|
||||
# cleanup( ( f_isql_keep_log, f_isql_kill_log, f_isql_cmd, tmp_fdb_keep, tmp_fdb_kill ) )
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
expected_stdout_1 = """
|
||||
When ClearGTTAtRetaining = 0: ID 3
|
||||
@ -233,10 +234,9 @@ expected_stdout_1 = """
|
||||
When ClearGTTAtRetaining = 1: Records affected: 0
|
||||
"""
|
||||
|
||||
@pytest.mark.version('>=3.0')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
@pytest.mark.version('>=3.0,<4')
|
||||
def test_1(act_1: Action):
|
||||
pytest.skip("Requires changes to databases.conf")
|
||||
|
||||
|
||||
# version: 4.0
|
||||
|
@ -2,54 +2,54 @@
|
||||
#
|
||||
# id: bugs.core_6048
|
||||
# title: Provide ability to see current state of DB encryption
|
||||
# decription:
|
||||
# Test database that is created by fbtest framework will be encrypted here using IBSurgeon Demo Encryption package
|
||||
# ( https://ib-aid.com/download-demo-firebird-encryption-plugin/ ; https://ib-aid.com/download/crypt/CryptTest.zip )
|
||||
# License file plugins\\dbcrypt.conf with unlimited expiration was provided by IBSurgeon to Firebird Foundation (FF).
|
||||
# This file was preliminary stored in FF Test machine.
|
||||
#   Test assumes that this file and all necessary libraries have already been stored into FB_HOME and %FB_HOME%\\plugins.
|
||||
#
|
||||
#
|
||||
# Anyone who wants to run this test on his own machine must
|
||||
# 1) download https://ib-aid.com/download/crypt/CryptTest.zip AND
|
||||
#   2) PURCHASE LICENSE and get from IBSurgeon file plugins\\dbcrypt.conf with appropriate expiration date and other info.
|
||||
#
|
||||
#
|
||||
# ################################################ ! ! ! N O T E ! ! ! ##############################################
|
||||
# FF tests storage (aka "fbt-repo") does not (and will not) contain any license file for IBSurgeon Demo Encryption package!
|
||||
# #########################################################################################################################
|
||||
#
|
||||
#
|
||||
# Checked on:
|
||||
# 4.0.0.1575: OK, 3.024s.
|
||||
#
|
||||
#
|
||||
# === NOTE-1 ===
|
||||
# In case of "Crypt plugin DBCRYPT failed to load/607/335544351" check that all
|
||||
# In case of "Crypt plugin DBCRYPT failed to load/607/335544351" check that all
|
||||
# needed files from IBSurgeon Demo Encryption package exist in %FB_HOME% and %FB_HOME%\\plugins
|
||||
# %FB_HOME%:
|
||||
# 283136 fbcrypt.dll
|
||||
# 2905600 libcrypto-1_1-x64.dll
|
||||
# 481792 libssl-1_1-x64.dll
|
||||
#
|
||||
#
|
||||
# %FB_HOME%\\plugins:
|
||||
# 297984 dbcrypt.dll
|
||||
# 306176 keyholder.dll
|
||||
# 108 DbCrypt.conf
|
||||
# 856 keyholder.conf
|
||||
#
|
||||
#
|
||||
# === NOTE-2 ===
|
||||
# Version of DbCrypt.dll of october-2018 must be replaced because it has hard-coded
|
||||
# date of expiration rather than reading it from DbCrypt.conf !!
|
||||
#
|
||||
#
|
||||
# === NOTE-3 ===
|
||||
# firebird.conf must contain following line:
|
||||
# KeyHolderPlugin = KeyHolder
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# tracker_id: CORE-6048
|
||||
# min_versions: ['4.0']
|
||||
# versions: 4.0
|
||||
# qmid: None
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
from firebird.qa import db_factory, python_act, Action
|
||||
|
||||
# version: 4.0
|
||||
# resources: None
|
||||
@ -62,14 +62,14 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
|
||||
# test_script_1
|
||||
#---
|
||||
#
|
||||
#
|
||||
# import os
|
||||
# import time
|
||||
# import subprocess
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
#
|
||||
#
|
||||
# # 27.02.2021.
|
||||
# # Name of encryption plugin depends on OS:
|
||||
# # * for Windows we (currently) use plugin by IBSurgeon, its name is 'dbcrypt';
|
||||
@ -83,27 +83,27 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# # ** 'fbSampleDbCrypt' for FB 4.x+
|
||||
# #
|
||||
# PLUGIN_NAME = 'dbcrypt' if os.name == 'nt' else ( '"DbCrypt_example"' if db_conn.engine_version < 4 else '"fbSampleDbCrypt"' )
|
||||
#
|
||||
#
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def flush_and_close( file_handle ):
|
||||
# # https://docs.python.org/2/library/os.html#os.fsync
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
|
||||
# global os
|
||||
#
|
||||
#
|
||||
# file_handle.flush()
|
||||
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
|
||||
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
|
||||
# os.fsync(file_handle.fileno())
|
||||
# file_handle.close()
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def cleanup( f_names_list ):
|
||||
# global os
|
||||
# for i in range(len( f_names_list )):
|
||||
@ -114,14 +114,14 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# else:
|
||||
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
|
||||
# del_name = None
|
||||
#
|
||||
#
|
||||
# if del_name and os.path.isfile( del_name ):
|
||||
# os.remove( del_name )
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# sql_scrypt='''
|
||||
# set list on;
|
||||
# recreate table test(x bigint unique);
|
||||
@ -137,7 +137,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# -- #########################################################
|
||||
# in autonomous transaction do
|
||||
# insert into test(x) values(:r); -- this will cause delay because of duplicate in index
|
||||
# when any do
|
||||
# when any do
|
||||
# begin
|
||||
# -- nop --
|
||||
# end
|
||||
@ -146,7 +146,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# ^
|
||||
# set term ;^
|
||||
# commit;
|
||||
#
|
||||
#
|
||||
# alter database encrypt with %(PLUGIN_NAME)s key Red;
|
||||
# commit;
|
||||
# set transaction lock timeout 2; -- THIS LOCK TIMEOUT SERVES ONLY FOR DELAY
|
||||
@ -154,7 +154,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# rollback;
|
||||
# select mon$crypt_state as "Is database encrypted ?" from mon$database;
|
||||
# commit;
|
||||
#
|
||||
#
|
||||
# alter database decrypt;
|
||||
# commit;
|
||||
# set transaction lock timeout 2; -- THIS LOCK TIMEOUT SERVES ONLY FOR DELAY
|
||||
@ -162,40 +162,40 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# rollback;
|
||||
# select mon$crypt_state as "Is database encrypted ?" from mon$database;
|
||||
# ''' % locals()
|
||||
#
|
||||
#
|
||||
# f_sql_cmd = open(os.path.join(context['temp_directory'],'tmp_core_6048.sql'), 'w')
|
||||
# f_sql_cmd.write(sql_scrypt)
|
||||
# flush_and_close( f_sql_cmd )
|
||||
#
|
||||
#
|
||||
# f_sql_log = open( os.path.join(context['temp_directory'],'tmp_core_6048.log'), 'w')
|
||||
#
|
||||
#
|
||||
# subprocess.call( [ context['isql_path'], dsn, "-n", "-q", "-i", f_sql_cmd.name ],
|
||||
# stdout = f_sql_log,
|
||||
# stderr = subprocess.STDOUT
|
||||
# )
|
||||
# flush_and_close( f_sql_log )
|
||||
#
|
||||
#
|
||||
# with open(f_sql_log.name,'r') as f:
|
||||
# for line in f:
|
||||
# print(line)
|
||||
#
|
||||
#
|
||||
# # cleanup:
|
||||
# ##########
|
||||
# time.sleep(1)
|
||||
# cleanup( (f_sql_cmd,f_sql_log) )
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
expected_stdout_1 = """
|
||||
Is database encrypted ? 1
|
||||
Is database encrypted ? 0
|
||||
"""
|
||||
"""
|
||||
|
||||
@pytest.mark.version('>=4.0')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
def test_1(act_1: Action):
|
||||
pytest.skip("Requires encryption plugin")
|
||||
|
||||
|
||||
|
@ -2,13 +2,13 @@
|
||||
#
|
||||
# id: bugs.core_6078
|
||||
# title: Permissions for create or alter statements are not checked
|
||||
# decription:
|
||||
# The problem occurred for all kinds of DB objects that allow a 'CREATE OR ALTER' statement to be applied to them.
# The test creates a non-privileged user and checks, for every such object, that this user can NOT create or alter it
# because the required privilege is missing.
|
||||
# Confirmed bug on 3.0.5.33140 and 4.0.0.1532.
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# Refactored 20.01.2020:
|
||||
# 1. Changed the code to make it more flexible, because 3.0 and 4.0 have significant differences in stdout/stderr.
|
||||
# Common SQL code was stored in fbt-repo
|
||||
@ -17,18 +17,23 @@
|
||||
# sql_text % dict(globals(), **locals())
|
||||
# Then we save this text to a temporary .sql script and run it.
# 2. Added checks for other kinds of DB objects: users, database, domain, table, table column, charset and local/global mappings.
|
||||
#
|
||||
#
|
||||
# Checked on:
|
||||
# 4.0.0.1748 SS: 2.346s.
|
||||
# 3.0.6.33236 SS: 1.898s.
|
||||
#
|
||||
#
|
||||
# tracker_id: CORE-6078
|
||||
# min_versions: ['3.0.5']
|
||||
# versions: 3.0.5, 4.0
|
||||
# qmid: None
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
from pathlib import Path
|
||||
from firebird.qa import db_factory, python_act, Action, user_factory, User
|
||||
|
||||
user_0 = user_factory(name='tmp$c6078_0', password='123')
|
||||
user_1 = user_factory(name='tmp$c6078_1', password='123')
|
||||
user_2 = user_factory(name='tmp$c6078_2', password='456')
|
||||
|
||||
# version: 3.0.5
|
||||
# resources: None
|
||||
@ -41,36 +46,36 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
|
||||
# test_script_1
|
||||
#---
|
||||
#
|
||||
#
|
||||
# import os
|
||||
# import sys
|
||||
# import time
|
||||
# import subprocess
|
||||
# from fdb import services
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
#
|
||||
#
|
||||
# this_db = db_conn.database_name
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def flush_and_close( file_handle ):
|
||||
# # https://docs.python.org/2/library/os.html#os.fsync
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
|
||||
# global os
|
||||
#
|
||||
#
|
||||
# file_handle.flush()
|
||||
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
|
||||
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
|
||||
# os.fsync(file_handle.fileno())
|
||||
# file_handle.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def cleanup( f_names_list ):
|
||||
# global os
|
||||
# for i in range(len( f_names_list )):
|
||||
@ -81,37 +86,38 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# else:
|
||||
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
|
||||
# del_name = None
|
||||
#
|
||||
#
|
||||
# if del_name and os.path.isfile( del_name ):
|
||||
# os.remove( del_name )
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# f_sql=open(os.path.join(context['files_location'],'core_6078.sql'),'r')
|
||||
# sql_text = f_sql.read()
|
||||
# flush_and_close( f_sql )
|
||||
#
|
||||
#
|
||||
# f_sql_chk = open( os.path.join(context['temp_directory'],'tmp_core_6078.sql'), 'w', buffering = 0)
|
||||
# f_sql_chk.write( sql_text % dict(globals(), **locals()) )
|
||||
# flush_and_close( f_sql_chk )
|
||||
#
|
||||
#
|
||||
# f_sql_log = open( '.'.join( (os.path.splitext( f_sql_chk.name )[0], 'log') ), 'w', buffering = 0)
|
||||
# subprocess.call( [ context['isql_path'], '-q', '-i', f_sql_chk.name ], stdout = f_sql_log, stderr = subprocess.STDOUT)
|
||||
# flush_and_close( f_sql_log )
|
||||
#
|
||||
#
|
||||
# with open( f_sql_log.name,'r') as f:
|
||||
# for line in f:
|
||||
# if line.strip():
|
||||
# print( line )
|
||||
#
|
||||
#
|
||||
# # cleanup
|
||||
# #########
|
||||
# time.sleep(1)
|
||||
# cleanup( (f_sql_log, f_sql_chk) )
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
expected_stdout_1 = """
|
||||
Statement failed, SQLSTATE = 28000
|
||||
@ -121,7 +127,7 @@ expected_stdout_1 = """
|
||||
Statement failed, SQLSTATE = 28000
|
||||
unsuccessful metadata update
|
||||
-ALTER DATABASE failed
|
||||
-no permission for ALTER access to DATABASE
|
||||
-no permission for ALTER access to DATABASE
|
||||
|
||||
Statement failed, SQLSTATE = 28000
|
||||
unsuccessful metadata update
|
||||
@ -156,15 +162,15 @@ expected_stdout_1 = """
|
||||
Statement failed, SQLSTATE = 28000
|
||||
unsuccessful metadata update
|
||||
-CREATE OR ALTER TRIGGER TRG$START failed
|
||||
-no permission for ALTER access to DATABASE
|
||||
-no permission for ALTER access to DATABASE
|
||||
|
||||
Statement failed, SQLSTATE = 28000
|
||||
unsuccessful metadata update
|
||||
-CREATE OR ALTER TRIGGER TRIG_DDL_SP failed
|
||||
-no permission for ALTER access to DATABASE
|
||||
-no permission for ALTER access to DATABASE
|
||||
|
||||
|
||||
ALTERED_TRIGGER_NAME TEST_BI
|
||||
ALTERED_TRIGGER_NAME TEST_BI
|
||||
ALTERED_TRIGGER_SOURCE c:3d0
|
||||
as
|
||||
begin
|
||||
@ -262,12 +268,18 @@ expected_stdout_1 = """
|
||||
unsuccessful metadata update
|
||||
-ALTER MAPPING GLOBAL_MAP_C6078 failed
|
||||
-Unable to perform operation. You must be either SYSDBA or owner of the database
|
||||
"""
|
||||
"""
|
||||
|
||||
@pytest.mark.version('>=3.0.5')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
@pytest.mark.version('>=3.0.5,<4')
|
||||
def test_1(act_1: Action, user_0: User, user_1: User, user_2: User):
|
||||
script_vars = {'dsn': act_1.db.dsn,
|
||||
'user_name': act_1.db.user,
|
||||
'user_password': act_1.db.password,}
|
||||
script_file = act_1.vars['files'] / 'core_6078.sql'
|
||||
script = script_file.read_text() % script_vars
|
||||
act_1.expected_stdout = expected_stdout_1
|
||||
act_1.isql(switches=['-q'], input=script, combine_output=True)
|
||||
assert act_1.clean_stdout == act_1.clean_expected_stdout
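Side note on the substitution above: core_6078.sql uses classic %-style placeholders whose names match the script_vars keys, so a plain dict drives the expansion. A tiny illustration with made-up values (the template line is invented; only the mechanism matters):

# Illustration only; placeholder names mirror the script_vars keys above, values are fabricated.
template = "-- connecting to %(dsn)s as %(user_name)s / %(user_password)s"
print(template % {'dsn': 'localhost:employee', 'user_name': 'SYSDBA', 'user_password': 'masterkey'})
# -> -- connecting to localhost:employee as SYSDBA / masterkey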
|
||||
|
||||
|
||||
# version: 4.0
|
||||
@ -281,36 +293,36 @@ db_2 = db_factory(sql_dialect=3, init=init_script_2)
|
||||
|
||||
# test_script_2
|
||||
#---
|
||||
#
|
||||
#
|
||||
# import os
|
||||
# import sys
|
||||
# import time
|
||||
# import subprocess
|
||||
# from fdb import services
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
#
|
||||
#
|
||||
# this_db = db_conn.database_name
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def flush_and_close( file_handle ):
|
||||
# # https://docs.python.org/2/library/os.html#os.fsync
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
|
||||
# global os
|
||||
#
|
||||
#
|
||||
# file_handle.flush()
|
||||
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
|
||||
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
|
||||
# os.fsync(file_handle.fileno())
|
||||
# file_handle.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def cleanup( f_names_list ):
|
||||
# global os
|
||||
# for i in range(len( f_names_list )):
|
||||
@ -321,39 +333,40 @@ db_2 = db_factory(sql_dialect=3, init=init_script_2)
|
||||
# else:
|
||||
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
|
||||
# del_name = None
|
||||
#
|
||||
#
|
||||
# if del_name and os.path.isfile( del_name ):
|
||||
# os.remove( del_name )
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# f_sql=open(os.path.join(context['files_location'],'core_6078.sql'),'r')
|
||||
# sql_text = f_sql.read()
|
||||
# flush_and_close( f_sql )
|
||||
#
|
||||
#
|
||||
# f_sql_chk = open( os.path.join(context['temp_directory'],'tmp_core_6078.sql'), 'w', buffering = 0)
|
||||
# f_sql_chk.write( sql_text % dict(globals(), **locals()) )
|
||||
# flush_and_close( f_sql_chk )
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# f_sql_log = open( '.'.join( (os.path.splitext( f_sql_chk.name )[0], 'log') ), 'w', buffering = 0)
|
||||
# subprocess.call( [ context['isql_path'], '-q', '-i', f_sql_chk.name ], stdout = f_sql_log, stderr = subprocess.STDOUT)
|
||||
# flush_and_close( f_sql_log )
|
||||
#
|
||||
#
|
||||
# with open( f_sql_log.name,'r') as f:
|
||||
# for line in f:
|
||||
# if line.strip():
|
||||
# print( line )
|
||||
#
|
||||
#
|
||||
# # cleanup:
|
||||
# ##########
|
||||
# time.sleep(1)
|
||||
# cleanup( (f_sql_log, f_sql_chk) )
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_2 = python_act('db_2', test_script_2, substitutions=substitutions_2)
|
||||
|
||||
act_2 = python_act('db_2', substitutions=substitutions_2)
|
||||
|
||||
expected_stdout_2 = """
|
||||
Statement failed, SQLSTATE = 28000
|
||||
@ -364,7 +377,7 @@ expected_stdout_2 = """
|
||||
Statement failed, SQLSTATE = 28000
|
||||
unsuccessful metadata update
|
||||
-ALTER DATABASE failed
|
||||
-no permission for ALTER access to DATABASE
|
||||
-no permission for ALTER access to DATABASE
|
||||
|
||||
Statement failed, SQLSTATE = 28000
|
||||
unsuccessful metadata update
|
||||
@ -405,15 +418,15 @@ expected_stdout_2 = """
|
||||
Statement failed, SQLSTATE = 28000
|
||||
unsuccessful metadata update
|
||||
-CREATE OR ALTER TRIGGER TRG$START failed
|
||||
-no permission for ALTER access to DATABASE
|
||||
-no permission for ALTER access to DATABASE
|
||||
|
||||
Statement failed, SQLSTATE = 28000
|
||||
unsuccessful metadata update
|
||||
-CREATE OR ALTER TRIGGER TRIG_DDL_SP failed
|
||||
-no permission for ALTER access to DATABASE
|
||||
-no permission for ALTER access to DATABASE
|
||||
|
||||
|
||||
ALTERED_TRIGGER_NAME TEST_BI
|
||||
ALTERED_TRIGGER_NAME TEST_BI
|
||||
ALTERED_TRIGGER_SOURCE c:3cc
|
||||
as
|
||||
begin
|
||||
@ -508,11 +521,16 @@ expected_stdout_2 = """
|
||||
-ALTER MAPPING GLOBAL_MAP_C6078 failed
|
||||
-Unable to perform operation
|
||||
-System privilege CHANGE_MAPPING_RULES is missing
|
||||
"""
|
||||
"""
|
||||
|
||||
@pytest.mark.version('>=4.0')
|
||||
@pytest.mark.xfail
|
||||
def test_2(db_2):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
|
||||
def test_2(act_2: Action, user_0: User, user_1: User, user_2: User):
|
||||
script_vars = {'dsn': act_2.db.dsn,
|
||||
'user_name': act_2.db.user,
|
||||
'user_password': act_2.db.password,}
|
||||
script_file = act_2.vars['files'] / 'core_6078.sql'
|
||||
script = script_file.read_text() % script_vars
|
||||
act_2.expected_stdout = expected_stdout_2
|
||||
act_2.isql(switches=['-q'], input=script, combine_output=True)
|
||||
assert act_2.clean_stdout == act_2.clean_expected_stdout
|
||||
|
||||
|
@ -2,25 +2,26 @@
|
||||
#
|
||||
# id: bugs.core_6089
|
||||
# title: BLOBs are unnecessarily copied during UPDATE after a table format change
|
||||
# decription:
|
||||
# It's not easy to obtain a BLOB_ID using only fdb, whereas ISQL always shows blob IDs (even when we do not want them :)).
# This test runs ISQL with the commands that were provided in the ticket and parses the result, extracting only the BLOB_ID column.
# Each BLOB_ID is added to a set(), so eventually we get the total number of UNIQUE blob IDs that were generated during the test.
# This number must equal the number of records in the table (three in this test).
|
||||
#
|
||||
#
|
||||
# Confirmed bug on: 4.0.0.1535; 3.0.5.33142.
|
||||
# Works fine on:
|
||||
# 4.0.0.1556: OK, 3.384s.
|
||||
# 3.0.5.33152: OK, 2.617s.
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# tracker_id: CORE-6089
|
||||
# min_versions: ['3.0.5']
|
||||
# versions: 3.0.5
|
||||
# qmid: None
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
import re
|
||||
from firebird.qa import db_factory, python_act, Action
|
||||
|
||||
# version: 3.0.5
|
||||
# resources: None
|
||||
@ -33,34 +34,34 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
|
||||
# test_script_1
|
||||
#---
|
||||
#
|
||||
#
|
||||
# import os
|
||||
# import re
|
||||
# import subprocess
|
||||
# import time
|
||||
# import fdb
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def flush_and_close( file_handle ):
|
||||
# # https://docs.python.org/2/library/os.html#os.fsync
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
|
||||
# global os
|
||||
#
|
||||
#
|
||||
# file_handle.flush()
|
||||
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
|
||||
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
|
||||
# os.fsync(file_handle.fileno())
|
||||
# file_handle.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def cleanup( f_names_list ):
|
||||
# global os
|
||||
# for i in range(len( f_names_list )):
|
||||
@ -71,14 +72,14 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# else:
|
||||
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
|
||||
# del_name = None
|
||||
#
|
||||
#
|
||||
# if del_name and os.path.isfile( del_name ):
|
||||
# os.remove( del_name )
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# allowed_patterns = ( re.compile('COL2_BLOB_ID\\s+\\S+', re.IGNORECASE), )
|
||||
#
|
||||
#
|
||||
# sql_txt='''
|
||||
# set bail on;
|
||||
# set list on;
|
||||
@ -86,17 +87,17 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# recreate table t (col1 int, col2 blob);
|
||||
# recreate view v as select col2 as col2_blob_id from t; -- NB: alias for column have to be matched to re.compile() argument
|
||||
# commit;
|
||||
#
|
||||
#
|
||||
# insert into t values (1, '1');
|
||||
# insert into t values (2, '2');
|
||||
# insert into t values (3, '3');
|
||||
# commit;
|
||||
#
|
||||
#
|
||||
# select v.* from v;
|
||||
# update t set col1 = -col1;
|
||||
# select v.* from v;
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# rollback;
|
||||
# alter table t add col3 date;
|
||||
# select v.* from v;
|
||||
@ -104,39 +105,66 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# select v.* from v; -- bug was here
|
||||
# quit;
|
||||
# '''
|
||||
#
|
||||
#
|
||||
# f_isql_cmd=open( os.path.join(context['temp_directory'],'tmp_6089.sql'), 'w')
|
||||
# f_isql_cmd.write( sql_txt )
|
||||
# flush_and_close( f_isql_cmd )
|
||||
#
|
||||
#
|
||||
# f_isql_log=open( os.path.join(context['temp_directory'],'tmp_6089.log'), 'w')
|
||||
#
|
||||
#
|
||||
# subprocess.call([context['isql_path'], dsn, "-q", "-i", f_isql_cmd.name], stdout=f_isql_log, stderr=subprocess.STDOUT)
|
||||
# flush_and_close( f_isql_log )
|
||||
#
|
||||
#
|
||||
# blob_id_set=set()
|
||||
# with open( f_isql_log.name,'r') as f:
|
||||
# for line in f:
|
||||
# match2some = filter( None, [ p.search(line) for p in allowed_patterns ] )
|
||||
# if match2some:
|
||||
# blob_id_set.add( line.split()[1] )
|
||||
#
|
||||
#
|
||||
# print( 'Number of unique blob IDs: ' + str(len(blob_id_set)) )
|
||||
#
|
||||
#
|
||||
# # Cleanup.
|
||||
# ##########
|
||||
# cleanup( (f_isql_cmd, f_isql_log) )
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
expected_stdout_1 = """
|
||||
Number of unique blob IDs: 3
|
||||
"""
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
test_script_1 = """
|
||||
set bail on;
|
||||
set list on;
|
||||
set blob off;
|
||||
recreate table t (col1 int, col2 blob);
|
||||
recreate view v as select col2 as col2_blob_id from t; -- NB: the column alias has to match the re.compile() pattern used in test_1
|
||||
commit;
|
||||
|
||||
insert into t values (1, '1');
|
||||
insert into t values (2, '2');
|
||||
insert into t values (3, '3');
|
||||
commit;
|
||||
|
||||
select v.* from v;
|
||||
update t set col1 = -col1;
|
||||
select v.* from v;
|
||||
|
||||
|
||||
rollback;
|
||||
alter table t add col3 date;
|
||||
select v.* from v;
|
||||
update t set col1 = -col1;
|
||||
select v.* from v; -- bug was here
|
||||
quit;
|
||||
"""
|
||||
|
||||
@pytest.mark.version('>=3.0.5')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
|
||||
|
||||
def test_1(act_1: Action):
|
||||
pattern = re.compile('COL2_BLOB_ID\\s+\\S+', re.IGNORECASE)
|
||||
blob_id_set = set()
|
||||
act_1.isql(switches=['-q'], input=test_script_1)
|
||||
for line in act_1.stdout.splitlines():
|
||||
if pattern.search(line):
|
||||
blob_id_set.add(line.split()[1])
|
||||
# Check
|
||||
assert len(blob_id_set) == 3
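To make the parsing step concrete, here is the same regex applied to a fabricated fragment of SET LIST output (the blob-id values are invented; real ones differ per run):

# Fabricated sample output; only the parsing mechanism is real.
import re
sample = """COL2_BLOB_ID                    80:1
COL2_BLOB_ID                    80:2
COL2_BLOB_ID                    80:1"""
ids = {ln.split()[1] for ln in sample.splitlines() if re.search(r'COL2_BLOB_ID\s+\S+', ln, re.IGNORECASE)}
print(len(ids))   # -> 2 unique IDs in this sample; the test above expects 3, one per table row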
|
||||
|
@ -2,25 +2,26 @@
|
||||
#
|
||||
# id: bugs.core_6090
|
||||
# title: BLOB fields may be suddenly set to NULLs during UPDATE after a table format change
|
||||
# decription:
|
||||
# It's not easy to obtain a BLOB_ID using only fdb, whereas ISQL always shows blob IDs (even when we do not want them :)).
# This test runs ISQL with the commands that were provided in the ticket and parses the result, extracting only the BLOB_ID column.
# Each BLOB_ID is added to a set(), so eventually we get the total number of UNIQUE blob IDs that were generated during the test.
# This number must equal the number of records in the table (three in this test).
# Besides this, we check that all blobs are not null; see the 'null_blob_cnt' counter.
|
||||
#
|
||||
#
|
||||
# Confirmed bug on: 4.0.0.1535; 3.0.5.33142.
|
||||
# Works fine on:
|
||||
# 4.0.0.1556: OK, 3.342s.
|
||||
# 3.0.5.33152: OK, 2.652s.
|
||||
#
|
||||
#
|
||||
# tracker_id: CORE-6090
|
||||
# min_versions: ['3.0.5']
|
||||
# versions: 3.0.5
|
||||
# qmid: None
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
import re
|
||||
from firebird.qa import db_factory, python_act, Action
|
||||
|
||||
# version: 3.0.5
|
||||
# resources: None
|
||||
@ -33,36 +34,36 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
|
||||
# test_script_1
|
||||
#---
|
||||
#
|
||||
#
|
||||
# import os
|
||||
# import re
|
||||
# import subprocess
|
||||
# import time
|
||||
# import fdb
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
#
|
||||
#
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def flush_and_close( file_handle ):
|
||||
# # https://docs.python.org/2/library/os.html#os.fsync
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
|
||||
# global os
|
||||
#
|
||||
#
|
||||
# file_handle.flush()
|
||||
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
|
||||
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
|
||||
# os.fsync(file_handle.fileno())
|
||||
# file_handle.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def cleanup( f_names_list ):
|
||||
# global os
|
||||
# for i in range(len( f_names_list )):
|
||||
@ -73,63 +74,63 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# else:
|
||||
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
|
||||
# del_name = None
|
||||
#
|
||||
#
|
||||
# if del_name and os.path.isfile( del_name ):
|
||||
# os.remove( del_name )
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# allowed_patterns = ( re.compile('BLOB_ID\\s+\\S+', re.IGNORECASE), )
|
||||
#
|
||||
#
|
||||
# sql_txt='''
|
||||
# set bail on;
|
||||
# set blob all;
|
||||
# set list on;
|
||||
#
|
||||
#
|
||||
# recreate view v as select 1 x from rdb$database;
|
||||
# commit;
|
||||
# recreate table test (n1 int, n2 int, n3 int, blob_id blob);
|
||||
# recreate view v as select blob_id from test;
|
||||
# commit;
|
||||
#
|
||||
#
|
||||
# insert into test values (0, 0, null, '0:foo');
|
||||
# insert into test values (1, 1, 1, '1:rio');
|
||||
# insert into test values (2, 2, 2, '2:bar');
|
||||
# commit;
|
||||
#
|
||||
#
|
||||
# select 1 as point, v.* from v;
|
||||
#
|
||||
#
|
||||
# update test set n1 = 1 where n2 >= 0; -- n1 should be set to 1 in all three rows
|
||||
# select 2 as point, v.* from v;
|
||||
# rollback;
|
||||
#
|
||||
#
|
||||
# update test set n1 = 1 where n2 >= 0 and n3 >= 0; -- n1 should be set to 1 in 2nd and 3rd rows
|
||||
# select 3 as point, v.* from v;
|
||||
# rollback;
|
||||
#
|
||||
#
|
||||
# alter table test add col5 date;
|
||||
# commit;
|
||||
#
|
||||
#
|
||||
# update test set n1 = 1 where n2 >= 0; -- n1 should be set to 1 in all three rows
|
||||
# select 4 as point, v.* from v; -- Here blob_id were changed because of other bug, see CORE-6089, but contents is correct
|
||||
# rollback;
|
||||
#
|
||||
#
|
||||
# update test set n1 = 1 where n2 >= 0 and n3 >= 0;
|
||||
# -- n1 should be set to 1 in 2nd and 3rd rows
|
||||
# select 5 as point, v.* from v; -- BUG: BLOB_ID in the second row was nullified!!!
|
||||
#
|
||||
# select 5 as point, v.* from v; -- BUG: BLOB_ID in the second row was nullified!!!
|
||||
#
|
||||
# quit;
|
||||
# '''
|
||||
#
|
||||
#
|
||||
# f_isql_cmd=open( os.path.join(context['temp_directory'],'tmp_6090.sql'), 'w')
|
||||
# f_isql_cmd.write( sql_txt )
|
||||
# flush_and_close( f_isql_cmd )
|
||||
#
|
||||
#
|
||||
# f_isql_log=open( os.path.join(context['temp_directory'],'tmp_6090.log'), 'w')
|
||||
#
|
||||
#
|
||||
# subprocess.call([context['isql_path'], dsn, "-q", "-i", f_isql_cmd.name], stdout=f_isql_log, stderr=subprocess.STDOUT)
|
||||
# flush_and_close( f_isql_log )
|
||||
#
|
||||
#
|
||||
# blob_id_set=set()
|
||||
# null_blob_cnt=0
|
||||
# with open( f_isql_log.name,'r') as f:
|
||||
@ -139,26 +140,75 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# blob_id_set.add( line.split()[1] )
|
||||
# if '<null>' in line.lower():
|
||||
# null_blob_cnt += 1
|
||||
#
|
||||
#
|
||||
# print( 'Number of unique blob IDs: ' + str(len(blob_id_set)) )
|
||||
# print( 'Number of nullified blobs: ' + str(null_blob_cnt) )
|
||||
#
|
||||
#
|
||||
# # Cleanup.
|
||||
# ##########
|
||||
# time.sleep(1)
|
||||
# cleanup( (f_isql_cmd, f_isql_log) )
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
expected_stdout_1 = """
|
||||
Number of unique blob IDs: 3
|
||||
Number of nullified blobs: 0
|
||||
"""
|
||||
"""
|
||||
|
||||
test_script_1 = """
|
||||
set bail on;
|
||||
set blob all;
|
||||
set list on;
|
||||
|
||||
recreate view v as select 1 x from rdb$database;
|
||||
commit;
|
||||
recreate table test (n1 int, n2 int, n3 int, blob_id blob);
|
||||
recreate view v as select blob_id from test;
|
||||
commit;
|
||||
|
||||
insert into test values (0, 0, null, '0:foo');
|
||||
insert into test values (1, 1, 1, '1:rio');
|
||||
insert into test values (2, 2, 2, '2:bar');
|
||||
commit;
|
||||
|
||||
select 1 as point, v.* from v;
|
||||
|
||||
update test set n1 = 1 where n2 >= 0; -- n1 should be set to 1 in all three rows
|
||||
select 2 as point, v.* from v;
|
||||
rollback;
|
||||
|
||||
update test set n1 = 1 where n2 >= 0 and n3 >= 0; -- n1 should be set to 1 in 2nd and 3rd rows
|
||||
select 3 as point, v.* from v;
|
||||
rollback;
|
||||
|
||||
alter table test add col5 date;
|
||||
commit;
|
||||
|
||||
update test set n1 = 1 where n2 >= 0; -- n1 should be set to 1 in all three rows
|
||||
select 4 as point, v.* from v; -- Here blob_id were changed because of other bug, see CORE-6089, but contents is correct
|
||||
rollback;
|
||||
|
||||
update test set n1 = 1 where n2 >= 0 and n3 >= 0;
|
||||
-- n1 should be set to 1 in 2nd and 3rd rows
|
||||
select 5 as point, v.* from v; -- BUG: BLOB_ID in the second row was nullified!!!
|
||||
|
||||
quit;
|
||||
"""
|
||||
|
||||
@pytest.mark.version('>=3.0.5')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
|
||||
|
||||
def test_1(act_1: Action):
|
||||
pattern = re.compile('BLOB_ID\\s+\\S+', re.IGNORECASE)
|
||||
blob_id_set = set()
|
||||
null_blob_cnt = 0
|
||||
act_1.isql(switches=['-q'], input=test_script_1)
|
||||
for line in act_1.stdout.splitlines():
|
||||
if pattern.search(line):
|
||||
blob_id_set.add(line.split()[1])
|
||||
if '<null>' in line.lower():
|
||||
null_blob_cnt += 1
|
||||
# Check
|
||||
assert len(blob_id_set) == 3
|
||||
assert null_blob_cnt == 0
|
||||
|
@ -2,19 +2,19 @@
|
||||
#
|
||||
# id: bugs.core_6095
|
||||
# title: Extend trace record for COMMIT/ROLLBACK RETAINING to allow chaining of transaction ids
|
||||
# decription:
|
||||
# Test prepares a trace config with the requirement to watch only for TRANSACTION events.
# Then it starts a trace session and makes several changes within a retained Tx
# (this is done by invoking the con.commit() method with the argument 'retaining = True').
|
||||
#
|
||||
#
|
||||
# Every COMMIT_RETAINING event in the trace log must contain the following *new* elements:
|
||||
# 1) "INIT_" token with ID of transaction that originated changes; it must be shown in the same line with "TRA_" info;
|
||||
# 2) "New number <NN>" - ID that will be assigned to the next transaction in this 'chain'; it must be shown in separate line.
|
||||
#
|
||||
#
|
||||
# All lines containing "INIT_" must have the same value of the transaction that started the changes, but this value itself can depend
# on the FB major version and (maybe) on the server mode: CS / SC / SS. For this reason we save this number as a special 'base' that
# is subtracted from the concrete values while parsing the trace lines - see the 'tx_base' variable here.
|
||||
#
|
||||
#
|
||||
# We parse the trace log and pay attention to lines like: "(TRA_nnn, INIT_mmm, ..." and "New number <XXX>".
# The interesting numbers are extracted from these lines and <tx_base> is subtracted from them.
# Finally, we display the resulting values.
|
||||
@ -22,16 +22,17 @@
|
||||
# 2) the number after the phrase "Tx that finished now" must be:
# 2.1) LESS by 1 than the value in the next line: "NEW NUMBER" for subsequent Tx..." - for all DML statements EXCEPT the LAST;
# 2.2) EQUAL to "NEW NUMBER" for subsequent Tx..." for the LAST statement because it does not change anything (it updates an empty table);
|
||||
#
|
||||
#
|
||||
# Checked on 4.0.0.1784 SS: 6.327s; 3.0.6.33255 SS: 5.039s.
|
||||
#
|
||||
#
|
||||
# tracker_id: CORE-6095
|
||||
# min_versions: ['3.0.6']
|
||||
# versions: 3.0.6
|
||||
# qmid: None
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
import re
|
||||
from firebird.qa import db_factory, python_act, Action
|
||||
|
||||
# version: 3.0.6
|
||||
# resources: None
|
||||
@ -54,7 +55,7 @@ init_script_1 = """
|
||||
^
|
||||
set term ;^
|
||||
commit;
|
||||
"""
|
||||
"""
|
||||
|
||||
db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
|
||||
@ -66,30 +67,30 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# import subprocess
|
||||
# from subprocess import Popen
|
||||
# from fdb import services
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
#
|
||||
#
|
||||
# fdb_name = os.path.split(db_conn.database_name)[1]
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def flush_and_close( file_handle ):
|
||||
# # https://docs.python.org/2/library/os.html#os.fsync
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
|
||||
# global os
|
||||
#
|
||||
#
|
||||
# file_handle.flush()
|
||||
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
|
||||
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
|
||||
# os.fsync(file_handle.fileno())
|
||||
# file_handle.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def cleanup( f_names_list ):
|
||||
# global os
|
||||
# for i in range(len( f_names_list )):
|
||||
@ -100,12 +101,12 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# else:
|
||||
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
|
||||
# del_name = None
|
||||
#
|
||||
#
|
||||
# if del_name and os.path.isfile( del_name ):
|
||||
# os.remove( del_name )
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# # Prepare config for trace session that will be launched:
|
||||
# #########################################################
|
||||
# txt = '''
|
||||
@ -113,15 +114,15 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# {
|
||||
# enabled = true
|
||||
# log_initfini = false
|
||||
# time_threshold = 0
|
||||
# time_threshold = 0
|
||||
# log_transactions = true
|
||||
# }
|
||||
# ''' % locals()
|
||||
#
|
||||
#
|
||||
# trc_cfg=open( os.path.join(context['temp_directory'],'tmp_trace_6095.cfg'), 'w')
|
||||
# trc_cfg.write(txt)
|
||||
# flush_and_close( trc_cfg )
|
||||
#
|
||||
#
|
||||
# # Async. launch of trace session using FBSVCMGR action_trace_start:
|
||||
# ###################################################################
|
||||
# trc_log=open( os.path.join(context['temp_directory'],'tmp_trace_6095.log'), 'w', buffering = 0)
|
||||
@ -130,14 +131,14 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# "action_trace_start",
|
||||
# "trc_cfg", trc_cfg.name
|
||||
# ],
|
||||
# stdout=trc_log,
|
||||
# stdout=trc_log,
|
||||
# stderr=subprocess.STDOUT
|
||||
# )
|
||||
#
|
||||
#
|
||||
# # 08.01.2020. This delay is mandatory, otherwise file with trace session info can remain (sometimes)
|
||||
# # empty when we will read it at the next step:
|
||||
# time.sleep(1)
|
||||
#
|
||||
#
|
||||
# # Determine active trace session ID (for further stop):
|
||||
# #######################################################
|
||||
# trc_lst=open( os.path.join(context['temp_directory'],'tmp_trace_6095.lst'), 'w', buffering = 0)
|
||||
@ -146,79 +147,79 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# stdout=trc_lst, stderr=subprocess.STDOUT
|
||||
# )
|
||||
# flush_and_close( trc_lst )
|
||||
#
|
||||
# # Session ID: 5
|
||||
# # user:
|
||||
# # date: 2015-08-27 15:24:14
|
||||
# # flags: active, trace
|
||||
#
|
||||
#
|
||||
# # Session ID: 5
|
||||
# # user:
|
||||
# # date: 2015-08-27 15:24:14
|
||||
# # flags: active, trace
|
||||
#
|
||||
# sid_pattern = re.compile('Session\\s+ID[:]{0,1}\\s+\\d+', re.IGNORECASE)
|
||||
#
|
||||
#
|
||||
# trc_ssn=0
|
||||
# with open( trc_lst.name,'r') as f:
|
||||
# for line in f:
|
||||
# if sid_pattern.search( line ) and len( line.split() ) == 3:
|
||||
# trc_ssn = line.split()[2]
|
||||
# break
|
||||
#
|
||||
# # Result: `trc_ssn` is ID of active trace session.
|
||||
#
|
||||
# # Result: `trc_ssn` is ID of active trace session.
|
||||
# # We have to terminate trace session that is running on server BEFORE we termitane process `p_svcmgr`
|
||||
#
|
||||
#
|
||||
# if trc_ssn==0:
|
||||
# print("Error parsing trace session ID.")
|
||||
#
|
||||
#
|
||||
# else:
|
||||
#
|
||||
#
|
||||
# ########### W O R K W I T H D A T A B A S E ########
|
||||
#
|
||||
#
|
||||
# con = fdb.connect( dsn = dsn )
|
||||
# cur = con.cursor()
|
||||
#
|
||||
#
|
||||
# con.execute_immediate( 'insert into test(x) values(123)' )
|
||||
# con.commit( retaining = True ) # (TRA_12, ... ; next line: "New number 13"
|
||||
#
|
||||
#
|
||||
# cur.callproc( 'sp_worker', (456,) ) # (TRA_13, INIT_12, ...
|
||||
# con.commit( retaining = True ) # (TRA_13, INIT_12, ... ; next line: "New number 14"
|
||||
#
|
||||
#
|
||||
# con.execute_immediate( 'delete from test' ) # (TRA_14, INIT_12, ...
|
||||
# con.commit( retaining = True ) # (TRA_14, INIT_12, ... ; next line: "New number 15"
|
||||
#
|
||||
#
|
||||
# # This statement does not change anything:
|
||||
# con.execute_immediate( 'update test set x = -x' ) # (TRA_15, INIT_12, ...
|
||||
# con.commit( retaining = True ) # (TRA_15, INIT_12, ... ; next line: "New number 15" -- THE SAME AS PREVIOUS!
|
||||
#
|
||||
#
|
||||
# cur.close()
|
||||
# #####################################################################
|
||||
#
|
||||
# # ::: NB ::: Here we have to be idle at least 2s (two seconds) otherwise trace log will
|
||||
#
|
||||
# # ::: NB ::: Here we have to be idle at least 2s (two seconds) otherwise trace log will
|
||||
# # not contain some or all of messages about create DB, start Tx, ES, Tx and drop DB.
|
||||
# # See also discussion with hvlad, 08.01.2020 15:16
|
||||
# # (subj: "action_trace_stop does not flush trace log (fully or partially)")
|
||||
# time.sleep(2)
|
||||
#
|
||||
#
|
||||
# # Stop trace session:
|
||||
# #####################
|
||||
#
|
||||
#
|
||||
# trc_lst=open(trc_lst.name, "a")
|
||||
# trc_lst.seek(0,2)
|
||||
#
|
||||
#
|
||||
# subprocess.call( [ context['fbsvcmgr_path'], "localhost:service_mgr",
|
||||
# "action_trace_stop",
|
||||
# "trc_id",trc_ssn
|
||||
# ],
|
||||
# stdout=trc_lst,
|
||||
# stdout=trc_lst,
|
||||
# stderr=subprocess.STDOUT
|
||||
# )
|
||||
# flush_and_close( trc_lst )
|
||||
#
|
||||
#
|
||||
# p_svcmgr.terminate()
|
||||
#
|
||||
#
|
||||
# allowed_patterns = [
|
||||
#
|
||||
#
|
||||
# allowed_patterns = [
|
||||
# re.compile('\\s*\\(TRA_\\d+,', re.IGNORECASE)
|
||||
# ,re.compile('\\s*New\\s+number\\s+\\d+\\s*', re.IGNORECASE)
|
||||
# ]
|
||||
#
|
||||
#
|
||||
# tx_base = -1
|
||||
# with open( trc_log.name,'r') as f:
|
||||
# for line in f:
|
||||
@ -231,9 +232,9 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# # 1) for tx WITHOUT retaining: ['TRA', '12', 'READ', 'COMMITTED', '|', 'REC', 'VERSION', '|', 'WAIT', '|', 'READ', 'WRITE)']
|
||||
# # 2) for tx which is RETAINED: ['TRA', '13', 'INIT', '12', 'READ', 'COMMITTED', '|', 'REC', 'VERSION', '|', 'WAIT', '|', 'READ', 'WRITE)']
|
||||
# # 0 1 2 3
|
||||
#
|
||||
#
|
||||
# tx_base = int(words[1]) if tx_base == -1 else tx_base
|
||||
#
|
||||
#
|
||||
# if words[2] == 'INIT':
|
||||
# tx_origin_of_changes = int(words[3]) - tx_base
|
||||
# tx_that_finished_now = int(words[1]) - tx_base
|
||||
@ -241,19 +242,20 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# elif 'number' in line:
|
||||
# tx_for_subsequent_changes = int(line.split()[2]) - tx_base # New number 15 --> 15
|
||||
# print('Found record with "NEW NUMBER" for subsequent Tx numbers: ', tx_for_subsequent_changes)
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# #< cond "if trc_ssn>0"
|
||||
# flush_and_close( trc_log )
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# # cleanup:
|
||||
# ##########
|
||||
# time.sleep(1)
|
||||
# cleanup( (trc_lst, trc_cfg, trc_log) )
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
expected_stdout_1 = """
|
||||
Found record with "NEW NUMBER" for subsequent Tx numbers: 1
|
||||
@ -263,11 +265,52 @@ expected_stdout_1 = """
|
||||
Found record with "NEW NUMBER" for subsequent Tx numbers: 3
|
||||
Found "INIT_" token in "TRA_" record. Tx that is origin of changes: 0 ; Tx that finished now: 3
|
||||
Found record with "NEW NUMBER" for subsequent Tx numbers: 3
|
||||
"""
|
||||
Found "INIT_" token in "TRA_" record. Tx that is origin of changes: 0 ; Tx that finished now: 3
|
||||
"""
|
||||
|
||||
trace_1 = ['log_initfini = false',
|
||||
'log_transactions = true',
|
||||
'time_threshold = 0'
|
||||
]
|
||||
|
||||
@pytest.mark.version('>=3.0.6')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
|
||||
|
||||
def test_1(act_1: Action, capsys):
|
||||
allowed_patterns = [re.compile('\\s*\\(TRA_\\d+,', re.IGNORECASE),
|
||||
re.compile('\\s*New\\s+number\\s+\\d+\\s*', re.IGNORECASE),
|
||||
]
|
||||
with act_1.trace(db_events=trace_1):
|
||||
with act_1.db.connect() as con:
|
||||
cur = con.cursor()
|
||||
con.execute_immediate('insert into test(x) values(123)')
|
||||
con.commit(retaining = True) # (TRA_12, ... ; next line: "New number 13"
|
||||
cur.callproc('sp_worker', [456]) # (TRA_13, INIT_12, ...
|
||||
con.commit(retaining = True) # (TRA_13, INIT_12, ... ; next line: "New number 14"
|
||||
con.execute_immediate('delete from test') # (TRA_14, INIT_12, ...
|
||||
con.commit(retaining = True) # (TRA_14, INIT_12, ... ; next line: "New number 15"
|
||||
# This statement does not change anything:
|
||||
con.execute_immediate('update test set x = -x') # (TRA_15, INIT_12, ...
|
||||
con.commit(retaining = True) # (TRA_15, INIT_12, ... ; next line: "New number 15" -- THE SAME AS PREVIOUS!
|
||||
# Process trace
|
||||
tx_base = -1
|
||||
for line in act_1.trace_log:
|
||||
if line.rstrip().split():
|
||||
for p in allowed_patterns:
|
||||
if p.search(line):
|
||||
if '(TRA_' in line:
|
||||
words = line.replace(',',' ').replace('_',' ').replace('(',' ').split()
|
||||
# Result:
|
||||
# 1) for tx WITHOUT retaining: ['TRA', '12', 'READ', 'COMMITTED', '|', 'REC', 'VERSION', '|', 'WAIT', '|', 'READ', 'WRITE)']
|
||||
# 2) for tx which is RETAINED: ['TRA', '13', 'INIT', '12', 'READ', 'COMMITTED', '|', 'REC', 'VERSION', '|', 'WAIT', '|', 'READ', 'WRITE)']
|
||||
# 0 1 2 3
|
||||
tx_base = int(words[1]) if tx_base == -1 else tx_base
|
||||
if words[2] == 'INIT':
|
||||
tx_origin_of_changes = int(words[3]) - tx_base
|
||||
tx_that_finished_now = int(words[1]) - tx_base
|
||||
print('Found "INIT_" token in "TRA_" record. Tx that is origin of changes: ', tx_origin_of_changes, '; Tx that finished now:', tx_that_finished_now)
|
||||
elif 'number' in line:
|
||||
tx_for_subsequent_changes = int(line.split()[2]) - tx_base # New number 15 --> 15
|
||||
print('Found record with "NEW NUMBER" for subsequent Tx numbers: ', tx_for_subsequent_changes)
|
||||
#
|
||||
act_1.expected_stdout = expected_stdout_1
|
||||
act_1.stdout = capsys.readouterr().out
|
||||
assert act_1.clean_stdout == act_1.clean_expected_stdout
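To illustrate the token splitting inside the loop above, here is how one hypothetical trace line decomposes; the layout follows the commented 'Result' example:

# Hypothetical trace line, shaped like the commented example above; not taken from a real log.
line = '(TRA_13, INIT_12, READ_COMMITTED | REC_VERSION | WAIT | READ_WRITE)'
words = line.replace(',', ' ').replace('_', ' ').replace('(', ' ').split()
# words == ['TRA', '13', 'INIT', '12', 'READ', 'COMMITTED', '|', 'REC', 'VERSION', '|', 'WAIT', '|', 'READ', 'WRITE)']
assert words[2] == 'INIT' and int(words[3]) == 12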
|
||||
|
@ -2,17 +2,17 @@
|
||||
#
|
||||
# id: bugs.core_6108
|
||||
# title: Regression: FB3 throws "Datatypes are not comparable in expression" in procedure parameters
|
||||
# decription:
|
||||
# Confirmed bug on 4.0.0.1567; 3.0.5.33160.
|
||||
# Works fine on 4.0.0.1573; 3.0.x is still affected
|
||||
#
|
||||
#
|
||||
# tracker_id: CORE-6108
|
||||
# min_versions: ['2.5']
|
||||
# versions: 2.5
|
||||
# qmid: None
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
from firebird.qa import db_factory, python_act, Action
|
||||
|
||||
# version: 2.5
|
||||
# resources: None
|
||||
@ -25,7 +25,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
|
||||
# test_script_1
|
||||
#---
|
||||
#
|
||||
#
|
||||
# proc_ddl='''
|
||||
# create or alter procedure test_proc ( a_dts timestamp) returns ( o_dts timestamp) as
|
||||
# begin
|
||||
@ -33,29 +33,46 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
# suspend;
|
||||
# end
|
||||
# '''
|
||||
#
|
||||
#
|
||||
# db_conn.execute_immediate( proc_ddl )
|
||||
# db_conn.commit()
|
||||
#
|
||||
#
|
||||
# cur=db_conn.cursor()
|
||||
#
|
||||
#
|
||||
# sttm="select o_dts from test_proc('2019-'|| COALESCE( ?, 1) ||'-01' )"
|
||||
# cur.execute( sttm, ( 3, ) )
|
||||
# for r in cur:
|
||||
# print(r[0])
|
||||
# cur.close()
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
expected_stdout_1 = """
|
||||
2019-03-01 00:00:00
|
||||
"""
|
||||
"""
|
||||
|
||||
proc_ddl = """
|
||||
create or alter procedure test_proc ( a_dts timestamp) returns ( o_dts timestamp) as
|
||||
begin
|
||||
o_dts = a_dts;
|
||||
suspend;
|
||||
end
|
||||
"""
|
||||
|
||||
@pytest.mark.version('>=2.5')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
def test_1(act_1: Action, capsys):
|
||||
with act_1.db.connect() as con:
|
||||
con.execute_immediate(proc_ddl)
|
||||
con.commit()
|
||||
c = con.cursor()
|
||||
for row in c.execute("select o_dts from test_proc('2019-'|| COALESCE( ?, 1) ||'-01' )", [3]):
|
||||
print(row[0])
|
||||
#
|
||||
act_1.expected_stdout = expected_stdout_1
|
||||
act_1.stdout = capsys.readouterr().out
|
||||
assert act_1.clean_stdout == act_1.clean_expected_stdout
|
||||
|
||||
|
||||
|
@ -2,25 +2,26 @@
|
||||
#
|
||||
# id: bugs.core_6116
|
||||
# title: The Metadata script extracted using ISQL of a database restored from a Firebird 2.5.9 Backup is invalid/incorrect when table has COMPUTED BY field
|
||||
# decription:
|
||||
# The test uses a backup of a database previously created in FB 2.5.9; the DDL is the same as in the ticket.
# This .fbk is restored and we launch ISQL -X in order to get the metadata. Then we check that the two lines
# in this script with the "COMPUTED BY" phrase contain a non-zero number as the width of this field:
|
||||
# 1) line that belongs to CREATE TABLE statement:
|
||||
# FULL_NAME VARCHAR(100) ... COMPUTED BY ...
|
||||
# 2) line with ALTER COLUMN statement:
|
||||
# ALTER FULL_NAME TYPE VARCHAR(100) ... COMPUTED BY ...
|
||||
#
|
||||
#
|
||||
# Confirmed bug on: 4.0.0.1723; 3.0.5.33225: found "VARCHAR(0)" in above mentioned lines.
|
||||
# Checked on: 4.0.0.1737; 3.0.6.33236 - works fine.
|
||||
#
|
||||
#
|
||||
# tracker_id: CORE-6116
|
||||
# min_versions: ['3.0.6']
|
||||
# versions: 3.0.6
|
||||
# qmid: None
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
import re
|
||||
from firebird.qa import db_factory, python_act, Action
|
||||
|
||||
# version: 3.0.6
|
||||
# resources: None
|
||||
@ -37,30 +38,30 @@ db_1 = db_factory(from_backup='core6116-25.fbk', init=init_script_1)
|
||||
# import time
|
||||
# import subprocess
|
||||
# import re
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
#
|
||||
#
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def flush_and_close( file_handle ):
|
||||
# # https://docs.python.org/2/library/os.html#os.fsync
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # If you're starting with a Python file object f,
|
||||
# # first do f.flush(), and
|
||||
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
|
||||
# global os
|
||||
#
|
||||
#
|
||||
# file_handle.flush()
|
||||
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
|
||||
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
|
||||
# os.fsync(file_handle.fileno())
|
||||
# file_handle.close()
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
# def cleanup( f_names_list ):
|
||||
# global os
|
||||
# for f in f_names_list:
|
||||
@ -71,55 +72,68 @@ db_1 = db_factory(from_backup='core6116-25.fbk', init=init_script_1)
|
||||
# else:
|
||||
# print('Unrecognized type of element:', f, ' - can not be treated as file.')
|
||||
# del_name = None
|
||||
#
|
||||
#
|
||||
# if del_name and os.path.isfile( del_name ):
|
||||
# os.remove( del_name )
|
||||
#
|
||||
#
|
||||
# #--------------------------------------------
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# f_metadata_sql=open( os.path.join(context['temp_directory'],'tmp_meta_6116.sql'), 'w')
|
||||
# subprocess.call([ context['isql_path'], '-x', dsn ], stdout=f_metadata_sql, stderr=subprocess.STDOUT)
|
||||
# flush_and_close( f_metadata_sql )
|
||||
#
|
||||
#
|
||||
# # FULL_NAME VARCHAR(0) CHARACTER SET WIN1252 COMPUTED BY
|
||||
# comp_field_initial_ptn = re.compile( 'FULL_NAME\\s+VARCHAR\\(\\d+\\).*COMPUTED BY', re.IGNORECASE )
|
||||
# comp_field_altered_ptn = re.compile( 'ALTER\\s+FULL_NAME\\s+TYPE\\s+VARCHAR\\(\\d+\\).*COMPUTED BY', re.IGNORECASE )
|
||||
#
|
||||
#
|
||||
# # CREATE TABLE statement must contain line:
|
||||
# # FULL_NAME VARCHAR(100) CHARACTER SET WIN1252 COMPUTED BY (CAST(NULL AS VARCHAR(1) CHARACTER SET WIN1252) COLLATE WIN_PTBR),
|
||||
# # ALTER FULL_NAME statement must contain line:
|
||||
# # ALTER FULL_NAME TYPE VARCHAR(100) CHARACTER SET WIN1252 COMPUTED BY ((first_name || ' ' || last_name || ' (' || user_name || ')') collate win_ptbr);
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
# # This should be empty:
|
||||
# with open( f_metadata_sql.name,'r') as f:
|
||||
# for line in f:
|
||||
# if comp_field_initial_ptn.search(line):
|
||||
# words = line.replace('(',' ').replace(')',' ').split() # ['FULL_NAME', 'VARCHAR', '0', ... , 'COMPUTED', 'BY']
|
||||
# print( 'Length in "CREATE TABLE" statement: ' + words[2] )
|
||||
#
|
||||
#
|
||||
# if comp_field_altered_ptn.search(line):
|
||||
# words = line.replace('(',' ').replace(')',' ').split() # ['ALTER', 'FULL_NAME', 'TYPE', 'VARCHAR', '0', ... , 'COMPUTED', 'BY']
|
||||
# print( 'Length in "ALTER COLUMN" statement: ' + words[4] )
|
||||
#
|
||||
#
|
||||
# # cleanup:
|
||||
# ##########
|
||||
# time.sleep(1)
|
||||
# cleanup( (f_metadata_sql,) )
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
expected_stdout_1 = """
|
||||
Length in "CREATE TABLE" statement: 100
|
||||
Length in "ALTER COLUMN" statement: 100
|
||||
"""
|
||||
"""
|
||||
|
||||
@pytest.mark.version('>=3.0.6')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
|
||||
|
||||
def test_1(act_1: Action, capsys):
|
||||
comp_field_initial_ptn = re.compile( 'FULL_NAME\\s+VARCHAR\\(\\d+\\).*COMPUTED BY', re.IGNORECASE )
|
||||
comp_field_altered_ptn = re.compile( 'ALTER\\s+FULL_NAME\\s+TYPE\\s+VARCHAR\\(\\d+\\).*COMPUTED BY', re.IGNORECASE )
|
||||
#
|
||||
act_1.isql(switches=['-x'])
|
||||
for line in act_1.stdout.splitlines():
|
||||
if comp_field_initial_ptn.search(line):
|
||||
words = line.replace('(',' ').replace(')',' ').split() # ['FULL_NAME', 'VARCHAR', '0', ... , 'COMPUTED', 'BY']
|
||||
print(f'Length in "CREATE TABLE" statement: {words[2]}')
|
||||
if comp_field_altered_ptn.search(line):
|
||||
words = line.replace('(',' ').replace(')',' ').split() # ['ALTER', 'FULL_NAME', 'TYPE', 'VARCHAR', '0', ... , 'COMPUTED', 'BY']
|
||||
print(f'Length in "ALTER COLUMN" statement: {words[4]}')
|
||||
# Check
|
||||
act_1.reset()
|
||||
act_1.expected_stdout = expected_stdout_1
|
||||
act_1.stdout = capsys.readouterr().out
|
||||
assert act_1.clean_stdout == act_1.clean_expected_stdout
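For clarity, the same words-splitting applied to the metadata line quoted in the old comments (shortened for illustration):

# The line is shortened for illustration; only the leading tokens matter here.
line = 'FULL_NAME VARCHAR(100) CHARACTER SET WIN1252 COMPUTED BY (...)'
words = line.replace('(', ' ').replace(')', ' ').split()
# words[:3] == ['FULL_NAME', 'VARCHAR', '100']  ->  printed as: Length in "CREATE TABLE" statement: 100
assert words[2] == '100'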
|
||||
|
@ -2,25 +2,26 @@
|
||||
#
|
||||
# id: bugs.core_6138
|
||||
# title: Inconsistent behavior regarding visibility of master record on detail inserts
|
||||
# decription:
|
||||
# Confirmed bug on: 3.0.5.33152 (built 14.09.19), 4.0.0.1598 (built 08.09.19):
|
||||
# no error was raised when the second attachment tried to insert a record into the child table
# after the first attachment committed (the master record was not visible to the 2nd attachment
# because of its SNAPSHOT isolation level).
|
||||
#
|
||||
#
|
||||
# Works fine on:
|
||||
# 4.0.0.1639 SS: 1.745s.
|
||||
# 4.0.0.1633 CS: 2.266s.
|
||||
# 3.0.5.33183 SS: 1.265s.
|
||||
# 3.0.5.33178 CS: 1.611s.
|
||||
#
|
||||
#
|
||||
# tracker_id: CORE-6138
|
||||
# min_versions: ['3.0.5']
|
||||
# versions: 3.0.5
|
||||
# qmid: None
|
||||
|
||||
import pytest
|
||||
from firebird.qa import db_factory, isql_act, Action
|
||||
from firebird.qa import db_factory, python_act, Action
|
||||
from firebird.driver import tpb, Isolation, DatabaseError
|
||||
|
||||
# version: 3.0.5
|
||||
# resources: None
|
||||
@ -33,68 +34,82 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
|
||||
|
||||
# test_script_1
|
||||
#---
|
||||
#
|
||||
#
|
||||
# import os
|
||||
# import sys
|
||||
#
|
||||
#
|
||||
# os.environ["ISC_USER"] = user_name
|
||||
# os.environ["ISC_PASSWORD"] = user_password
|
||||
# db_conn.close()
|
||||
#
|
||||
#
|
||||
# custom_tpb = fdb.TPB()
|
||||
# custom_tpb.isolation_level = fdb.isc_tpb_concurrency
|
||||
# custom_tpb.lock_resolution = fdb.isc_tpb_nowait
|
||||
#
|
||||
#
|
||||
# con1=fdb.connect( dsn = dsn )
|
||||
# con2=fdb.connect( dsn = dsn )
|
||||
# con2.begin( tpb = custom_tpb )
|
||||
#
|
||||
#
|
||||
# #print( 'FDB version: ' + fdb.__version__ )
|
||||
# #print( 'Firebird version: ' + con1.firebird_version )
|
||||
#
|
||||
#
|
||||
# con1.execute_immediate( 'create table a (id int primary key)' )
|
||||
# con1.execute_immediate( 'create table b (id int primary key, id_a int, constraint fk_b__a foreign key(id_a) references a(id) on update cascade on delete cascade)' )
|
||||
# con1.commit()
|
||||
#
|
||||
#
|
||||
# con1.begin( tpb = custom_tpb )
|
||||
# cur1=con1.cursor()
|
||||
# cur1.execute('insert into a(id) values( ? )', ( 1, ) )
|
||||
#
|
||||
#
|
||||
# con2.commit()
|
||||
# con2.begin(tpb = custom_tpb )
|
||||
# cur2=con2.cursor()
|
||||
# cur2.execute('select id from a')
|
||||
#
|
||||
#
|
||||
# con1.commit()
|
||||
# try:
|
||||
# cur2.execute( 'insert into b (id, id_a) values (?, ?)', (1, 1,) )
|
||||
# print('UNEXPECTED SUCCESS: CHILD RECORD INSERTED W/O ERROR.')
|
||||
# except Exception as e:
|
||||
#
|
||||
#
|
||||
# print( ('EXPECTED: FK violation encountered.' if '335544466' in repr(e) else 'Unknown/unexpected exception.') )
|
||||
#
|
||||
#
|
||||
# # print( x for x in e ) # Python 3.x: TypeError: 'DatabaseError' object is not iterable
|
||||
# # print( e[0] ) # Python 3.x: TypeError: 'DatabaseError' object is not subscriptable
|
||||
#
|
||||
#
|
||||
# finally:
|
||||
# cur2.close()
|
||||
#
|
||||
#
|
||||
# con2.close()
|
||||
#
|
||||
#
|
||||
# cur1.close()
|
||||
# con1.close()
|
||||
#
|
||||
#
|
||||
#
|
||||
#
|
||||
#---
|
||||
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
|
||||
|
||||
expected_stdout_1 = """
|
||||
EXPECTED: FK violation encountered.
|
||||
"""
|
||||
act_1 = python_act('db_1', substitutions=substitutions_1)
|
||||
|
||||
@pytest.mark.version('>=3.0.5')
|
||||
@pytest.mark.xfail
|
||||
def test_1(db_1):
|
||||
pytest.fail("Test not IMPLEMENTED")
|
||||
def test_1(act_1: Action):
|
||||
custom_tpb = tpb(isolation=Isolation.CONCURRENCY, lock_timeout=0)
|
||||
with act_1.db.connect() as con1, act_1.db.connect() as con2:
|
||||
con2.begin(custom_tpb)
|
||||
|
||||
con1.execute_immediate('create table a (id int primary key)')
|
||||
con1.execute_immediate('create table b (id int primary key, id_a int, constraint fk_b__a foreign key(id_a) references a(id) on update cascade on delete cascade)')
|
||||
con1.commit()
|
||||
|
||||
con1.begin(custom_tpb)
|
||||
cur1 = con1.cursor()
|
||||
cur1.execute('insert into a(id) values( ? )', [1])
|
||||
|
||||
con2.commit()
|
||||
con2.begin(custom_tpb)
|
||||
cur2 = con2.cursor()
|
||||
cur2.execute('select id from a')
|
||||
|
||||
con1.commit()
|
||||
|
||||
with pytest.raises(DatabaseError, match='.*violation of FOREIGN KEY constraint.*'):
|
||||
cur2.execute('insert into b (id, id_a) values (?, ?)', [1, 1])
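For reference, the transaction parameters used here are meant to mirror the commented-out fdb setup above; treat the mapping as an assumption rather than a verified equivalence:

# Rough mapping (assumption) between the old fdb TPB above and the driver call used here:
#   fdb:              custom_tpb.isolation_level = fdb.isc_tpb_concurrency ; custom_tpb.lock_resolution = fdb.isc_tpb_nowait
#   firebird-driver:  custom_tpb = tpb(isolation=Isolation.CONCURRENCY, lock_timeout=0)   # lock_timeout=0 taken to mean NO WAIT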
|
||||
|