6
0
mirror of https://github.com/FirebirdSQL/firebird-qa.git synced 2025-01-22 13:33:07 +01:00

More python tests and some enhancements

This commit is contained in:
Pavel Císař 2021-12-09 19:26:42 +01:00
parent 76b49666fa
commit 0711661a93
53 changed files with 2753 additions and 1599 deletions

BIN
files/core_5207.zip Normal file

Binary file not shown.

Binary file not shown.

BIN
files/core_5618.zip Normal file

Binary file not shown.

BIN
files/core_5637.zip Normal file

Binary file not shown.

BIN
files/core_5659.zip Normal file

Binary file not shown.

Binary file not shown.

View File

@ -25,7 +25,7 @@
import pytest import pytest
from firebird.qa import db_factory, python_act, Action from firebird.qa import db_factory, python_act, Action
from firebird.driver import DbWriteMode, SrvRestoreFlag from firebird.driver import SrvRestoreFlag
#from difflib import unified_diff #from difflib import unified_diff
from io import BytesIO from io import BytesIO
@ -521,8 +521,7 @@ act_1 = python_act('db_1', substitutions=substitutions_1)
@pytest.mark.version('>=4.0') @pytest.mark.version('>=4.0')
def test_1(act_1: Action): def test_1(act_1: Action):
# CHANGE FW to OFF # CHANGE FW to OFF
with act_1.connect_server() as srv: act_1.db.set_async_write()
srv.database.set_write_mode(database=act_1.db.db_path, mode=DbWriteMode.ASYNC)
# 1. FIRST RUN DML_TEST # 1. FIRST RUN DML_TEST
act_1.script = test_script_1 act_1.script = test_script_1
act_1.execute() act_1.execute()

View File

@ -30,7 +30,7 @@
import pytest import pytest
from firebird.qa import db_factory, python_act, Action from firebird.qa import db_factory, python_act, Action
from firebird.driver import DbWriteMode, DbInfoCode from firebird.driver import DbInfoCode
# version: 2.5 # version: 2.5
# resources: None # resources: None
@ -172,8 +172,7 @@ act_1 = python_act('db_1', substitutions=substitutions_1)
@pytest.mark.version('>=2.5') @pytest.mark.version('>=2.5')
def test_1(act_1: Action): def test_1(act_1: Action):
# Change FW to OFF in order to speed up initial data filling: # Change FW to OFF in order to speed up initial data filling:
with act_1.connect_server() as srv: act_1.db.set_async_write()
srv.database.set_write_mode(database=act_1.db.db_path, mode=DbWriteMode.ASYNC)
# prepare DB for testing: create lot of tables: # prepare DB for testing: create lot of tables:
num_of_tables = 1000 num_of_tables = 1000
sql_ddl = f''' sql_ddl = f'''

View File

@ -279,6 +279,3 @@ def test_1(act_1: Action):
act_1.expected_stdout = expected_stdout_1 act_1.expected_stdout = expected_stdout_1
act_1.trace_to_stdout() act_1.trace_to_stdout()
assert act_1.clean_stdout == act_1.clean_expected_stdout assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@ -42,7 +42,6 @@
import pytest import pytest
from firebird.qa import db_factory, python_act, Action from firebird.qa import db_factory, python_act, Action
from firebird.driver import DbWriteMode
# version: 2.5.2 # version: 2.5.2
# resources: None # resources: None
@ -458,8 +457,7 @@ trace_1 = ['log_transactions = true',
def test_1(act_1: Action, capsys): def test_1(act_1: Action, capsys):
NUM_ROWS_TO_BE_ADDED = 45000 NUM_ROWS_TO_BE_ADDED = 45000
# Change FW to OFF in order to speed up initial data filling # Change FW to OFF in order to speed up initial data filling
with act_1.connect_server() as srv: act_1.db.set_async_write()
srv.database.set_write_mode(database=act_1.db.db_path, mode=DbWriteMode.ASYNC)
# Make initial data filling into PERMANENT table for retrieving later number of data pages # Make initial data filling into PERMANENT table for retrieving later number of data pages
# (it should be the same for any kind of tables, including GTTs): # (it should be the same for any kind of tables, including GTTs):
with act_1.db.connect() as con: with act_1.db.connect() as con:

View File

@ -56,7 +56,7 @@ import re
import subprocess import subprocess
from datetime import datetime from datetime import datetime
from firebird.qa import db_factory, python_act, Action from firebird.qa import db_factory, python_act, Action
from firebird.driver import DbWriteMode, ShutdownMethod, ShutdownMode from firebird.driver import ShutdownMethod, ShutdownMode
# version: 3.0 # version: 3.0
# resources: None # resources: None
@ -572,8 +572,7 @@ def test_1(act_1: Action, capsys):
""" """
act_1.isql(switches=[], input=sql_ddl) act_1.isql(switches=[], input=sql_ddl)
# Temporay change FW to OFF in order to make DML faster: # Temporay change FW to OFF in order to make DML faster:
with act_1.connect_server() as srv: act_1.db.set_async_write()
srv.database.set_write_mode(database=act_1.db.db_path, mode=DbWriteMode.ASYNC)
# #
sql_data = f""" sql_data = f"""
set term ^; set term ^;
@ -605,8 +604,7 @@ def test_1(act_1: Action, capsys):
act_1.reset() act_1.reset()
act_1.isql(switches=['-nod'], input=sql_data) act_1.isql(switches=['-nod'], input=sql_data)
# Restore FW to ON (make sweep to do its work "harder"): # Restore FW to ON (make sweep to do its work "harder"):
with act_1.connect_server() as srv: act_1.db.set_async_write()
srv.database.set_write_mode(database=act_1.db.db_path, mode=DbWriteMode.SYNC)
# Trace # Trace
with act_1.trace(db_events=trace_1): with act_1.trace(db_events=trace_1):
# Traced action # Traced action

View File

@ -373,5 +373,5 @@ expected_stdout_1 = """
@pytest.mark.version('>=4.0') @pytest.mark.version('>=4.0')
def test_1(db_1): def test_1(db_1):
pytest.skip("Test requires 3rd party encryption plugin") pytest.skip("Requires encryption plugin")
#pytest.fail("Test not IMPLEMENTED") #pytest.fail("Test not IMPLEMENTED")

View File

@ -33,7 +33,6 @@ import subprocess
import time import time
from pathlib import Path from pathlib import Path
from firebird.qa import db_factory, python_act, Action, temp_file from firebird.qa import db_factory, python_act, Action, temp_file
from firebird.driver import DbWriteMode
# version: 3.0 # version: 3.0
# resources: None # resources: None
@ -254,8 +253,7 @@ heavy_output_1 = temp_file('heavy_script.out')
@pytest.mark.version('>=3.0') @pytest.mark.version('>=3.0')
def test_1(act_1: Action, heavy_script_1: Path, heavy_output_1: Path, capsys): def test_1(act_1: Action, heavy_script_1: Path, heavy_output_1: Path, capsys):
# Change database FW to OFF in order to increase speed of insertions and output its header info # Change database FW to OFF in order to increase speed of insertions and output its header info
with act_1.connect_server() as srv: act_1.db.set_async_write()
srv.database.set_write_mode(database=act_1.db.db_path, mode=DbWriteMode.ASYNC)
# Preparing script for ISQL that will do 'heavy DML' # Preparing script for ISQL that will do 'heavy DML'
heavy_script_1.write_text(""" heavy_script_1.write_text("""
recreate sequence g; recreate sequence g;

View File

@ -34,7 +34,6 @@
import pytest import pytest
from zipfile import Path from zipfile import Path
from firebird.qa import db_factory, python_act, Action from firebird.qa import db_factory, python_act, Action
from firebird.driver import DbWriteMode
# version: 3.0 # version: 3.0
# resources: None # resources: None
@ -119,8 +118,7 @@ expected_stdout_1 = """
@pytest.mark.version('>=3.0') @pytest.mark.version('>=3.0')
def test_1(act_1: Action): def test_1(act_1: Action):
with act_1.connect_server() as srv: act_1.db.set_async_write()
srv.database.set_write_mode(database=act_1.db.db_path, mode=DbWriteMode.ASYNC)
# Read FNC scripts from zip file and execute it # Read FNC scripts from zip file and execute it
script_file = Path(act_1.vars['files'] / 'core_4880.zip', script_file = Path(act_1.vars['files'] / 'core_4880.zip',
at='core_4880_fnc.tmp') at='core_4880_fnc.tmp')

View File

@ -174,5 +174,5 @@ expected_stdout_1 = """
@pytest.mark.version('>=3.0') @pytest.mark.version('>=3.0')
def test_1(db_1): def test_1(db_1):
pytest.skip("Test depends on 3rd party encryption plugin") pytest.skip("Requires encryption plugin")
#pytest.fail("Test not IMPLEMENTED") #pytest.fail("Test not IMPLEMENTED")

View File

@ -26,7 +26,7 @@
import pytest import pytest
from firebird.qa import db_factory, python_act, Action from firebird.qa import db_factory, python_act, Action
from firebird.driver import DbWriteMode, TPB, Isolation from firebird.driver import TPB, Isolation
# version: 2.5.6 # version: 2.5.6
# resources: None # resources: None
@ -85,8 +85,7 @@ expected_stdout_1 = """
@pytest.mark.version('>=2.5.6') @pytest.mark.version('>=2.5.6')
def test_1(act_1: Action): def test_1(act_1: Action):
with act_1.connect_server() as srv: act_1.db.set_async_write()
srv.database.set_write_mode(database=act_1.db.db_path, mode=DbWriteMode.ASYNC)
# #
custom_tpb = TPB(isolation=Isolation.CONCURRENCY).get_buffer() custom_tpb = TPB(isolation=Isolation.CONCURRENCY).get_buffer()
with act_1.db.connect(no_gc=True) as con: with act_1.db.connect(no_gc=True) as con:

View File

@ -21,7 +21,6 @@
import pytest import pytest
from firebird.qa import db_factory, python_act, Action from firebird.qa import db_factory, python_act, Action
from firebird.driver import DbWriteMode
# version: 4.0 # version: 4.0
# resources: None # resources: None
@ -126,8 +125,7 @@ test_sript_1 = """
@pytest.mark.version('>=4.0') @pytest.mark.version('>=4.0')
def test_1(act_1: Action): def test_1(act_1: Action):
with act_1.connect_server() as srv: act_1.db.set_async_write()
srv.database.set_write_mode(database=act_1.db.db_path, mode=DbWriteMode.ASYNC)
act_1.expected_stdout = expected_stdout_1 act_1.expected_stdout = expected_stdout_1
act_1.isql(switches=[], input=test_sript_1) act_1.isql(switches=[], input=test_sript_1)
assert act_1.clean_stdout == act_1.clean_expected_stdout assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@ -26,7 +26,6 @@
import pytest import pytest
from firebird.qa import db_factory, python_act, Action from firebird.qa import db_factory, python_act, Action
from firebird.driver import DbWriteMode
# version: 2.5.7 # version: 2.5.7
# resources: None # resources: None
@ -267,8 +266,7 @@ test_script_1 = f"""
def test_1(act_1: Action): def test_1(act_1: Action):
if act_1.get_server_architecture() == 'SS': if act_1.get_server_architecture() == 'SS':
# Bucgcheck is reproduced on 2.5.7.27030 only when FW = OFF # Bucgcheck is reproduced on 2.5.7.27030 only when FW = OFF
with act_1.connect_server() as srv: act_1.db.set_async_write()
srv.database.set_write_mode(database=act_1.db.db_path, mode=DbWriteMode.ASYNC)
# Test # Test
act_1.expected_stdout = expected_stdout_1 act_1.expected_stdout = expected_stdout_1
act_1.isql(switches=[], input=test_script_1) act_1.isql(switches=[], input=test_script_1)

View File

@ -33,7 +33,7 @@
# qmid: None # qmid: None
import pytest import pytest
from firebird.qa import db_factory, isql_act, Action from firebird.qa import db_factory, python_act, Action
# version: 3.0.2 # version: 3.0.2
# resources: None # resources: None
@ -304,16 +304,88 @@ db_1 = db_factory(page_size=8192, sql_dialect=3, init=init_script_1)
# cleanup( (f_trc_cfg, f_trc_lst, f_trc_log, f_trc_err, sql_log, sql_err, sql_cmd) ) # cleanup( (f_trc_cfg, f_trc_lst, f_trc_log, f_trc_err, sql_log, sql_err, sql_cmd) )
# #
#--- #---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
expected_stdout_1 = """ expected_stdout_1 = """
PLAN (TEST ORDER TEST_F01_ID) PLAN (TEST ORDER TEST_F01_ID)
Number of fetches: acceptable. Number of fetches: acceptable.
""" """
FETCHES_THRESHOLD = 80
init_sql_1 = """
recreate table test
(
id int not null,
f01 int,
f02 int
);
set term ^;
create or alter procedure sp_add_init_data(a_rows_to_add int)
as
declare n int;
declare i int = 0;
begin
n = a_rows_to_add;
while (i < n) do
begin
insert into test(id, f01, f02) values(:i, nullif(mod(:i, :n/20), 0), iif(mod(:i,3)<2, 0, 1))
returning :i+1 into i;
end
end
^
set term ^;
commit;
execute procedure sp_add_init_data(300000);
commit;
create index test_f01_id on test(f01, id);
create index test_f02_only on test(f02);
commit;
"""
test_script_1 = """
set list on;
select count(*) cnt_check
from (
select *
from test
where f01 -- ###################################################################
IS NULL -- <<< ::: NB ::: we check here 'f01 is NULL', exactly as ticket says.
and f02=0 -- ###################################################################
order by f01, id
) ;
"""
trace_1 = ['time_threshold = 0',
'log_statement_finish = true',
'print_plan = true',
'print_perf = true',
'log_initfini = false',
]
@pytest.mark.version('>=3.0.2') @pytest.mark.version('>=3.0.2')
@pytest.mark.xfail def test_1(act_1: Action):
def test_1(db_1): act_1.db.set_async_write()
pytest.fail("Test not IMPLEMENTED") act_1.isql(switches=[], input=init_sql_1)
#
with act_1.trace(db_events=trace_1):
act_1.reset()
act_1.isql(switches=[], input=test_script_1)
# Process trace
run_with_plan = ''
num_of_fetches = 99999999
for line in act_1.trace_log:
if line.lower().startswith('plan ('):
run_with_plan = line.strip().upper()
elif 'fetch(es)' in line:
words = line.split()
for k in range(len(words)):
if words[k].startswith('fetch'):
num_of_fetches = int(words[k-1])
# Check
assert run_with_plan == 'PLAN (TEST ORDER TEST_F01_ID)'
assert num_of_fetches < FETCHES_THRESHOLD

View File

@ -2,25 +2,28 @@
# #
# id: bugs.core_5496 # id: bugs.core_5496
# title: Creating SRP SYSDBA with explicit admin (-admin yes in gsec or grant admin role in create user) creates two SYSDBA accounts # title: Creating SRP SYSDBA with explicit admin (-admin yes in gsec or grant admin role in create user) creates two SYSDBA accounts
# decription: # decription:
# Test script should display only ONE record. # Test script should display only ONE record.
# Confirmed problem on: # Confirmed problem on:
# 3.0.0.32483: three(!) records are displayed instead of one. # 3.0.0.32483: three(!) records are displayed instead of one.
# 3.0.1.32609: no records displayed with 'sysdba' account. # 3.0.1.32609: no records displayed with 'sysdba' account.
# Confirmed bug on 3.0.2.32658, WI-V3.0.2.32691. # Confirmed bug on 3.0.2.32658, WI-V3.0.2.32691.
# #
# Checked on 3.0.2.32703: all OK. # Checked on 3.0.2.32703: all OK.
# Checked on 4.0.0.1479, 3.0.5.33115 - all fine. # Checked on 4.0.0.1479, 3.0.5.33115 - all fine.
# #
# 03-mar-2021: replaced 'xnet' with 'localhost' in order have ability to run this test on Linux. # 03-mar-2021: replaced 'xnet' with 'localhost' in order have ability to run this test on Linux.
# #
# [pcisar] 8.12.2021
# Fails with "no permission for remote access to database security.db" on Linux FB 4.0
#
# tracker_id: CORE-5496 # tracker_id: CORE-5496
# min_versions: ['3.0.2'] # min_versions: ['3.0.2']
# versions: 3.0.2 # versions: 3.0.2
# qmid: None # qmid: None
import pytest import pytest
from firebird.qa import db_factory, isql_act, Action from firebird.qa import db_factory, python_act, Action
# version: 3.0.2 # version: 3.0.2
# resources: None # resources: None
@ -33,14 +36,14 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1 # test_script_1
#--- #---
# #
# import os # import os
# #
# os.environ["ISC_USER"] = user_name # os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password # os.environ["ISC_PASSWORD"] = user_password
# #
# db_conn.close() # db_conn.close()
# #
# check_sql=''' # check_sql='''
# -- connect 'xnet://security.db'; # -- connect 'xnet://security.db';
# connect 'localhost:security.db'; # connect 'localhost:security.db';
@ -49,36 +52,61 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# create or alter user bar password '123' grant admin role using plugin Srp; # create or alter user bar password '123' grant admin role using plugin Srp;
# commit; # commit;
# grant rdb$admin to sysdba granted by foo; # grant rdb$admin to sysdba granted by foo;
# grant rdb$admin to sysdba granted by rio; # grant rdb$admin to sysdba granted by rio;
# grant rdb$admin to sysdba granted by bar; # grant rdb$admin to sysdba granted by bar;
# commit; # commit;
# set list on; # set list on;
# set count on; # set count on;
# select sec$user_name, sec$plugin from sec$users where upper(sec$user_name) = upper('sysdba') and upper(sec$plugin) = upper('srp'); # select sec$user_name, sec$plugin from sec$users where upper(sec$user_name) = upper('sysdba') and upper(sec$plugin) = upper('srp');
# commit; # commit;
# #
# drop user foo using plugin Srp; # drop user foo using plugin Srp;
# drop user rio using plugin Srp; # drop user rio using plugin Srp;
# drop user bar using plugin Srp; # drop user bar using plugin Srp;
# commit; # commit;
# quit; # quit;
# ''' # '''
# #
# runProgram('isql', ['-q'], check_sql) # runProgram('isql', ['-q'], check_sql)
# #
# #
#--- #---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
expected_stdout_1 = """ expected_stdout_1 = """
SEC$USER_NAME SYSDBA SEC$USER_NAME SYSDBA
SEC$PLUGIN Srp SEC$PLUGIN Srp
Records affected: 1 Records affected: 1
""" """
test_script_1 = """
connect 'localhost:security.db';
create or alter user foo password '123' grant admin role using plugin Srp;
create or alter user rio password '123' grant admin role using plugin Srp;
create or alter user bar password '123' grant admin role using plugin Srp;
commit;
grant rdb$admin to sysdba granted by foo;
grant rdb$admin to sysdba granted by rio;
grant rdb$admin to sysdba granted by bar;
commit;
set list on;
set count on;
select sec$user_name, sec$plugin from sec$users where upper(sec$user_name) = upper('sysdba') and upper(sec$plugin) = upper('srp');
commit;
drop user foo using plugin Srp;
drop user rio using plugin Srp;
drop user bar using plugin Srp;
commit;
quit;
"""
@pytest.mark.version('>=3.0.2') @pytest.mark.version('>=3.0.2')
@pytest.mark.xfail def test_1(act_1: Action):
def test_1(db_1): pytest.skip("Requires remote access to security.db")
pytest.fail("Test not IMPLEMENTED") act_1.expected_stdout = expected_stdout_1
act_1.isql(switches=['-q', '-b'], input=test_script_1)
assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@ -2,15 +2,15 @@
# #
# id: bugs.core_5501 # id: bugs.core_5501
# title: Unclear gstat's diagnostic when damaged page in DB file appears encrypted # title: Unclear gstat's diagnostic when damaged page in DB file appears encrypted
# decription: # decription:
# Test creates table 'TEST' with varchar and blob fields, + index on varchar, and add some data to it. # Test creates table 'TEST' with varchar and blob fields, + index on varchar, and add some data to it.
# Blob field is filled by long values in order to prevent acomodation of its content within data pages. # Blob field is filled by long values in order to prevent acomodation of its content within data pages.
# As result, this table should have pages of three different types: DataPage, BTreePage and BlobPage. # As result, this table should have pages of three different types: DataPage, BTreePage and BlobPage.
# #
# Then we find number of first PP of this table by scrolling RDB$PAGES join RDB$RELATIONS result set. # Then we find number of first PP of this table by scrolling RDB$PAGES join RDB$RELATIONS result set.
# After this we: # After this we:
# * define type of every page starting from first PP for 'TEST' table and up to total pages of DB, # * define type of every page starting from first PP for 'TEST' table and up to total pages of DB,
# and doing this for each subsequent page, until ALL THREE different page types will be detected: # and doing this for each subsequent page, until ALL THREE different page types will be detected:
# 1) data page, 2) index B-Tree and 3) blob page. # 1) data page, 2) index B-Tree and 3) blob page.
# These page numbers are stored in variables: (brk_datapage, brk_indxpage, brk_blobpage). # These page numbers are stored in variables: (brk_datapage, brk_indxpage, brk_blobpage).
# When all three page numbers are found, loop is terminated; # When all three page numbers are found, loop is terminated;
@ -22,28 +22,43 @@
# * Close DB file handle and: # * Close DB file handle and:
# ** 1) run 'gstat -e'; # ** 1) run 'gstat -e';
# ** 2) run online validation; # ** 2) run online validation;
# * open DB file again as binary and restore its content from var. 'raw_db_content' in order # * open DB file again as binary and restore its content from var. 'raw_db_content' in order
# fbtest framework could finish this test (by making connect and drop this database); # fbtest framework could finish this test (by making connect and drop this database);
# #
# KEY POINTS: # KEY POINTS:
# * report of 'gstat -e' should contain line with text 'ENCRYPTED 3 (DB problem!)' # * report of 'gstat -e' should contain line with text 'ENCRYPTED 3 (DB problem!)'
# (number '3' should present becase we damaged pages of THREE diff. types: DP, BTree and Blob). # (number '3' should present becase we damaged pages of THREE diff. types: DP, BTree and Blob).
# * report of online validation should contain lines with info about three diff. page types which have problems. # * report of online validation should contain lines with info about three diff. page types which have problems.
# #
# Checked on 3.0.2.32702 (CS/SC/SS), 4.0.0.563 (CS/SC/SS) # Checked on 3.0.2.32702 (CS/SC/SS), 4.0.0.563 (CS/SC/SS)
# #
# [pcisar] 8.12.2021
# Reimplementation does not work as expected on Linux 4.0
# gstat output:
# Data pages: total 97, encrypted 0, non-crypted 97
# Index pages: total 85, encrypted 0, non-crypted 85
# Blob pages: total 199, encrypted 0, non-crypted 199
# Generator pages: total 1, encrypted 0, non-crypted 1
# Validation does not report BLOB page errors, only data and index corruptions.
#
# tracker_id: CORE-5501 # tracker_id: CORE-5501
# min_versions: ['3.0.2'] # min_versions: ['3.0.2']
# versions: 3.0.2 # versions: 3.0.2
# qmid: None # qmid: None
from __future__ import annotations
from typing import Dict
import pytest import pytest
from firebird.qa import db_factory, isql_act, Action import re
from struct import unpack_from
from firebird.qa import db_factory, python_act, Action
from firebird.driver import Connection
# version: 3.0.2 # version: 3.0.2
# resources: None # resources: None
substitutions_1 = [('total \\d+,', 'total'), ('non-crypted \\d+', 'non-crypted'), ('crypted \\d+', 'crypted')] substitutions_1 = [('total \\d+,', 'total'),
('non-crypted \\d+', 'non-crypted'), ('crypted \\d+', 'crypted')]
init_script_1 = """ init_script_1 = """
alter database drop linger; alter database drop linger;
@ -53,55 +68,55 @@ init_script_1 = """
commit; commit;
set count on; set count on;
insert into test(s, b) insert into test(s, b)
select select
rpad( '',1000, uuid_to_char(gen_uuid()) ), rpad( '',1000, uuid_to_char(gen_uuid()) ),
rpad( '', rpad( '',
10000, -- NB: blob should have a big size! It should NOT be stored withih a data page. 10000, -- NB: blob should have a big size! It should NOT be stored withih a data page.
'qwertyuioplkjhgfdsazxcvbnm0987654321') 'qwertyuioplkjhgfdsazxcvbnm0987654321')
from rdb$types from rdb$types
rows 100; rows 100;
commit; commit;
""" """
db_1 = db_factory(sql_dialect=3, init=init_script_1) db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1 # test_script_1
#--- #---
# #
# import os # import os
# import fdb # import fdb
# import re # import re
# import subprocess # import subprocess
# import time # import time
# from fdb import services # from fdb import services
# #
# os.environ["ISC_USER"] = user_name # os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password # os.environ["ISC_PASSWORD"] = user_password
# dbnm = db_conn.database_name # dbnm = db_conn.database_name
# #
# so=sys.stdout # so=sys.stdout
# se=sys.stderr # se=sys.stderr
# #
# map_dbo={} # map_dbo={}
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# def flush_and_close( file_handle ): # def flush_and_close( file_handle ):
# # https://docs.python.org/2/library/os.html#os.fsync # # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f, # # If you're starting with a Python file object f,
# # first do f.flush(), and # # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. # # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os # global os
# #
# file_handle.flush() # file_handle.flush()
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: # if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
# # otherwise: "OSError: [Errno 9] Bad file descriptor"! # # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno()) # os.fsync(file_handle.fileno())
# file_handle.close() # file_handle.close()
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# def cleanup( f_names_list ): # def cleanup( f_names_list ):
# global os # global os
# for i in range(len( f_names_list )): # for i in range(len( f_names_list )):
@ -112,12 +127,12 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# else: # else:
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.') # print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
# del_name = None # del_name = None
# #
# if del_name and os.path.isfile( del_name ): # if del_name and os.path.isfile( del_name ):
# os.remove( del_name ) # os.remove( del_name )
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# def fill_dbo(con, map_dbo): # def fill_dbo(con, map_dbo):
# cur=con.cursor() # cur=con.cursor()
# sql=''' # sql='''
@ -131,9 +146,9 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# rr.rdb$relation_type rel_type, # rr.rdb$relation_type rel_type,
# rr.rdb$system_flag sys_flag # rr.rdb$system_flag sys_flag
# from rdb$relations rr # from rdb$relations rr
# #
# union all # union all
# #
# select # select
# rr.rdb$relation_id rel_id, -- 0 # rr.rdb$relation_id rel_id, -- 0
# rr.rdb$relation_name rel_name, -- 1 # rr.rdb$relation_name rel_name, -- 1
@ -152,27 +167,27 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# cur.execute(sql) # cur.execute(sql)
# for r in cur: # for r in cur:
# map_dbo[ r[0], r[2] ] = ( r[1].strip(), r[3].strip() ) # map_dbo[ r[0], r[2] ] = ( r[1].strip(), r[3].strip() )
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# def parse_page_header(con, page_number, map_dbo): # def parse_page_header(con, page_number, map_dbo):
# #
# from struct import unpack_from # from struct import unpack_from
# #
# global PAGE_TYPES # global PAGE_TYPES
# #
# page_buffer = con.get_page_contents( page_number ) # page_buffer = con.get_page_contents( page_number )
# #
# # dimitr, 20.01.2017 ~13:00 # # dimitr, 20.01.2017 ~13:00
# # all *CHAR = 1 byte, *SHORT = 2 bytes, *LONG = 4 bytes. # # all *CHAR = 1 byte, *SHORT = 2 bytes, *LONG = 4 bytes.
# #
# # https://docs.python.org/2/library/struct.html # # https://docs.python.org/2/library/struct.html
# # struct.unpack_from(fmt, buffer[, offset=0]) # # struct.unpack_from(fmt, buffer[, offset=0])
# # Unpack the buffer according to the given format. # # Unpack the buffer according to the given format.
# # The result is a tuple even if it contains exactly one item. # # The result is a tuple even if it contains exactly one item.
# # The buffer must contain at least the amount of data required by the format # # The buffer must contain at least the amount of data required by the format
# # len(buffer[offset:]) must be at least calcsize(fmt). # # len(buffer[offset:]) must be at least calcsize(fmt).
# # First character of the format string can be used to indicate the byte order, # # First character of the format string can be used to indicate the byte order,
# # size and alignment of the packed data # # size and alignment of the packed data
# # Native byte order is big-endian or little-endian: # # Native byte order is big-endian or little-endian:
# # < little-endian # # < little-endian
@ -181,9 +196,9 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# # Use sys.byteorder to check the endianness of your system: # # Use sys.byteorder to check the endianness of your system:
# # https://docs.python.org/2/library/struct.html#format-characters # # https://docs.python.org/2/library/struct.html#format-characters
# # c char string of length 1 # # c char string of length 1
# # b signed char # # b signed char
# # B unsigned char # # B unsigned char
# # h short # # h short
# # H unsigned short integer # # H unsigned short integer
# # i int integer 4 # # i int integer 4
# # I unsigned int integer 4 # # I unsigned int integer 4
@ -191,15 +206,15 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# # L unsigned long (4) # # L unsigned long (4)
# # q long long (8) # # q long long (8)
# # Q unsigned long long # # Q unsigned long long
# #
# (page_type,) = unpack_from('<b',page_buffer) # (page_type,) = unpack_from('<b',page_buffer)
# #
# relation_id=-1 # relation_id=-1
# index_id=-1 # index_id=-1
# segment_cnt=-1 # for Data page: number of record segments on page # segment_cnt=-1 # for Data page: number of record segments on page
# #
# if page_type == 4: # if page_type == 4:
# # POINTER pege: # # POINTER pege:
# # *pag* dpg_header=16, SLONG dpg_sequence=4, SLONG ppg_next=4, USHORT ppg_count=2 ==> 16+4+4+2=26 # # *pag* dpg_header=16, SLONG dpg_sequence=4, SLONG ppg_next=4, USHORT ppg_count=2 ==> 16+4+4+2=26
# # struct pointer_page # # struct pointer_page
# # { # # {
@ -213,10 +228,10 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# # SLONG ppg_page[1]; // Data page vector # # SLONG ppg_page[1]; // Data page vector
# # }; # # };
# (relation_id,) = unpack_from('<H',page_buffer,26) # 'H' ==> USHORT # (relation_id,) = unpack_from('<H',page_buffer,26) # 'H' ==> USHORT
# #
# # ------------------------------------------------------------------------------------------------------ # # ------------------------------------------------------------------------------------------------------
# #
# #
# if page_type == 5: # if page_type == 5:
# # DATA page: # # DATA page:
# # *pag* dpg_header=16, SLONG dpg_sequence=4 ==> 16+4 = 20: # # *pag* dpg_header=16, SLONG dpg_sequence=4 ==> 16+4 = 20:
@ -234,11 +249,11 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# # }; # # };
# (relation_id,) = unpack_from('<H',page_buffer,20) # 'H' ==> USHORT # (relation_id,) = unpack_from('<H',page_buffer,20) # 'H' ==> USHORT
# (segment_cnt,) = unpack_from('<H',page_buffer,22) # (segment_cnt,) = unpack_from('<H',page_buffer,22)
# #
# #
# # ------------------------------------------------------------------------------------------------------ # # ------------------------------------------------------------------------------------------------------
# #
# #
# if page_type == 6: # if page_type == 6:
# # Index root page # # Index root page
# # struct index_root_page # # struct index_root_page
@ -246,14 +261,14 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# # pag irt_header; # # pag irt_header;
# # USHORT irt_relation; // relation id (for consistency) # # USHORT irt_relation; // relation id (for consistency)
# (relation_id,) = unpack_from('<H',page_buffer,16) # 'H' ==> USHORT # (relation_id,) = unpack_from('<H',page_buffer,16) # 'H' ==> USHORT
# #
# #
# # ------------------------------------------------------------------------------------------------------ # # ------------------------------------------------------------------------------------------------------
# #
# index_id=-1 # index_id=-1
# ix_level=-1 # ix_level=-1
# btr_len=-1 # btr_len=-1
# #
# if page_type == 7: # if page_type == 7:
# # B-tree page ("bucket"): # # B-tree page ("bucket"):
# # struct btree_page # # struct btree_page
@ -272,9 +287,9 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# (btr_len,) = unpack_from('<H',page_buffer,30) # 'H' ==> USHORT // length of data in bucket # (btr_len,) = unpack_from('<H',page_buffer,30) # 'H' ==> USHORT // length of data in bucket
# (index_id,) = unpack_from('<B',page_buffer,32) # 'B' => UCHAR # (index_id,) = unpack_from('<B',page_buffer,32) # 'B' => UCHAR
# (ix_level,) = unpack_from('<B',page_buffer,33) # (ix_level,) = unpack_from('<B',page_buffer,33)
# #
# #---------------------------------------------------------------------------------------------------------- # #----------------------------------------------------------------------------------------------------------
# #
# if index_id>=0 and (relation_id, index_id) in map_dbo: # if index_id>=0 and (relation_id, index_id) in map_dbo:
# u = map_dbo[ relation_id, index_id ] # u = map_dbo[ relation_id, index_id ]
# page_info = ''.join( ( PAGE_TYPES[page_type].ljust(9), ', ', u[1].strip(),', data_len=',str(btr_len),', lev=',str(ix_level) ) ) # 'Indx Page, <index_name>, <length of data in bucket>' # page_info = ''.join( ( PAGE_TYPES[page_type].ljust(9), ', ', u[1].strip(),', data_len=',str(btr_len),', lev=',str(ix_level) ) ) # 'Indx Page, <index_name>, <length of data in bucket>'
@ -284,24 +299,24 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# page_info = ''.join( ( PAGE_TYPES[page_type].ljust(9),', ',u[0].strip(),', segments on page: ',str(segment_cnt) ) ) # '<table_name>, segments on page: NNN' - for Data page # page_info = ''.join( ( PAGE_TYPES[page_type].ljust(9),', ',u[0].strip(),', segments on page: ',str(segment_cnt) ) ) # '<table_name>, segments on page: NNN' - for Data page
# else: # else:
# page_info = ''.join( ( PAGE_TYPES[page_type].ljust(9),', ',u[0].strip() ) ) # '<table_name>' - for Pointer page # page_info = ''.join( ( PAGE_TYPES[page_type].ljust(9),', ',u[0].strip() ) ) # '<table_name>' - for Pointer page
# #
# elif relation_id == -1: # elif relation_id == -1:
# page_info = PAGE_TYPES[page_type].ljust(9) # page_info = PAGE_TYPES[page_type].ljust(9)
# else: # else:
# page_info = ''.join( ('UNKNOWN; ',PAGE_TYPES[page_type].ljust(9),'; relation_id ', str(relation_id), '; index_id ', str(index_id)) ) # page_info = ''.join( ('UNKNOWN; ',PAGE_TYPES[page_type].ljust(9),'; relation_id ', str(relation_id), '; index_id ', str(index_id)) )
# #
# return (page_type, relation_id, page_info) # return (page_type, relation_id, page_info)
# #
# # end of func parse_page_header # # end of func parse_page_header
# #
# #
# fill_dbo(db_conn, map_dbo) # fill_dbo(db_conn, map_dbo)
# # ('map_dbo:', {(128, -1): ('TEST', ''), (128, 0): ('TEST', 'TEST_S')}) # # ('map_dbo:', {(128, -1): ('TEST', ''), (128, 0): ('TEST', 'TEST_S')})
# #
# sql=''' # sql='''
# select p.rdb$relation_id, p.rdb$page_number # select p.rdb$relation_id, p.rdb$page_number
# from rdb$pages p # from rdb$pages p
# join rdb$relations r on p.rdb$relation_id = r.rdb$relation_id # join rdb$relations r on p.rdb$relation_id = r.rdb$relation_id
# where r.rdb$relation_name=upper('TEST') and p.rdb$page_type = 4 # where r.rdb$relation_name=upper('TEST') and p.rdb$page_type = 4
# order by p.rdb$page_number # order by p.rdb$page_number
# rows 1 # rows 1
@ -311,34 +326,34 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# (rel_id, pp1st) = (-1, -1) # (rel_id, pp1st) = (-1, -1)
# for r in cur: # for r in cur:
# (rel_id, pp1st) = ( r[0], r[1] ) # (128, 192) # (rel_id, pp1st) = ( r[0], r[1] ) # (128, 192)
# #
# PAGE_TYPES = { 0 : "undef/free", # PAGE_TYPES = { 0 : "undef/free",
# 1 : "DB header", # 1 : "DB header",
# 2 : "PIP", # 2 : "PIP",
# 3 : "TIP", # 3 : "TIP",
# 4 : "Pntr Page", # 4 : "Pntr Page",
# 5 : "Data Page", # 5 : "Data Page",
# 6 : "Indx Root", # 6 : "Indx Root",
# 7 : "Indx Data", # 7 : "Indx Data",
# 8 : "Blob Page", # 8 : "Blob Page",
# 9 : "Gens Page", # 9 : "Gens Page",
# 10 : "SCN" # only for ODS>=12 # 10 : "SCN" # only for ODS>=12
# } # }
# #
# #
# res = db_conn.db_info([fdb.isc_info_page_size, fdb.isc_info_allocation]) # res = db_conn.db_info([fdb.isc_info_page_size, fdb.isc_info_allocation])
# pagesAllocated = res[fdb.isc_info_allocation] # pagesAllocated = res[fdb.isc_info_allocation]
# pgsize = res[fdb.isc_info_page_size] # pgsize = res[fdb.isc_info_page_size]
# #
# ################## # ##################
# # Found first page for each of three types: Data, Index and Blob # # Found first page for each of three types: Data, Index and Blob
# # (loop starts from first PointerPage of table 'TEST') # # (loop starts from first PointerPage of table 'TEST')
# ################## # ##################
# #
# (brk_datapage, brk_indxpage, brk_blobpage) = (-1, -1, -1) # (brk_datapage, brk_indxpage, brk_blobpage) = (-1, -1, -1)
# for i in range(pp1st,pagesAllocated): # for i in range(pp1st,pagesAllocated):
# (page_type, relation_id, page_info) = parse_page_header(db_conn, i, map_dbo) # (page_type, relation_id, page_info) = parse_page_header(db_conn, i, map_dbo)
# #print('page:',i, '; page_type:',page_type, '; rel_id:',relation_id,';', page_info) # #print('page:',i, '; page_type:',page_type, '; rel_id:',relation_id,';', page_info)
# if relation_id==128 and page_type == 5: # if relation_id==128 and page_type == 5:
# brk_datapage = i # brk_datapage = i
# if relation_id==128 and page_type == 7: # if relation_id==128 and page_type == 7:
@ -347,93 +362,93 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# brk_blobpage = i # brk_blobpage = i
# if brk_datapage > 0 and brk_indxpage > 0 and brk_blobpage > 0: # if brk_datapage > 0 and brk_indxpage > 0 and brk_blobpage > 0:
# break # break
# #
# db_conn.close() # db_conn.close()
# #
# #
# # Store binary content of .fdb for futher restore: # # Store binary content of .fdb for futher restore:
# ###################### # ######################
# with open(dbnm, 'rb') as f: # with open(dbnm, 'rb') as f:
# raw_db_content=f.read() # raw_db_content=f.read()
# #
# #################### # ####################
# # Make pages damaged # # Make pages damaged
# #################### # ####################
# #
# # 0xFFAACCEEBB0000CC 0xDDEEAADDCC00DDEE # # 0xFFAACCEEBB0000CC 0xDDEEAADDCC00DDEE
# bw=bytearray(b'\\xff\\xaa\\xcc\\xee\\xbb\\x00\\x00\\xcc\\xdd\\xee\\xaa\\xdd\\xcc\\x00\\xdd\\xee') # bw=bytearray(b'\\xff\\xaa\\xcc\\xee\\xbb\\x00\\x00\\xcc\\xdd\\xee\\xaa\\xdd\\xcc\\x00\\xdd\\xee')
# #
# with open(dbnm, 'r+b') as w: # with open(dbnm, 'r+b') as w:
# for brk_page in (brk_datapage, brk_indxpage, brk_blobpage): # for brk_page in (brk_datapage, brk_indxpage, brk_blobpage):
# w.seek( brk_page * pgsize) # w.seek( brk_page * pgsize)
# w.write(bw) # w.write(bw)
# #
# #--------------------------------------------------------------------------- # #---------------------------------------------------------------------------
# #
# ###################### # ######################
# # Validate DB - ensure that there are errors in pages: # # Validate DB - ensure that there are errors in pages:
# ###################### # ######################
# f_onval_log=open( os.path.join(context['temp_directory'],'tmp_onval_c5501.log'), 'w') # f_onval_log=open( os.path.join(context['temp_directory'],'tmp_onval_c5501.log'), 'w')
# subprocess.call([context['fbsvcmgr_path'], 'localhost:service_mgr', 'action_validate', 'dbname', dbnm, 'val_lock_timeout','1'],stdout=f_onval_log, stderr=subprocess.STDOUT) # subprocess.call([context['fbsvcmgr_path'], 'localhost:service_mgr', 'action_validate', 'dbname', dbnm, 'val_lock_timeout','1'],stdout=f_onval_log, stderr=subprocess.STDOUT)
# flush_and_close( f_onval_log ) # flush_and_close( f_onval_log )
# #
# # RESULT: validation log should contain lines with problems about three diff. page types: # # RESULT: validation log should contain lines with problems about three diff. page types:
# # expected data encountered unknown # # expected data encountered unknown
# # expected index B-tree encountered unknown # # expected index B-tree encountered unknown
# # expected blob encountered unknown # # expected blob encountered unknown
# #
# #--------------------------------------------------------------------------- # #---------------------------------------------------------------------------
# #
# f_gstat_log=os.path.join(context['temp_directory'],'tmp_gstat_c5501.log') # f_gstat_log=os.path.join(context['temp_directory'],'tmp_gstat_c5501.log')
# f_gstat_err=os.path.join(context['temp_directory'],'tmp_gstat_c5501.err') # f_gstat_err=os.path.join(context['temp_directory'],'tmp_gstat_c5501.err')
# #
# sys.stdout = open( f_gstat_log, 'w') # sys.stdout = open( f_gstat_log, 'w')
# sys.stderr = open( f_gstat_err, 'w') # sys.stderr = open( f_gstat_err, 'w')
# #
# runProgram('gstat',['-e',dsn]) # runProgram('gstat',['-e',dsn])
# #
# sys.stdout = so # sys.stdout = so
# sys.stderr = se # sys.stderr = se
# #
# #
# # ------------------ # # ------------------
# # restore DB content # # restore DB content
# # ------------------ # # ------------------
# with open(dbnm,'wb') as f: # with open(dbnm,'wb') as f:
# f.write(raw_db_content) # f.write(raw_db_content)
# #
# #
# with open( f_gstat_err, 'r') as f: # with open( f_gstat_err, 'r') as f:
# for line in f: # for line in f:
# print('UNEXPECTED STDERR', line) # print('UNEXPECTED STDERR', line)
# #
# #
# # Data pages: total 63, encrypted 0, non-crypted 63 # # Data pages: total 63, encrypted 0, non-crypted 63
# # Index pages: total 86, encrypted 0, non-crypted 86 # # Index pages: total 86, encrypted 0, non-crypted 86
# # Blob pages: total 199, encrypted 0, non-crypted 199 # # Blob pages: total 199, encrypted 0, non-crypted 199
# # Other pages: total 117, ENCRYPTED 3 (DB problem!), non-crypted 114 <<< __THIS__ should appear after CORE-5501 was fixed. # # Other pages: total 117, ENCRYPTED 3 (DB problem!), non-crypted 114 <<< __THIS__ should appear after CORE-5501 was fixed.
# #
# pages_info_overall_pattern=re.compile('(data|index|blob|other)\\s+pages[:]{0,1}\\s+total[:]{0,1}\\s+\\d+[,]{0,1}\\s+encrypted[:]{0,1}\\s+\\d+.*[,]{0,1}non-crypted[:]{0,1}\\s+\\d+.*', re.IGNORECASE) # pages_info_overall_pattern=re.compile('(data|index|blob|other)\\s+pages[:]{0,1}\\s+total[:]{0,1}\\s+\\d+[,]{0,1}\\s+encrypted[:]{0,1}\\s+\\d+.*[,]{0,1}non-crypted[:]{0,1}\\s+\\d+.*', re.IGNORECASE)
# #
# with open( f_gstat_log, 'r') as f: # with open( f_gstat_log, 'r') as f:
# for line in f: # for line in f:
# if pages_info_overall_pattern.match(line.strip()): # if pages_info_overall_pattern.match(line.strip()):
# print(line.strip()) # print(line.strip())
# #
# #
# ######################################################################## # ########################################################################
# #
# # Validation log should contain following lines: # # Validation log should contain following lines:
# # -------------- # # --------------
# # Error: Page 187 wrong type (expected data encountered unknown (255)) # # Error: Page 187 wrong type (expected data encountered unknown (255))
# # Error: Page 186 wrong type (expected blob encountered unknown (255)) # # Error: Page 186 wrong type (expected blob encountered unknown (255))
# # Warning: Pointer page 180 {sequence 0} bits {0x0A large, secondary} are not consistent with data page 187 {sequence 0} state {0x05 full, swept} # # Warning: Pointer page 180 {sequence 0} bits {0x0A large, secondary} are not consistent with data page 187 {sequence 0} state {0x05 full, swept}
# # Index 1 (TEST_S_UNQ) # # Index 1 (TEST_S_UNQ)
# # Error: Page 184 wrong type (expected index B-tree encountered unknown (255)) # # Error: Page 184 wrong type (expected index B-tree encountered unknown (255))
# # Error: Page 184 wrong type (expected index B-tree encountered unknown (255)) # # Error: Page 184 wrong type (expected index B-tree encountered unknown (255))
# # Relation 128 (TEST) : 4 ERRORS found # # Relation 128 (TEST) : 4 ERRORS found
# #
# # We have to ensure that validation informs about ALL __THREE__ types of problem: # # We have to ensure that validation informs about ALL __THREE__ types of problem:
# # with DataPage, Index B-Tree and BlobPage: # # with DataPage, Index B-Tree and BlobPage:
# ########################################### # ###########################################
# (data_page_problem, indx_page_problem, blob_page_problem) = (-1, -1, -1) # (data_page_problem, indx_page_problem, blob_page_problem) = (-1, -1, -1)
@ -445,28 +460,259 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# indx_page_problem = 1 # indx_page_problem = 1
# if 'expected blob' in line: # if 'expected blob' in line:
# blob_page_problem = 1 # blob_page_problem = 1
# #
# print( 'Detect all THREE page types with problem ? => %s' % ('YES' if (data_page_problem, indx_page_problem, blob_page_problem) == (1,1,1) else 'NO.') ) # print( 'Detect all THREE page types with problem ? => %s' % ('YES' if (data_page_problem, indx_page_problem, blob_page_problem) == (1,1,1) else 'NO.') )
# #
# # Cleanup: # # Cleanup:
# ########## # ##########
# cleanup( (f_gstat_log, f_gstat_err, f_onval_log) ) # cleanup( (f_gstat_log, f_gstat_err, f_onval_log) )
# #
# #
#--- #---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
expected_stdout_1 = """ expected_stdout_1 = """
Data pages: total 63, encrypted 0, non-crypted 63 Data pages: total 63, encrypted 0, non-crypted 63
Index pages: total 88, encrypted 0, non-crypted 88 Index pages: total 88, encrypted 0, non-crypted 88
Blob pages: total 199, encrypted 0, non-crypted 199 Blob pages: total 199, encrypted 0, non-crypted 199
Other pages: total 115, ENCRYPTED 3 (DB problem!), non-crypted 112 Other pages: total 115, ENCRYPTED 3 (DB problem!), non-crypted 112
Detect all THREE page types with problem ? => YES Detected all THREE page types with problem => YES
""" """
# Mapping of Firebird ODS page-type codes (the first byte of every page
# header) to human-readable names, as used when describing pages found
# while scanning the database file.
PAGE_TYPES = {0: "undef/free",
              1: "DB header",
              2: "PIP",
              3: "TIP",
              4: "Pntr Page",
              5: "Data Page",
              6: "Indx Root",
              7: "Indx Data",
              8: "Blob Page",
              9: "Gens Page",
              10: "SCN"  # only for ODS>=12
              }
def fill_dbo(con: Connection, map_dbo: Dict):
    """Fill *map_dbo* with ``(relation_id, index_id) -> (relation_name, index_name)``.

    Every user table appears once with ``index_id == -1`` (empty index name),
    and once per index defined on it.  Views, GTTs, external and system
    tables are excluded by the query itself.
    """
    sql = """
    select rel_id, rel_name, idx_id, idx_name
    from (
        select
            rr.rdb$relation_id rel_id,                -- 0
            rr.rdb$relation_name rel_name,            -- 1
            -1 idx_id,                                -- 2
            '' idx_name,                              -- 3
            rr.rdb$relation_type rel_type,
            rr.rdb$system_flag sys_flag
        from rdb$relations rr

        union all

        select
            rr.rdb$relation_id rel_id,                -- 0
            rr.rdb$relation_name rel_name,            -- 1
            coalesce(ri.rdb$index_id-1,-1) idx_id,    -- 2
            coalesce(ri.rdb$index_name,'') idx_name,  -- 3
            rr.rdb$relation_type rel_type,
            rr.rdb$system_flag sys_flag
        from rdb$relations rr
        join rdb$indices ri on
        rr.rdb$relation_name = ri.rdb$relation_name
    ) r
    where
        coalesce(r.rel_type,0) = 0 -- exclude views, GTT and external tables
        and r.sys_flag is distinct from 1
    """
    cur = con.cursor()
    cur.execute(sql)
    # Row layout matches the column comments in the query above.
    for rel_id, rel_name, idx_id, idx_name in cur:
        map_dbo[rel_id, idx_id] = (rel_name.strip(), idx_name.strip())
def parse_page_header(con: Connection, page_number: int, map_dbo: Dict):
    """Read one database page and decode the fields needed to describe it.

    Returns a tuple ``(page_type, relation_id, page_info)`` where *page_info*
    is a human-readable one-line description built from PAGE_TYPES and the
    relation/index names in *map_dbo* (as produced by ``fill_dbo``).

    Only page types 4 (pointer), 5 (data), 6 (index root) and 7 (b-tree)
    carry a relation id; for all other types ``relation_id`` stays -1.
    """
    page_buffer = con.info.get_page_content(page_number)
    # struct format reminders (see https://docs.python.org/3/library/struct.html):
    #   '<' - little-endian (Intel x86 / AMD64)
    #   'b' - signed char (1 byte), 'B' - unsigned char (1 byte)
    #   'h'/'H' - (unsigned) short (2 bytes)
    # Page type is the very first byte of the page header.
    page_type = unpack_from('<b', page_buffer)[0]

    relation_id = -1
    index_id = -1
    segment_cnt = -1  # for Data page: number of record segments on page
    ix_level = -1
    btr_len = -1

    if page_type == 4:
        # POINTER page:
        # pag header=16, SLONG ppg_sequence=4, SLONG ppg_next=4,
        # USHORT ppg_count=2  ==>  USHORT ppg_relation at offset 16+4+4+2=26.
        relation_id = unpack_from('<H', page_buffer, 26)[0]
    elif page_type == 5:
        # DATA page:
        # pag header=16, SLONG dpg_sequence=4  ==>  USHORT dpg_relation at 20,
        # followed by USHORT dpg_count (record segments on page) at 22.
        relation_id = unpack_from('<H', page_buffer, 20)[0]
        segment_cnt = unpack_from('<H', page_buffer, 22)[0]
    elif page_type == 6:
        # INDEX ROOT page: USHORT irt_relation right after the 16-byte header.
        relation_id = unpack_from('<H', page_buffer, 16)[0]
    elif page_type == 7:
        # B-TREE page ("bucket"):
        # pag header=16, SLONG btr_sibling=4, SLONG btr_left_sibling=4,
        # SLONG btr_prefix_total=4 ==> USHORT btr_relation at 28,
        # USHORT btr_length at 30, UCHAR btr_id at 32, UCHAR btr_level at 33.
        relation_id = unpack_from('<H', page_buffer, 28)[0]
        btr_len = unpack_from('<H', page_buffer, 30)[0]  # length of data in bucket
        index_id = unpack_from('<B', page_buffer, 32)[0]
        ix_level = unpack_from('<B', page_buffer, 33)[0]  # index level (0 = leaf)

    # Build the textual description from what was decoded above.
    if index_id >= 0 and (relation_id, index_id) in map_dbo:
        # 'Indx Page, <index_name>, <length of data in bucket>, <level>'
        u = map_dbo[relation_id, index_id]
        page_info = f'{PAGE_TYPES[page_type].ljust(9)}, {u[1].strip()}, data_len={btr_len}, lev={ix_level}'
    elif (relation_id, -1) in map_dbo:
        u = map_dbo[relation_id, -1]
        if page_type == 5:
            # '<table_name>, segments on page: NNN' - for Data page
            page_info = f'{PAGE_TYPES[page_type].ljust(9)}, {u[0].strip()}, segments on page: {segment_cnt}'
        else:
            # '<table_name>' - for Pointer page
            page_info = f'{PAGE_TYPES[page_type].ljust(9)}, {u[0].strip()}'
    elif relation_id == -1:
        page_info = PAGE_TYPES[page_type].ljust(9)
    else:
        page_info = f'UNKNOWN; {PAGE_TYPES[page_type].ljust(9)}; relation_id {relation_id}; index_id {index_id}'
    return (page_type, relation_id, page_info)
@pytest.mark.version('>=3.0.2')
def test_1(act_1: Action, capsys):
    """Damage one data, one index and one blob page of table TEST, then verify that:

    * ``gstat -e`` counts the damaged pages as ``ENCRYPTED n (DB problem!)``
      in the 'Other pages' totals, and
    * online validation reports wrong page type for all THREE page kinds
      (data, index B-tree, blob).

    The original database content is saved before, and restored after,
    the page-level corruption.
    """
    map_dbo = {}
    # First PointerPage of table TEST - starting point for the page scan below.
    sql = """
    select p.rdb$relation_id, p.rdb$page_number
    from rdb$pages p
    join rdb$relations r on p.rdb$relation_id = r.rdb$relation_id
    where r.rdb$relation_name=upper('TEST') and p.rdb$page_type = 4
    order by p.rdb$page_number
    rows 1
    """
    with act_1.db.connect() as con:
        fill_dbo(con, map_dbo)
        c = con.cursor()
        rel_id, pp1st = c.execute(sql).fetchone()
        # Find the first page of each of three types: Data, Index and Blob
        # (loop starts from the first PointerPage of table 'TEST').
        brk_datapage = brk_indxpage = brk_blobpage = -1
        for i in range(pp1st, con.info.pages_allocated):
            page_type, relation_id, page_info = parse_page_header(con, i, map_dbo)
            if relation_id == 128 and page_type == 5:
                brk_datapage = i
            elif relation_id == 128 and page_type == 7:
                brk_indxpage = i
            elif page_type == 8:
                brk_blobpage = i
            if brk_datapage > 0 and brk_indxpage > 0 and brk_blobpage > 0:
                break
        # Capture page size while the connection is still open; it is needed
        # for the file surgery below, after the connection is closed.
        page_size = con.info.page_size
    # Store binary content of .fdb for further restore.
    raw_db_content = act_1.db.db_path.read_bytes()
    # Make pages damaged: overwrite the header of each found page with the
    # 16-byte garbage pattern 0xFFAACCEEBB0000CC 0xDDEEAADDCC00DDEE.
    # NOTE: single backslashes are essential here - '\\xff' would write the
    # four ASCII characters backslash-x-f-f instead of the byte 0xFF.
    bw = bytearray(b'\xff\xaa\xcc\xee\xbb\x00\x00\xcc\xdd\xee\xaa\xdd\xcc\x00\xdd\xee')
    with open(act_1.db.db_path, 'r+b') as w:
        for brk_page in (brk_datapage, brk_indxpage, brk_blobpage):
            w.seek(brk_page * page_size)
            w.write(bw)
    # gstat -e: keep only the per-page-type totals lines.
    act_1.gstat(switches=['-e'])
    pattern = re.compile('(data|index|blob|other)\\s+pages[:]{0,1}\\s+total[:]{0,1}\\s+\\d+[,]{0,1}\\s+encrypted[:]{0,1}\\s+\\d+.*[,]{0,1}non-crypted[:]{0,1}\\s+\\d+.*', re.IGNORECASE)
    for line in act_1.stdout.splitlines():
        if pattern.match(line.strip()):
            print(line.strip())
    # Validate DB - ensure that there are errors in pages.
    # RESULT: validation log should contain lines with problems about three
    # different page types:
    #     expected data encountered unknown
    #     expected index B-tree encountered unknown
    #     expected blob encountered unknown
    with act_1.connect_server() as srv:
        srv.database.validate(database=act_1.db.db_path, lock_timeout=1)
        validation_log = srv.readlines()
    # Process validation log.
    data_page_problem = indx_page_problem = blob_page_problem = False
    for line in validation_log:
        if 'expected data' in line:
            data_page_problem = True
        elif 'expected index B-tree' in line:
            indx_page_problem = True
        elif 'expected blob' in line:
            blob_page_problem = True
    print(f"Detected all THREE page types with problem => {'YES' if data_page_problem and indx_page_problem and blob_page_problem else 'NO'}")
    # Restore DB content.
    act_1.db.db_path.write_bytes(raw_db_content)
    # Check.
    act_1.reset()
    act_1.expected_stdout = expected_stdout_1
    act_1.stdout = capsys.readouterr().out
    assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@ -2,32 +2,34 @@
# #
# id: bugs.core_5538 # id: bugs.core_5538
# title: DELETE FROM MON$STATEMENTS does not interrupt a longish fetch # title: DELETE FROM MON$STATEMENTS does not interrupt a longish fetch
# decription: # decription:
# We create several tables and add single row to each of them. Row contains name of corresponding table. # We create several tables and add single row to each of them. Row contains name of corresponding table.
# Then we create view that based on UNIONED-query to all of these tables. # Then we create view that based on UNIONED-query to all of these tables.
# After this, we handle list of PATTERNS and pass each of its elements (herteafter its name is: <P>) to # After this, we handle list of PATTERNS and pass each of its elements (herteafter its name is: <P>) to
# '-include_data' gbak command switch. # '-include_data' gbak command switch.
# Further we RESTORE from this .fbk to temporary DB. This new database which contain only those tables # Further we RESTORE from this .fbk to temporary DB. This new database which contain only those tables
# which names matched to '-include_data <P>' pattern on previous step. # which names matched to '-include_data <P>' pattern on previous step.
# We also must check joint usage of '-include_data' and (old) '-skip_data' command switches. # We also must check joint usage of '-include_data' and (old) '-skip_data' command switches.
# For this purpose we create single pattern for EXCLUDING some tables (see 'skip_ptn' variable) and use # For this purpose we create single pattern for EXCLUDING some tables (see 'skip_ptn' variable) and use
# this pattern together with elements from patterns list for tables which data must be included in .fbk. # this pattern together with elements from patterns list for tables which data must be included in .fbk.
# #
# Checked on: 4.0.0.1639 SS: 13.978s. # Checked on: 4.0.0.1639 SS: 13.978s.
# #
# #
# tracker_id: CORE-5538 # tracker_id: CORE-5538
# min_versions: ['4.0'] # min_versions: ['4.0']
# versions: 4.0 # versions: 4.0
# qmid: None # qmid: None
import pytest import pytest
from firebird.qa import db_factory, isql_act, Action from pathlib import Path
from firebird.qa import db_factory, python_act, Action, temp_file
# version: 4.0 # version: 4.0
# resources: None # resources: None
substitutions_1 = [('[ \t]+', ' ')] substitutions_1 = [('[ \t]+', ' ')]
#substitutions_1 = []
init_script_1 = """ init_script_1 = """
recreate view v_test as select 1 x from rdb$database; recreate view v_test as select 1 x from rdb$database;
@ -76,7 +78,6 @@ init_script_1 = """
; ;
commit; commit;
insert into test_anna default values; insert into test_anna default values;
insert into test_beta default values; insert into test_beta default values;
insert into test_ciao default values; insert into test_ciao default values;
@ -95,47 +96,45 @@ init_script_1 = """
insert into test_won2 default values; insert into test_won2 default values;
insert into test_w_n3 default values; insert into test_w_n3 default values;
commit; commit;
"""
"""
db_1 = db_factory(sql_dialect=3, init=init_script_1) db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1 # test_script_1
#--- #---
# #
# import os # import os
# import sys # import sys
# import time # import time
# #
# os.environ["ISC_USER"] = user_name # os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password # os.environ["ISC_PASSWORD"] = user_password
# #
# # dsn localhost/3400:C:\\FBTESTING\\qa # # dsn localhost/3400:C:\\FBTESTING\\qa\\fbt-repo\\tmp\\bugs.core_NNNN.fdb
# bt-repo mpugs.core_NNNN.fdb
# # db_conn.database_name C:\\FBTESTING\\QA\\FBT-REPO\\TMP\\BUGS.CORE_NNNN.FDB # # db_conn.database_name C:\\FBTESTING\\QA\\FBT-REPO\\TMP\\BUGS.CORE_NNNN.FDB
# # $(DATABASE_LOCATION)... C:/FBTESTING/qa/fbt-repo/tmp/bugs.core_NNN.fdb # # $(DATABASE_LOCATION)... C:/FBTESTING/qa/fbt-repo/tmp/bugs.core_NNN.fdb
# #
# this_fdb=db_conn.database_name # this_fdb=db_conn.database_name
# this_fbk=os.path.join(context['temp_directory'],'tmp_5538.fbk') # this_fbk=os.path.join(context['temp_directory'],'tmp_5538.fbk')
# test_res=os.path.join(context['temp_directory'],'tmp_5538.tmp') # test_res=os.path.join(context['temp_directory'],'tmp_5538.tmp')
# #
# db_conn.close() # db_conn.close()
# #
# ############################################## # ##############################################
# # Script for ISQL that will do 'heavy select': # # Script for ISQL that will do 'heavy select':
# #
# usr=user_name # usr=user_name
# pwd=user_password # pwd=user_password
# #
# # 1. Check that we can use patterns for include data only from several selected tables: # # 1. Check that we can use patterns for include data only from several selected tables:
# incl_ptn_list = ('test_doc%', 'test_d(o|u)ra', '%_w(i|o|_)n[[:DIGIT:]]', 'test_a[[:ALPHA:]]{1,}a' ) # incl_ptn_list = ('test_doc%', 'test_d(o|u)ra', '%_w(i|o|_)n[[:DIGIT:]]', 'test_a[[:ALPHA:]]{1,}a' )
# #
# for i, p in enumerate(incl_ptn_list): # for i, p in enumerate(incl_ptn_list):
# runProgram('gbak',['-b', dsn, this_fbk, '-include', p ]) # runProgram('gbak',['-b', dsn, this_fbk, '-include', p ])
# runProgram('gbak',['-rep', this_fbk, 'localhost:'+test_res]) # runProgram('gbak',['-rep', this_fbk, 'localhost:'+test_res])
# sql_check = "set heading off; select %(i)s ptn_indx, q'{%(p)s}' as ptn_text, v.* from v_test v;" % locals() # sql_check = "set heading off; select %(i)s ptn_indx, q'{%(p)s}' as ptn_text, v.* from v_test v;" % locals()
# runProgram('isql',['localhost:'+test_res], sql_check ) # runProgram('isql',['localhost:'+test_res], sql_check )
# #
# # 2. Check interaction between -INCLUDE_DATA and -SKIP_DATA switches for a table: # # 2. Check interaction between -INCLUDE_DATA and -SKIP_DATA switches for a table:
# # We must check only conditions marked by '**': # # We must check only conditions marked by '**':
# # +--------------------------------------------------+ # # +--------------------------------------------------+
@ -147,23 +146,24 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# # | MATCH | excluded |**excluded**|**excluded**| # # | MATCH | excluded |**excluded**|**excluded**|
# # | NOT MATCH | included |**included**|**excluded**| # # | NOT MATCH | included |**included**|**excluded**|
# # +-----------+------------+------------+------------+ # # +-----------+------------+------------+------------+
# #
# skip_ptn = 'test_d(o|u)%' # skip_ptn = 'test_d(o|u)%'
# incl_ptn_list = ('test_d%', 'test_(a|b)[[:ALPHA:]]+a', ) # incl_ptn_list = ('test_d%', 'test_(a|b)[[:ALPHA:]]+a', )
# #
# for i, p in enumerate(incl_ptn_list): # for i, p in enumerate(incl_ptn_list):
# runProgram('gbak',['-b', dsn, this_fbk, '-include_data', p, '-skip_data', skip_ptn ]) # runProgram('gbak',['-b', dsn, this_fbk, '-include_data', p, '-skip_data', skip_ptn ])
# runProgram('gbak',['-rep', this_fbk, 'localhost:'+test_res]) # runProgram('gbak',['-rep', this_fbk, 'localhost:'+test_res])
# sql_check = "set heading off; select %(i)s ptn_indx, q'{%(p)s}' as include_ptn, q'{%(skip_ptn)s}' as exclude_ptn, v.* from v_test v;" % locals() # sql_check = "set heading off; select %(i)s ptn_indx, q'{%(p)s}' as include_ptn, q'{%(skip_ptn)s}' as exclude_ptn, v.* from v_test v;" % locals()
# runProgram('isql',['localhost:'+test_res], sql_check ) # runProgram('isql',['localhost:'+test_res], sql_check )
# #
# time.sleep(1) # time.sleep(1)
# os.remove( this_fbk ) # os.remove( this_fbk )
# os.remove( test_res ) # os.remove( test_res )
# #
# #
#--- #---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
expected_stdout_1 = """ expected_stdout_1 = """
0 test_doc% doca 0 test_doc% doca
@ -179,11 +179,49 @@ expected_stdout_1 = """
0 test_d% test_d(o|u)% dina 0 test_d% test_d(o|u)% dina
1 test_(a|b)[[:ALPHA:]]+a test_d(o|u)% anna 1 test_(a|b)[[:ALPHA:]]+a test_d(o|u)% anna
1 test_(a|b)[[:ALPHA:]]+a test_d(o|u)% beta 1 test_(a|b)[[:ALPHA:]]+a test_d(o|u)% beta
""" """
# this_fbk=os.path.join(context['temp_directory'],'tmp_5538.fbk')
# test_res=os.path.join(context['temp_directory'],'tmp_5538.tmp')
fbk_file_1 = temp_file('core_5538.fbk')
fdb_file_1 = temp_file('core_5538.fdb')
@pytest.mark.version('>=4.0')
def test_1(act_1: Action, fbk_file_1: Path, fdb_file_1: Path, capsys):
    """Check gbak '-include_data' pattern handling, alone and combined with '-skip_data'.

    For each pattern we back up the source DB, restore the backup into a
    temporary DB and query view V_TEST there - its rows reveal exactly which
    tables had their data included in the backup.
    """
    # 1. Check that we can use patterns to include data only from several selected tables:
    for i, p in enumerate(['test_doc%', 'test_d(o|u)ra', '%_w(i|o|_)n[[:DIGIT:]]', 'test_a[[:ALPHA:]]{1,}a']):
        act_1.reset()
        act_1.gbak(switches=['-b', act_1.db.dsn, str(fbk_file_1), '-include', p])
        act_1.reset()
        act_1.gbak(switches=['-rep', str(fbk_file_1), f'localhost:{fdb_file_1}'])
        act_1.reset()
        act_1.isql(switches=[f'localhost:{fdb_file_1}'], connect_db=False,
                   input=f"set heading off; select {i} ptn_indx, q'{{{p}}}' as ptn_text, v.* from v_test v;")
        print(act_1.stdout)
    # 2. Check interaction between -INCLUDE_DATA and -SKIP_DATA switches for a table:
    # We must check only conditions marked by '**':
    # +--------------------------------------------------+
    # |           |             INCLUDE_DATA             |
    # |           |--------------------------------------|
    # | SKIP_DATA |  NOT SET   |   MATCH    | NOT MATCH  |
    # +-----------+------------+------------+------------+
    # |  NOT SET  |  included  |  included  |  excluded  | <<< these rules can be skipped in this test
    # |   MATCH   |  excluded  |**excluded**|**excluded**|
    # | NOT MATCH |  included  |**included**|**excluded**|
    # +-----------+------------+------------+------------+
    skip_ptn = 'test_d(o|u)%'
    for i, p in enumerate(['test_d%', 'test_(a|b)[[:ALPHA:]]+a']):
        act_1.reset()
        act_1.gbak(switches=['-b', act_1.db.dsn, str(fbk_file_1), '-include_data', p, '-skip_data', skip_ptn])
        act_1.reset()
        act_1.gbak(switches=['-rep', str(fbk_file_1), f'localhost:{fdb_file_1}'])
        act_1.reset()
        act_1.isql(switches=[f'localhost:{fdb_file_1}'], connect_db=False,
                   input=f"set heading off; select {i} ptn_indx, q'{{{p}}}' as include_ptn, q'{{{skip_ptn}}}' as exclude_ptn, v.* from v_test v;")
        print(act_1.stdout)
    # Check.
    act_1.reset()
    act_1.expected_stdout = expected_stdout_1
    act_1.stdout = capsys.readouterr().out
    assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@ -2,25 +2,26 @@
# #
# id: bugs.core_5570 # id: bugs.core_5570
# title: Negative infinity (double) shown incorrectly without sign in isql # title: Negative infinity (double) shown incorrectly without sign in isql
# decription: # decription:
# Bug was in ISQL. We do insert in the table with two DP fields special values which # Bug was in ISQL. We do insert in the table with two DP fields special values which
# are "-1.#INF" and "1.#INF" (at least in such view they are represented in the trace). # are "-1.#INF" and "1.#INF" (at least in such view they are represented in the trace).
# These values are defined in Python class Decimal as literals '-Infinity' and 'Infinity'. # These values are defined in Python class Decimal as literals '-Infinity' and 'Infinity'.
# After this we try to query this table. Expected result: "minus" sign should be shown # After this we try to query this table. Expected result: "minus" sign should be shown
# leftside of negative infinity. # leftside of negative infinity.
# #
# Confirmed WRONG output (w/o sign with negative infinity) on 3.0.3.32756, 4.0.0.690. # Confirmed WRONG output (w/o sign with negative infinity) on 3.0.3.32756, 4.0.0.690.
# All fine on: # All fine on:
# 3.0.3.32794: OK, 1.235s. # 3.0.3.32794: OK, 1.235s.
# 4.0.0.713: OK, 1.203s. # 4.0.0.713: OK, 1.203s.
# #
# tracker_id: CORE-5570 # tracker_id: CORE-5570
# min_versions: ['3.0.3'] # min_versions: ['3.0.3']
# versions: 3.0.3 # versions: 3.0.3
# qmid: # qmid:
import pytest import pytest
from firebird.qa import db_factory, isql_act, Action from decimal import Decimal
from firebird.qa import db_factory, python_act, Action
# version: 3.0.3 # version: 3.0.3
# resources: None # resources: None
@ -35,44 +36,48 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1 # test_script_1
#--- #---
# #
# import os # import os
# from decimal import * # from decimal import *
# #
# os.environ["ISC_USER"] = user_name # os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password # os.environ["ISC_PASSWORD"] = user_password
# #
# #
# x1=Decimal('-Infinity') # x1=Decimal('-Infinity')
# y1=Decimal('Infinity') # y1=Decimal('Infinity')
# #
# cur1=db_conn.cursor() # cur1=db_conn.cursor()
# sql='insert into test(x, y) values(?, ?)' # sql='insert into test(x, y) values(?, ?)'
# #
# try: # try:
# cur1.execute( sql, (x1, y1, ) ) # cur1.execute( sql, (x1, y1, ) )
# except Exception, e: # except Exception, e:
# print(e[0]) # print(e[0])
# #
# cur1.close() # cur1.close()
# db_conn.commit() # db_conn.commit()
# db_conn.close() # db_conn.close()
# #
# runProgram('isql',[dsn], "set list on; set count on; select * from test;") # runProgram('isql',[dsn], "set list on; set count on; select * from test;")
# #
# #
#--- #---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
expected_stdout_1 = """ expected_stdout_1 = """
X -Infinity X -Infinity
Y Infinity Y Infinity
Records affected: 1 Records affected: 1
""" """
@pytest.mark.version('>=3.0.3') @pytest.mark.version('>=3.0.3')
@pytest.mark.xfail def test_1(act_1: Action):
def test_1(db_1): with act_1.db.connect() as con:
pytest.fail("Test not IMPLEMENTED") c = con.cursor()
c.execute('insert into test(x, y) values(?, ?)', [Decimal('-Infinity'), Decimal('Infinity')])
con.commit()
act_1.expected_stdout = expected_stdout_1
act_1.isql(switches=[], input="set list on; set count on; select * from test;")
assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@ -2,13 +2,13 @@
# #
# id: bugs.core_5576 # id: bugs.core_5576
# title: Bugcheck on queries containing WITH LOCK clause # title: Bugcheck on queries containing WITH LOCK clause
# decription: # decription:
# We create database as it was show in the ticket and do backup and restore of it. # We create database as it was show in the ticket and do backup and restore of it.
# Then we run checking query - launch isql two times and check that 2nd call of ISQL # Then we run checking query - launch isql two times and check that 2nd call of ISQL
# does not raise bugcheck. Finally we run online validation against this DB. # does not raise bugcheck. Finally we run online validation against this DB.
# #
# Neither test query nor validation should raise any output in the STDERR. # Neither test query nor validation should raise any output in the STDERR.
# #
# Confirmed bug on 4.0.0.684 and 3.0.3.32743, got: # Confirmed bug on 4.0.0.684 and 3.0.3.32743, got:
# === # ===
# Statement failed, SQLSTATE = XX000 # Statement failed, SQLSTATE = XX000
@ -23,19 +23,21 @@
# FB40CS, build 4.0.0.685: OK, 5.954s. # FB40CS, build 4.0.0.685: OK, 5.954s.
# FB40SC, build 4.0.0.685: OK, 3.781s. # FB40SC, build 4.0.0.685: OK, 3.781s.
# FB40SS, build 4.0.0.685: OK, 2.828s. # FB40SS, build 4.0.0.685: OK, 2.828s.
# #
# tracker_id: CORE-5576 # tracker_id: CORE-5576
# min_versions: ['3.0.3'] # min_versions: ['3.0.3']
# versions: 3.0.3 # versions: 3.0.3
# qmid: # qmid:
import pytest import pytest
from firebird.qa import db_factory, isql_act, Action from pathlib import Path
from firebird.qa import db_factory, python_act, Action, temp_file
# version: 3.0.3 # version: 3.0.3
# resources: None # resources: None
substitutions_1 = [('[0-9][0-9]:[0-9][0-9]:[0-9][0-9].[0-9][0-9]', ''), ('Relation [0-9]{3,4}', 'Relation')] substitutions_1 = [('[0-9][0-9]:[0-9][0-9]:[0-9][0-9].[0-9][0-9]', ''),
('Relation [0-9]{3,4}', 'Relation')]
init_script_1 = """ init_script_1 = """
recreate table test ( recreate table test (
@ -45,43 +47,43 @@ init_script_1 = """
); );
insert into test values (1, 'format1opqwertyuiopqwertyuiop'); insert into test values (1, 'format1opqwertyuiopqwertyuiop');
commit; commit;
""" """
db_1 = db_factory(sql_dialect=3, init=init_script_1) db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1 # test_script_1
#--- #---
# #
# #
# import os # import os
# import subprocess # import subprocess
# from fdb import services # from fdb import services
# #
# os.environ["ISC_USER"] = user_name # os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password # os.environ["ISC_PASSWORD"] = user_password
# #
# # Obtain engine version: # # Obtain engine version:
# engine = str(db_conn.engine_version) # convert to text because 'float' object has no attribute 'startswith' # engine = str(db_conn.engine_version) # convert to text because 'float' object has no attribute 'startswith'
# #
# db_conn.close() # db_conn.close()
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# def flush_and_close( file_handle ): # def flush_and_close( file_handle ):
# # https://docs.python.org/2/library/os.html#os.fsync # # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f, # # If you're starting with a Python file object f,
# # first do f.flush(), and # # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. # # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os # global os
# #
# file_handle.flush() # file_handle.flush()
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: # if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
# # otherwise: "OSError: [Errno 9] Bad file descriptor"! # # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno()) # os.fsync(file_handle.fileno())
# file_handle.close() # file_handle.close()
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# def cleanup( f_names_list ): # def cleanup( f_names_list ):
# global os # global os
# for i in range(len( f_names_list )): # for i in range(len( f_names_list )):
@ -92,44 +94,44 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# else: # else:
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.') # print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
# del_name = None # del_name = None
# #
# if del_name and os.path.isfile( del_name ): # if del_name and os.path.isfile( del_name ):
# os.remove( del_name ) # os.remove( del_name )
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# f_bkrs_err = open( os.path.join(context['temp_directory'],'tmp_backup_restore_5576.err'), 'w') # f_bkrs_err = open( os.path.join(context['temp_directory'],'tmp_backup_restore_5576.err'), 'w')
# f_bkup_tmp = os.path.join(context['temp_directory'],'tmp_5576.fbk') # f_bkup_tmp = os.path.join(context['temp_directory'],'tmp_5576.fbk')
# f_rest_tmp = os.path.join(context['temp_directory'],'tmp_5576.fdb') # f_rest_tmp = os.path.join(context['temp_directory'],'tmp_5576.fdb')
# #
# cleanup( (f_bkup_tmp,f_rest_tmp) ) # cleanup( (f_bkup_tmp,f_rest_tmp) )
# #
# fn_nul = open(os.devnull, 'w') # fn_nul = open(os.devnull, 'w')
# subprocess.call( [context['gbak_path'], "-b", dsn, f_bkup_tmp ], # subprocess.call( [context['gbak_path'], "-b", dsn, f_bkup_tmp ],
# stdout = fn_nul, # stdout = fn_nul,
# stderr = f_bkrs_err # stderr = f_bkrs_err
# ) # )
# #
# subprocess.call( [context['gbak_path'], "-rep", f_bkup_tmp, 'localhost:'+f_rest_tmp ], # subprocess.call( [context['gbak_path'], "-rep", f_bkup_tmp, 'localhost:'+f_rest_tmp ],
# stdout = fn_nul, # stdout = fn_nul,
# stderr = f_bkrs_err # stderr = f_bkrs_err
# ) # )
# #
# flush_and_close( f_bkrs_err ) # flush_and_close( f_bkrs_err )
# fn_nul.close() # fn_nul.close()
# #
# #
# script='set list on;select 1 x1 from test where i=1 with lock;' # script='set list on;select 1 x1 from test where i=1 with lock;'
# #
# # Checking query (it did produce bugcheck before fix): # # Checking query (it did produce bugcheck before fix):
# ################ # ################
# runProgram('isql',['localhost:'+f_rest_tmp],script) # runProgram('isql',['localhost:'+f_rest_tmp],script)
# runProgram('isql',['localhost:'+f_rest_tmp],script) # ---------- launch isql SECOND time! # runProgram('isql',['localhost:'+f_rest_tmp],script) # ---------- launch isql SECOND time!
# #
# #
# f_val_log=open( os.path.join(context['temp_directory'],'tmp_val_5576.log'), "w") # f_val_log=open( os.path.join(context['temp_directory'],'tmp_val_5576.log'), "w")
# f_val_err=open( os.path.join(context['temp_directory'],'tmp_val_5576.err'), "w") # f_val_err=open( os.path.join(context['temp_directory'],'tmp_val_5576.err'), "w")
# #
# subprocess.call([context['fbsvcmgr_path'],"localhost:service_mgr", # subprocess.call([context['fbsvcmgr_path'],"localhost:service_mgr",
# "action_validate", # "action_validate",
# "dbname", f_rest_tmp # "dbname", f_rest_tmp
@ -138,10 +140,10 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# stderr=f_val_err) # stderr=f_val_err)
# flush_and_close( f_val_log ) # flush_and_close( f_val_log )
# flush_and_close( f_val_err ) # flush_and_close( f_val_err )
# #
# with open( f_val_log.name,'r') as f: # with open( f_val_log.name,'r') as f:
# print(f.read()) # print(f.read())
# #
# # Check that neither restore nor validation raised errors: # # Check that neither restore nor validation raised errors:
# ################### # ###################
# f_list=(f_bkrs_err, f_val_err) # f_list=(f_bkrs_err, f_val_err)
@ -150,29 +152,50 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# for line in f: # for line in f:
# if line.split(): # if line.split():
# print( 'UNEXPECTED STDERR in file '+f_list[i].name+': '+line.upper() ) # print( 'UNEXPECTED STDERR in file '+f_list[i].name+': '+line.upper() )
# #
# #
# # Cleanup # # Cleanup
# ######### # #########
# cleanup( (f_bkrs_err, f_val_log, f_val_err, f_bkup_tmp, f_rest_tmp) ) # cleanup( (f_bkrs_err, f_val_log, f_val_err, f_bkup_tmp, f_rest_tmp) )
# #
#--- #---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """ act_1 = python_act('db_1', substitutions=substitutions_1)
X1 1
expected_stdout_1_a = """
X1 1 X1 1
"""
expected_stdout_1_b = """
Validation started Validation started
Relation 128 (TEST) Relation 128 (TEST)
process pointer page 0 of 1 process pointer page 0 of 1
Index 1 (RDB$PRIMARY1) Index 1 (RDB$PRIMARY1)
Relation 128 (TEST) is ok Relation 128 (TEST) is ok
Validation finished Validation finished
""" """
fbk_file_1 = temp_file('core_5576.fbk')
fdb_file_1 = temp_file('core_5576.fdb')
@pytest.mark.version('>=3.0.3') @pytest.mark.version('>=3.0.3')
@pytest.mark.xfail def test_1(act_1: Action, fbk_file_1: Path, fdb_file_1: Path):
def test_1(db_1): act_1.gbak(switches=['-b', act_1.db.dsn, str(fbk_file_1)])
pytest.fail("Test not IMPLEMENTED") act_1.reset()
act_1.gbak(switches=['-rep', str(fbk_file_1), f'localhost:{fdb_file_1}'])
#
for i in range(2): # Run isql twice!
act_1.reset()
act_1.expected_stdout = expected_stdout_1_a
act_1.isql(switches=[f'localhost:{fdb_file_1}'], connect_db=False,
input='set list on;select 1 x1 from test where i=1 with lock;')
assert act_1.clean_stdout == act_1.clean_expected_stdout
# Validate the database
act_1.reset()
act_1.expected_stdout = expected_stdout_1_b
with act_1.connect_server() as srv:
srv.database.validate(database=fdb_file_1)
act_1.stdout = ''.join(srv.readlines())
assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@ -2,7 +2,7 @@
# #
# id: bugs.core_5579 # id: bugs.core_5579
# title: request synchronization error in the GBAK utility (restore) # title: request synchronization error in the GBAK utility (restore)
# decription: # decription:
# Database for this test was created beforehand on 2.5.7 with intentionally broken not null constraint. # Database for this test was created beforehand on 2.5.7 with intentionally broken not null constraint.
# It was done using direct RDB$ table modification: # It was done using direct RDB$ table modification:
# --- # ---
@ -15,9 +15,9 @@
# where rdb$field_name = upper('fn') and rdb$relation_name = upper('test'); # where rdb$field_name = upper('fn') and rdb$relation_name = upper('test');
# commit; # commit;
# --- # ---
# We try to restore .fbk which was created from that DB on current FB snapshot and check that restore log # We try to restore .fbk which was created from that DB on current FB snapshot and check that restore log
# does NOT contain phrase 'request synchronization' in any line. # does NOT contain phrase 'request synchronization' in any line.
# #
# Bug was reproduced on 2.5.7.27062, 3.0.3.32746, 4.0.0.684 # Bug was reproduced on 2.5.7.27062, 3.0.3.32746, 4.0.0.684
# All fine on: # All fine on:
# FB25Cs, build 2.5.8.27067: OK, 2.125s. # FB25Cs, build 2.5.8.27067: OK, 2.125s.
@ -31,14 +31,18 @@
# 13.04.2021. Adapted for run both on Windows and Linux. Checked on: # 13.04.2021. Adapted for run both on Windows and Linux. Checked on:
# Windows: 4.0.0.2416 # Windows: 4.0.0.2416
# Linux: 4.0.0.2416 # Linux: 4.0.0.2416
# #
# tracker_id: CORE-5579 # tracker_id: CORE-5579
# min_versions: ['2.5.8'] # min_versions: ['2.5.8']
# versions: 2.5.8 # versions: 2.5.8
# qmid: None # qmid: None
import pytest import pytest
from firebird.qa import db_factory, isql_act, Action import re
import zipfile
from pathlib import Path
from firebird.qa import db_factory, python_act, Action, temp_file
from firebird.driver import SrvRestoreFlag
# version: 2.5.8 # version: 2.5.8
# resources: None # resources: None
@ -51,34 +55,34 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1 # test_script_1
#--- #---
# #
# import os # import os
# import time # import time
# import zipfile # import zipfile
# import subprocess # import subprocess
# import re # import re
# #
# os.environ["ISC_USER"] = user_name # os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password # os.environ["ISC_PASSWORD"] = user_password
# db_conn.close() # db_conn.close()
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# def flush_and_close( file_handle ): # def flush_and_close( file_handle ):
# # https://docs.python.org/2/library/os.html#os.fsync # # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f, # # If you're starting with a Python file object f,
# # first do f.flush(), and # # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. # # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os # global os
# #
# file_handle.flush() # file_handle.flush()
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: # if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
# # otherwise: "OSError: [Errno 9] Bad file descriptor"! # # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno()) # os.fsync(file_handle.fileno())
# file_handle.close() # file_handle.close()
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# def cleanup( f_names_list ): # def cleanup( f_names_list ):
# global os # global os
# for i in range(len( f_names_list )): # for i in range(len( f_names_list )):
@ -90,28 +94,28 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.') # print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
# print('type(f_names_list[i])=',type(f_names_list[i])) # print('type(f_names_list[i])=',type(f_names_list[i]))
# del_name = None # del_name = None
# #
# if del_name and os.path.isfile( del_name ): # if del_name and os.path.isfile( del_name ):
# os.remove( del_name ) # os.remove( del_name )
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# zf = zipfile.ZipFile( os.path.join(context['files_location'],'core_5579_broken_nn.zip') ) # zf = zipfile.ZipFile( os.path.join(context['files_location'],'core_5579_broken_nn.zip') )
# #
# # Name of .fbk inside .zip: # # Name of .fbk inside .zip:
# zipfbk='core_5579_broken_nn.fbk' # zipfbk='core_5579_broken_nn.fbk'
# #
# zf.extract( zipfbk, context['temp_directory'] ) # zf.extract( zipfbk, context['temp_directory'] )
# zf.close() # zf.close()
# #
# tmpfbk=''.join( ( context['temp_directory'], zipfbk ) ) # tmpfbk=''.join( ( context['temp_directory'], zipfbk ) )
# tmpfdb=''.join( ( context['temp_directory'], 'core_5579_broken_nn.fdb') ) # tmpfdb=''.join( ( context['temp_directory'], 'core_5579_broken_nn.fdb') )
# #
# f_restore_log=open( os.path.join(context['temp_directory'],'tmp_restore_5579.log'), 'w') # f_restore_log=open( os.path.join(context['temp_directory'],'tmp_restore_5579.log'), 'w')
# f_restore_err=open( os.path.join(context['temp_directory'],'tmp_restore_5579.err'), 'w') # f_restore_err=open( os.path.join(context['temp_directory'],'tmp_restore_5579.err'), 'w')
# #
# cleanup( (tmpfdb,) ) # cleanup( (tmpfdb,) )
# #
# subprocess.call([ context['fbsvcmgr_path'], # subprocess.call([ context['fbsvcmgr_path'],
# "localhost:service_mgr", # "localhost:service_mgr",
# "action_restore", # "action_restore",
@ -124,10 +128,10 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# ) # )
# # before this ticket was fixed restore log did contain following line: # # before this ticket was fixed restore log did contain following line:
# # gbak: ERROR:request synchronization error # # gbak: ERROR:request synchronization error
# #
# flush_and_close( f_restore_log ) # flush_and_close( f_restore_log )
# flush_and_close( f_restore_err ) # flush_and_close( f_restore_err )
# #
# # Check: # # Check:
# ######## # ########
# # 1. fbsvcmgr itself must finish without errors: # # 1. fbsvcmgr itself must finish without errors:
@ -135,32 +139,43 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# for line in f: # for line in f:
# if line.split(): # if line.split():
# print( 'UNEXPECTED STDERR in file '+f_restore_err.name+': '+line.upper() ) # print( 'UNEXPECTED STDERR in file '+f_restore_err.name+': '+line.upper() )
# #
# # 2. Log of restoring process must NOT contain line with phrase 'request synchronization': # # 2. Log of restoring process must NOT contain line with phrase 'request synchronization':
# #
# req_sync_pattern=re.compile('[.*]*request\\s+synchronization\\s+error\\.*', re.IGNORECASE) # req_sync_pattern=re.compile('[.*]*request\\s+synchronization\\s+error\\.*', re.IGNORECASE)
# #
# with open( f_restore_log.name,'r') as f: # with open( f_restore_log.name,'r') as f:
# for line in f: # for line in f:
# if req_sync_pattern.search(line): # if req_sync_pattern.search(line):
# print( 'UNEXPECTED STDLOG: '+line.upper() ) # print( 'UNEXPECTED STDLOG: '+line.upper() )
# #
# ##################################################################### # #####################################################################
# # Cleanup: # # Cleanup:
# #
# # do NOT remove this pause otherwise some of logs will not be enable for deletion and test will finish with # # do NOT remove this pause otherwise some of logs will not be enable for deletion and test will finish with
# # Exception raised while executing Python test script. exception: WindowsError: 32 # # Exception raised while executing Python test script. exception: WindowsError: 32
# time.sleep(1) # time.sleep(1)
# cleanup( (f_restore_log, f_restore_err, tmpfdb, tmpfbk) ) # cleanup( (f_restore_log, f_restore_err, tmpfdb, tmpfbk) )
# #
# #
#--- #---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
fbk_file_1 = temp_file('core_5579_broken_nn.fbk')
fdb_file_1 = temp_file('core_5579_broken_nn.fdb')
@pytest.mark.version('>=2.5.8') @pytest.mark.version('>=2.5.8')
@pytest.mark.xfail def test_1(act_1: Action, fdb_file_1: Path, fbk_file_1: Path):
def test_1(db_1): pattern = re.compile('[.*]*request\\s+synchronization\\s+error\\.*', re.IGNORECASE)
pytest.fail("Test not IMPLEMENTED") zipped_fbk_file = zipfile.Path(act_1.vars['files'] / 'core_5579_broken_nn.zip',
at='core_5579_broken_nn.fbk')
fbk_file_1.write_bytes(zipped_fbk_file.read_bytes())
with act_1.connect_server() as srv:
srv.database.restore(database=fdb_file_1, backup=fbk_file_1,
flags=SrvRestoreFlag.ONE_AT_A_TIME | SrvRestoreFlag.CREATE)
# before this ticket was fixed restore fails with: request synchronization error
for line in srv:
if pattern.search(line):
pytest.fail(f'RESTORE ERROR: {line}')

View File

@ -2,7 +2,7 @@
# #
# id: bugs.core_5598 # id: bugs.core_5598
# title: Error "block size exceeds implementation restriction" while inner joining large datasets with a long key using the HASH JOIN plan # title: Error "block size exceeds implementation restriction" while inner joining large datasets with a long key using the HASH JOIN plan
# decription: # decription:
# Hash join have to operate with keys of total length >= 1 Gb if we want to reproduce runtime error # Hash join have to operate with keys of total length >= 1 Gb if we want to reproduce runtime error
# "Statement failed, SQLSTATE = HY001 / unable to allocate memory from operating system" # "Statement failed, SQLSTATE = HY001 / unable to allocate memory from operating system"
# If test table that serves as the source for HJ has record length about 65 Kb than not less than 16K records must be added there. # If test table that serves as the source for HJ has record length about 65 Kb than not less than 16K records must be added there.
@ -11,7 +11,7 @@
# Than we add into this table >= 16Kb rows of unicode (NON-ascii!) characters. # Than we add into this table >= 16Kb rows of unicode (NON-ascii!) characters.
# Finally, we launch query against this table and this query will use hash join because of missed indices. # Finally, we launch query against this table and this query will use hash join because of missed indices.
# We have to check that NO errors occured during this query. # We have to check that NO errors occured during this query.
# #
# Discuss with dimitr: letters 08-jan-2018 .. 06-feb-2018. # Discuss with dimitr: letters 08-jan-2018 .. 06-feb-2018.
# Confirmed bug on: # Confirmed bug on:
# 3.0.3.32838 # 3.0.3.32838
@ -19,19 +19,20 @@
# Works fine on: # Works fine on:
# 3.0.4.32939 (SS, CS) - time ~ 29-32" # 3.0.4.32939 (SS, CS) - time ~ 29-32"
# 4.0.0.945 (SS, CS) - time ~ 29-32" # 4.0.0.945 (SS, CS) - time ~ 29-32"
# #
# tracker_id: CORE-5598 # tracker_id: CORE-5598
# min_versions: ['3.0.3'] # min_versions: ['3.0.3']
# versions: 3.0.3 # versions: 3.0.3
# qmid: None # qmid: None
import pytest import pytest
from firebird.qa import db_factory, isql_act, Action from firebird.qa import db_factory, python_act, Action
# version: 3.0.3 # version: 3.0.3
# resources: None # resources: None
substitutions_1 = [('[ \t]+', ' '), ('.*RECORD LENGTH:[ \t]+[\\d]+[ \t]*\\)', ''), ('.*COUNT[ \t]+[\\d]+', '')] substitutions_1 = [('[ \t]+', ' '), ('.*RECORD LENGTH:[ \t]+[\\d]+[ \t]*\\)', ''),
('.*COUNT[ \t]+[\\d]+', '')]
init_script_1 = """""" init_script_1 = """"""
@ -45,37 +46,37 @@ db_1 = db_factory(charset='UTF8', sql_dialect=3, init=init_script_1)
# from subprocess import Popen # from subprocess import Popen
# from fdb import services # from fdb import services
# import time # import time
# #
# os.environ["ISC_USER"] = user_name # os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password # os.environ["ISC_PASSWORD"] = user_password
# fdb_file=db_conn.database_name # fdb_file=db_conn.database_name
# db_conn.close() # db_conn.close()
# #
# # Threshold: minimal records that is to be inserted in order to reproduce runtime exception # # Threshold: minimal records that is to be inserted in order to reproduce runtime exception
# # 'unable to allocate memory from OS': # # 'unable to allocate memory from OS':
# MIN_RECS_TO_ADD = 17000 # MIN_RECS_TO_ADD = 17000
# #
# fbs = fdb.services.connect( host = 'localhost:service_mgr' ) # fbs = fdb.services.connect( host = 'localhost:service_mgr' )
# fbs.set_write_mode( database = fdb_file, mode = fdb.services.WRITE_BUFFERED ) # fbs.set_write_mode( database = fdb_file, mode = fdb.services.WRITE_BUFFERED )
# fbs.close() # fbs.close()
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# def flush_and_close( file_handle ): # def flush_and_close( file_handle ):
# # https://docs.python.org/2/library/os.html#os.fsync # # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f, # # If you're starting with a Python file object f,
# # first do f.flush(), and # # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. # # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os # global os
# #
# file_handle.flush() # file_handle.flush()
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: # if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
# # otherwise: "OSError: [Errno 9] Bad file descriptor"! # # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno()) # os.fsync(file_handle.fileno())
# file_handle.close() # file_handle.close()
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# def cleanup( f_names_list ): # def cleanup( f_names_list ):
# global os # global os
# for i in range(len( f_names_list )): # for i in range(len( f_names_list )):
@ -86,12 +87,12 @@ db_1 = db_factory(charset='UTF8', sql_dialect=3, init=init_script_1)
# else: # else:
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.') # print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
# del_name = None # del_name = None
# #
# if del_name and os.path.isfile( del_name ): # if del_name and os.path.isfile( del_name ):
# os.remove( del_name ) # os.remove( del_name )
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# db_conn=fdb.connect(dsn = dsn, charset = 'utf8') # db_conn=fdb.connect(dsn = dsn, charset = 'utf8')
# db_conn.execute_immediate( 'create table test(id int, s varchar(8191))' ) # db_conn.execute_immediate( 'create table test(id int, s varchar(8191))' )
# db_conn.commit() # db_conn.commit()
@ -99,15 +100,15 @@ db_1 = db_factory(charset='UTF8', sql_dialect=3, init=init_script_1)
# cur.execute( "insert into test( id, s ) select row_number()over(), lpad('', 8191, 'Алексей, Łukasz, Máté, François, Jørgen, Νικόλαος') from rdb$types,rdb$types rows %d" % MIN_RECS_TO_ADD) # cur.execute( "insert into test( id, s ) select row_number()over(), lpad('', 8191, 'Алексей, Łukasz, Máté, François, Jørgen, Νικόλαος') from rdb$types,rdb$types rows %d" % MIN_RECS_TO_ADD)
# db_conn.commit() # db_conn.commit()
# db_conn.close() # db_conn.close()
# #
# isql_cmd=''' # isql_cmd='''
# set list on; # set list on;
# --show version; # --show version;
# set explain on; # set explain on;
# select count(*) from test a join test b using(id, s); # select count(*) from test a join test b using(id, s);
# set explain off; # set explain off;
# quit; # quit;
# select # select
# m.MON$STAT_ID # m.MON$STAT_ID
# ,m.MON$STAT_GROUP # ,m.MON$STAT_GROUP
# ,m.MON$MEMORY_USED # ,m.MON$MEMORY_USED
@ -115,26 +116,26 @@ db_1 = db_factory(charset='UTF8', sql_dialect=3, init=init_script_1)
# ,m.MON$MAX_MEMORY_USED # ,m.MON$MAX_MEMORY_USED
# ,m.MON$MAX_MEMORY_ALLOCATED # ,m.MON$MAX_MEMORY_ALLOCATED
# from mon$database d join mon$memory_usage m using (MON$STAT_ID) # from mon$database d join mon$memory_usage m using (MON$STAT_ID)
# ; # ;
# ''' # '''
# #
# isql_run=open( os.path.join(context['temp_directory'],'tmp_isql_5596.sql'), 'w') # isql_run=open( os.path.join(context['temp_directory'],'tmp_isql_5596.sql'), 'w')
# isql_run.write( isql_cmd ) # isql_run.write( isql_cmd )
# isql_run.close() # isql_run.close()
# #
# #----------------------------------- # #-----------------------------------
# isql_log=open( os.path.join(context['temp_directory'],'tmp_isql_5596.log'), 'w') # isql_log=open( os.path.join(context['temp_directory'],'tmp_isql_5596.log'), 'w')
# isql_err=open( os.path.join(context['temp_directory'],'tmp_isql_5596.err'), 'w') # isql_err=open( os.path.join(context['temp_directory'],'tmp_isql_5596.err'), 'w')
# #
# p_isql = subprocess.call( [context['isql_path'], dsn, '-i', isql_run.name ], stdout=isql_log, stderr=isql_err ) # p_isql = subprocess.call( [context['isql_path'], dsn, '-i', isql_run.name ], stdout=isql_log, stderr=isql_err )
# #
# flush_and_close( isql_log ) # flush_and_close( isql_log )
# flush_and_close( isql_err ) # flush_and_close( isql_err )
# #
# #
# # do NOT remove this delay: # # do NOT remove this delay:
# time.sleep(1) # time.sleep(1)
# #
# # STDOUT must contain: # # STDOUT must contain:
# # Select Expression # # Select Expression
# # -> Aggregate # # -> Aggregate
@ -145,40 +146,68 @@ db_1 = db_factory(charset='UTF8', sql_dialect=3, init=init_script_1)
# # -> Table "TEST" as "A" Full Scan # # -> Table "TEST" as "A" Full Scan
# # # #
# # COUNT 17000 # # COUNT 17000
# #
# with open(isql_log.name,'r') as f: # with open(isql_log.name,'r') as f:
# for line in f: # for line in f:
# if line.rstrip(): # if line.rstrip():
# print('STDOUT:' + line.upper() ) # print('STDOUT:' + line.upper() )
# #
# with open(isql_err.name,'r') as f: # with open(isql_err.name,'r') as f:
# for line in f: # for line in f:
# if line.rstrip(): # if line.rstrip():
# print('UNEXPECTED STDERR:' + line.upper() ) # print('UNEXPECTED STDERR:' + line.upper() )
# #
# #
# # cleanup: # # cleanup:
# ########## # ##########
# time.sleep(1) # time.sleep(1)
# cleanup( ( isql_run, isql_log, isql_err ) ) # cleanup( ( isql_run, isql_log, isql_err ) )
# #
#--- #---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
expected_stdout_1 = """ expected_stdout_1 = """
STDOUT:SELECT EXPRESSION SELECT EXPRESSION
STDOUT: -> AGGREGATE -> AGGREGATE
STDOUT: -> FILTER -> FILTER
STDOUT: -> HASH JOIN (INNER) -> HASH JOIN (INNER)
STDOUT: -> TABLE "TEST" AS "B" FULL SCAN -> TABLE "TEST" AS "B" FULL SCAN
STDOUT: -> RECORD BUFFER (RECORD LENGTH: 32793) -> RECORD BUFFER (RECORD LENGTH: 32793)
STDOUT: -> TABLE "TEST" AS "A" FULL SCAN -> TABLE "TEST" AS "A" FULL SCAN
STDOUT:COUNT 17000 COUNT 17000
""" """
MIN_RECS_TO_ADD = 17000
test_script_1 = """
set list on;
--show version;
set explain on;
select count(*) from test a join test b using(id, s);
set explain off;
quit;
select
m.MON$STAT_ID
,m.MON$STAT_GROUP
,m.MON$MEMORY_USED
,m.MON$MEMORY_ALLOCATED
,m.MON$MAX_MEMORY_USED
,m.MON$MAX_MEMORY_ALLOCATED
from mon$database d join mon$memory_usage m using (MON$STAT_ID);
"""
@pytest.mark.version('>=3.0.3') @pytest.mark.version('>=3.0.3')
@pytest.mark.xfail def test_1(act_1: Action):
def test_1(db_1): act_1.db.set_async_write()
pytest.fail("Test not IMPLEMENTED") with act_1.db.connect(charset='utf8') as con:
con.execute_immediate('create table test(id int, s varchar(8191))')
con.commit()
c = con.cursor()
c.execute(f"insert into test(id, s) select row_number()over(), lpad('', 8191, 'Алексей, Łukasz, Máté, François, Jørgen, Νικόλαος') from rdb$types,rdb$types rows {MIN_RECS_TO_ADD}")
con.commit()
#
act_1.expected_stdout = expected_stdout_1
act_1.isql(switches=[], input=test_script_1)
act_1.stdout = act_1.stdout.upper()
assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@ -2,28 +2,32 @@
# #
# id: bugs.core_5618 # id: bugs.core_5618
# title: Part of the pages of the second level blobs is not released when deleting relations. # title: Part of the pages of the second level blobs is not released when deleting relations.
# decription: # decription:
# We create table with blob field and write into it binary data with length that # We create table with blob field and write into it binary data with length that
# is too big to store such blob as level-0 and level-1. Filling is implemented as # is too big to store such blob as level-0 and level-1. Filling is implemented as
# specified in: # specified in:
# http://pythonhosted.org/fdb/differences-from-kdb.html#stream-blobs # http://pythonhosted.org/fdb/differences-from-kdb.html#stream-blobs
# Then we drop table and close connection. # Then we drop table and close connection.
# Finally, we obtain firebird.log, run full validation (like 'gfix -v -full' does) and get firebird.log again. # Finally, we obtain firebird.log, run full validation (like 'gfix -v -full' does) and get firebird.log again.
# Comparison of two firebird.log versions should give only one difference related to warnings, and they count # Comparison of two firebird.log versions should give only one difference related to warnings, and they count
# must be equal to 0. # must be equal to 0.
# #
# Reproduced on 3.0.3.32837, got lot of warnings in firebird.log when did usual validation ('gfix -v -full ...') # Reproduced on 3.0.3.32837, got lot of warnings in firebird.log when did usual validation ('gfix -v -full ...')
# Checked on: # Checked on:
# 30SS, build 3.0.3.32856: OK, 4.047s. # 30SS, build 3.0.3.32856: OK, 4.047s.
# 40SS, build 4.0.0.834: OK, 8.266s. # 40SS, build 4.0.0.834: OK, 8.266s.
# #
# tracker_id: CORE-5618 # tracker_id: CORE-5618
# min_versions: ['3.0.3'] # min_versions: ['3.0.3']
# versions: 3.0.3 # versions: 3.0.3
# qmid: None # qmid: None
import pytest import pytest
from firebird.qa import db_factory, isql_act, Action import zipfile
from difflib import unified_diff
from pathlib import Path
from firebird.qa import db_factory, python_act, Action, temp_file
from firebird.driver import SrvRepairFlag
# version: 3.0.3 # version: 3.0.3
# resources: None # resources: None
@ -32,41 +36,41 @@ substitutions_1 = []
init_script_1 = """ init_script_1 = """
recreate table test(b blob sub_type 0); recreate table test(b blob sub_type 0);
""" """
db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1) db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)
# test_script_1 # test_script_1
#--- #---
# #
# import os # import os
# import subprocess # import subprocess
# import zipfile # import zipfile
# import difflib # import difflib
# import time # import time
# #
# os.environ["ISC_USER"] = user_name # os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password # os.environ["ISC_PASSWORD"] = user_password
# this_fdb = db_conn.database_name # this_fdb = db_conn.database_name
# db_conn.close() # db_conn.close()
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# def flush_and_close( file_handle ): # def flush_and_close( file_handle ):
# # https://docs.python.org/2/library/os.html#os.fsync # # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f, # # If you're starting with a Python file object f,
# # first do f.flush(), and # # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. # # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os # global os
# #
# file_handle.flush() # file_handle.flush()
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: # if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
# # otherwise: "OSError: [Errno 9] Bad file descriptor"! # # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno()) # os.fsync(file_handle.fileno())
# file_handle.close() # file_handle.close()
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# def cleanup( f_names_list ): # def cleanup( f_names_list ):
# global os # global os
# for i in range(len( f_names_list )): # for i in range(len( f_names_list )):
@ -77,16 +81,16 @@ db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)
# else: # else:
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.') # print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
# del_name = None # del_name = None
# #
# if del_name and os.path.isfile( del_name ): # if del_name and os.path.isfile( del_name ):
# os.remove( del_name ) # os.remove( del_name )
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# def svc_get_fb_log( f_fb_log ): # def svc_get_fb_log( f_fb_log ):
# #
# import subprocess # import subprocess
# #
# subprocess.call([ context['fbsvcmgr_path'], # subprocess.call([ context['fbsvcmgr_path'],
# "localhost:service_mgr", # "localhost:service_mgr",
# "action_get_fb_log" # "action_get_fb_log"
@ -94,120 +98,143 @@ db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)
# stdout=f_fb_log, stderr=subprocess.STDOUT # stdout=f_fb_log, stderr=subprocess.STDOUT
# ) # )
# return # return
# #
# #
# ##################################################################### # #####################################################################
# # Move database to FW = OFF in order to increase speed of insertions and output its header info: # # Move database to FW = OFF in order to increase speed of insertions and output its header info:
# #
# fwoff_log=open( os.path.join(context['temp_directory'],'tmp_fw_off_5618.log'), 'w') # fwoff_log=open( os.path.join(context['temp_directory'],'tmp_fw_off_5618.log'), 'w')
# subprocess.call( [ context['fbsvcmgr_path'], "localhost:service_mgr", # subprocess.call( [ context['fbsvcmgr_path'], "localhost:service_mgr",
# "action_properties", # "action_properties",
# "prp_write_mode", "prp_wm_async", # "prp_write_mode", "prp_wm_async",
# "dbname", this_fdb # "dbname", this_fdb
# ], # ],
# stdout=fwoff_log, # stdout=fwoff_log,
# stderr=subprocess.STDOUT # stderr=subprocess.STDOUT
# ) # )
# flush_and_close( fwoff_log ) # flush_and_close( fwoff_log )
# #
# zf = zipfile.ZipFile( os.path.join(context['files_location'],'core_5618.zip') ) # zf = zipfile.ZipFile( os.path.join(context['files_location'],'core_5618.zip') )
# blob_src = 'core_5618.bin' # blob_src = 'core_5618.bin'
# zf.extract( blob_src, '$(DATABASE_LOCATION)') # zf.extract( blob_src, '$(DATABASE_LOCATION)')
# zf.close() # zf.close()
# #
# con1 = fdb.connect(dsn = dsn) # con1 = fdb.connect(dsn = dsn)
# #
# cur1=con1.cursor() # cur1=con1.cursor()
# blob_src = ''.join( ('$(DATABASE_LOCATION)', blob_src) ) # blob_src = ''.join( ('$(DATABASE_LOCATION)', blob_src) )
# #
# blob_handle = open( blob_src, 'rb') # blob_handle = open( blob_src, 'rb')
# cur1.execute('insert into test(b) values(?)',[blob_handle]) # cur1.execute('insert into test(b) values(?)',[blob_handle])
# blob_handle.close() # blob_handle.close()
# #
# cur1.close() # cur1.close()
# con1.execute_immediate('drop table test'); # con1.execute_immediate('drop table test');
# con1.commit() # con1.commit()
# con1.close() # con1.close()
# #
# #
# f_fblog_before=open( os.path.join(context['temp_directory'],'tmp_4337_fblog_before.txt'), 'w') # f_fblog_before=open( os.path.join(context['temp_directory'],'tmp_4337_fblog_before.txt'), 'w')
# svc_get_fb_log( f_fblog_before ) # svc_get_fb_log( f_fblog_before )
# flush_and_close( f_fblog_before ) # flush_and_close( f_fblog_before )
# #
# #
# ########################################################## # ##########################################################
# # Run full validation (this is what 'gfix -v -full' does): # # Run full validation (this is what 'gfix -v -full' does):
# #
# val_log=open( os.path.join(context['temp_directory'],'tmp_onval_5618.log'), 'w') # val_log=open( os.path.join(context['temp_directory'],'tmp_onval_5618.log'), 'w')
# #
# subprocess.call( [context['fbsvcmgr_path'], "localhost:service_mgr", # subprocess.call( [context['fbsvcmgr_path'], "localhost:service_mgr",
# "action_repair", # "action_repair",
# "rpr_validate_db", # "rpr_validate_db",
# "rpr_full", # "rpr_full",
# "dbname", this_fdb # "dbname", this_fdb
# ], # ],
# stdout=val_log, # stdout=val_log,
# stderr=subprocess.STDOUT # stderr=subprocess.STDOUT
# ) # )
# #
# flush_and_close( val_log ) # flush_and_close( val_log )
# #
# #
# # Get content of firebird.log AFTER test finish. # # Get content of firebird.log AFTER test finish.
# ############################# # #############################
# #
# f_fblog_after=open( os.path.join(context['temp_directory'],'tmp_4337_fblog_after.txt'), 'w') # f_fblog_after=open( os.path.join(context['temp_directory'],'tmp_4337_fblog_after.txt'), 'w')
# svc_get_fb_log( f_fblog_after ) # svc_get_fb_log( f_fblog_after )
# flush_and_close( f_fblog_after ) # flush_and_close( f_fblog_after )
# #
# #
# # Compare firebird.log versions BEFORE and AFTER this test: # # Compare firebird.log versions BEFORE and AFTER this test:
# ###################### # ######################
# #
# oldfb=open(f_fblog_before.name, 'r') # oldfb=open(f_fblog_before.name, 'r')
# newfb=open(f_fblog_after.name, 'r') # newfb=open(f_fblog_after.name, 'r')
# #
# difftext = ''.join(difflib.unified_diff( # difftext = ''.join(difflib.unified_diff(
# oldfb.readlines(), # oldfb.readlines(),
# newfb.readlines() # newfb.readlines()
# )) # ))
# oldfb.close() # oldfb.close()
# newfb.close() # newfb.close()
# #
# f_diff_txt=open( os.path.join(context['temp_directory'],'tmp_4337_diff.txt'), 'w') # f_diff_txt=open( os.path.join(context['temp_directory'],'tmp_4337_diff.txt'), 'w')
# f_diff_txt.write(difftext) # f_diff_txt.write(difftext)
# flush_and_close( f_diff_txt ) # flush_and_close( f_diff_txt )
# #
# #
# with open( f_diff_txt.name,'r') as f: # with open( f_diff_txt.name,'r') as f:
# for line in f: # for line in f:
# if line.startswith('+') and 'warning'.upper() in line.upper(): # if line.startswith('+') and 'warning'.upper() in line.upper():
# print( 'DIFF IN FIREBIRD.LOG: ' + (' '.join(line.split()).upper()) ) # print( 'DIFF IN FIREBIRD.LOG: ' + (' '.join(line.split()).upper()) )
# #
# with open( fwoff_log.name,'r') as f: # with open( fwoff_log.name,'r') as f:
# for line in f: # for line in f:
# print( ''.join( ('Unexpected line in ', fwoff_log.name, ':', line ) ) ) # print( ''.join( ('Unexpected line in ', fwoff_log.name, ':', line ) ) )
# #
# with open( val_log.name,'r') as f: # with open( val_log.name,'r') as f:
# for line in f: # for line in f:
# print( ''.join( ('Unexpected line in ', val_log.name, ':', line ) ) ) # print( ''.join( ('Unexpected line in ', val_log.name, ':', line ) ) )
# #
# #
# # Cleanup: # # Cleanup:
# ########## # ##########
# time.sleep(1) # time.sleep(1)
# cleanup( (f_fblog_before, f_fblog_after, f_diff_txt, val_log, blob_handle, fwoff_log) ) # cleanup( (f_fblog_before, f_fblog_after, f_diff_txt, val_log, blob_handle, fwoff_log) )
# #
#--- #---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
expected_stdout_1 = """ expected_stdout_1 = """
DIFF IN FIREBIRD.LOG: + VALIDATION FINISHED: 0 ERRORS, 0 WARNINGS, 0 FIXED + VALIDATION FINISHED: 0 ERRORS, 0 WARNINGS, 0 FIXED
""" """
blob_src_1 = temp_file('core_5618.bin')
@pytest.mark.version('>=3.0.3') @pytest.mark.version('>=3.0.3')
@pytest.mark.xfail def test_1(act_1: Action, blob_src_1: Path):
def test_1(db_1): act_1.db.set_async_write()
pytest.fail("Test not IMPLEMENTED") zipped_blob_file = zipfile.Path(act_1.vars['files'] / 'core_5618.zip',
at='core_5618.bin')
blob_src_1.write_bytes(zipped_blob_file.read_bytes())
#
with act_1.db.connect() as con:
c = con.cursor()
with open(blob_src_1, mode='rb') as blob_handle:
c.execute('insert into test (b) values (?)', [blob_handle])
c.close()
con.execute_immediate('drop table test')
con.commit()
#
log_before = act_1.get_firebird_log()
# Run full validation (this is what 'gfix -v -full' does)
with act_1.connect_server() as srv:
srv.database.repair(database=act_1.db.db_path,
flags=SrvRepairFlag.FULL | SrvRepairFlag.VALIDATE_DB)
assert srv.readlines() == []
#
log_after = act_1.get_firebird_log()
log_diff = [line.strip().upper() for line in unified_diff(log_before, log_after)
if line.startswith('+') and 'WARNING' in line.upper()]
assert log_diff == ['+\tVALIDATION FINISHED: 0 ERRORS, 0 WARNINGS, 0 FIXED']

View File

@ -2,7 +2,7 @@
# #
# id: bugs.core_5630 # id: bugs.core_5630
# title: Shadow file is can not be created during restore when -use_all_space option is used # title: Shadow file is can not be created during restore when -use_all_space option is used
# decription: # decription:
# Confirmed bug on WI-V3.0.3.32805, WI-T4.0.0.789. # Confirmed bug on WI-V3.0.3.32805, WI-T4.0.0.789.
# Restore process failed with messages: # Restore process failed with messages:
# === # ===
@ -16,14 +16,15 @@
# FB30SS, build 3.0.3.32832: OK, 5.875s. # FB30SS, build 3.0.3.32832: OK, 5.875s.
# FB40CS, build 4.0.0.796: OK, 7.344s. # FB40CS, build 4.0.0.796: OK, 7.344s.
# FB40SS, build 4.0.0.796: OK, 4.531s. # FB40SS, build 4.0.0.796: OK, 4.531s.
# #
# tracker_id: CORE-5630 # tracker_id: CORE-5630
# min_versions: ['3.0.3'] # min_versions: ['3.0.3']
# versions: 3.0.3 # versions: 3.0.3
# qmid: None # qmid: None
import pytest import pytest
from firebird.qa import db_factory, isql_act, Action from pathlib import Path
from firebird.qa import db_factory, python_act, Action, temp_file
# version: 3.0.3 # version: 3.0.3
# resources: None # resources: None
@ -37,12 +38,12 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1 # test_script_1
#--- #---
# import os # import os
# #
# os.environ["ISC_USER"] = user_name # os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password # os.environ["ISC_PASSWORD"] = user_password
# #
# db_conn.close() # db_conn.close()
# #
# #------------------------------------------- # #-------------------------------------------
# def del_tmp_files(f_list): # def del_tmp_files(f_list):
# import os # import os
@ -50,28 +51,28 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# if os.path.isfile(f_list[i]): # if os.path.isfile(f_list[i]):
# os.remove(f_list[i]) # os.remove(f_list[i])
# #------------------------------------------- # #-------------------------------------------
# #
# n_prefix="$(DATABASE_LOCATION)bugs.tmp.core.5630" # n_prefix="$(DATABASE_LOCATION)bugs.tmp.core.5630"
# fdb_file=n_prefix + ".fdb" # fdb_file=n_prefix + ".fdb"
# shd_file=n_prefix + ".shd" # shd_file=n_prefix + ".shd"
# fbk_file=n_prefix + ".fbk" # fbk_file=n_prefix + ".fbk"
# #
# del_tmp_files( (fdb_file, shd_file, fbk_file) ) # del_tmp_files( (fdb_file, shd_file, fbk_file) )
# #
# usr=user_name # usr=user_name
# pwd=user_password # pwd=user_password
# #
# sql_text=''' # sql_text='''
# set bail on; # set bail on;
# set list on; # set list on;
# #
# create database 'localhost:%(fdb_file)s' user '%(usr)s' password '%(pwd)s'; # create database 'localhost:%(fdb_file)s' user '%(usr)s' password '%(pwd)s';
# #
# recreate table test(s varchar(30)); # recreate table test(s varchar(30));
# commit; # commit;
# #
# create or alter view v_shadow_info as # create or alter view v_shadow_info as
# select # select
# rdb$file_sequence -- 0 # rdb$file_sequence -- 0
# ,rdb$file_start -- 0 # ,rdb$file_start -- 0
# ,rdb$file_length -- 0 # ,rdb$file_length -- 0
@ -80,10 +81,10 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# from rdb$files # from rdb$files
# where lower(rdb$file_name) containing lower('bugs.tmp.core.5630.shd') # where lower(rdb$file_name) containing lower('bugs.tmp.core.5630.shd')
# ; # ;
# #
# insert into test select 'line #' || lpad(row_number()over(), 3, '0' ) from rdb$types rows 200; # insert into test select 'line #' || lpad(row_number()over(), 3, '0' ) from rdb$types rows 200;
# commit; # commit;
# #
# create shadow 1 '%(shd_file)s'; # create shadow 1 '%(shd_file)s';
# commit; # commit;
# set list on; # set list on;
@ -91,21 +92,21 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# select hash( list(s) ) as s_hash_before from test; # select hash( list(s) ) as s_hash_before from test;
# quit; # quit;
# ''' # '''
# #
# f_init_ddl = open( os.path.join(context['temp_directory'],'tmp_5630_ddl.sql'), 'w') # f_init_ddl = open( os.path.join(context['temp_directory'],'tmp_5630_ddl.sql'), 'w')
# f_init_ddl.write( sql_text % locals() ) # f_init_ddl.write( sql_text % locals() )
# f_init_ddl.close() # f_init_ddl.close()
# #
# runProgram( 'isql',[ '-q', '-i', f_init_ddl.name ] ) # runProgram( 'isql',[ '-q', '-i', f_init_ddl.name ] )
# runProgram( 'gbak',['-b', 'localhost:%s' % fdb_file, fbk_file ] ) # runProgram( 'gbak',['-b', 'localhost:%s' % fdb_file, fbk_file ] )
# #
# del_tmp_files( (fdb_file, shd_file) ) # del_tmp_files( (fdb_file, shd_file) )
# #
# # -------------------------------------- # # --------------------------------------
# # restore using "-use_all_space" switch: # # restore using "-use_all_space" switch:
# # -------------------------------------- # # --------------------------------------
# runProgram( 'gbak',['-c', '-use_all_space', fbk_file, 'localhost:%s' % fdb_file ] ) # runProgram( 'gbak',['-c', '-use_all_space', fbk_file, 'localhost:%s' % fdb_file ] )
# #
# # Check that we have the same data in DB tables: # # Check that we have the same data in DB tables:
# sql_text=''' # sql_text='''
# set list on; # set list on;
@ -113,35 +114,91 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# select hash( list(s) ) as s_hash_after from test; # select hash( list(s) ) as s_hash_after from test;
# ''' # '''
# runProgram( 'isql',[ '-q', 'localhost:%s' % fdb_file ], sql_text ) # runProgram( 'isql',[ '-q', 'localhost:%s' % fdb_file ], sql_text )
# #
# #
# ############################### # ###############################
# # Cleanup. # # Cleanup.
# del_tmp_files( (fdb_file, shd_file, fbk_file, f_init_ddl.name) ) # del_tmp_files( (fdb_file, shd_file, fbk_file, f_init_ddl.name) )
# #
# #
#--- #---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """ act_1 = python_act('db_1', substitutions=substitutions_1)
expected_stdout_1_a = """
RDB$FILE_SEQUENCE 0 RDB$FILE_SEQUENCE 0
RDB$FILE_START 0 RDB$FILE_START 0
RDB$FILE_LENGTH 0 RDB$FILE_LENGTH 0
RDB$FILE_FLAGS 1 RDB$FILE_FLAGS 1
RDB$SHADOW_NUMBER 1 RDB$SHADOW_NUMBER 1
S_HASH_BEFORE 1499836372373901520 S_HASH_BEFORE 1499836372373901520
"""
expected_stdout_1_b = """
RDB$FILE_SEQUENCE 0 RDB$FILE_SEQUENCE 0
RDB$FILE_START 0 RDB$FILE_START 0
RDB$FILE_LENGTH 0 RDB$FILE_LENGTH 0
RDB$FILE_FLAGS 1 RDB$FILE_FLAGS 1
RDB$SHADOW_NUMBER 1 RDB$SHADOW_NUMBER 1
S_HASH_AFTER 1499836372373901520 S_HASH_AFTER 1499836372373901520
""" """
fdb_file_1 = temp_file('core_5630.fdb')
fbk_file_1 = temp_file('core_5630.fbk')
shd_file_1 = temp_file('core_5630.shd')
@pytest.mark.version('>=3.0.3') @pytest.mark.version('>=3.0.3')
@pytest.mark.xfail def test_1(act_1: Action, fdb_file_1: Path, fbk_file_1: Path, shd_file_1: Path):
def test_1(db_1): init_ddl = f"""
pytest.fail("Test not IMPLEMENTED") set bail on;
set list on;
create database 'localhost:{fdb_file_1}' user '{act_1.db.user}' password '{act_1.db.password}';
recreate table test(s varchar(30));
commit;
create or alter view v_shadow_info as
select
rdb$file_sequence -- 0
,rdb$file_start -- 0
,rdb$file_length -- 0
,rdb$file_flags -- 1
,rdb$shadow_number -- 1
from rdb$files
where lower(rdb$file_name) containing lower('core_5630.shd')
;
insert into test select 'line #' || lpad(row_number()over(), 3, '0' ) from rdb$types rows 200;
commit;
create shadow 1 '{shd_file_1}';
commit;
set list on;
select * from v_shadow_info;
select hash( list(s) ) as s_hash_before from test;
quit;
"""
act_1.expected_stdout = expected_stdout_1_a
act_1.isql(switches=['-q'], input=init_ddl)
assert act_1.clean_stdout == act_1.clean_expected_stdout
#
with act_1.connect_server() as srv:
srv.database.backup(database=fdb_file_1, backup=fbk_file_1)
srv.wait()
#
fdb_file_1.unlink()
shd_file_1.unlink()
#
act_1.reset()
act_1.gbak(switches=['-c', '-use_all_space', str(fbk_file_1), f'localhost:{fdb_file_1}'])
# Check that we have the same data in DB tables
sql_text = """
set list on;
select * from v_shadow_info;
select hash( list(s) ) as s_hash_after from test;
"""
act_1.reset()
act_1.expected_stdout = expected_stdout_1_b
act_1.isql(switches=['-q', f'localhost:{fdb_file_1}'], input=sql_text, connect_db=False)
assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@ -2,7 +2,7 @@
# #
# id: bugs.core_5637 # id: bugs.core_5637
# title: string right truncation on restore of security db # title: string right truncation on restore of security db
# decription: # decription:
# Confirmed bug on 4.0.0.838, got: # Confirmed bug on 4.0.0.838, got:
# gbak: ERROR:arithmetic exception, numeric overflow, or string truncation # gbak: ERROR:arithmetic exception, numeric overflow, or string truncation
# gbak: ERROR: string right truncation # gbak: ERROR: string right truncation
@ -11,18 +11,22 @@
# ... # ...
# Checked on: # Checked on:
# 4.0.0.918: OK, 6.516s. # 4.0.0.918: OK, 6.516s.
# #
# Refactored 25.10.2019: restored DB state must be changed to full shutdown in order to make sure tha all attachments are gone. # Refactored 25.10.2019: restored DB state must be changed to full shutdown in order to make sure tha all attachments are gone.
# Otherwise got on CS: "WindowsError: 32 The process cannot access the file because it is being used by another process". # Otherwise got on CS: "WindowsError: 32 The process cannot access the file because it is being used by another process".
# Checked on 4.0.0.1633 SS, CS. # Checked on 4.0.0.1633 SS, CS.
# #
# tracker_id: CORE-5637 # tracker_id: CORE-5637
# min_versions: ['4.0'] # min_versions: ['4.0']
# versions: 4.0 # versions: 4.0
# qmid: None # qmid: None
import pytest import pytest
from firebird.qa import db_factory, isql_act, Action import zipfile
from difflib import unified_diff
from pathlib import Path
from firebird.qa import db_factory, python_act, Action, temp_file
from firebird.driver import SrvRestoreFlag
# version: 4.0 # version: 4.0
# resources: None # resources: None
@ -35,34 +39,34 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1 # test_script_1
#--- #---
# #
# import os # import os
# import time # import time
# import zipfile # import zipfile
# import difflib # import difflib
# import subprocess # import subprocess
# #
# os.environ["ISC_USER"] = user_name # os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password # os.environ["ISC_PASSWORD"] = user_password
# db_conn.close() # db_conn.close()
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# def flush_and_close( file_handle ): # def flush_and_close( file_handle ):
# # https://docs.python.org/2/library/os.html#os.fsync # # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f, # # If you're starting with a Python file object f,
# # first do f.flush(), and # # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. # # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os # global os
# #
# file_handle.flush() # file_handle.flush()
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: # if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
# # otherwise: "OSError: [Errno 9] Bad file descriptor"! # # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno()) # os.fsync(file_handle.fileno())
# file_handle.close() # file_handle.close()
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# def cleanup( f_names_list ): # def cleanup( f_names_list ):
# global os # global os
# for i in range(len( f_names_list )): # for i in range(len( f_names_list )):
@ -73,16 +77,16 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# else: # else:
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.') # print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
# del_name = None # del_name = None
# #
# if del_name and os.path.isfile( del_name ): # if del_name and os.path.isfile( del_name ):
# os.remove( del_name ) # os.remove( del_name )
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# def svc_get_fb_log( f_fb_log ): # def svc_get_fb_log( f_fb_log ):
# #
# global subprocess # global subprocess
# #
# subprocess.call( [ context['fbsvcmgr_path'], # subprocess.call( [ context['fbsvcmgr_path'],
# "localhost:service_mgr", # "localhost:service_mgr",
# "action_get_fb_log" # "action_get_fb_log"
@ -90,22 +94,22 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# stdout=f_fb_log, stderr=subprocess.STDOUT # stdout=f_fb_log, stderr=subprocess.STDOUT
# ) # )
# return # return
# #
# #
# #
# zf = zipfile.ZipFile( os.path.join(context['files_location'],'core5637.zip') ) # zf = zipfile.ZipFile( os.path.join(context['files_location'],'core5637.zip') )
# tmpfbk = 'core5637-security3.fbk' # tmpfbk = 'core5637-security3.fbk'
# zf.extract( tmpfbk, '$(DATABASE_LOCATION)') # zf.extract( tmpfbk, '$(DATABASE_LOCATION)')
# zf.close() # zf.close()
# #
# tmpfbk='$(DATABASE_LOCATION)'+tmpfbk # tmpfbk='$(DATABASE_LOCATION)'+tmpfbk
# tmpfdb='$(DATABASE_LOCATION)'+'tmp_5637_check_restored.fdb' # tmpfdb='$(DATABASE_LOCATION)'+'tmp_5637_check_restored.fdb'
# #
# f_fblog_before=open( os.path.join(context['temp_directory'],'tmp_5637_fblog_before.txt'), 'w') # f_fblog_before=open( os.path.join(context['temp_directory'],'tmp_5637_fblog_before.txt'), 'w')
# svc_get_fb_log( f_fblog_before ) # svc_get_fb_log( f_fblog_before )
# flush_and_close( f_fblog_before ) # flush_and_close( f_fblog_before )
# #
# #
# f_restore_log=open( os.path.join(context['temp_directory'],'tmp_5637_check_restored.log'), 'w') # f_restore_log=open( os.path.join(context['temp_directory'],'tmp_5637_check_restored.log'), 'w')
# subprocess.check_call([context['fbsvcmgr_path'],"localhost:service_mgr", # subprocess.check_call([context['fbsvcmgr_path'],"localhost:service_mgr",
# "action_restore", # "action_restore",
@ -114,26 +118,26 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# "res_replace", # "res_replace",
# "verbose" # "verbose"
# ], # ],
# stdout=f_restore_log, # stdout=f_restore_log,
# stderr=subprocess.STDOUT) # stderr=subprocess.STDOUT)
# flush_and_close( f_restore_log ) # flush_and_close( f_restore_log )
# #
# f_fblog_after=open( os.path.join(context['temp_directory'],'tmp_5637_fblog_after.txt'), 'w') # f_fblog_after=open( os.path.join(context['temp_directory'],'tmp_5637_fblog_after.txt'), 'w')
# svc_get_fb_log( f_fblog_after ) # svc_get_fb_log( f_fblog_after )
# flush_and_close( f_fblog_after ) # flush_and_close( f_fblog_after )
# #
# #
# f_validation_log=open( os.path.join(context['temp_directory'],'tmp_5637_validation.log'), 'w') # f_validation_log=open( os.path.join(context['temp_directory'],'tmp_5637_validation.log'), 'w')
# subprocess.check_call([context['fbsvcmgr_path'],"localhost:service_mgr", # subprocess.check_call([context['fbsvcmgr_path'],"localhost:service_mgr",
# "action_validate", # "action_validate",
# "dbname", tmpfdb, # "dbname", tmpfdb,
# ], # ],
# stdout=f_validation_log, # stdout=f_validation_log,
# stderr=subprocess.STDOUT) # stderr=subprocess.STDOUT)
# flush_and_close( f_validation_log ) # flush_and_close( f_validation_log )
# #
# #time.sleep(1) # #time.sleep(1)
# #
# # 25.10.2019: add full shutdown to forcedly drop all attachments. # # 25.10.2019: add full shutdown to forcedly drop all attachments.
# ## |||||||||||||||||||||||||||| # ## ||||||||||||||||||||||||||||
# ## ###################################||| FB 4.0+, SS and SC |||############################## # ## ###################################||| FB 4.0+, SS and SC |||##############################
@ -147,72 +151,89 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# ## This means that one need to kill all connections to prevent from exception on cleanup phase: # ## This means that one need to kill all connections to prevent from exception on cleanup phase:
# ## SQLCODE: -901 / lock time-out on wait transaction / object <this_test_DB> is in use # ## SQLCODE: -901 / lock time-out on wait transaction / object <this_test_DB> is in use
# ## ############################################################################################# # ## #############################################################################################
# #
# f_shutdown_log=open( os.path.join(context['temp_directory'],'tmp_5637_shutdown.log'), 'w') # f_shutdown_log=open( os.path.join(context['temp_directory'],'tmp_5637_shutdown.log'), 'w')
# #
# subprocess.call( [context['fbsvcmgr_path'], "localhost:service_mgr", # subprocess.call( [context['fbsvcmgr_path'], "localhost:service_mgr",
# "action_properties", "prp_shutdown_mode", "prp_sm_full", "prp_shutdown_db", "0", "dbname", tmpfdb, # "action_properties", "prp_shutdown_mode", "prp_sm_full", "prp_shutdown_db", "0", "dbname", tmpfdb,
# ], # ],
# stdout = f_shutdown_log, # stdout = f_shutdown_log,
# stderr = subprocess.STDOUT # stderr = subprocess.STDOUT
# ) # )
# #
# flush_and_close( f_shutdown_log ) # flush_and_close( f_shutdown_log )
# #
# #
# # Compare firebird.log versions BEFORE and AFTER this test: # # Compare firebird.log versions BEFORE and AFTER this test:
# ###################### # ######################
# #
# oldfb=open(f_fblog_before.name, 'r') # oldfb=open(f_fblog_before.name, 'r')
# newfb=open(f_fblog_after.name, 'r') # newfb=open(f_fblog_after.name, 'r')
# #
# difftext = ''.join(difflib.unified_diff( # difftext = ''.join(difflib.unified_diff(
# oldfb.readlines(), # oldfb.readlines(),
# newfb.readlines() # newfb.readlines()
# )) # ))
# oldfb.close() # oldfb.close()
# newfb.close() # newfb.close()
# #
# f_diff_txt=open( os.path.join(context['temp_directory'],'tmp_5637_diff.txt'), 'w') # f_diff_txt=open( os.path.join(context['temp_directory'],'tmp_5637_diff.txt'), 'w')
# f_diff_txt.write(difftext) # f_diff_txt.write(difftext)
# flush_and_close( f_diff_txt ) # flush_and_close( f_diff_txt )
# #
# # Check logs: # # Check logs:
# ############# # #############
# with open( f_restore_log.name,'r') as f: # with open( f_restore_log.name,'r') as f:
# for line in f: # for line in f:
# if 'Error'.upper() in line.upper(): # if 'Error'.upper() in line.upper():
# print( 'UNEXPECTED ERROR IN RESTORE LOG: ' + (' '.join(line.split()).upper()) ) # print( 'UNEXPECTED ERROR IN RESTORE LOG: ' + (' '.join(line.split()).upper()) )
# #
# with open( f_validation_log.name,'r') as f: # with open( f_validation_log.name,'r') as f:
# for line in f: # for line in f:
# if 'Error'.upper() in line.upper(): # if 'Error'.upper() in line.upper():
# print( 'UNEXPECTED ERROR IN VALIDATION LOG: ' + (' '.join(line.split()).upper()) ) # print( 'UNEXPECTED ERROR IN VALIDATION LOG: ' + (' '.join(line.split()).upper()) )
# #
# with open( f_shutdown_log.name,'r') as f: # with open( f_shutdown_log.name,'r') as f:
# for line in f: # for line in f:
# if line.split(): # if line.split():
# print( 'UNEXPECTED OUTPUT IN DB SHUTDOWN LOG: ' + (' '.join(line.split()).upper()) ) # print( 'UNEXPECTED OUTPUT IN DB SHUTDOWN LOG: ' + (' '.join(line.split()).upper()) )
# #
# with open( f_diff_txt.name,'r') as f: # with open( f_diff_txt.name,'r') as f:
# for line in f: # for line in f:
# if line.startswith('+'): # if line.startswith('+'):
# print( 'UNEXPECTED DIFF IN FIREBIRD.LOG: ' + (' '.join(line.split()).upper()) ) # print( 'UNEXPECTED DIFF IN FIREBIRD.LOG: ' + (' '.join(line.split()).upper()) )
# #
# #
# # Cleanup: # # Cleanup:
# ########## # ##########
# time.sleep(1) # time.sleep(1)
# cleanup( (f_restore_log,f_validation_log,f_shutdown_log,f_fblog_before,f_fblog_after,f_diff_txt, tmpfbk,tmpfdb) ) # cleanup( (f_restore_log,f_validation_log,f_shutdown_log,f_fblog_before,f_fblog_after,f_diff_txt, tmpfbk,tmpfdb) )
# #
# #
#--- #---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
sec_fbk_1 = temp_file('core5637-security3.fbk')
sec_fdb_1 = temp_file('core5637-security3.fdb')
@pytest.mark.version('>=4.0') @pytest.mark.version('>=4.0')
@pytest.mark.xfail def test_1(act_1: Action, sec_fbk_1: Path, sec_fdb_1: Path):
def test_1(db_1): zipped_fbk_file = zipfile.Path(act_1.vars['files'] / 'core_5637.zip',
pytest.fail("Test not IMPLEMENTED") at='core5637-security3.fbk')
sec_fbk_1.write_bytes(zipped_fbk_file.read_bytes())
#
log_before = act_1.get_firebird_log()
# Restore security database
with act_1.connect_server() as srv:
srv.database.restore(database=sec_fdb_1, backup=sec_fbk_1, flags=SrvRestoreFlag.REPLACE)
restore_log = srv.readlines()
#
log_after = act_1.get_firebird_log()
#
srv.database.validate(database=sec_fdb_1)
validation_log = srv.readlines()
#
assert [line for line in restore_log if 'ERROR' in line.upper()] == []
assert [line for line in validation_log if 'ERROR' in line.upper()] == []
assert list(unified_diff(log_before, log_after)) == []

View File

@ -2,7 +2,7 @@
# #
# id: bugs.core_5645 # id: bugs.core_5645
# title: Wrong transaction can be passed to external engine # title: Wrong transaction can be passed to external engine
# decription: # decription:
# Implemented according to notes given by Adriano in the ticket 27-oct-2017 02:41. # Implemented according to notes given by Adriano in the ticket 27-oct-2017 02:41.
# Checked on: # Checked on:
# 4.0.0.1743 SS: 2.719s. # 4.0.0.1743 SS: 2.719s.
@ -17,48 +17,55 @@
# qmid: None # qmid: None
import pytest import pytest
from firebird.qa import db_factory, isql_act, Action from firebird.qa import db_factory, python_act, Action, Database
# version: 3.0.3 # version: 3.0.3
# resources: None # resources: None
substitutions_1 = [('INFO_BLOB_ID.*', '')] substitutions_1 = [('INFO_BLOB_ID.*', '')]
init_script_1 = """""" init_script_1 = """
create table persons (
id integer not null,
name varchar(60) not null,
address varchar(60),
info blob sub_type text
);
"""
db_1 = db_factory(sql_dialect=3, init=init_script_1) db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1 # test_script_1
#--- #---
# #
# import os # import os
# import sys # import sys
# import subprocess # import subprocess
# from fdb import services # from fdb import services
# #
# os.environ["ISC_USER"] = user_name # os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password # os.environ["ISC_PASSWORD"] = user_password
# #
# this_db = db_conn.database_name # this_db = db_conn.database_name
# fb_major=db_conn.engine_version # fb_major=db_conn.engine_version
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# def flush_and_close( file_handle ): # def flush_and_close( file_handle ):
# # https://docs.python.org/2/library/os.html#os.fsync # # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f, # # If you're starting with a Python file object f,
# # first do f.flush(), and # # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. # # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os # global os
# #
# file_handle.flush() # file_handle.flush()
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: # if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
# # otherwise: "OSError: [Errno 9] Bad file descriptor"! # # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno()) # os.fsync(file_handle.fileno())
# file_handle.close() # file_handle.close()
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# def cleanup( f_names_list ): # def cleanup( f_names_list ):
# global os # global os
# for i in range(len( f_names_list )): # for i in range(len( f_names_list )):
@ -69,12 +76,12 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# else: # else:
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.') # print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
# del_name = None # del_name = None
# #
# if del_name and os.path.isfile( del_name ): # if del_name and os.path.isfile( del_name ):
# os.remove( del_name ) # os.remove( del_name )
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# table_ddl=''' # table_ddl='''
# create table persons ( # create table persons (
# id integer not null, # id integer not null,
@ -83,66 +90,67 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# info blob sub_type text # info blob sub_type text
# ); # );
# ''' # '''
# #
# #
# fdb_repl = os.path.join(context['temp_directory'],'tmp_5645_repl.fdb') # fdb_repl = os.path.join(context['temp_directory'],'tmp_5645_repl.fdb')
# cleanup( (fdb_repl,) ) # cleanup( (fdb_repl,) )
# #
# con_repl = fdb.create_database( dsn = 'localhost:%(fdb_repl)s' % locals() ) # con_repl = fdb.create_database( dsn = 'localhost:%(fdb_repl)s' % locals() )
# con_repl.execute_immediate( table_ddl ) # con_repl.execute_immediate( table_ddl )
# con_repl.commit() # con_repl.commit()
# con_repl.close() # con_repl.close()
# #
# db_conn.execute_immediate( table_ddl ) # db_conn.execute_immediate( table_ddl )
# db_conn.commit() # db_conn.commit()
# #
# ddl_for_replication=''' # ddl_for_replication='''
# create table replicate_config ( # create table replicate_config (
# name varchar(31) not null, # name varchar(31) not null,
# data_source varchar(255) not null # data_source varchar(255) not null
# ); # );
# #
# insert into replicate_config (name, data_source) # insert into replicate_config (name, data_source)
# values ('ds1', '%(fdb_repl)s'); # values ('ds1', '%(fdb_repl)s');
# #
# create trigger persons_replicate # create trigger persons_replicate
# after insert on persons # after insert on persons
# external name 'udrcpp_example!replicate!ds1' # external name 'udrcpp_example!replicate!ds1'
# engine udr; # engine udr;
# #
# create trigger persons_replicate2 # create trigger persons_replicate2
# after insert on persons # after insert on persons
# external name 'udrcpp_example!replicate_persons!ds1' # external name 'udrcpp_example!replicate_persons!ds1'
# engine udr; # engine udr;
# commit; # commit;
# #
# ''' % locals() # ''' % locals()
# #
# f_apply_ddl_sql = open( os.path.join(context['temp_directory'],'tmp_5645.sql'), 'w', buffering = 0) # f_apply_ddl_sql = open( os.path.join(context['temp_directory'],'tmp_5645.sql'), 'w', buffering = 0)
# f_apply_ddl_sql.write( ddl_for_replication ) # f_apply_ddl_sql.write( ddl_for_replication )
# flush_and_close( f_apply_ddl_sql ) # flush_and_close( f_apply_ddl_sql )
# #
# f_apply_ddl_log = open( '.'.join( (os.path.splitext( f_apply_ddl_sql.name )[0], 'log') ), 'w', buffering = 0) # f_apply_ddl_log = open( '.'.join( (os.path.splitext( f_apply_ddl_sql.name )[0], 'log') ), 'w', buffering = 0)
# subprocess.call( [ context['isql_path'], dsn, '-q', '-i', f_apply_ddl_sql.name ], stdout = f_apply_ddl_log, stderr = subprocess.STDOUT) # subprocess.call( [ context['isql_path'], dsn, '-q', '-i', f_apply_ddl_sql.name ], stdout = f_apply_ddl_log, stderr = subprocess.STDOUT)
# flush_and_close( f_apply_ddl_log ) # flush_and_close( f_apply_ddl_log )
# #
# #-------------------------------- # #--------------------------------
# #
# cur = db_conn.cursor() # cur = db_conn.cursor()
# cur.execute( "insert into persons values (1, 'One', 'some_address', 'some_blob_info')" ) # cur.execute( "insert into persons values (1, 'One', 'some_address', 'some_blob_info')" )
# db_conn.commit() # db_conn.commit()
# db_conn.close() # db_conn.close()
# #
# if fb_major >= 4.0: # if fb_major >= 4.0:
# runProgram( 'isql', ['-q', dsn], 'ALTER EXTERNAL CONNECTIONS POOL CLEAR ALL;' ) # runProgram( 'isql', ['-q', dsn], 'ALTER EXTERNAL CONNECTIONS POOL CLEAR ALL;' )
# #
# runProgram( 'isql', ['-q', 'localhost:%(fdb_repl)s' % locals()], 'set list on; set count on; select id,name,address,info as info_blob_id from persons;rollback; drop database;' ) # runProgram( 'isql', ['-q', 'localhost:%(fdb_repl)s' % locals()], 'set list on; set count on; select id,name,address,info as info_blob_id from persons;rollback; drop database;' )
# #
# cleanup( (f_apply_ddl_sql,f_apply_ddl_log) ) # cleanup( (f_apply_ddl_sql,f_apply_ddl_log) )
# #
# #
#--- #---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
expected_stdout_1 = """ expected_stdout_1 = """
ID 1 ID 1
@ -157,11 +165,49 @@ expected_stdout_1 = """
INFO_BLOB_ID 80:1 INFO_BLOB_ID 80:1
some_blob_info some_blob_info
Records affected: 2 Records affected: 2
""" """
db_1_repl = db_factory(sql_dialect=3, init=init_script_1, filename='tmp_5645_repl.fd')
@pytest.mark.version('>=3.0.3') @pytest.mark.version('>=3.0.3')
@pytest.mark.xfail def test_1(act_1: Action, db_1_repl: Database):
def test_1(db_1): pytest.skip("Requires UDR udrcpp_example")
pytest.fail("Test not IMPLEMENTED") ddl_for_replication = f"""
create table replicate_config (
name varchar(31) not null,
data_source varchar(255) not null
);
insert into replicate_config (name, data_source)
values ('ds1', '{db_1_repl}');
create trigger persons_replicate
after insert on persons
external name 'udrcpp_example!replicate!ds1'
engine udr;
create trigger persons_replicate2
after insert on persons
external name 'udrcpp_example!replicate_persons!ds1'
engine udr;
commit;
"""
act_1.isql(switches=['-q'], input=ddl_for_replication)
#
with act_1.db.connect() as con:
c = con.cursor()
c.execute("insert into persons values (1, 'One', 'some_address', 'some_blob_info')")
con.commit()
#
if act_1.is_version('>4.0'):
act_1.reset()
act_1.isql(switches=['-q'], input='ALTER EXTERNAL CONNECTIONS POOL CLEAR ALL;')
# Check
act_1.reset()
act_1.expected_stdout = expected_stdout_1
act_1.isql(switches=['-q', db_1_repl.dsn],
input='set list on; set count on; select id,name,address,info as info_blob_id from persons; rollback;',
connect_db=False)
assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@ -2,7 +2,7 @@
# #
# id: bugs.core_5647 # id: bugs.core_5647
# title: Increase number of formats/versions of views from 255 to 32K # title: Increase number of formats/versions of views from 255 to 32K
# decription: # decription:
# FB40SS, build 4.0.0.789: OK, 3.828s (SS, CS). # FB40SS, build 4.0.0.789: OK, 3.828s (SS, CS).
# Older version issued: # Older version issued:
# Statement failed, SQLSTATE = 54000 # Statement failed, SQLSTATE = 54000
@ -10,14 +10,14 @@
# -TABLE VW1 # -TABLE VW1
# -too many versions # -too many versions
# NB: we have to change FW to OFF in order to increase speed of this test run thus use test_type = Python. # NB: we have to change FW to OFF in order to increase speed of this test run thus use test_type = Python.
# #
# tracker_id: CORE-5647 # tracker_id: CORE-5647
# min_versions: ['4.0'] # min_versions: ['4.0']
# versions: 4.0 # versions: 4.0
# qmid: # qmid:
import pytest import pytest
from firebird.qa import db_factory, isql_act, Action from firebird.qa import db_factory, python_act, Action
# version: 4.0 # version: 4.0
# resources: None # resources: None
@ -30,13 +30,13 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1 # test_script_1
#--- #---
# #
# import os # import os
# os.environ["ISC_USER"] = user_name # os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password # os.environ["ISC_PASSWORD"] = user_password
# db_conn.close() # db_conn.close()
# runProgram('gfix',[dsn,'-w','async']) # runProgram('gfix',[dsn,'-w','async'])
# #
# script=''' # script='''
# set bail on; # set bail on;
# set list on; # set list on;
@ -69,17 +69,50 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# quit; # quit;
# ''' # '''
# runProgram('isql',[dsn],script) # runProgram('isql',[dsn],script)
# #
#--- #---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
expected_stdout_1 = """ expected_stdout_1 = """
RET_CODE 0 RET_CODE 0
""" """
test_script_1 = """
set bail on;
set list on;
set term ^;
execute block returns(ret_code smallint) as
declare n int = 300;
begin
while (n > 0) do
begin
if (mod(n, 2) = 0) then
begin
in autonomous transaction do
begin
execute statement 'create or alter view vw1 (dump1) as select 1 from rdb$database';
end
end
else
begin
in autonomous transaction do
begin
execute statement 'create or alter view vw1 (dump1, dump2) as select 1, 2 from rdb$database';
end
end
n = n - 1;
end
ret_code = -abs(n);
suspend;
end ^
set term ;^
quit;
"""
@pytest.mark.version('>=4.0') @pytest.mark.version('>=4.0')
@pytest.mark.xfail def test_1(act_1: Action):
def test_1(db_1): act_1.db.set_async_write()
pytest.fail("Test not IMPLEMENTED") act_1.expected_stdout = expected_stdout_1
act_1.isql(switches=[], input=test_script_1)
assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@ -2,40 +2,44 @@
# #
# id: bugs.core_5648 # id: bugs.core_5648
# title: Avoid serialization of isc_attach_database calls issued by EXECUTE STATEMENT implementation # title: Avoid serialization of isc_attach_database calls issued by EXECUTE STATEMENT implementation
# decription: # decription:
# We use special IP = 192.0.2.2 as never reachable address thus any attempt to connect it will fail. # We use special IP = 192.0.2.2 as never reachable address thus any attempt to connect it will fail.
# Currently FB tries to establish connect to this host about 20-22 seconds. # Currently FB tries to establish connect to this host about 20-22 seconds.
# We launch 1st ISQL in Async mode (using subprocess.Popen) with task to establish connect to this host. # We launch 1st ISQL in Async mode (using subprocess.Popen) with task to establish connect to this host.
# At the same time we launch 2nd ISQL with EDS to localhost and the same DB as test uses. # At the same time we launch 2nd ISQL with EDS to localhost and the same DB as test uses.
# Second ISQL must do its job instantly, despite of hanging 1st ISQl, and time for this is about 50 ms. # Second ISQL must do its job instantly, despite of hanging 1st ISQl, and time for this is about 50 ms.
# We use threshold and compare time for which 2nd ISQL did its job. Finally, we ouptput result of this comparison. # We use threshold and compare time for which 2nd ISQL did its job. Finally, we ouptput result of this comparison.
# #
# ::::::::::::: NOTE ::::::::::::::::::: # ::::::::::::: NOTE :::::::::::::::::::
# As of current FB snapshots, there is NOT ability to interrupt ISQL which tries to make connect to 192.0.2.2, # As of current FB snapshots, there is NOT ability to interrupt ISQL which tries to make connect to 192.0.2.2,
# until this ISQL __itself__ make decision that host is unreachable. This takes about 20-22 seconds. # until this ISQL __itself__ make decision that host is unreachable. This takes about 20-22 seconds.
# Also, if we kill this (hanging) ISQL process, than we will not be able to drop database until this time exceed. # Also, if we kill this (hanging) ISQL process, than we will not be able to drop database until this time exceed.
# For this reason, it was decided not only to kill ISQL but also run fbsvcmgr with DB full-shutdown command - this # For this reason, it was decided not only to kill ISQL but also run fbsvcmgr with DB full-shutdown command - this
# will ensure that database is really free from any attachments and can be dropped. # will ensure that database is really free from any attachments and can be dropped.
# #
# ::::::::::::::::::::::::::::::::::::::::::::::::::::::::: # :::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# See also http://tracker.firebirdsql.org/browse/CORE-5609 # See also http://tracker.firebirdsql.org/browse/CORE-5609
# ::::::::::::::::::::::::::::::::::::::::::::::::::::::::: # :::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# #
# #
# Reproduced bug on 3.0.3.32756 (SC, 15-jul-2017), 4.0.0.690 (SC, 15-jul-2017) # Reproduced bug on 3.0.3.32756 (SC, 15-jul-2017), 4.0.0.690 (SC, 15-jul-2017)
# Passed on: # Passed on:
# 3.0.3.32805: OK, 24.797s (Classic, 21-sep-2017) # 3.0.3.32805: OK, 24.797s (Classic, 21-sep-2017)
# 3.0.3.32828: OK, 25.328s (SuperServer, 08-nov-2017) # 3.0.3.32828: OK, 25.328s (SuperServer, 08-nov-2017)
# 4.0.0.748: OK, 25.984s (Classic) # 4.0.0.748: OK, 25.984s (Classic)
# 4.0.0.789: OK, 24.406s (SuperClassic and SuperServer, 08-nov-2017). # 4.0.0.789: OK, 24.406s (SuperClassic and SuperServer, 08-nov-2017).
# #
# tracker_id: CORE-5648 # tracker_id: CORE-5648
# min_versions: ['3.0.3'] # min_versions: ['3.0.3']
# versions: 3.0.3 # versions: 3.0.3
# qmid: None # qmid: None
import pytest import pytest
from firebird.qa import db_factory, isql_act, Action import subprocess
import time
from pathlib import Path
from firebird.qa import db_factory, python_act, Action, temp_file
from firebird.driver import ShutdownMode, ShutdownMethod
# version: 3.0.3 # version: 3.0.3
# resources: None # resources: None
@ -54,30 +58,30 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# import time # import time
# import difflib # import difflib
# from fdb import services # from fdb import services
# #
# os.environ["ISC_USER"] = user_name # os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password # os.environ["ISC_PASSWORD"] = user_password
# db_file = db_conn.database_name # db_file = db_conn.database_name
# #
# db_conn.close() # db_conn.close()
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# def flush_and_close( file_handle ): # def flush_and_close( file_handle ):
# # https://docs.python.org/2/library/os.html#os.fsync # # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f, # # If you're starting with a Python file object f,
# # first do f.flush(), and # # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. # # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os # global os
# #
# file_handle.flush() # file_handle.flush()
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: # if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
# # otherwise: "OSError: [Errno 9] Bad file descriptor"! # # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno()) # os.fsync(file_handle.fileno())
# file_handle.close() # file_handle.close()
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# def cleanup( f_names_list ): # def cleanup( f_names_list ):
# global os # global os
# for i in range(len( f_names_list )): # for i in range(len( f_names_list )):
@ -88,16 +92,16 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# else: # else:
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.') # print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
# del_name = None # del_name = None
# #
# if del_name and os.path.isfile( del_name ): # if del_name and os.path.isfile( del_name ):
# os.remove( del_name ) # os.remove( del_name )
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# usr=user_name # usr=user_name
# pwd=user_password # pwd=user_password
# remote_host = '192.0.2.2' # remote_host = '192.0.2.2'
# #
# eds_query=''' # eds_query='''
# set bail on; # set bail on;
# set list on; # set list on;
@ -115,13 +119,13 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# end # end
# ^ # ^
# set term ;^ # set term ;^
# --select current_timestamp as " " from rdb$database; # --select current_timestamp as " " from rdb$database;
# select iif( waited_ms < max_wait_ms, # select iif( waited_ms < max_wait_ms,
# 'OK: second EDS was fast', # 'OK: second EDS was fast',
# 'FAILED: second EDS waited too long, ' || waited_ms || ' ms - more than max_wait_ms='||max_wait_ms # 'FAILED: second EDS waited too long, ' || waited_ms || ' ms - more than max_wait_ms='||max_wait_ms
# ) as result_msg # ) as result_msg
# from ( # from (
# select # select
# datediff( millisecond from cast( rdb$get_context('USER_SESSION','DTS_BEG') as timestamp) to current_timestamp ) as waited_ms # datediff( millisecond from cast( rdb$get_context('USER_SESSION','DTS_BEG') as timestamp) to current_timestamp ) as waited_ms
# ,500 as max_wait_ms # ,500 as max_wait_ms
# -- ^ # -- ^
@ -131,51 +135,51 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# from rdb$database # from rdb$database
# ); # );
# ''' # '''
# #
# f_eds_to_unavail_host_sql = open( os.path.join(context['temp_directory'],'tmp_unavail_host_5648.sql'), 'w') # f_eds_to_unavail_host_sql = open( os.path.join(context['temp_directory'],'tmp_unavail_host_5648.sql'), 'w')
# f_eds_to_unavail_host_sql.write( eds_query % locals() ) # f_eds_to_unavail_host_sql.write( eds_query % locals() )
# flush_and_close( f_eds_to_unavail_host_sql ) # flush_and_close( f_eds_to_unavail_host_sql )
# #
# remote_host = 'localhost' # remote_host = 'localhost'
# f_eds_to_local_host_sql = open( os.path.join(context['temp_directory'],'tmp_local_host_5648.sql'), 'w') # f_eds_to_local_host_sql = open( os.path.join(context['temp_directory'],'tmp_local_host_5648.sql'), 'w')
# f_eds_to_local_host_sql.write( eds_query % locals() ) # f_eds_to_local_host_sql.write( eds_query % locals() )
# flush_and_close( f_eds_to_local_host_sql ) # flush_and_close( f_eds_to_local_host_sql )
# #
# #
# #
# f_eds_to_unavail_host_log = open( os.path.join(context['temp_directory'],'tmp_unavail_host_5648.log'), 'w') # f_eds_to_unavail_host_log = open( os.path.join(context['temp_directory'],'tmp_unavail_host_5648.log'), 'w')
# f_eds_to_unavail_host_err = open( os.path.join(context['temp_directory'],'tmp_unavail_host_5648.err'), 'w') # f_eds_to_unavail_host_err = open( os.path.join(context['temp_directory'],'tmp_unavail_host_5648.err'), 'w')
# p_isql_to_unavail_host=subprocess.Popen( [context['isql_path'], dsn, "-n", "-i", f_eds_to_unavail_host_sql.name ], # p_isql_to_unavail_host=subprocess.Popen( [context['isql_path'], dsn, "-n", "-i", f_eds_to_unavail_host_sql.name ],
# stdout = f_eds_to_unavail_host_log, # stdout = f_eds_to_unavail_host_log,
# stderr = f_eds_to_unavail_host_err # stderr = f_eds_to_unavail_host_err
# ) # )
# #
# # Let ISQL be loaded: # # Let ISQL be loaded:
# time.sleep(1) # time.sleep(1)
# #
# f_eds_to_local_host_log = open( os.path.join(context['temp_directory'],'tmp_local_host_5648.log'), 'w') # f_eds_to_local_host_log = open( os.path.join(context['temp_directory'],'tmp_local_host_5648.log'), 'w')
# f_eds_to_local_host_err = open( os.path.join(context['temp_directory'],'tmp_local_host_5648.err'), 'w') # f_eds_to_local_host_err = open( os.path.join(context['temp_directory'],'tmp_local_host_5648.err'), 'w')
# subprocess.call( [context['isql_path'], dsn, "-n", "-i", f_eds_to_local_host_sql.name ], # subprocess.call( [context['isql_path'], dsn, "-n", "-i", f_eds_to_local_host_sql.name ],
# stdout = f_eds_to_local_host_log, # stdout = f_eds_to_local_host_log,
# stderr = f_eds_to_local_host_err # stderr = f_eds_to_local_host_err
# ) # )
# #
# #............ kill ISQL that is attampting to found 192.0.2.2 host and thus will hang for about 45 seconds ....... # #............ kill ISQL that is attampting to found 192.0.2.2 host and thus will hang for about 45 seconds .......
# #
# p_isql_to_unavail_host.terminate() # p_isql_to_unavail_host.terminate()
# flush_and_close( f_eds_to_unavail_host_log ) # flush_and_close( f_eds_to_unavail_host_log )
# flush_and_close( f_eds_to_unavail_host_err ) # flush_and_close( f_eds_to_unavail_host_err )
# #
# flush_and_close( f_eds_to_local_host_log ) # flush_and_close( f_eds_to_local_host_log )
# flush_and_close( f_eds_to_local_host_err ) # flush_and_close( f_eds_to_local_host_err )
# #
# # Make DB shutdown and bring online because some internal server process still can be active! # # Make DB shutdown and bring online because some internal server process still can be active!
# # If we skip this step than runtime error related to dropping test DB can occur! # # If we skip this step than runtime error related to dropping test DB can occur!
# ######################################### # #########################################
# #
# f_db_reset_log=open( os.path.join(context['temp_directory'],'tmp_reset_5648.log'), 'w') # f_db_reset_log=open( os.path.join(context['temp_directory'],'tmp_reset_5648.log'), 'w')
# f_db_reset_err=open( os.path.join(context['temp_directory'],'tmp_reset_5648.err'), 'w') # f_db_reset_err=open( os.path.join(context['temp_directory'],'tmp_reset_5648.err'), 'w')
# #
# f_db_reset_log.write('Point before DB shutdown.'+os.linesep) # f_db_reset_log.write('Point before DB shutdown.'+os.linesep)
# f_db_reset_log.seek(0,2) # f_db_reset_log.seek(0,2)
# subprocess.call( [context['fbsvcmgr_path'], "localhost:service_mgr", # subprocess.call( [context['fbsvcmgr_path'], "localhost:service_mgr",
@ -186,7 +190,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# stderr = f_db_reset_err # stderr = f_db_reset_err
# ) # )
# f_db_reset_log.write(os.linesep+'Point after DB shutdown.'+os.linesep) # f_db_reset_log.write(os.linesep+'Point after DB shutdown.'+os.linesep)
# #
# subprocess.call( [context['fbsvcmgr_path'], "localhost:service_mgr", # subprocess.call( [context['fbsvcmgr_path'], "localhost:service_mgr",
# "action_properties", "prp_db_online", # "action_properties", "prp_db_online",
# "dbname", db_file, # "dbname", db_file,
@ -194,62 +198,112 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# stdout = f_db_reset_log, # stdout = f_db_reset_log,
# stderr = f_db_reset_err # stderr = f_db_reset_err
# ) # )
# #
# f_db_reset_log.write(os.linesep+'Point after DB online.'+os.linesep) # f_db_reset_log.write(os.linesep+'Point after DB online.'+os.linesep)
# flush_and_close( f_db_reset_log ) # flush_and_close( f_db_reset_log )
# flush_and_close( f_db_reset_err ) # flush_and_close( f_db_reset_err )
# #
# with open( f_eds_to_local_host_log.name,'r') as f: # with open( f_eds_to_local_host_log.name,'r') as f:
# for line in f: # for line in f:
# if line.split(): # if line.split():
# print('STDLOG of 2nd EDS: ', ' '.join(line.split()) ) # print('STDLOG of 2nd EDS: ', ' '.join(line.split()) )
# #
# #
# with open( f_eds_to_local_host_err.name,'r') as f: # with open( f_eds_to_local_host_err.name,'r') as f:
# for line in f: # for line in f:
# if line.split(): # if line.split():
# print('UNEXPECTED STDERR in '+f_eds_to_local_host_err.name+': '+line) # print('UNEXPECTED STDERR in '+f_eds_to_local_host_err.name+': '+line)
# #
# with open( f_db_reset_log.name,'r') as f: # with open( f_db_reset_log.name,'r') as f:
# for line in f: # for line in f:
# if line.split(): # if line.split():
# print('STDLOG of DB reset: ', ' '.join(line.split()) ) # print('STDLOG of DB reset: ', ' '.join(line.split()) )
# #
# with open( f_db_reset_err.name,'r') as f: # with open( f_db_reset_err.name,'r') as f:
# for line in f: # for line in f:
# if line.split(): # if line.split():
# print('UNEXPECTED STDERR in '+f_db_reset_log.name+': '+line) # print('UNEXPECTED STDERR in '+f_db_reset_log.name+': '+line)
# #
# ############################### # ###############################
# # Cleanup. # # Cleanup.
# time.sleep(1) # time.sleep(1)
# #
# f_list=( # f_list=(
# f_eds_to_local_host_sql # f_eds_to_local_host_sql
# ,f_eds_to_local_host_log # ,f_eds_to_local_host_log
# ,f_eds_to_local_host_err # ,f_eds_to_local_host_err
# ,f_eds_to_unavail_host_sql # ,f_eds_to_unavail_host_sql
# ,f_eds_to_unavail_host_log # ,f_eds_to_unavail_host_log
# ,f_eds_to_unavail_host_err # ,f_eds_to_unavail_host_err
# ,f_db_reset_log # ,f_db_reset_log
# ,f_db_reset_err # ,f_db_reset_err
# ) # )
# cleanup( f_list ) # cleanup( f_list )
# #
# #
#--- #---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
expected_stdout_1 = """ expected_stdout_1 = """
STDLOG of 2nd EDS: RESULT_MSG OK: second EDS was fast RESULT_MSG OK: second EDS was fast
STDLOG of DB reset: Point before DB shutdown. """
STDLOG of DB reset: Point after DB shutdown.
STDLOG of DB reset: Point after DB online. eds_script_1 = temp_file('eds_script.sql')
"""
@pytest.mark.version('>=3.0.3') @pytest.mark.version('>=3.0.3')
@pytest.mark.xfail def test_1(act_1: Action, eds_script_1: Path):
def test_1(db_1): eds_sql = f"""
pytest.fail("Test not IMPLEMENTED") set bail on;
set list on;
--select current_timestamp as " " from rdb$database;
set term ^;
execute block as
declare c smallint;
declare remote_host varchar(50) = '%(remote_host)s'; -- never unreachable: 192.0.2.2
begin
rdb$set_context('USER_SESSION','DTS_BEG', cast('now' as timestamp) );
execute statement 'select 1 from rdb$database'
on external remote_host || ':' || rdb$get_context('SYSTEM', 'DB_NAME')
as user '{act_1.db.user}' password '{act_1.db.password}'
into c;
end
^
set term ;^
--select current_timestamp as " " from rdb$database;
select iif( waited_ms < max_wait_ms,
'OK: second EDS was fast',
'FAILED: second EDS waited too long, ' || waited_ms || ' ms - more than max_wait_ms='||max_wait_ms
) as result_msg
from (
select
datediff( millisecond from cast( rdb$get_context('USER_SESSION','DTS_BEG') as timestamp) to current_timestamp ) as waited_ms
,500 as max_wait_ms
-- ^
-- | #################
-- +-------------------------------- T H R E S H O L D
-- #################
from rdb$database
);
"""
#
remote_host = '192.0.2.2'
eds_script_1.write_text(eds_sql % locals())
p_unavail_host = subprocess.Popen([act_1.vars['isql'], '-n', '-i', str(eds_script_1),
'-user', act_1.db.user,
'-password', act_1.db.password, act_1.db.dsn],
stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
try:
time.sleep(2)
remote_host = 'localhost'
act_1.expected_stdout = expected_stdout_1
act_1.isql(switches=['-n'], input=eds_sql % locals())
finally:
p_unavail_host.terminate()
# Ensure that database is not busy
with act_1.connect_server() as srv:
srv.database.shutdown(database=act_1.db.db_path, mode=ShutdownMode.FULL,
method=ShutdownMethod.FORCED, timeout=0)
srv.database.bring_online(database=act_1.db.db_path)
# Check
assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@ -2,25 +2,27 @@
# #
# id: bugs.core_5659 # id: bugs.core_5659
# title: Bad PLAN generated for query on Firebird v3.0 # title: Bad PLAN generated for query on Firebird v3.0
# decription: # decription:
# Test is based on data from original database that was provided in the ticket by its author. # Test is based on data from original database that was provided in the ticket by its author.
# Lot of data from tables were removed in order to reduce DB size. # Lot of data from tables were removed in order to reduce DB size.
# Reproduced bug on 3.0.2.32708, 4.0.0.800 # Reproduced bug on 3.0.2.32708, 4.0.0.800
# Wrong plan was: # Wrong plan was:
# PLAN JOIN (A NATURAL, C INDEX (PK_EST_PRODUTO), B INDEX (PK_COM_PEDIDO)) # PLAN JOIN (A NATURAL, C INDEX (PK_EST_PRODUTO), B INDEX (PK_COM_PEDIDO))
# Elapsed time was more than 1.2 second. # Elapsed time was more than 1.2 second.
# #
# All fine on: # All fine on:
# 3.0.3.32838: OK, 5.922s. # 3.0.3.32838: OK, 5.922s.
# 4.0.0.801: OK, 6.547s. # 4.0.0.801: OK, 6.547s.
# #
# tracker_id: CORE-5659 # tracker_id: CORE-5659
# min_versions: ['3.0'] # min_versions: ['3.0']
# versions: 3.0 # versions: 3.0
# qmid: None # qmid: None
import pytest import pytest
from firebird.qa import db_factory, isql_act, Action import zipfile
from pathlib import Path
from firebird.qa import db_factory, python_act, Action, temp_file
# version: 3.0 # version: 3.0
# resources: None # resources: None
@ -33,35 +35,35 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1 # test_script_1
#--- #---
# #
# import os # import os
# import time # import time
# import zipfile # import zipfile
# import subprocess # import subprocess
# #
# os.environ["ISC_USER"] = user_name # os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password # os.environ["ISC_PASSWORD"] = user_password
# #
# db_conn.close() # db_conn.close()
# #
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# def flush_and_close( file_handle ): # def flush_and_close( file_handle ):
# # https://docs.python.org/2/library/os.html#os.fsync # # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f, # # If you're starting with a Python file object f,
# # first do f.flush(), and # # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. # # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os # global os
# #
# file_handle.flush() # file_handle.flush()
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: # if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
# # otherwise: "OSError: [Errno 9] Bad file descriptor"! # # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno()) # os.fsync(file_handle.fileno())
# file_handle.close() # file_handle.close()
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# def cleanup( f_names_list ): # def cleanup( f_names_list ):
# global os # global os
# for i in range(len( f_names_list )): # for i in range(len( f_names_list )):
@ -72,20 +74,20 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# else: # else:
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.') # print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
# del_name = None # del_name = None
# #
# if del_name and os.path.isfile( del_name ): # if del_name and os.path.isfile( del_name ):
# os.remove( del_name ) # os.remove( del_name )
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# zf = zipfile.ZipFile( os.path.join(context['files_location'],'core_5659.zip') ) # zf = zipfile.ZipFile( os.path.join(context['files_location'],'core_5659.zip') )
# tmpfbk = 'core_5659.fbk' # tmpfbk = 'core_5659.fbk'
# zf.extract( tmpfbk, '$(DATABASE_LOCATION)') # zf.extract( tmpfbk, '$(DATABASE_LOCATION)')
# zf.close() # zf.close()
# #
# tmpfbk='$(DATABASE_LOCATION)'+tmpfbk # tmpfbk='$(DATABASE_LOCATION)'+tmpfbk
# tmpfdb='$(DATABASE_LOCATION)'+'tmp_bad_plan_5659.fdb' # tmpfdb='$(DATABASE_LOCATION)'+'tmp_bad_plan_5659.fdb'
# #
# f_restore_log=open( os.path.join(context['temp_directory'],'tmp_bad_plan_5659.log'), 'w') # f_restore_log=open( os.path.join(context['temp_directory'],'tmp_bad_plan_5659.log'), 'w')
# subprocess.check_call([context['fbsvcmgr_path'],"localhost:service_mgr", # subprocess.check_call([context['fbsvcmgr_path'],"localhost:service_mgr",
# "action_restore", # "action_restore",
@ -93,18 +95,18 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# "dbname", tmpfdb, # "dbname", tmpfdb,
# "res_replace" # "res_replace"
# ], # ],
# stdout=f_restore_log, # stdout=f_restore_log,
# stderr=subprocess.STDOUT) # stderr=subprocess.STDOUT)
# flush_and_close( f_restore_log ) # flush_and_close( f_restore_log )
# #
# # should be empty: # # should be empty:
# ################## # ##################
# with open( f_restore_log.name,'r') as f: # with open( f_restore_log.name,'r') as f:
# for line in f: # for line in f:
# if line.split(): # if line.split():
# print('UNEXPECTED STDERR in '+f_restore_log.name+': '+line) # print('UNEXPECTED STDERR in '+f_restore_log.name+': '+line)
# #
# #
# sqltxt=''' # sqltxt='''
# set planonly; # set planonly;
# select # select
@ -119,25 +121,50 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# b.dth_pedido between ? and ? # b.dth_pedido between ? and ?
# ; # ;
# ''' # '''
# #
# runProgram('isql', [ 'localhost:'+tmpfdb,'-q' ], sqltxt) # runProgram('isql', [ 'localhost:'+tmpfdb,'-q' ], sqltxt)
# #
# #
# # Cleanup: # # Cleanup:
# ########## # ##########
# time.sleep(1) # time.sleep(1)
# cleanup( (f_restore_log, tmpfdb, tmpfbk) ) # cleanup( (f_restore_log, tmpfdb, tmpfbk) )
# #
#--- #---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
expected_stdout_1 = """ expected_stdout_1 = """
PLAN JOIN (B INDEX (COM_PEDIDO_IDX1), A INDEX (FK_COM_PEDIDO_ITEM_PEDIDO), C INDEX (PK_EST_PRODUTO)) PLAN JOIN (B INDEX (COM_PEDIDO_IDX1), A INDEX (FK_COM_PEDIDO_ITEM_PEDIDO), C INDEX (PK_EST_PRODUTO))
""" """
test_script_1 = """
set planonly;
select
a.id_pedido_item,
c.descricao
from com_pedido b
join com_pedido_item a on a.id_pedido = b.id_pedido
and ( not(a.id_produto =1 and a.id_pedido_item_pai is not null))
join est_produto c on c.id_produto = a.id_produto
where
-- b.dth_pedido between cast('10.12.16 05:00:00' as timestamp) and cast('10.12.16 20:00:00' as timestamp)
b.dth_pedido between ? and ? ;
"""
fbk_file_1 = temp_file('core5637-security3.fbk')
fdb_file_1 = temp_file('bad_plan_5659.fdb')
@pytest.mark.version('>=3.0') @pytest.mark.version('>=3.0')
@pytest.mark.xfail def test_1(act_1: Action, fbk_file_1: Path, fdb_file_1: Path):
def test_1(db_1): zipped_fbk_file = zipfile.Path(act_1.vars['files'] / 'core_5659.zip',
pytest.fail("Test not IMPLEMENTED") at='core_5659.fbk')
fbk_file_1.write_bytes(zipped_fbk_file.read_bytes())
#
with act_1.connect_server() as srv:
srv.database.restore(backup=fbk_file_1, database=fdb_file_1)
srv.wait()
#
act_1.expected_stdout = expected_stdout_1
act_1.isql(switches=['-q', f'localhost:{fdb_file_1}'], input=test_script_1, connect_db=False)
assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@ -2,40 +2,40 @@
# #
# id: bugs.core_5673 # id: bugs.core_5673
# title: Unique constraint not working in encrypted database on first command # title: Unique constraint not working in encrypted database on first command
# decription: # decription:
# #
# We create new database ('tmp_core_5673.fdb') and try to encrypt it using IBSurgeon Demo Encryption package # We create new database ('tmp_core_5673.fdb') and try to encrypt it using IBSurgeon Demo Encryption package
# ( https://ib-aid.com/download-demo-firebird-encryption-plugin/ ; https://ib-aid.com/download/crypt/CryptTest.zip ) # ( https://ib-aid.com/download-demo-firebird-encryption-plugin/ ; https://ib-aid.com/download/crypt/CryptTest.zip )
# License file plugins\\dbcrypt.conf with unlimited expiration was provided by IBSurgeon to Firebird Foundation (FF). # License file plugins\\dbcrypt.conf with unlimited expiration was provided by IBSurgeon to Firebird Foundation (FF).
# This file was preliminary stored in FF Test machine. # This file was preliminary stored in FF Test machine.
# Test assumes that this file and all neccessary libraries already were stored into FB_HOME and %FB_HOME%\\plugins. # Test assumes that this file and all neccessary libraries already were stored into FB_HOME and %FB_HOME%\\plugins.
# #
# We create table with UNIQUE constraint, add some data to it and try to encrypt database using 'alter database encrypt with <plugin_name> ...' # We create table with UNIQUE constraint, add some data to it and try to encrypt database using 'alter database encrypt with <plugin_name> ...'
# command (where <plugin_name> = dbcrypt - name of .dll in FB_HOME\\plugins\\ folder that implements encryption). # command (where <plugin_name> = dbcrypt - name of .dll in FB_HOME\\plugins\\ folder that implements encryption).
# Then we allow engine to complete this job - take delay about 1..2 seconds BEFORE detach from database. # Then we allow engine to complete this job - take delay about 1..2 seconds BEFORE detach from database.
# #
# After this we make TWO attempts to insert duplicates and catch exceptions for each of them and print exception details. # After this we make TWO attempts to insert duplicates and catch exceptions for each of them and print exception details.
# Expected result: TWO exception must occur here. # Expected result: TWO exception must occur here.
# #
# ::: NB :::: # ::: NB ::::
# Could not check reproducing of bug on FB 3.0.2 because there is no encryption plugin for this (too old) version. # Could not check reproducing of bug on FB 3.0.2 because there is no encryption plugin for this (too old) version.
# Decided only to ensure that exception will be catched on recent FB version for each attempt to insert duplicate. # Decided only to ensure that exception will be catched on recent FB version for each attempt to insert duplicate.
# Checked on: # Checked on:
# 4.0.0.1524: OK, 4.056s ; 4.0.0.1421: OK, 6.160s. # 4.0.0.1524: OK, 4.056s ; 4.0.0.1421: OK, 6.160s.
# 3.0.5.33139: OK, 2.895s ; 3.0.5.33118: OK, 2.837s. # 3.0.5.33139: OK, 2.895s ; 3.0.5.33118: OK, 2.837s.
# #
# 15.04.2021. Adapted for run both on Windows and Linux. Checked on: # 15.04.2021. Adapted for run both on Windows and Linux. Checked on:
# Windows: 4.0.0.2416 # Windows: 4.0.0.2416
# Linux: 4.0.0.2416 # Linux: 4.0.0.2416
# #
# #
# tracker_id: CORE-5673 # tracker_id: CORE-5673
# min_versions: ['3.0.3'] # min_versions: ['3.0.3']
# versions: 3.0.3 # versions: 3.0.3
# qmid: None # qmid: None
import pytest import pytest
from firebird.qa import db_factory, isql_act, Action from firebird.qa import db_factory, python_act, Action
# version: 3.0.3 # version: 3.0.3
# resources: None # resources: None
@ -48,18 +48,18 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1 # test_script_1
#--- #---
# #
# import os # import os
# import time # import time
# import subprocess # import subprocess
# import re # import re
# import fdb # import fdb
# #
# os.environ["ISC_USER"] = user_name # os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password # os.environ["ISC_PASSWORD"] = user_password
# engine = db_conn.engine_version # engine = db_conn.engine_version
# db_conn.close() # db_conn.close()
# #
# # 14.04.2021. # # 14.04.2021.
# # Name of encryption plugin depends on OS: # # Name of encryption plugin depends on OS:
# # * for Windows we (currently) use plugin by IBSurgeon, its name is 'dbcrypt'; # # * for Windows we (currently) use plugin by IBSurgeon, its name is 'dbcrypt';
@ -69,16 +69,16 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# # # #
# PLUGIN_NAME = 'dbcrypt' if os.name == 'nt' else ( '"fbSampleDbCrypt"' if engine >= 4.0 else '"DbCrypt_example"') # PLUGIN_NAME = 'dbcrypt' if os.name == 'nt' else ( '"fbSampleDbCrypt"' if engine >= 4.0 else '"DbCrypt_example"')
# KHOLDER_NAME = 'KeyHolder' if os.name == 'nt' else "fbSampleKeyHolder" # KHOLDER_NAME = 'KeyHolder' if os.name == 'nt' else "fbSampleKeyHolder"
# #
# #
# con = fdb.connect( dsn = dsn ) # con = fdb.connect( dsn = dsn )
# con.execute_immediate( 'recreate table test(x int, constraint test_x_unq unique(x))' ) # con.execute_immediate( 'recreate table test(x int, constraint test_x_unq unique(x))' )
# con.commit() # con.commit()
# #
# cur = con.cursor() # cur = con.cursor()
# cur.execute( 'insert into test(x) select row_number()over() from rdb$types rows 10' ) # cur.execute( 'insert into test(x) select row_number()over() from rdb$types rows 10' )
# con.commit() # con.commit()
# #
# ############################################## # ##############################################
# # WARNING! Do NOT use 'connection_obj.execute_immediate()' for ALTER DATABASE ENCRYPT... command! # # WARNING! Do NOT use 'connection_obj.execute_immediate()' for ALTER DATABASE ENCRYPT... command!
# # There is bug in FB driver which leads this command to fail with 'token unknown' message # # There is bug in FB driver which leads this command to fail with 'token unknown' message
@ -89,30 +89,31 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# ############################################## # ##############################################
# cur.execute('alter database encrypt with %(PLUGIN_NAME)s key Red' % locals()) # cur.execute('alter database encrypt with %(PLUGIN_NAME)s key Red' % locals())
# con.commit() # con.commit()
# #
# time.sleep(2) # time.sleep(2)
# # ^ # # ^
# # +-------- !! ALLOW BACKGROUND ENCRYPTION PROCESS TO COMPLETE ITS JOB !! # # +-------- !! ALLOW BACKGROUND ENCRYPTION PROCESS TO COMPLETE ITS JOB !!
# #
# try: # try:
# cur.execute( 'insert into test(x) values(1)' ) # cur.execute( 'insert into test(x) values(1)' )
# except Exception as e: # except Exception as e:
# for x in e.args: # for x in e.args:
# print( x ) # print( x )
# #
# try: # try:
# cur.execute( 'insert into test(x) values(2)' ) # cur.execute( 'insert into test(x) values(2)' )
# except Exception as e: # except Exception as e:
# for x in e.args: # for x in e.args:
# #print( x.replace(chr(92),"/") if type(x)=='str' else x ) # #print( x.replace(chr(92),"/") if type(x)=='str' else x )
# print( x ) # print( x )
# #
# #
# cur.close() # cur.close()
# con.close() # con.close()
# #
#--- #---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
expected_stdout_1 = """ expected_stdout_1 = """
Error while executing SQL statement: Error while executing SQL statement:
@ -128,11 +129,8 @@ expected_stdout_1 = """
- Problematic key value is ("X" = 2) - Problematic key value is ("X" = 2)
-803 -803
335544665 335544665
""" """
@pytest.mark.version('>=3.0.3') @pytest.mark.version('>=3.0.3')
@pytest.mark.xfail
def test_1(db_1): def test_1(db_1):
pytest.fail("Test not IMPLEMENTED") pytest.skip("Requires encryption plugin")

View File

@ -2,34 +2,34 @@
# #
# id: bugs.core_5675 # id: bugs.core_5675
# title: isc_vax_integer() and isc_portable_integer() work wrongly with short negative numbers # title: isc_vax_integer() and isc_portable_integer() work wrongly with short negative numbers
# decription: # decription:
# Confirmed bug on4.0.0.800. # Confirmed bug on4.0.0.800.
# Works fine on: # Works fine on:
# FB25SC, build 2.5.8.27089: OK, 0.422s. # FB25SC, build 2.5.8.27089: OK, 0.422s.
# FB30SS, build 3.0.3.32876: OK, 1.484s. # FB30SS, build 3.0.3.32876: OK, 1.484s.
# FB40SS, build 4.0.0.852: OK, 1.156s. # FB40SS, build 4.0.0.852: OK, 1.156s.
# #
# NB. It seems that some bug exists in function _renderSizedIntegerForSPB from fdb package (services.py): # NB. It seems that some bug exists in function _renderSizedIntegerForSPB from fdb package (services.py):
# iRaw = struct.pack(myformat, i) # iRaw = struct.pack(myformat, i)
# iConv = api.isc_vax_integer(iRaw, len(iRaw)) # iConv = api.isc_vax_integer(iRaw, len(iRaw))
# This function cuts off high 4 bytes when we pass to it bugint values greater than 2^31, i.e.: # This function cuts off high 4 bytes when we pass to it bugint values greater than 2^31, i.e.:
# 2147483648L ==> reversed = b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00' # 2147483648L ==> reversed = b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00'
# -2147483649L ==> reversed = b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00' # -2147483649L ==> reversed = b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00'
# #
# For this reason it was decided currently to limit scope by specifying numbers with abs() less than 2^31 - untill fdb driver will be fixed. # For this reason it was decided currently to limit scope by specifying numbers with abs() less than 2^31 - untill fdb driver will be fixed.
# See letter from dimitr 08-jan-2018 20:56 # See letter from dimitr 08-jan-2018 20:56
# #
# 25.08.2020: adjusted name of function from services that must work here: # 25.08.2020: adjusted name of function from services that must work here:
# its name is "_render_sized_integer_for_spb" rather than old "_renderSizedIntegerForSPB". # its name is "_render_sized_integer_for_spb" rather than old "_renderSizedIntegerForSPB".
# Checked on 4.0.0.2173; 3.0.7.33357; 2.5.9.27152. # Checked on 4.0.0.2173; 3.0.7.33357; 2.5.9.27152.
# #
# tracker_id: CORE-5675 # tracker_id: CORE-5675
# min_versions: ['2.5.8'] # min_versions: ['2.5.8']
# versions: 2.5.8 # versions: 2.5.8
# qmid: None # qmid: None
import pytest import pytest
from firebird.qa import db_factory, isql_act, Action from firebird.qa import db_factory, python_act, Action
# version: 2.5.8 # version: 2.5.8
# resources: None # resources: None
@ -42,7 +42,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1 # test_script_1
#--- #---
# #
# from __future__ import print_function # from __future__ import print_function
# import os # import os
# import binascii # import binascii
@ -52,14 +52,14 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# db_conn.close() # db_conn.close()
# con = services.connect(host='localhost', user='sysdba', password='masterkey') # con = services.connect(host='localhost', user='sysdba', password='masterkey')
# #print( con.get_server_version() ) # #print( con.get_server_version() )
# #
# dec_values=( 1, -1, 127, -128, 128, -256, 255, -32768, 32767, 32768, -65536, 65535, 65536 ) #, 32767, -32768, 32768, -32769, 2147483647, -2147483648, 2147483648, -2147483649, 3000000000, 3000000000000, 9223372036854775807 ) # dec_values=( 1, -1, 127, -128, 128, -256, 255, -32768, 32767, 32768, -65536, 65535, 65536 ) #, 32767, -32768, 32768, -32769, 2147483647, -2147483648, 2147483648, -2147483649, 3000000000, 3000000000000, 9223372036854775807 )
# num_ctypes=( 'b', 'b', 'b', 'b', 'b', 'B', 'B', 'h', 'h', 'h', 'H', 'H', 'H' ) #, 'i', 'i', 'i', 'i', 'i', 'i', 'q', 'q', 'q', 'q', 'q' ) # num_ctypes=( 'b', 'b', 'b', 'b', 'b', 'B', 'B', 'h', 'h', 'h', 'H', 'H', 'H' ) #, 'i', 'i', 'i', 'i', 'i', 'i', 'q', 'q', 'q', 'q', 'q' )
# #
# #dec_values=( 1, -1, 127, -128, 128, -256, 255, -32768, 32767, 32768, -65536, 65535, 65536 , 32767, -32768, 32768, -32769, 2147483647, -2147483648, 2147483648, -2147483649, 3000000000, 3000000000000, 9223372036854775807 ) # #dec_values=( 1, -1, 127, -128, 128, -256, 255, -32768, 32767, 32768, -65536, 65535, 65536 , 32767, -32768, 32768, -32769, 2147483647, -2147483648, 2147483648, -2147483649, 3000000000, 3000000000000, 9223372036854775807 )
# #num_ctypes=( 'b', 'b', 'b', 'b', 'b', 'B', 'B', 'h', 'h', 'h', 'H', 'H', 'H' , 'i', 'i', 'i', 'i', 'i', 'i', 'q', 'q', 'q', 'q', 'q') # #num_ctypes=( 'b', 'b', 'b', 'b', 'b', 'B', 'B', 'h', 'h', 'h', 'H', 'H', 'H' , 'i', 'i', 'i', 'i', 'i', 'i', 'q', 'q', 'q', 'q', 'q')
# #
# #
# for i in range(0, len(dec_values)): # for i in range(0, len(dec_values)):
# num = dec_values[i] # num = dec_values[i]
# fmt = num_ctypes[i] # fmt = num_ctypes[i]
@ -72,11 +72,12 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# rev = e[0] # rev = e[0]
# finally: # finally:
# print( ' '.join( (msg + ['; result: ',rev,]) ) ) # print( ' '.join( (msg + ['; result: ',rev,]) ) )
# #
# con.close() # con.close()
# #
#--- #---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
expected_stdout_1 = """ expected_stdout_1 = """
Try revert bytes in decimal value: 1 using struct format: "b" ; result: 01 Try revert bytes in decimal value: 1 using struct format: "b" ; result: 01
@ -92,11 +93,8 @@ expected_stdout_1 = """
Try revert bytes in decimal value: -65536 using struct format: "H" ; result: ushort format requires 0 <= number <= USHRT_MAX Try revert bytes in decimal value: -65536 using struct format: "H" ; result: ushort format requires 0 <= number <= USHRT_MAX
Try revert bytes in decimal value: 65535 using struct format: "H" ; result: ushort format requires 0 <= number <= USHRT_MAX Try revert bytes in decimal value: 65535 using struct format: "H" ; result: ushort format requires 0 <= number <= USHRT_MAX
Try revert bytes in decimal value: 65536 using struct format: "H" ; result: ushort format requires 0 <= number <= USHRT_MAX Try revert bytes in decimal value: 65536 using struct format: "H" ; result: ushort format requires 0 <= number <= USHRT_MAX
""" """
@pytest.mark.version('>=2.5.8') @pytest.mark.version('>=2.5.8')
@pytest.mark.xfail def test_1(act_1: Action):
def test_1(db_1): pytest.skip("Requires function not provided by driver")
pytest.fail("Test not IMPLEMENTED")

View File

@ -1,20 +1,20 @@
#coding:utf-8 #coding:utf-8
# #
# id: bugs.core_5685 # id: bugs.core_5685
# title: Sometime it is impossible to cancel\\kill connection executing external query # title: Sometime it is impossible to cancel/kill connection executing external query
# decription: # decription:
# Problem did appear when host "A" established connection to host "B" but could not get completed reply from this "B". # Problem did appear when host "A" established connection to host "B" but could not get completed reply from this "B".
# This can be emulated by following steps: # This can be emulated by following steps:
# 1. We establich new remote connection to the same database using EDS mechanism and supply completely new ROLE to force new attachment be created; # 1. We establich new remote connection to the same database using EDS mechanism and supply completely new ROLE to force new attachment be created;
# 2. Within this EDS we do query to selectable procedure (with name 'sp_unreachable') which surely will not produce any result. # 2. Within this EDS we do query to selectable procedure (with name 'sp_unreachable') which surely will not produce any result.
# Bogon IP '192.0.2.2' is used in order to make this SP hang for sufficient time (on Windows it is about 20, on POSIX - about 44 seconds). # Bogon IP '192.0.2.2' is used in order to make this SP hang for sufficient time (on Windows it is about 20, on POSIX - about 44 seconds).
# Steps 1 and 2 are implemented by asynchronous call of ISQL: we must have ability to kill its process after. # Steps 1 and 2 are implemented by asynchronous call of ISQL: we must have ability to kill its process after.
# When this 'hanging ISQL' is launched, we wait 1..2 seconds and run one more ISQL, which has mission to KILL all attachments except his own. # When this 'hanging ISQL' is launched, we wait 1..2 seconds and run one more ISQL, which has mission to KILL all attachments except his own.
# This ISQL session is named 'killer', and it writes result of actions to log. # This ISQL session is named 'killer', and it writes result of actions to log.
# This "killer-ISQL" does TWO iterations with the same code which looks like 'select ... from mon$attachments' and 'delete from mon$attachments'. # This "killer-ISQL" does TWO iterations with the same code which looks like 'select ... from mon$attachments' and 'delete from mon$attachments'.
# First iteration must return data of 'hanging ISQL' and also this session must be immediately killed. # First iteration must return data of 'hanging ISQL' and also this session must be immediately killed.
# Second iteration must NOT return any data - and this is main check in this test. # Second iteration must NOT return any data - and this is main check in this test.
# #
# For builds which had bug (before 25.12.2017) one may see that second iteration STILL RETURNS the same data as first one: # For builds which had bug (before 25.12.2017) one may see that second iteration STILL RETURNS the same data as first one:
# ==== # ====
# ITERATION_NO 1 # ITERATION_NO 1
@ -24,7 +24,7 @@
# HANGING_STATEMENT_BLOB_ID 0:3 # HANGING_STATEMENT_BLOB_ID 0:3
# select * from sp_get_data # select * from sp_get_data
# Records affected: 1 # Records affected: 1
# #
# ITERATION_NO 2 # ITERATION_NO 2
# HANGING_ATTACH_CONNECTION 1 # HANGING_ATTACH_CONNECTION 1
# HANGING_ATTACH_PROTOCOL TCP # HANGING_ATTACH_PROTOCOL TCP
@ -34,96 +34,102 @@
# Records affected: 1 # Records affected: 1
# ==== # ====
# (expected: all fields in ITER #2 must be NULL) # (expected: all fields in ITER #2 must be NULL)
# #
# Confirmed bug on 3.0.2.32703 (check file "tmp_kill_5685.log" in %FBT_REPO% mp folder with result that will get "killer-ISQL") # Confirmed bug on 3.0.2.32703 (check file "tmp_kill_5685.log" in %FBT_REPO% mp folder with result that will get "killer-ISQL")
# #
# NOTE-1: console output in 4.0 slightly differs from in 3.0: a couple of messages ("-Killed by database administrator" and "-send_packet/send") # NOTE-1: console output in 4.0 slightly differs from in 3.0: a couple of messages ("-Killed by database administrator" and "-send_packet/send")
# was added to STDERR. For this reason test code was splitted on two sections, 3.0 and 4.0. # was added to STDERR. For this reason test code was splitted on two sections, 3.0 and 4.0.
# NOTE-2: unstable results detected for 2.5.9 SuperClassic. Currently test contains min_version = 3.0.3 rather than 2.5.9 # NOTE-2: unstable results detected for 2.5.9 SuperClassic. Currently test contains min_version = 3.0.3 rather than 2.5.9
# #
# 06.03.2021. # 06.03.2021.
# Removed separate section for 3.x because code for 4.x was made unified. # Removed separate section for 3.x because code for 4.x was made unified.
# Checked on: # Checked on:
# * Windows: 4.0.0.2377 (SS/CS), 3.0.8.33423 (SS/CS) # * Windows: 4.0.0.2377 (SS/CS), 3.0.8.33423 (SS/CS)
# * Linux: 4.0.0.2379, 3.0.8.33415 # * Linux: 4.0.0.2379, 3.0.8.33415
# #
# #
# tracker_id: CORE-5685 # tracker_id: CORE-5685
# min_versions: ['3.0.3'] # min_versions: ['3.0.3']
# versions: 3.0.3 # versions: 3.0.3
# qmid: None # qmid: None
import pytest import pytest
from firebird.qa import db_factory, isql_act, Action import re
import subprocess
import time
from pathlib import Path
from firebird.qa import db_factory, python_act, Action, temp_file
from firebird.driver import ShutdownMode, ShutdownMethod
# version: 3.0.3 # version: 3.0.3
# resources: None # resources: None
substitutions_1 = [('.*After line.*', ''), ('.*Data source.*', '.*Data source'), ('.*HANGING_STATEMENT_BLOB_ID.*', '')] substitutions_1 = [('.*After line.*', ''), ('.*Data source.*', '.*Data source'),
('.*HANGING_STATEMENT_BLOB_ID.*', '')]
init_script_1 = """ init_script_1 = """
create sequence g; create sequence g;
commit; commit;
set term ^; set term ^;
create or alter procedure sp_unreachable returns( unreachable_address varchar(50) ) as create or alter procedure sp_unreachable returns( unreachable_address varchar(50) ) as
begin begin
for for
execute statement ('select mon$remote_address from mon$attachments a where a.mon$attachment_id = current_connection') execute statement ('select mon$remote_address from mon$attachments a where a.mon$attachment_id = current_connection')
on external '192.0.2.2:' || rdb$get_context('SYSTEM', 'DB_NAME') on external '192.0.2.2:' || rdb$get_context('SYSTEM', 'DB_NAME')
as user 'SYSDBA' password 'masterkey' role left(replace( uuid_to_char(gen_uuid()), '-', ''), 31) as user 'SYSDBA' password 'masterkey' role left(replace( uuid_to_char(gen_uuid()), '-', ''), 31)
into unreachable_address into unreachable_address
do do
suspend; suspend;
end end
^ ^
create or alter procedure sp_get_data returns( unreachable_address varchar(50) ) as create or alter procedure sp_get_data returns( unreachable_address varchar(50) ) as
begin begin
for for
execute statement ('select u.unreachable_address from sp_unreachable as u') execute statement ('select u.unreachable_address from sp_unreachable as u')
on external 'localhost:' || rdb$get_context('SYSTEM', 'DB_NAME') on external 'localhost:' || rdb$get_context('SYSTEM', 'DB_NAME')
as user 'SYSDBA' password 'masterkey' role left(replace( uuid_to_char(gen_uuid()), '-', ''), 31) as user 'SYSDBA' password 'masterkey' role left(replace( uuid_to_char(gen_uuid()), '-', ''), 31)
into unreachable_address into unreachable_address
do do
suspend; suspend;
end end
^ ^
set term ;^ set term ;^
commit; commit;
""" """
db_1 = db_factory(sql_dialect=3, init=init_script_1) db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1 # test_script_1
#--- #---
# #
# import os # import os
# import subprocess # import subprocess
# import re # import re
# import time # import time
# #
# os.environ["ISC_USER"] = user_name # os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password # os.environ["ISC_PASSWORD"] = user_password
# db_conn.close() # db_conn.close()
# #
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# def flush_and_close( file_handle ): # def flush_and_close( file_handle ):
# # https://docs.python.org/2/library/os.html#os.fsync # # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f, # # If you're starting with a Python file object f,
# # first do f.flush(), and # # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. # # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os # global os
# #
# file_handle.flush() # file_handle.flush()
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: # if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
# # otherwise: "OSError: [Errno 9] Bad file descriptor"! # # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno()) # os.fsync(file_handle.fileno())
# file_handle.close() # file_handle.close()
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# def cleanup( f_names_list ): # def cleanup( f_names_list ):
# global os # global os
# for f in f_names_list: # for f in f_names_list:
@ -134,22 +140,22 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# else: # else:
# print('Unrecognized type of element:', f, ' - can not be treated as file.') # print('Unrecognized type of element:', f, ' - can not be treated as file.')
# del_name = None # del_name = None
# #
# if del_name and os.path.isfile( del_name ): # if del_name and os.path.isfile( del_name ):
# os.remove( del_name ) # os.remove( del_name )
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# f_hang_sql = open( os.path.join(context['temp_directory'],'tmp_hang_5685.sql'), 'w') # f_hang_sql = open( os.path.join(context['temp_directory'],'tmp_hang_5685.sql'), 'w')
# f_hang_sql.write( 'set list on; set count on; select * from sp_get_data;' ) # f_hang_sql.write( 'set list on; set count on; select * from sp_get_data;' )
# flush_and_close( f_hang_sql ) # flush_and_close( f_hang_sql )
# #
# sql_kill=''' # sql_kill='''
# set list on; # set list on;
# set blob all; # set blob all;
# select gen_id(g,1) as ITERATION_NO from rdb$database; # select gen_id(g,1) as ITERATION_NO from rdb$database;
# commit; # commit;
# #
# select # select
# sign(a.mon$attachment_id) as hanging_attach_connection # sign(a.mon$attachment_id) as hanging_attach_connection
# ,left(a.mon$remote_protocol,3) as hanging_attach_protocol # ,left(a.mon$remote_protocol,3) as hanging_attach_protocol
@ -158,93 +164,93 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# from rdb$database d # from rdb$database d
# left join mon$attachments a on a.mon$remote_process containing 'isql' # left join mon$attachments a on a.mon$remote_process containing 'isql'
# -- do NOT use, field not existed in 2.5.x: and a.mon$system_flag is distinct from 1 # -- do NOT use, field not existed in 2.5.x: and a.mon$system_flag is distinct from 1
# and a.mon$attachment_id is distinct from current_connection # and a.mon$attachment_id is distinct from current_connection
# left join mon$statements s on # left join mon$statements s on
# a.mon$attachment_id = s.mon$attachment_id # a.mon$attachment_id = s.mon$attachment_id
# and s.mon$state = 1 -- 4.0 Classic: 'SELECT RDB$MAP_USING, RDB$MAP_PLUGIN, ... FROM RDB$AUTH_MAPPING', mon$state = 0 # and s.mon$state = 1 -- 4.0 Classic: 'SELECT RDB$MAP_USING, RDB$MAP_PLUGIN, ... FROM RDB$AUTH_MAPPING', mon$state = 0
# ; # ;
# #
# set count on; # set count on;
# delete from mon$attachments a # delete from mon$attachments a
# where # where
# a.mon$attachment_id <> current_connection # a.mon$attachment_id <> current_connection
# and a.mon$remote_process containing 'isql' # and a.mon$remote_process containing 'isql'
# ; # ;
# commit; # commit;
# ''' # '''
# #
# f_kill_sql = open( os.path.join(context['temp_directory'],'tmp_kill_5685.sql'), 'w') # f_kill_sql = open( os.path.join(context['temp_directory'],'tmp_kill_5685.sql'), 'w')
# f_kill_sql.write( sql_kill ) # f_kill_sql.write( sql_kill )
# flush_and_close( f_kill_sql ) # flush_and_close( f_kill_sql )
# #
# f_hang_log = open( os.path.join(context['temp_directory'],'tmp_hang_5685.log'), 'w') # f_hang_log = open( os.path.join(context['temp_directory'],'tmp_hang_5685.log'), 'w')
# f_hang_err = open( os.path.join(context['temp_directory'],'tmp_hang_5685.err'), 'w') # f_hang_err = open( os.path.join(context['temp_directory'],'tmp_hang_5685.err'), 'w')
# #
# #
# # WARNING: we launch ISQL here in async mode in order to have ability to kill its process if it will hang! # # WARNING: we launch ISQL here in async mode in order to have ability to kill its process if it will hang!
# ############################################ # ############################################
# p_hang_pid=subprocess.Popen( [ context['isql_path'], dsn, "-i", f_hang_sql.name ], # p_hang_pid=subprocess.Popen( [ context['isql_path'], dsn, "-i", f_hang_sql.name ],
# stdout = f_hang_log, # stdout = f_hang_log,
# stderr = f_hang_err # stderr = f_hang_err
# ) # )
# #
# time.sleep(1) # time.sleep(1)
# #
# #
# f_kill_log = open( os.path.join(context['temp_directory'],'tmp_kill_5685.log'), 'w') # f_kill_log = open( os.path.join(context['temp_directory'],'tmp_kill_5685.log'), 'w')
# f_kill_err = open( os.path.join(context['temp_directory'],'tmp_kill_5685.err'), 'w') # f_kill_err = open( os.path.join(context['temp_directory'],'tmp_kill_5685.err'), 'w')
# #
# for i in (1,2): # for i in (1,2):
# subprocess.call( [ context['isql_path'], dsn, "-i", f_kill_sql.name ], # subprocess.call( [ context['isql_path'], dsn, "-i", f_kill_sql.name ],
# stdout = f_kill_log, # stdout = f_kill_log,
# stderr = f_kill_err # stderr = f_kill_err
# ) # )
# #
# flush_and_close( f_kill_log ) # flush_and_close( f_kill_log )
# flush_and_close( f_kill_err ) # flush_and_close( f_kill_err )
# #
# ############################################## # ##############################################
# p_hang_pid.terminate() # p_hang_pid.terminate()
# flush_and_close( f_hang_log ) # flush_and_close( f_hang_log )
# flush_and_close( f_hang_err ) # flush_and_close( f_hang_err )
# #
# time.sleep(2) # time.sleep(2)
# #
# f_shut_log = open( os.path.join(context['temp_directory'],'tmp_shut_5685.log'), 'w') # f_shut_log = open( os.path.join(context['temp_directory'],'tmp_shut_5685.log'), 'w')
# f_shut_err = open( os.path.join(context['temp_directory'],'tmp_shut_5685.err'), 'w') # f_shut_err = open( os.path.join(context['temp_directory'],'tmp_shut_5685.err'), 'w')
# #
# subprocess.call( [ context['gfix_path'], dsn, "-shut", "full", "-force", "0" ], # subprocess.call( [ context['gfix_path'], dsn, "-shut", "full", "-force", "0" ],
# stdout = f_shut_log, # stdout = f_shut_log,
# stderr = f_shut_err # stderr = f_shut_err
# ) # )
# #
# subprocess.call( [ context['gstat_path'], dsn, "-h"], # subprocess.call( [ context['gstat_path'], dsn, "-h"],
# stdout = f_shut_log, # stdout = f_shut_log,
# stderr = f_shut_err # stderr = f_shut_err
# ) # )
# #
# subprocess.call( [ context['gfix_path'], dsn, "-online" ], # subprocess.call( [ context['gfix_path'], dsn, "-online" ],
# stdout = f_shut_log, # stdout = f_shut_log,
# stderr = f_shut_err # stderr = f_shut_err
# ) # )
# #
# subprocess.call( [ context['gstat_path'], dsn, "-h"], # subprocess.call( [ context['gstat_path'], dsn, "-h"],
# stdout = f_shut_log, # stdout = f_shut_log,
# stderr = f_shut_err # stderr = f_shut_err
# ) # )
# #
# flush_and_close( f_shut_log ) # flush_and_close( f_shut_log )
# flush_and_close( f_shut_err ) # flush_and_close( f_shut_err )
# #
# # Check results: # # Check results:
# ################ # ################
# #
# with open( f_hang_log.name,'r') as f: # with open( f_hang_log.name,'r') as f:
# for line in f: # for line in f:
# if line.split(): # if line.split():
# print('HANGED ATTACH, STDOUT: ', ' '.join(line.split()) ) # print('HANGED ATTACH, STDOUT: ', ' '.join(line.split()) )
# #
# #
# # 01-mar-2021: hanged ISQL can issue *different* messages to STDERR starting from line #4: # # 01-mar-2021: hanged ISQL can issue *different* messages to STDERR starting from line #4:
# # case-1: # # case-1:
# # ------- # # -------
@ -254,7 +260,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# # 4 Statement failed, SQLSTATE = 08006 <<< # # 4 Statement failed, SQLSTATE = 08006 <<<
# # 5 Error writing data to the connection. <<< # # 5 Error writing data to the connection. <<<
# # 6 -send_packet/send <<< # # 6 -send_packet/send <<<
# #
# # case-2: # # case-2:
# # 1 Statement failed, SQLSTATE = 08003 # # 1 Statement failed, SQLSTATE = 08003
# # 2 connection shutdown # # 2 connection shutdown
@ -262,7 +268,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# # 4 Statement failed, SQLSTATE = 08003 <<< # # 4 Statement failed, SQLSTATE = 08003 <<<
# # 5 connection shutdown <<< # # 5 connection shutdown <<<
# # 6 -Killed by database administrator. <<< # # 6 -Killed by database administrator. <<<
# #
# # We can ignore messages like '-send_packet/send' and '-Killed by database administrator.', # # We can ignore messages like '-send_packet/send' and '-Killed by database administrator.',
# # but we have to take in account first two ('SQLSTATE = ...' and 'Error ... connection' / 'connection shutdown') # # but we have to take in account first two ('SQLSTATE = ...' and 'Error ... connection' / 'connection shutdown')
# # because they exactly say that session was terminated. # # because they exactly say that session was terminated.
@ -272,13 +278,13 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# # 2 <pattern about closed connection> // Error writing data to... / Error reading data from... / connection shutdown # # 2 <pattern about closed connection> // Error writing data to... / Error reading data from... / connection shutdown
# # 3 <pattern about failed statement> // SQLSTATE = 08006 or 08003 # # 3 <pattern about failed statement> // SQLSTATE = 08006 or 08003
# # 4 <pattern about closed connection> // Error writing data to... / Error reading data from... / connection shutdown # # 4 <pattern about closed connection> // Error writing data to... / Error reading data from... / connection shutdown
# #
# # Use pattern matching for this purpose: # # Use pattern matching for this purpose:
# # # #
# pattern_for_failed_statement = re.compile('Statement failed, SQLSTATE = (08006|08003)') # pattern_for_failed_statement = re.compile('Statement failed, SQLSTATE = (08006|08003)')
# pattern_for_connection_close = re.compile('(Error (reading|writing) data (from|to) the connection)|(connection shutdown)') # pattern_for_connection_close = re.compile('(Error (reading|writing) data (from|to) the connection)|(connection shutdown)')
# pattern_for_ignored_messages = re.compile('(-send_packet/send)|(-Killed by database administrator.)') # pattern_for_ignored_messages = re.compile('(-send_packet/send)|(-Killed by database administrator.)')
# #
# msg_prefix = 'HANGED ATTACH, STDERR: ' # msg_prefix = 'HANGED ATTACH, STDERR: '
# with open( f_hang_err.name,'r') as f: # with open( f_hang_err.name,'r') as f:
# for line in f: # for line in f:
@ -291,72 +297,153 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# print( msg_prefix, '<found pattern about closed connection>') # print( msg_prefix, '<found pattern about closed connection>')
# else: # else:
# print( msg_prefix, ' '.join(line.split()) ) # print( msg_prefix, ' '.join(line.split()) )
# #
# #
# with open( f_kill_log.name,'r') as f: # with open( f_kill_log.name,'r') as f:
# for line in f: # for line in f:
# if line.split(): # if line.split():
# print('KILLER ATTACH, STDOUT: ', ' '.join(line.split()) ) # print('KILLER ATTACH, STDOUT: ', ' '.join(line.split()) )
# #
# with open( f_kill_err.name,'r') as f: # with open( f_kill_err.name,'r') as f:
# for line in f: # for line in f:
# if line.split(): # if line.split():
# print('KILLER ATTACH, UNEXPECTED STDERR: ', ' '.join(line.split()) ) # print('KILLER ATTACH, UNEXPECTED STDERR: ', ' '.join(line.split()) )
# #
# with open( f_shut_err.name,'r') as f: # with open( f_shut_err.name,'r') as f:
# for line in f: # for line in f:
# if line.split(): # if line.split():
# print('DB SHUTDOWN, UNEXPECTED STDERR: ', ' '.join(line.split()) ) # print('DB SHUTDOWN, UNEXPECTED STDERR: ', ' '.join(line.split()) )
# #
# #
# ############################### # ###############################
# # Cleanup. # # Cleanup.
# time.sleep(1) # time.sleep(1)
# #
# f_list=( # f_list=(
# f_hang_sql # f_hang_sql
# ,f_hang_log # ,f_hang_log
# ,f_hang_err # ,f_hang_err
# ,f_kill_sql # ,f_kill_sql
# ,f_kill_log # ,f_kill_log
# ,f_kill_err # ,f_kill_err
# ,f_shut_log # ,f_shut_log
# ,f_shut_err # ,f_shut_err
# ) # )
# cleanup( f_list ) # cleanup( f_list )
# #
# #
#--- #---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
expected_stdout_1 = """ expected_stdout_1 = """
HANGED ATTACH, STDOUT: Records affected: 0 HANGED ATTACH, STDOUT: Records affected: 0
HANGED ATTACH, STDERR: Statement failed, SQLSTATE = 42000 HANGED ATTACH, STDERR: Statement failed, SQLSTATE = 42000
HANGED ATTACH, STDERR: Execute statement error at isc_dsql_fetch : HANGED ATTACH, STDERR: Execute statement error at isc_dsql_fetch :
HANGED ATTACH, STDERR: <found pattern about closed connection> HANGED ATTACH, STDERR: <found pattern about closed connection>
HANGED ATTACH, STDERR: Statement : select u.unreachable_address from sp_unreachable as u HANGED ATTACH, STDERR: Statement : select u.unreachable_address from sp_unreachable as u
.*Data source .*Data source
HANGED ATTACH, STDERR: -At procedure 'SP_GET_DATA' line: 3, col: 9 HANGED ATTACH, STDERR: -At procedure 'SP_GET_DATA' line: 3, col: 9
HANGED ATTACH, STDERR: <found pattern about failed statement> HANGED ATTACH, STDERR: <found pattern about failed statement>
HANGED ATTACH, STDERR: <found pattern about closed connection> HANGED ATTACH, STDERR: <found pattern about closed connection>
HANGED ATTACH, STDERR: <found pattern about failed statement> HANGED ATTACH, STDERR: <found pattern about failed statement>
HANGED ATTACH, STDERR: <found pattern about closed connection> HANGED ATTACH, STDERR: <found pattern about closed connection>
KILLER ATTACH, STDOUT: ITERATION_NO 1 KILLER ATTACH, STDOUT: ITERATION_NO 1
KILLER ATTACH, STDOUT: HANGING_ATTACH_CONNECTION 1 KILLER ATTACH, STDOUT: HANGING_ATTACH_CONNECTION 1
KILLER ATTACH, STDOUT: HANGING_ATTACH_PROTOCOL TCP KILLER ATTACH, STDOUT: HANGING_ATTACH_PROTOCOL TCP
KILLER ATTACH, STDOUT: HANGING_STATEMENT_STATE 1 KILLER ATTACH, STDOUT: HANGING_STATEMENT_STATE 1
KILLER ATTACH, STDOUT: select * from sp_get_data KILLER ATTACH, STDOUT: select * from sp_get_data
KILLER ATTACH, STDOUT: Records affected: 1 KILLER ATTACH, STDOUT: Records affected: 1
KILLER ATTACH, STDOUT: ITERATION_NO 2 KILLER ATTACH, STDOUT: ITERATION_NO 2
KILLER ATTACH, STDOUT: HANGING_ATTACH_CONNECTION <null> KILLER ATTACH, STDOUT: HANGING_ATTACH_CONNECTION <null>
KILLER ATTACH, STDOUT: HANGING_ATTACH_PROTOCOL <null> KILLER ATTACH, STDOUT: HANGING_ATTACH_PROTOCOL <null>
KILLER ATTACH, STDOUT: HANGING_STATEMENT_STATE <null> KILLER ATTACH, STDOUT: HANGING_STATEMENT_STATE <null>
KILLER ATTACH, STDOUT: Records affected: 0 KILLER ATTACH, STDOUT: Records affected: 0
""" """
kill_script = """
set list on;
set blob all;
select gen_id(g,1) as ITERATION_NO from rdb$database;
commit;
select
sign(a.mon$attachment_id) as hanging_attach_connection
,left(a.mon$remote_protocol,3) as hanging_attach_protocol
,s.mon$state as hanging_statement_state
,s.mon$sql_text as hanging_statement_blob_id
from rdb$database d
left join mon$attachments a on a.mon$remote_process containing 'isql'
-- do NOT use, field not existed in 2.5.x: and a.mon$system_flag is distinct from 1
and a.mon$attachment_id is distinct from current_connection
left join mon$statements s on
a.mon$attachment_id = s.mon$attachment_id
and s.mon$state = 1 -- 4.0 Classic: 'SELECT RDB$MAP_USING, RDB$MAP_PLUGIN, ... FROM RDB$AUTH_MAPPING', mon$state = 0
;
set count on;
delete from mon$attachments a
where
a.mon$attachment_id <> current_connection
and a.mon$remote_process containing 'isql'
;
commit;
"""
hang_script_1 = temp_file('hang_script.sql')
hang_stdout_1 = temp_file('hang_script.out')
hang_stderr_1 = temp_file('hang_script.err')
@pytest.mark.version('>=3.0.3') @pytest.mark.version('>=3.0.3')
@pytest.mark.xfail def test_1(act_1: Action, hang_script_1: Path, hang_stdout_1: Path, hang_stderr_1: Path,
def test_1(db_1): capsys):
pytest.fail("Test not IMPLEMENTED") hang_script_1.write_text('set list on; set count on; select * from sp_get_data;')
pattern_for_failed_statement = re.compile('Statement failed, SQLSTATE = (08006|08003)')
pattern_for_connection_close = re.compile('(Error (reading|writing) data (from|to) the connection)|(connection shutdown)')
pattern_for_ignored_messages = re.compile('(-send_packet/send)|(-Killed by database administrator.)')
killer_output = []
#
with open(hang_stdout_1, mode='w') as hang_out, open(hang_stderr_1, mode='w') as hang_err:
p_hang_sql = subprocess.Popen([act_1.vars['isql'], '-i', str(hang_script_1),
'-user', act_1.db.user,
'-password', act_1.db.password, act_1.db.dsn],
stdout=hang_out, stderr=hang_err)
try:
time.sleep(4)
for i in range(2):
act_1.reset()
act_1.isql(switches=[], input=kill_script)
killer_output.append(act_1.stdout)
finally:
p_hang_sql.terminate()
# Ensure that database is not busy
with act_1.connect_server() as srv:
srv.database.shutdown(database=act_1.db.db_path, mode=ShutdownMode.FULL,
method=ShutdownMethod.FORCED, timeout=0)
srv.database.bring_online(database=act_1.db.db_path)
#
output = []
for line in hang_stdout_1.read_text().splitlines():
if line.strip():
output.append(f'HANGED ATTACH, STDOUT: {line}')
for line in hang_stderr_1.read_text().splitlines():
if line.strip():
if pattern_for_ignored_messages.search(line):
continue
elif pattern_for_failed_statement.search(line):
msg = '<found pattern about failed statement>'
elif pattern_for_connection_close.search(line):
msg = '<found pattern about closed connection>'
else:
msg = line
output.append(f'HANGED ATTACH, STDERR: {msg}')
for step in killer_output:
for line in step.splitlines():
if line.strip():
output.append(f"KILLER ATTACH, STDOUT: {' '.join(line.split())}")
# Check
act_1.reset()
act_1.expected_stdout = expected_stdout_1
act_1.stdout = '\n'.join(output)
assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@ -2,25 +2,29 @@
# #
# id: bugs.core_5704 # id: bugs.core_5704
# title: Avoid UPDATE of RDB$DATABASE by ALTER DATABASE statement when possible # title: Avoid UPDATE of RDB$DATABASE by ALTER DATABASE statement when possible
# decription: # decription:
# Instead of doing 'nbackup -L' plus fill database with lot of new data and then 'nbackup -N' with waiting for # Instead of doing 'nbackup -L' plus fill database with lot of new data and then 'nbackup -N' with waiting for
# delta will be integrated into main file, we can get the same result by invoking 'alter database add difference file' # delta will be integrated into main file, we can get the same result by invoking 'alter database add difference file'
# statement in the 1st attachment in RC NO_REC_VERS and WITHOUT COMMITTING it, and then attempt to establish new connect # statement in the 1st attachment in RC NO_REC_VERS and WITHOUT COMMITTING it, and then attempt to establish new connect
# using ES/EDS. Second attachment should be made without any problem, despite that transaction in 1st connect not yet # using ES/EDS. Second attachment should be made without any problem, despite that transaction in 1st connect not yet
# committed or rolled back. # committed or rolled back.
# #
# Confirmed lock of rdb$database record (which leads to inability to establish new connect) on WI-V3.0.3.32837. # Confirmed lock of rdb$database record (which leads to inability to establish new connect) on WI-V3.0.3.32837.
# Works fine on (SS, CS): # Works fine on (SS, CS):
# 3.0.3.32876: OK, 5.266s. # 3.0.3.32876: OK, 5.266s.
# 4.0.0.852: OK, 5.594s. # 4.0.0.852: OK, 5.594s.
# #
# tracker_id: CORE-5704 # tracker_id: CORE-5704
# min_versions: ['3.0.3'] # min_versions: ['3.0.3']
# versions: 3.0.3 # versions: 3.0.3
# qmid: None # qmid: None
import pytest import pytest
from firebird.qa import db_factory, isql_act, Action import subprocess
import time
from pathlib import Path
from firebird.qa import db_factory, python_act, Action, temp_file
from firebird.driver import ShutdownMode, ShutdownMethod
# version: 3.0.3 # version: 3.0.3
# resources: None # resources: None
@ -33,36 +37,36 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1 # test_script_1
#--- #---
# #
# import os # import os
# import subprocess # import subprocess
# from subprocess import Popen # from subprocess import Popen
# import time # import time
# from fdb import services # from fdb import services
# #
# os.environ["ISC_USER"] = user_name # os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password # os.environ["ISC_PASSWORD"] = user_password
# db_file = db_conn.database_name # db_file = db_conn.database_name
# #
# db_conn.close() # db_conn.close()
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# def flush_and_close( file_handle ): # def flush_and_close( file_handle ):
# # https://docs.python.org/2/library/os.html#os.fsync # # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f, # # If you're starting with a Python file object f,
# # first do f.flush(), and # # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. # # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os # global os
# #
# file_handle.flush() # file_handle.flush()
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: # if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
# # otherwise: "OSError: [Errno 9] Bad file descriptor"! # # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno()) # os.fsync(file_handle.fileno())
# file_handle.close() # file_handle.close()
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# def cleanup( f_names_list ): # def cleanup( f_names_list ):
# global os # global os
# for i in range(len( f_names_list )): # for i in range(len( f_names_list )):
@ -73,22 +77,22 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# else: # else:
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.') # print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
# del_name = None # del_name = None
# #
# if del_name and os.path.isfile( del_name ): # if del_name and os.path.isfile( del_name ):
# os.remove( del_name ) # os.remove( del_name )
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# usr=user_name # usr=user_name
# pwd=user_password # pwd=user_password
# new_diff_file=os.path.join(context['temp_directory'],'tmp_new_diff_5704.tmp') # new_diff_file=os.path.join(context['temp_directory'],'tmp_new_diff_5704.tmp')
# new_main_file=os.path.join(context['temp_directory'],'tmp_new_main_5704.tmp') # new_main_file=os.path.join(context['temp_directory'],'tmp_new_main_5704.tmp')
# #
# eds_query=''' # eds_query='''
# set count on; # set count on;
# set list on; # set list on;
# set autoddl off; # set autoddl off;
# #
# set term ^; # set term ^;
# create or alter procedure sp_connect returns(check_eds_result int) as # create or alter procedure sp_connect returns(check_eds_result int) as
# declare usr varchar(31); # declare usr varchar(31);
@ -105,54 +109,54 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# end # end
# ^ # ^
# set term ^; # set term ^;
# #
# commit; # commit;
# set transaction read committed no record_version lock timeout 1; # set transaction read committed no record_version lock timeout 1;
# #
# alter database add difference file '%(new_diff_file)s'; # alter database add difference file '%(new_diff_file)s';
# select * from sp_connect; # select * from sp_connect;
# #
# rollback; # rollback;
# select * from rdb$files; # select * from rdb$files;
# rollback; # rollback;
# #
# set transaction read committed no record_version lock timeout 1; # set transaction read committed no record_version lock timeout 1;
# #
# alter database add file '%(new_main_file)s'; # alter database add file '%(new_main_file)s';
# select * from sp_connect; # select * from sp_connect;
# --select * from rdb$files; # --select * from rdb$files;
# rollback; # rollback;
# select * from rdb$files; # select * from rdb$files;
# ''' # '''
# #
# f_eds_to_local_host_sql = open( os.path.join(context['temp_directory'],'tmp_local_host_5704.sql'), 'w') # f_eds_to_local_host_sql = open( os.path.join(context['temp_directory'],'tmp_local_host_5704.sql'), 'w')
# f_eds_to_local_host_sql.write( eds_query % locals() ) # f_eds_to_local_host_sql.write( eds_query % locals() )
# flush_and_close( f_eds_to_local_host_sql ) # flush_and_close( f_eds_to_local_host_sql )
# #
# f_eds_to_local_host_log = open( os.path.join(context['temp_directory'],'tmp_local_host_5704.log'), 'w') # f_eds_to_local_host_log = open( os.path.join(context['temp_directory'],'tmp_local_host_5704.log'), 'w')
# f_eds_to_local_host_err = open( os.path.join(context['temp_directory'],'tmp_local_host_5704.err'), 'w') # f_eds_to_local_host_err = open( os.path.join(context['temp_directory'],'tmp_local_host_5704.err'), 'w')
# #
# # WARNING: we launch ISQL here in async mode in order to have ability to kill its process if it will hang! # # WARNING: we launch ISQL here in async mode in order to have ability to kill its process if it will hang!
# ############################################ # ############################################
# p_isql_to_local_host=subprocess.Popen( [context['isql_path'], dsn, "-i", f_eds_to_local_host_sql.name ], # p_isql_to_local_host=subprocess.Popen( [context['isql_path'], dsn, "-i", f_eds_to_local_host_sql.name ],
# stdout = f_eds_to_local_host_log, # stdout = f_eds_to_local_host_log,
# stderr = f_eds_to_local_host_err # stderr = f_eds_to_local_host_err
# ) # )
# #
# time.sleep(3) # time.sleep(3)
# #
# p_isql_to_local_host.terminate() # p_isql_to_local_host.terminate()
# flush_and_close( f_eds_to_local_host_log ) # flush_and_close( f_eds_to_local_host_log )
# flush_and_close( f_eds_to_local_host_err ) # flush_and_close( f_eds_to_local_host_err )
# #
# #
# # Make DB shutdown and bring online because some internal server process still can be active! # # Make DB shutdown and bring online because some internal server process still can be active!
# # If we skip this step than runtime error related to dropping test DB can occur! # # If we skip this step than runtime error related to dropping test DB can occur!
# ######################################### # #########################################
# #
# f_db_reset_log=open( os.path.join(context['temp_directory'],'tmp_reset_5704.log'), 'w') # f_db_reset_log=open( os.path.join(context['temp_directory'],'tmp_reset_5704.log'), 'w')
# f_db_reset_err=open( os.path.join(context['temp_directory'],'tmp_reset_5704.err'), 'w') # f_db_reset_err=open( os.path.join(context['temp_directory'],'tmp_reset_5704.err'), 'w')
# #
# f_db_reset_log.write('Point before DB shutdown.'+os.linesep) # f_db_reset_log.write('Point before DB shutdown.'+os.linesep)
# f_db_reset_log.seek(0,2) # f_db_reset_log.seek(0,2)
# subprocess.call( [context['fbsvcmgr_path'], "localhost:service_mgr", # subprocess.call( [context['fbsvcmgr_path'], "localhost:service_mgr",
@ -163,7 +167,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# stderr = f_db_reset_err # stderr = f_db_reset_err
# ) # )
# f_db_reset_log.write(os.linesep+'Point after DB shutdown.'+os.linesep) # f_db_reset_log.write(os.linesep+'Point after DB shutdown.'+os.linesep)
# #
# subprocess.call( [context['fbsvcmgr_path'], "localhost:service_mgr", # subprocess.call( [context['fbsvcmgr_path'], "localhost:service_mgr",
# "action_properties", "prp_db_online", # "action_properties", "prp_db_online",
# "dbname", db_file, # "dbname", db_file,
@ -171,63 +175,122 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# stdout = f_db_reset_log, # stdout = f_db_reset_log,
# stderr = f_db_reset_err # stderr = f_db_reset_err
# ) # )
# #
# f_db_reset_log.write(os.linesep+'Point after DB online.'+os.linesep) # f_db_reset_log.write(os.linesep+'Point after DB online.'+os.linesep)
# flush_and_close( f_db_reset_log ) # flush_and_close( f_db_reset_log )
# flush_and_close( f_db_reset_err ) # flush_and_close( f_db_reset_err )
# #
# with open( f_eds_to_local_host_log.name,'r') as f: # with open( f_eds_to_local_host_log.name,'r') as f:
# for line in f: # for line in f:
# if line.split(): # if line.split():
# print('STDOUT in ISQL: ', ' '.join(line.split()) ) # print('STDOUT in ISQL: ', ' '.join(line.split()) )
# #
# with open( f_eds_to_local_host_err.name,'r') as f: # with open( f_eds_to_local_host_err.name,'r') as f:
# for line in f: # for line in f:
# if line.split(): # if line.split():
# print('UNEXPECTED STDERR in '+f_eds_to_local_host_err.name+': '+line) # print('UNEXPECTED STDERR in '+f_eds_to_local_host_err.name+': '+line)
# #
# with open( f_db_reset_log.name,'r') as f: # with open( f_db_reset_log.name,'r') as f:
# for line in f: # for line in f:
# if line.split(): # if line.split():
# print('STDOUT in DB reset: ', ' '.join(line.split()) ) # print('STDOUT in DB reset: ', ' '.join(line.split()) )
# #
# with open( f_db_reset_err.name,'r') as f: # with open( f_db_reset_err.name,'r') as f:
# for line in f: # for line in f:
# if line.split(): # if line.split():
# print('UNEXPECTED STDERR in '+f_db_reset_log.name+': '+line) # print('UNEXPECTED STDERR in '+f_db_reset_log.name+': '+line)
# #
# ############################### # ###############################
# # Cleanup. # # Cleanup.
# time.sleep(1) # time.sleep(1)
# #
# f_list=( # f_list=(
# f_eds_to_local_host_sql # f_eds_to_local_host_sql
# ,f_eds_to_local_host_log # ,f_eds_to_local_host_log
# ,f_eds_to_local_host_err # ,f_eds_to_local_host_err
# ,f_db_reset_log # ,f_db_reset_log
# ,f_db_reset_err # ,f_db_reset_err
# ) # )
# cleanup( f_list ) # cleanup( f_list )
# #
# #
#--- #---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
expected_stdout_1 = """ expected_stdout_1 = """
STDOUT in ISQL: CHECK_EDS_RESULT 1 CHECK_EDS_RESULT 1
STDOUT in ISQL: Records affected: 1 Records affected: 1
STDOUT in ISQL: Records affected: 0 Records affected: 0
STDOUT in ISQL: CHECK_EDS_RESULT 1 CHECK_EDS_RESULT 1
STDOUT in ISQL: Records affected: 1 Records affected: 1
STDOUT in ISQL: Records affected: 0 Records affected: 0
STDOUT in DB reset: Point before DB shutdown. """
STDOUT in DB reset: Point after DB shutdown.
STDOUT in DB reset: Point after DB online. eds_script = temp_file('eds_script.sql')
""" eds_output = temp_file('eds_script.out')
new_diff_file = temp_file('_new_diff_5704.tmp')
new_main_file = temp_file('new_main_5704.tmp')
@pytest.mark.version('>=3.0.3') @pytest.mark.version('>=3.0.3')
@pytest.mark.xfail def test_1(act_1: Action, eds_script: Path, eds_output: Path, new_diff_file: Path,
def test_1(db_1): new_main_file: Path):
pytest.fail("Test not IMPLEMENTED") eds_script.write_text(f"""
set count on;
set list on;
set autoddl off;
set term ^;
create or alter procedure sp_connect returns(check_eds_result int) as
declare usr varchar(31);
declare pwd varchar(31);
declare v_sttm varchar(255) = 'select 1 from rdb$database';
begin
usr ='{act_1.db.user}';
pwd = '{act_1.db.password}';
execute statement v_sttm
on external 'localhost:' || rdb$get_context('SYSTEM','DB_NAME')
as user usr password pwd
into check_eds_result;
suspend;
end
^
set term ^;
commit;
set transaction read committed no record_version lock timeout 1;
alter database add difference file '{new_diff_file}';
select * from sp_connect;
rollback;
select * from rdb$files;
rollback;
set transaction read committed no record_version lock timeout 1;
alter database add file '{new_main_file}';
select * from sp_connect;
--select * from rdb$files;
rollback;
select * from rdb$files;
""")
#
with open(eds_output, mode='w') as eds_out:
p_eds_sql = subprocess.Popen([act_1.vars['isql'], '-i', str(eds_script),
'-user', act_1.db.user,
'-password', act_1.db.password, act_1.db.dsn],
stdout=eds_out, stderr=subprocess.STDOUT)
try:
time.sleep(4)
finally:
p_eds_sql.terminate()
# Ensure that database is not busy
with act_1.connect_server() as srv:
srv.database.shutdown(database=act_1.db.db_path, mode=ShutdownMode.FULL,
method=ShutdownMethod.FORCED, timeout=0)
srv.database.bring_online(database=act_1.db.db_path)
# Check
act_1.expected_stdout = expected_stdout_1
act_1.stdout = eds_output.read_text()
assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@ -2,20 +2,20 @@
# #
# id: bugs.core_5706 # id: bugs.core_5706
# title: Trace config with misplaced "{" lead firebird to crash # title: Trace config with misplaced "{" lead firebird to crash
# decription: # decription:
# We create trace config with following INVALID content: # We create trace config with following INVALID content:
# database = (%[\\/](security[[:digit:]]).fdb|(security.db)) # database = (%[\\/](security[[:digit:]]).fdb|(security.db))
# enabled = false # enabled = false
# { # {
# } # }
# #
# database = # database =
# { # {
# enabled = true # enabled = true
# log_connections = true # log_connections = true
# } # }
# #
# Then we run new process with ISQL with connect to test DB. # Then we run new process with ISQL with connect to test DB.
# This immediately should cause raise error in the 1st (trace) process: # This immediately should cause raise error in the 1st (trace) process:
# 1 Trace session ID 1 started # 1 Trace session ID 1 started
# 2 Error creating trace session for database "C:\\MIX\\FIREBIRD\\FB30\\SECURITY3.FDB": # 2 Error creating trace session for database "C:\\MIX\\FIREBIRD\\FB30\\SECURITY3.FDB":
@ -33,15 +33,16 @@
# 3.0.5.33160: OK, 6.882s. # 3.0.5.33160: OK, 6.882s.
# 3.0.5.33152: OK, 7.767s. # 3.0.5.33152: OK, 7.767s.
# 3.0.4.33054: OK, 8.622s. # 3.0.4.33054: OK, 8.622s.
# #
# #
# tracker_id: CORE-5706 # tracker_id: CORE-5706
# min_versions: ['3.0.3'] # min_versions: ['3.0.3']
# versions: 3.0.3 # versions: 3.0.3
# qmid: None # qmid: None
import pytest import pytest
from firebird.qa import db_factory, isql_act, Action from difflib import unified_diff
from firebird.qa import db_factory, python_act, Action
# version: 3.0.3 # version: 3.0.3
# resources: None # resources: None
@ -54,34 +55,34 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1 # test_script_1
#--- #---
# #
# import os # import os
# import subprocess # import subprocess
# from subprocess import Popen # from subprocess import Popen
# import difflib # import difflib
# import time # import time
# #
# os.environ["ISC_USER"] = user_name # os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password # os.environ["ISC_PASSWORD"] = user_password
# db_conn.close() # db_conn.close()
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# def flush_and_close( file_handle ): # def flush_and_close( file_handle ):
# # https://docs.python.org/2/library/os.html#os.fsync # # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f, # # If you're starting with a Python file object f,
# # first do f.flush(), and # # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. # # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os # global os
# #
# file_handle.flush() # file_handle.flush()
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: # if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
# # otherwise: "OSError: [Errno 9] Bad file descriptor"! # # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno()) # os.fsync(file_handle.fileno())
# file_handle.close() # file_handle.close()
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# def cleanup( f_names_list ): # def cleanup( f_names_list ):
# global os # global os
# for i in range(len( f_names_list )): # for i in range(len( f_names_list )):
@ -92,14 +93,14 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# else: # else:
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.') # print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
# del_name = None # del_name = None
# #
# if del_name and os.path.isfile( del_name ): # if del_name and os.path.isfile( del_name ):
# os.remove( del_name ) # os.remove( del_name )
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# def svc_get_fb_log( f_fb_log ): # def svc_get_fb_log( f_fb_log ):
# #
# global subprocess # global subprocess
# subprocess.call([ context['fbsvcmgr_path'], # subprocess.call([ context['fbsvcmgr_path'],
# "localhost:service_mgr", # "localhost:service_mgr",
@ -108,9 +109,9 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# stdout=f_fb_log, stderr=subprocess.STDOUT # stdout=f_fb_log, stderr=subprocess.STDOUT
# ) # )
# return # return
# #
# #
# #
# txt30 = r'''# Trace config, format for 3.0. Generated auto, do not edit! # txt30 = r'''# Trace config, format for 3.0. Generated auto, do not edit!
# # ::: NOTE ::: # # ::: NOTE :::
# # First 'database' section here INTENTIONALLY was written WRONG! # # First 'database' section here INTENTIONALLY was written WRONG!
@ -118,55 +119,55 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# enabled = false # enabled = false
# { # {
# } # }
# #
# database = # database =
# { # {
# enabled = true # enabled = true
# log_connections = true # log_connections = true
# } # }
# ''' # '''
# #
# fn_trccfg=open( os.path.join(context['temp_directory'],'tmp_trace_5706_3x.cfg'), 'w') # fn_trccfg=open( os.path.join(context['temp_directory'],'tmp_trace_5706_3x.cfg'), 'w')
# fn_trccfg.write(txt30) # fn_trccfg.write(txt30)
# flush_and_close( fn_trccfg ) # flush_and_close( fn_trccfg )
# #
# f_fblog_before=open( os.path.join(context['temp_directory'],'tmp_5706_fblog_before.txt'), 'w') # f_fblog_before=open( os.path.join(context['temp_directory'],'tmp_5706_fblog_before.txt'), 'w')
# svc_get_fb_log( f_fblog_before ) # svc_get_fb_log( f_fblog_before )
# flush_and_close( f_fblog_before ) # flush_and_close( f_fblog_before )
# #
# #
# # ############################################################## # # ##############################################################
# # S T A R T T R A C E i n S E P A R A T E P R O C E S S # # S T A R T T R A C E i n S E P A R A T E P R O C E S S
# # ############################################################## # # ##############################################################
# #
# fn_trclog=open( os.path.join(context['temp_directory'],'tmp_trace_5706_3x.log'), 'w') # fn_trclog=open( os.path.join(context['temp_directory'],'tmp_trace_5706_3x.log'), 'w')
# p_trace = Popen([context['fbsvcmgr_path'] , "localhost:service_mgr" , "action_trace_start" , "trc_cfg" , fn_trccfg.name], stdout=fn_trclog, stderr=subprocess.STDOUT) # p_trace = Popen([context['fbsvcmgr_path'] , "localhost:service_mgr" , "action_trace_start" , "trc_cfg" , fn_trccfg.name], stdout=fn_trclog, stderr=subprocess.STDOUT)
# #
# # We run here ISQL only in order to "wake up" trace session and force it to raise error in its log. # # We run here ISQL only in order to "wake up" trace session and force it to raise error in its log.
# # NO message like 'Statement failed, SQLSTATE = 08004/connection rejected by remote interface' should appear now! # # NO message like 'Statement failed, SQLSTATE = 08004/connection rejected by remote interface' should appear now!
# runProgram('isql', [ dsn, '-q', '-n' ], 'quit;') # runProgram('isql', [ dsn, '-q', '-n' ], 'quit;')
# #
# f_fblog_after=open( os.path.join(context['temp_directory'],'tmp_5706_fblog_after.txt'), 'w') # f_fblog_after=open( os.path.join(context['temp_directory'],'tmp_5706_fblog_after.txt'), 'w')
# svc_get_fb_log( f_fblog_after ) # svc_get_fb_log( f_fblog_after )
# flush_and_close( f_fblog_after ) # flush_and_close( f_fblog_after )
# #
# #
# # _!_!_!_!_!_!_!_!_!_! do NOT reduce this delay: firebird.log get new messages NOT instantly !_!_!_!_!_!_!_!_ # # _!_!_!_!_!_!_!_!_!_! do NOT reduce this delay: firebird.log get new messages NOT instantly !_!_!_!_!_!_!_!_
# # Currently firebird.log can stay with OLD content if heavy concurrent workload exists on the same host! # # Currently firebird.log can stay with OLD content if heavy concurrent workload exists on the same host!
# time.sleep(1) # time.sleep(1)
# #
# # #################################################### # # ####################################################
# # G E T A C T I V E T R A C E S E S S I O N I D # # G E T A C T I V E T R A C E S E S S I O N I D
# # #################################################### # # ####################################################
# # Save active trace session info into file for further parsing it and obtain session_id back (for stop): # # Save active trace session info into file for further parsing it and obtain session_id back (for stop):
# #
# fn_trclst=open( os.path.join(context['temp_directory'],'tmp_trace_5706_3x.lst'), 'w') # fn_trclst=open( os.path.join(context['temp_directory'],'tmp_trace_5706_3x.lst'), 'w')
# subprocess.call([context['fbsvcmgr_path'], "localhost:service_mgr", "action_trace_list"], stdout=fn_trclst, stderr=subprocess.STDOUT) # subprocess.call([context['fbsvcmgr_path'], "localhost:service_mgr", "action_trace_list"], stdout=fn_trclst, stderr=subprocess.STDOUT)
# flush_and_close( fn_trclst ) # flush_and_close( fn_trclst )
# #
# # Do not remove this line. # # Do not remove this line.
# time.sleep(1) # time.sleep(1)
# #
# trcssn=0 # trcssn=0
# with open( fn_trclst.name,'r') as f: # with open( fn_trclst.name,'r') as f:
# for line in f: # for line in f:
@ -177,9 +178,9 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# trcssn=word # trcssn=word
# i=i+1 # i=i+1
# break # break
# #
# # Result: `trcssn` is ID of active trace session. Now we have to terminate it: # # Result: `trcssn` is ID of active trace session. Now we have to terminate it:
# #
# # #################################################### # # ####################################################
# # S E N D R E Q U E S T T R A C E T O S T O P # # S E N D R E Q U E S T T R A C E T O S T O P
# # #################################################### # # ####################################################
@ -187,39 +188,39 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# fn_nul = open(os.devnull, 'w') # fn_nul = open(os.devnull, 'w')
# subprocess.call([context['fbsvcmgr_path'], "localhost:service_mgr", "action_trace_stop","trc_id", trcssn], stdout=fn_nul) # subprocess.call([context['fbsvcmgr_path'], "localhost:service_mgr", "action_trace_stop","trc_id", trcssn], stdout=fn_nul)
# fn_nul.close() # fn_nul.close()
# #
# p_trace.terminate() # p_trace.terminate()
# fn_trclog.close() # fn_trclog.close()
# #
# # Do not remove this line. # # Do not remove this line.
# #time.sleep(2) # #time.sleep(2)
# #
# # Compare firebird.log versions BEFORE and AFTER this test: # # Compare firebird.log versions BEFORE and AFTER this test:
# ###################### # ######################
# #
# oldfb=open(f_fblog_before.name, 'r') # oldfb=open(f_fblog_before.name, 'r')
# newfb=open(f_fblog_after.name, 'r') # newfb=open(f_fblog_after.name, 'r')
# #
# difftext = ''.join(difflib.unified_diff( # difftext = ''.join(difflib.unified_diff(
# oldfb.readlines(), # oldfb.readlines(),
# newfb.readlines() # newfb.readlines()
# )) # ))
# oldfb.close() # oldfb.close()
# newfb.close() # newfb.close()
# #
# f_diff_txt=open( os.path.join(context['temp_directory'],'tmp_5706_diff.txt'), 'w') # f_diff_txt=open( os.path.join(context['temp_directory'],'tmp_5706_diff.txt'), 'w')
# f_diff_txt.write(difftext) # f_diff_txt.write(difftext)
# flush_and_close( f_diff_txt ) # flush_and_close( f_diff_txt )
# #
# # Check logs: # # Check logs:
# ############# # #############
# with open( f_diff_txt.name,'r') as f: # with open( f_diff_txt.name,'r') as f:
# for line in f: # for line in f:
# if line.startswith('+'): # if line.startswith('+'):
# print( 'UNEXPECTED DIFF IN FIREBIRD.LOG: ' + (' '.join(line.split()).upper()) ) # print( 'UNEXPECTED DIFF IN FIREBIRD.LOG: ' + (' '.join(line.split()).upper()) )
# #
# #
# #
# # NB! Lines starting from 2nd in the following error block: # # NB! Lines starting from 2nd in the following error block:
# # Trace session ID 1 started # # Trace session ID 1 started
# # Error creating trace session for database "C:\\MIX\\FIREBIRD\\FB30\\SECURITY3.FDB": # # Error creating trace session for database "C:\\MIX\\FIREBIRD\\FB30\\SECURITY3.FDB":
@ -227,31 +228,31 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# # line 2: error while compiling regular expression "(%[\\/](security3).fdb|(security.db))" # # line 2: error while compiling regular expression "(%[\\/](security3).fdb|(security.db))"
# # - are duplicated in FB 3.0.3 Classic. # # - are duplicated in FB 3.0.3 Classic.
# # For this reason we collect all UNIQUE messages in the set() and output then only such distinct list. # # For this reason we collect all UNIQUE messages in the set() and output then only such distinct list.
# #
# #
# ''' # '''
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# DISABLED 29.07.2019: # DISABLED 29.07.2019:
# 1. TRACE LOG FOR CLASSIC STRONGLY DIFFERS FROM SS. # 1. TRACE LOG FOR CLASSIC STRONGLY DIFFERS FROM SS.
# 2. IT'S NO MATTER WHAT TRACE LOG CONTAINS, MAIN GOAL: # 2. IT'S NO MATTER WHAT TRACE LOG CONTAINS, MAIN GOAL:
# FIREBIRD.LOG MUST *NOT* DIFFER FROM ITSELF THAT IT WAS BEFORE THIS TEST RUN. # FIREBIRD.LOG MUST *NOT* DIFFER FROM ITSELF THAT IT WAS BEFORE THIS TEST RUN.
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# #
# trc_unique_msg=set() # trc_unique_msg=set()
# with open( fn_trclog.name,'r') as f: # with open( fn_trclog.name,'r') as f:
# for line in f: # for line in f:
# if 'error' in line.lower(): # if 'error' in line.lower():
# trc_unique_msg.add( ' '.join(line.split()).upper() ) # trc_unique_msg.add( ' '.join(line.split()).upper() )
# #
# for p in sorted(trc_unique_msg): # for p in sorted(trc_unique_msg):
# print(p) # print(p)
# ''' # '''
# #
# #
# # CLEAN UP # # CLEAN UP
# ########## # ##########
# time.sleep(1) # time.sleep(1)
# f_list=( # f_list=(
# fn_trclog # fn_trclog
# ,fn_trclst # ,fn_trclst
# ,fn_trccfg # ,fn_trccfg
@ -260,17 +261,35 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# ,f_diff_txt # ,f_diff_txt
# ) # )
# cleanup( f_list ) # cleanup( f_list )
# #
# #, 'substitutions':[('FOR DATABASE.*','FOR DATABASE'), ('.*REGULAR EXPRESSION.*','REGULAR EXPRESSION ERROR'), ('TRACE SESSION ID [0-9]+ STARTED', 'TRACE SESSION ID STARTED') ] # #, 'substitutions':[('FOR DATABASE.*','FOR DATABASE'), ('.*REGULAR EXPRESSION.*','REGULAR EXPRESSION ERROR'), ('TRACE SESSION ID [0-9]+ STARTED', 'TRACE SESSION ID STARTED') ]
# #
# #
#--- #---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
trace_conf = """
# ::: NOTE :::
# First 'database' section here INTENTIONALLY was written WRONG!
database = (%[\\\\/](security[[:digit:]]).fdb|(security.db))
enabled = false
{
}
database =
{
enabled = true
log_connections = true
}
"""
@pytest.mark.version('>=3.0.3') @pytest.mark.version('>=3.0.3')
@pytest.mark.xfail def test_1(act_1: Action):
def test_1(db_1): log_before = act_1.get_firebird_log()
pytest.fail("Test not IMPLEMENTED") with act_1.trace(config=trace_conf, keep_log=False):
# We run here ISQL only in order to "wake up" trace session and force it to raise error in its log.
# NO message like 'Statement failed, SQLSTATE = 08004/connection rejected by remote interface' should appear now!
act_1.isql(switches=['-n', '-q'], input='quit;')
log_after = act_1.get_firebird_log()
assert list(unified_diff(log_before, log_after)) == []

View File

@ -2,7 +2,7 @@
# #
# id: bugs.core_5719 # id: bugs.core_5719
# title: FB >= 3 crashes when restoring backup made by FB 2.5. # title: FB >= 3 crashes when restoring backup made by FB 2.5.
# decription: # decription:
# This test also present in GTCS list, see it here: # This test also present in GTCS list, see it here:
# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/SV_HIDDEN_VAR_2_5.script # https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/SV_HIDDEN_VAR_2_5.script
# Confirmed crash on: # Confirmed crash on:
@ -11,14 +11,18 @@
# Works fine on: # Works fine on:
# FB30SS, build 3.0.3.32897: OK, 3.891s. # FB30SS, build 3.0.3.32897: OK, 3.891s.
# FB40SS, build 4.0.0.872: OK, 4.421s. # FB40SS, build 4.0.0.872: OK, 4.421s.
# #
# tracker_id: CORE-5719 # tracker_id: CORE-5719
# min_versions: ['3.0'] # min_versions: ['3.0']
# versions: 3.0 # versions: 3.0
# qmid: None # qmid: None
import pytest import pytest
from firebird.qa import db_factory, isql_act, Action import zipfile
from difflib import unified_diff
from pathlib import Path
from firebird.qa import db_factory, python_act, Action, temp_file
from firebird.driver import SrvRestoreFlag
# version: 3.0 # version: 3.0
# resources: None # resources: None
@ -31,35 +35,35 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1 # test_script_1
#--- #---
# #
# import os # import os
# import time # import time
# import zipfile # import zipfile
# import difflib # import difflib
# import subprocess # import subprocess
# #
# os.environ["ISC_USER"] = user_name # os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password # os.environ["ISC_PASSWORD"] = user_password
# #
# db_conn.close() # db_conn.close()
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# def flush_and_close( file_handle ): # def flush_and_close( file_handle ):
# # https://docs.python.org/2/library/os.html#os.fsync # # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f, # # If you're starting with a Python file object f,
# # first do f.flush(), and # # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. # # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os # global os
# #
# file_handle.flush() # file_handle.flush()
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: # if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
# # otherwise: "OSError: [Errno 9] Bad file descriptor"! # # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno()) # os.fsync(file_handle.fileno())
# file_handle.close() # file_handle.close()
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# def cleanup( f_names_list ): # def cleanup( f_names_list ):
# global os # global os
# for i in range(len( f_names_list )): # for i in range(len( f_names_list )):
@ -70,16 +74,16 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# else: # else:
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.') # print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
# del_name = None # del_name = None
# #
# if del_name and os.path.isfile( del_name ): # if del_name and os.path.isfile( del_name ):
# os.remove( del_name ) # os.remove( del_name )
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# def svc_get_fb_log( f_fb_log ): # def svc_get_fb_log( f_fb_log ):
# #
# global subprocess # global subprocess
# #
# subprocess.call( [ context['fbsvcmgr_path'], # subprocess.call( [ context['fbsvcmgr_path'],
# "localhost:service_mgr", # "localhost:service_mgr",
# "action_get_fb_log" # "action_get_fb_log"
@ -87,21 +91,21 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# stdout=f_fb_log, stderr=subprocess.STDOUT # stdout=f_fb_log, stderr=subprocess.STDOUT
# ) # )
# return # return
# #
# #
# zf = zipfile.ZipFile( os.path.join(context['files_location'],'core5719-ods-11_2.zip') ) # zf = zipfile.ZipFile( os.path.join(context['files_location'],'core5719-ods-11_2.zip') )
# tmpfbk = 'core5719-ods-11_2.fbk' # tmpfbk = 'core5719-ods-11_2.fbk'
# zf.extract( tmpfbk, '$(DATABASE_LOCATION)') # zf.extract( tmpfbk, '$(DATABASE_LOCATION)')
# zf.close() # zf.close()
# #
# tmpfbk='$(DATABASE_LOCATION)'+tmpfbk # tmpfbk='$(DATABASE_LOCATION)'+tmpfbk
# tmpfdb='$(DATABASE_LOCATION)'+'tmp_5719_check_restored.fdb' # tmpfdb='$(DATABASE_LOCATION)'+'tmp_5719_check_restored.fdb'
# #
# f_fblog_before=open( os.path.join(context['temp_directory'],'tmp_5719_fblog_before.txt'), 'w') # f_fblog_before=open( os.path.join(context['temp_directory'],'tmp_5719_fblog_before.txt'), 'w')
# svc_get_fb_log( f_fblog_before ) # svc_get_fb_log( f_fblog_before )
# flush_and_close( f_fblog_before ) # flush_and_close( f_fblog_before )
# #
# #
# f_restore_log=open( os.path.join(context['temp_directory'],'tmp_5719_check_restored.log'), 'w') # f_restore_log=open( os.path.join(context['temp_directory'],'tmp_5719_check_restored.log'), 'w')
# subprocess.check_call([context['fbsvcmgr_path'],"localhost:service_mgr", # subprocess.check_call([context['fbsvcmgr_path'],"localhost:service_mgr",
# "action_restore", # "action_restore",
@ -110,66 +114,66 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# "res_replace", # "res_replace",
# "verbose" # "verbose"
# ], # ],
# stdout=f_restore_log, # stdout=f_restore_log,
# stderr=subprocess.STDOUT) # stderr=subprocess.STDOUT)
# flush_and_close( f_restore_log ) # flush_and_close( f_restore_log )
# #
# f_fblog_after=open( os.path.join(context['temp_directory'],'tmp_5719_fblog_after.txt'), 'w') # f_fblog_after=open( os.path.join(context['temp_directory'],'tmp_5719_fblog_after.txt'), 'w')
# svc_get_fb_log( f_fblog_after ) # svc_get_fb_log( f_fblog_after )
# flush_and_close( f_fblog_after ) # flush_and_close( f_fblog_after )
# #
# #
# f_validation_log=open( os.path.join(context['temp_directory'],'tmp_5719_validation.log'), 'w') # f_validation_log=open( os.path.join(context['temp_directory'],'tmp_5719_validation.log'), 'w')
# subprocess.check_call([context['fbsvcmgr_path'],"localhost:service_mgr", # subprocess.check_call([context['fbsvcmgr_path'],"localhost:service_mgr",
# "action_validate", # "action_validate",
# "dbname", tmpfdb, # "dbname", tmpfdb,
# ], # ],
# stdout=f_validation_log, # stdout=f_validation_log,
# stderr=subprocess.STDOUT) # stderr=subprocess.STDOUT)
# flush_and_close( f_validation_log ) # flush_and_close( f_validation_log )
# #
# # Compare firebird.log versions BEFORE and AFTER this test: # # Compare firebird.log versions BEFORE and AFTER this test:
# ###################### # ######################
# #
# oldfb=open(f_fblog_before.name, 'r') # oldfb=open(f_fblog_before.name, 'r')
# newfb=open(f_fblog_after.name, 'r') # newfb=open(f_fblog_after.name, 'r')
# #
# difftext = ''.join(difflib.unified_diff( # difftext = ''.join(difflib.unified_diff(
# oldfb.readlines(), # oldfb.readlines(),
# newfb.readlines() # newfb.readlines()
# )) # ))
# oldfb.close() # oldfb.close()
# newfb.close() # newfb.close()
# #
# f_diff_txt=open( os.path.join(context['temp_directory'],'tmp_5719_diff.txt'), 'w') # f_diff_txt=open( os.path.join(context['temp_directory'],'tmp_5719_diff.txt'), 'w')
# f_diff_txt.write(difftext) # f_diff_txt.write(difftext)
# flush_and_close( f_diff_txt ) # flush_and_close( f_diff_txt )
# #
# # Check logs: # # Check logs:
# ############# # #############
# with open( f_restore_log.name,'r') as f: # with open( f_restore_log.name,'r') as f:
# for line in f: # for line in f:
# if 'Error'.upper() in line.upper(): # if 'Error'.upper() in line.upper():
# print( 'UNEXPECTED ERROR IN RESTORE LOG: ' + (' '.join(line.split()).upper()) ) # print( 'UNEXPECTED ERROR IN RESTORE LOG: ' + (' '.join(line.split()).upper()) )
# #
# with open( f_validation_log.name,'r') as f: # with open( f_validation_log.name,'r') as f:
# for line in f: # for line in f:
# if 'Error'.upper() in line.upper(): # if 'Error'.upper() in line.upper():
# print( 'UNEXPECTED ERROR IN VALIDATION LOG: ' + (' '.join(line.split()).upper()) ) # print( 'UNEXPECTED ERROR IN VALIDATION LOG: ' + (' '.join(line.split()).upper()) )
# #
# #
# with open( f_diff_txt.name,'r') as f: # with open( f_diff_txt.name,'r') as f:
# for line in f: # for line in f:
# if line.startswith('+'): # if line.startswith('+'):
# print( 'UNEXPECTED DIFF IN FIREBIRD.LOG: ' + (' '.join(line.split()).upper()) ) # print( 'UNEXPECTED DIFF IN FIREBIRD.LOG: ' + (' '.join(line.split()).upper()) )
# #
# #
# ########## # ##########
# # Cleanup: # # Cleanup:
# ########## # ##########
# time.sleep(1) # time.sleep(1)
# #
# f_list=( # f_list=(
# f_restore_log # f_restore_log
# ,f_validation_log # ,f_validation_log
# ,f_fblog_before # ,f_fblog_before
@ -179,14 +183,29 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# ,tmpfdb # ,tmpfdb
# ) # )
# cleanup( f_list ) # cleanup( f_list )
# #
#--- #---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
fbk_file_1 = temp_file('core5719-ods-11_2.fbk')
fdb_file_1 = temp_file('check_restored_5719.fdb')
@pytest.mark.version('>=3.0') @pytest.mark.version('>=3.0')
@pytest.mark.xfail def test_1(act_1: Action, fbk_file_1: Path, fdb_file_1: Path):
def test_1(db_1): zipped_fbk_file = zipfile.Path(act_1.vars['files'] / 'core_5719-ods-11_2.zip',
pytest.fail("Test not IMPLEMENTED") at='core5719-ods-11_2.fbk')
fbk_file_1.write_bytes(zipped_fbk_file.read_bytes())
log_before = act_1.get_firebird_log()
#
with act_1.connect_server() as srv:
srv.database.restore(backup=fbk_file_1, database=fdb_file_1,
flags=SrvRestoreFlag.REPLACE, verbose=True)
restore_err = [line for line in srv if 'ERROR' in line.upper()]
log_after = act_1.get_firebird_log()
srv.database.validate(database=fdb_file_1)
validate_err = [line for line in srv if 'ERROR' in line.upper()]
#
assert restore_err == []
assert validate_err == []
assert list(unified_diff(log_before, log_after)) == []

View File

@ -2,7 +2,7 @@
# #
# id: bugs.core_5737 # id: bugs.core_5737
# title: Invalid parameters of gds transaction in ISQL # title: Invalid parameters of gds transaction in ISQL
# decription: # decription:
# ISQL hangs when trying to show various system objects in a case when other attachment has uncommitted changes to that objects # ISQL hangs when trying to show various system objects in a case when other attachment has uncommitted changes to that objects
# We create (in Python connection) one table TEST1 with PK and commit transaction. # We create (in Python connection) one table TEST1 with PK and commit transaction.
# Then we create second (similar) table TEST2 but do not commit transaction. # Then we create second (similar) table TEST2 but do not commit transaction.
@ -11,19 +11,22 @@
# 1) should NOT hang (it did this because of launching Tx in read committed NO record_version); # 1) should NOT hang (it did this because of launching Tx in read committed NO record_version);
# 2) should output only info about table TEST1 and ints PK index. # 2) should output only info about table TEST1 and ints PK index.
# 3) should not output any info about non-committed DDL of table TEST2. # 3) should not output any info about non-committed DDL of table TEST2.
# #
# Confirmed bug on 3.0.3.32837 and 4.0.0.800 (ISQL did hang when issued any of 'SHOW TABLE' / 'SHOW INDEX' copmmand). # Confirmed bug on 3.0.3.32837 and 4.0.0.800 (ISQL did hang when issued any of 'SHOW TABLE' / 'SHOW INDEX' copmmand).
# Checked on: # Checked on:
# 3.0.3.32901: OK, 3.938s. # 3.0.3.32901: OK, 3.938s.
# 4.0.0.875: OK, 3.969s. # 4.0.0.875: OK, 3.969s.
# #
# tracker_id: CORE-5737 # tracker_id: CORE-5737
# min_versions: ['3.0.3'] # min_versions: ['3.0.3']
# versions: 3.0.3 # versions: 3.0.3
# qmid: None # qmid: None
import pytest import pytest
from firebird.qa import db_factory, isql_act, Action import subprocess
import time
from pathlib import Path
from firebird.qa import db_factory, python_act, Action, temp_file
# version: 3.0.3 # version: 3.0.3
# resources: None # resources: None
@ -36,34 +39,34 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1 # test_script_1
#--- #---
# #
# import os # import os
# import subprocess # import subprocess
# from subprocess import Popen # from subprocess import Popen
# import time # import time
# from fdb import services # from fdb import services
# #
# os.environ["ISC_USER"] = user_name # os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password # os.environ["ISC_PASSWORD"] = user_password
# #
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# def flush_and_close( file_handle ): # def flush_and_close( file_handle ):
# # https://docs.python.org/2/library/os.html#os.fsync # # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f, # # If you're starting with a Python file object f,
# # first do f.flush(), and # # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. # # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os # global os
# #
# file_handle.flush() # file_handle.flush()
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: # if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
# # otherwise: "OSError: [Errno 9] Bad file descriptor"! # # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno()) # os.fsync(file_handle.fileno())
# file_handle.close() # file_handle.close()
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# def cleanup( f_names_list ): # def cleanup( f_names_list ):
# global os # global os
# for i in range(len( f_names_list )): # for i in range(len( f_names_list )):
@ -74,72 +77,93 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# else: # else:
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.') # print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
# del_name = None # del_name = None
# #
# if del_name and os.path.isfile( del_name ): # if del_name and os.path.isfile( del_name ):
# os.remove( del_name ) # os.remove( del_name )
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# cur = db_conn.cursor() # cur = db_conn.cursor()
# db_conn.execute_immediate('recreate table test1(id int primary key using descending index test1_id_pk_desc)') # db_conn.execute_immediate('recreate table test1(id int primary key using descending index test1_id_pk_desc)')
# db_conn.commit() # db_conn.commit()
# cur.execute('recreate table test2(id int primary key using descending index test2_id_pk_desc)') # cur.execute('recreate table test2(id int primary key using descending index test2_id_pk_desc)')
# #
# show_query=''' # show_query='''
# show table; # show table;
# show index; # show index;
# ''' # '''
# #
# f_show_command_sql = open( os.path.join(context['temp_directory'],'tmp_local_host_5737.sql'), 'w') # f_show_command_sql = open( os.path.join(context['temp_directory'],'tmp_local_host_5737.sql'), 'w')
# f_show_command_sql.write( show_query ) # f_show_command_sql.write( show_query )
# flush_and_close( f_show_command_sql ) # flush_and_close( f_show_command_sql )
# #
# f_show_command_log = open( os.path.join(context['temp_directory'],'tmp_local_host_5737.log'), 'w') # f_show_command_log = open( os.path.join(context['temp_directory'],'tmp_local_host_5737.log'), 'w')
# f_show_command_err = open( os.path.join(context['temp_directory'],'tmp_local_host_5737.err'), 'w') # f_show_command_err = open( os.path.join(context['temp_directory'],'tmp_local_host_5737.err'), 'w')
# #
# # WARNING: we launch ISQL here in async mode in order to have ability to kill its process if it will hang! # # WARNING: we launch ISQL here in async mode in order to have ability to kill its process if it will hang!
# ############################################ # ############################################
# p_isql_to_local_host = subprocess.Popen( [ context['isql_path'], dsn, "-i", f_show_command_sql.name ], # p_isql_to_local_host = subprocess.Popen( [ context['isql_path'], dsn, "-i", f_show_command_sql.name ],
# stdout = f_show_command_log, # stdout = f_show_command_log,
# stderr = f_show_command_err # stderr = f_show_command_err
# ) # )
# #
# time.sleep(2) # time.sleep(2)
# #
# p_isql_to_local_host.terminate() # p_isql_to_local_host.terminate()
# flush_and_close( f_show_command_log ) # flush_and_close( f_show_command_log )
# flush_and_close( f_show_command_err ) # flush_and_close( f_show_command_err )
# #
# with open( f_show_command_log.name,'r') as f: # with open( f_show_command_log.name,'r') as f:
# for line in f: # for line in f:
# if line.split(): # if line.split():
# print('STDOUT: ', ' '.join(line.split()) ) # print('STDOUT: ', ' '.join(line.split()) )
# #
# #
# with open( f_show_command_err.name,'r') as f: # with open( f_show_command_err.name,'r') as f:
# for line in f: # for line in f:
# if line.split(): # if line.split():
# print('STDERR: ', ' '.join(line.split()) ) # print('STDERR: ', ' '.join(line.split()) )
# #
# cur.close() # cur.close()
# #
# #
# # Cleanup. # # Cleanup.
# ########## # ##########
# time.sleep(1) # time.sleep(1)
# cleanup( (f_show_command_sql, f_show_command_log, f_show_command_err) ) # cleanup( (f_show_command_sql, f_show_command_log, f_show_command_err) )
# #
#--- #---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
expected_stdout_1 = """ expected_stdout_1 = """
STDOUT: TEST1 TEST1
STDOUT: TEST1_ID_PK_DESC UNIQUE DESCENDING INDEX ON TEST1(ID) TEST1_ID_PK_DESC UNIQUE DESCENDING INDEX ON TEST1(ID)
""" """
show_script_1 = temp_file('show_script.sql')
show_output_1 = temp_file('show_script.out')
@pytest.mark.version('>=3.0.3') @pytest.mark.version('>=3.0.3')
@pytest.mark.xfail def test_1(act_1: Action, show_script_1: Path, show_output_1: Path):
def test_1(db_1): show_script_1.write_text('show table;show index;')
pytest.fail("Test not IMPLEMENTED") with act_1.db.connect() as con:
con.execute_immediate('recreate table test1(id int primary key using descending index test1_id_pk_desc)')
con.commit()
c = con.cursor()
c.execute('recreate table test2(id int primary key using descending index test2_id_pk_desc)')
# WARNING: we launch ISQL here in async mode in order to have ability to kill its
# process if it will hang!
with open(show_output_1, mode='w') as show_out:
p_show_sql = subprocess.Popen([act_1.vars['isql'], '-i', str(show_script_1),
'-user', act_1.db.user,
'-password', act_1.db.password, act_1.db.dsn],
stdout=show_out, stderr=subprocess.STDOUT)
try:
time.sleep(4)
finally:
p_show_sql.terminate()
#
act_1.expected_stdout = expected_stdout_1
act_1.stdout = show_output_1.read_text()
assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@ -2,21 +2,25 @@
# #
# id: bugs.core_5771 # id: bugs.core_5771
# title: Restore (without replace) when database already exists crashes gbak or Firebird (when run through service manager) # title: Restore (without replace) when database already exists crashes gbak or Firebird (when run through service manager)
# decription: # decription:
# Confirmed bug on 4.0.0.918 (as described in the ticket; 3.x is not affected). # Confirmed bug on 4.0.0.918 (as described in the ticket; 3.x is not affected).
# #
# tracker_id: CORE-5771 # tracker_id: CORE-5771
# min_versions: ['4.0'] # min_versions: ['4.0']
# versions: 4.0 # versions: 4.0
# qmid: None # qmid: None
import pytest import pytest
from firebird.qa import db_factory, isql_act, Action from difflib import unified_diff
from pathlib import Path
from firebird.qa import db_factory, python_act, Action, temp_file
# version: 4.0 # version: 4.0
# resources: None # resources: None
substitutions_1 = [] substitutions_1 = [('database .*tmp_core_5771.fdb already exists.', 'database tmp_core_5771.fdb already exists.'),
('opened file .*tmp_core_5771.fbk',
'opened file tmp_core_5771.fbk')]
init_script_1 = """""" init_script_1 = """"""
@ -24,34 +28,34 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1 # test_script_1
#--- #---
# #
# import os # import os
# import time # import time
# import difflib # import difflib
# import subprocess # import subprocess
# #
# os.environ["ISC_USER"] = user_name # os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password # os.environ["ISC_PASSWORD"] = user_password
# #
# db_conn.close() # db_conn.close()
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# def flush_and_close( file_handle ): # def flush_and_close( file_handle ):
# # https://docs.python.org/2/library/os.html#os.fsync # # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f, # # If you're starting with a Python file object f,
# # first do f.flush(), and # # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. # # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os # global os
# #
# file_handle.flush() # file_handle.flush()
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: # if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
# # otherwise: "OSError: [Errno 9] Bad file descriptor"! # # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno()) # os.fsync(file_handle.fileno())
# file_handle.close() # file_handle.close()
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# def cleanup( f_names_list ): # def cleanup( f_names_list ):
# global os # global os
# for i in range(len( f_names_list )): # for i in range(len( f_names_list )):
@ -62,16 +66,16 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# else: # else:
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.') # print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
# del_name = None # del_name = None
# #
# if del_name and os.path.isfile( del_name ): # if del_name and os.path.isfile( del_name ):
# os.remove( del_name ) # os.remove( del_name )
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# def svc_get_fb_log( f_fb_log ): # def svc_get_fb_log( f_fb_log ):
# #
# global subprocess # global subprocess
# #
# subprocess.call( [ context['fbsvcmgr_path'], # subprocess.call( [ context['fbsvcmgr_path'],
# "localhost:service_mgr", # "localhost:service_mgr",
# "action_get_fb_log" # "action_get_fb_log"
@ -79,19 +83,19 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# stdout=f_fb_log, stderr=subprocess.STDOUT # stdout=f_fb_log, stderr=subprocess.STDOUT
# ) # )
# return # return
# #
# #
# tmpfbk = 'tmp_core_5771.fbk' # tmpfbk = 'tmp_core_5771.fbk'
# tmpfbk='$(DATABASE_LOCATION)'+tmpfbk # tmpfbk='$(DATABASE_LOCATION)'+tmpfbk
# tmpfdb='$(DATABASE_LOCATION)'+'tmp_5771_restored.fdb' # tmpfdb='$(DATABASE_LOCATION)'+'tmp_5771_restored.fdb'
# #
# runProgram('gbak',['-b', dsn, tmpfbk]) # runProgram('gbak',['-b', dsn, tmpfbk])
# runProgram('gbak',['-rep', tmpfbk, 'localhost:'+tmpfdb]) # runProgram('gbak',['-rep', tmpfbk, 'localhost:'+tmpfdb])
# #
# f_fblog_before=open( os.path.join(context['temp_directory'],'tmp_5771_fblog_before.txt'), 'w') # f_fblog_before=open( os.path.join(context['temp_directory'],'tmp_5771_fblog_before.txt'), 'w')
# svc_get_fb_log( f_fblog_before ) # svc_get_fb_log( f_fblog_before )
# flush_and_close( f_fblog_before ) # flush_and_close( f_fblog_before )
# #
# f_restore_log=open( os.path.join(context['temp_directory'],'tmp_5771_check_restored.log'), 'w') # f_restore_log=open( os.path.join(context['temp_directory'],'tmp_5771_check_restored.log'), 'w')
# f_restore_err=open( os.path.join(context['temp_directory'],'tmp_5771_check_restored.err'), 'w') # f_restore_err=open( os.path.join(context['temp_directory'],'tmp_5771_check_restored.err'), 'w')
# subprocess.call([context['fbsvcmgr_path'],"localhost:service_mgr", # subprocess.call([context['fbsvcmgr_path'],"localhost:service_mgr",
@ -100,68 +104,86 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# "dbname", tmpfdb, # "dbname", tmpfdb,
# "verbose" # "verbose"
# ], # ],
# stdout=f_restore_log, # stdout=f_restore_log,
# stderr=f_restore_err) # stderr=f_restore_err)
# flush_and_close( f_restore_log ) # flush_and_close( f_restore_log )
# flush_and_close( f_restore_err ) # flush_and_close( f_restore_err )
# #
# f_fblog_after=open( os.path.join(context['temp_directory'],'tmp_5771_fblog_after.txt'), 'w') # f_fblog_after=open( os.path.join(context['temp_directory'],'tmp_5771_fblog_after.txt'), 'w')
# svc_get_fb_log( f_fblog_after ) # svc_get_fb_log( f_fblog_after )
# flush_and_close( f_fblog_after ) # flush_and_close( f_fblog_after )
# #
# #
# # Compare firebird.log versions BEFORE and AFTER this test: # # Compare firebird.log versions BEFORE and AFTER this test:
# ###################### # ######################
# #
# oldfb=open(f_fblog_before.name, 'r') # oldfb=open(f_fblog_before.name, 'r')
# newfb=open(f_fblog_after.name, 'r') # newfb=open(f_fblog_after.name, 'r')
# #
# difftext = ''.join(difflib.unified_diff( # difftext = ''.join(difflib.unified_diff(
# oldfb.readlines(), # oldfb.readlines(),
# newfb.readlines() # newfb.readlines()
# )) # ))
# oldfb.close() # oldfb.close()
# newfb.close() # newfb.close()
# #
# f_diff_txt=open( os.path.join(context['temp_directory'],'tmp_5771_diff.txt'), 'w') # f_diff_txt=open( os.path.join(context['temp_directory'],'tmp_5771_diff.txt'), 'w')
# f_diff_txt.write(difftext) # f_diff_txt.write(difftext)
# flush_and_close( f_diff_txt ) # flush_and_close( f_diff_txt )
# #
# # Check logs: # # Check logs:
# ############# # #############
# with open( f_restore_log.name,'r') as f: # with open( f_restore_log.name,'r') as f:
# for line in f: # for line in f:
# line=line.replace('$(DATABASE_LOCATION)','') # line=line.replace('$(DATABASE_LOCATION)','')
# print( 'RESTORE STDOUT:' + ' '.join( line.split() ).upper() ) # print( 'RESTORE STDOUT:' + ' '.join( line.split() ).upper() )
# #
# with open( f_restore_err.name,'r') as f: # with open( f_restore_err.name,'r') as f:
# for line in f: # for line in f:
# line=line.replace('$(DATABASE_LOCATION)','') # line=line.replace('$(DATABASE_LOCATION)','')
# print( 'RESTORE STDERR: ' + ' '.join( line.split() ).upper() ) # print( 'RESTORE STDERR: ' + ' '.join( line.split() ).upper() )
# #
# with open( f_diff_txt.name,'r') as f: # with open( f_diff_txt.name,'r') as f:
# for line in f: # for line in f:
# if line.startswith('+'): # if line.startswith('+'):
# print( 'UNEXPECTED DIFF IN FIREBIRD.LOG: ' + (' '.join(line.split()).upper()) ) # print( 'UNEXPECTED DIFF IN FIREBIRD.LOG: ' + (' '.join(line.split()).upper()) )
# #
# #
# # Cleanup: # # Cleanup:
# ########## # ##########
# time.sleep(1) # time.sleep(1)
# cleanup( (f_restore_log,f_restore_err,f_fblog_before,f_fblog_after,f_diff_txt,tmpfbk,tmpfdb) ) # cleanup( (f_restore_log,f_restore_err,f_fblog_before,f_fblog_after,f_diff_txt,tmpfbk,tmpfdb) )
# #
#--- #---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
expected_stdout_1 = """ expected_stdout_1 = """
RESTORE STDOUT:GBAK:OPENED FILE TMP_CORE_5771.FBK gbak:opened file tmp_core_5771.fbk
RESTORE STDERR: DATABASE TMP_5771_RESTORED.FDB ALREADY EXISTS. TO REPLACE IT, USE THE -REP SWITCH """
RESTORE STDERR: -EXITING BEFORE COMPLETION DUE TO ERRORS
""" expected_stderr_1 = """
database tmp_core_5771.fdb already exists. To replace it, use the -REP switch
-Exiting before completion due to errors
"""
fbk_file = temp_file('tmp_core_5771.fbk')
fdb_file = temp_file('tmp_core_5771.fdb')
@pytest.mark.version('>=4.0') @pytest.mark.version('>=4.0')
@pytest.mark.xfail def test_1(act_1: Action, fbk_file: Path, fdb_file: Path):
def test_1(db_1): act_1.gbak(switches=['-b', act_1.db.dsn, str(fbk_file)])
pytest.fail("Test not IMPLEMENTED") act_1.gbak(switches=['-rep', str(fbk_file), f'localhost:{fdb_file}'])
#
log_before = act_1.get_firebird_log()
#
act_1.reset()
act_1.expected_stdout = expected_stdout_1
act_1.expected_stderr = expected_stderr_1
act_1.svcmgr(switches=['action_restore', 'bkp_file', str(fbk_file),
'dbname', str(fdb_file), 'verbose'])
#
log_after = act_1.get_firebird_log()
assert list(unified_diff(log_before, log_after)) == []
assert act_1.clean_stderr == act_1.clean_expected_stderr
assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@ -2,7 +2,7 @@
# #
# id: bugs.core_5783 # id: bugs.core_5783
# title: execute statement ignores the text of the SQL-query after a comment of the form "-" # title: execute statement ignores the text of the SQL-query after a comment of the form "-"
# decription: # decription:
# We concatenate query from several elements and use ' # We concatenate query from several elements and use '
# ' delimiter only to split this query into lines. # ' delimiter only to split this query into lines.
# Also, we put single-line comment in SEPARATE line between 'select' and column/value that is obtained from DB. # Also, we put single-line comment in SEPARATE line between 'select' and column/value that is obtained from DB.
@ -18,7 +18,7 @@
# === # ===
# This query should NOT raise any exception and must produce normal output (string 'foo'). # This query should NOT raise any exception and must produce normal output (string 'foo').
# Thanks to hvlad for suggestions. # Thanks to hvlad for suggestions.
# #
# Confirmed bug on: # Confirmed bug on:
# 3.0.4.32924 # 3.0.4.32924
# 4.0.0.918 # 4.0.0.918
@ -33,14 +33,14 @@
# Checked on: # Checked on:
# 3.0.4.32941: OK, 1.187s. # 3.0.4.32941: OK, 1.187s.
# 4.0.0.947: OK, 1.328s. # 4.0.0.947: OK, 1.328s.
# #
# tracker_id: CORE-5783 # tracker_id: CORE-5783
# min_versions: ['3.0.4'] # min_versions: ['3.0.4']
# versions: 3.0.4 # versions: 3.0.4
# qmid: None # qmid: None
import pytest import pytest
from firebird.qa import db_factory, isql_act, Action from firebird.qa import db_factory, python_act, Action
# version: 3.0.4 # version: 3.0.4
# resources: None # resources: None
@ -55,27 +55,28 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
#--- #---
# import sys # import sys
# import os # import os
# #
# cur = db_conn.cursor() # cur = db_conn.cursor()
# #
# # NB: one need to use TWO backslash characters ('\\r') as escape for CR only within fbtest. # # NB: one need to use TWO backslash characters ('\\r') as escape for CR only within fbtest.
# # Single '' should be used when running under "pure" Python control: # # Single '' should be used when running under "pure" Python control:
# #
# sql_expr = ' '.join( ('select', '\\r', '-- comment N1', '\\r', "'foo' as msg", '\\r', 'from', '\\r', '-- comment N2', '\\r', 'rdb$database') ) # sql_expr = ' '.join( ('select', '\\r', '-- comment N1', '\\r', "'foo' as msg", '\\r', 'from', '\\r', '-- comment N2', '\\r', 'rdb$database') )
# #
# for i in sql_expr.split('\\r'): # for i in sql_expr.split('\\r'):
# print('Query line: ' + i) # print('Query line: ' + i)
# #
# #sql_expr = 'select 1 FROM test' # #sql_expr = 'select 1 FROM test'
# cur.execute( sql_expr ) # cur.execute( sql_expr )
# for r in cur: # for r in cur:
# print( 'Query result: ' + r[0] ) # print( 'Query result: ' + r[0] )
# #
# cur.close() # cur.close()
# #
# #
#--- #---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
expected_stdout_1 = """ expected_stdout_1 = """
Query line: select Query line: select
@ -85,11 +86,19 @@ expected_stdout_1 = """
Query line: -- comment N2 Query line: -- comment N2
Query line: rdb$database Query line: rdb$database
Query result: foo Query result: foo
""" """
@pytest.mark.version('>=3.0.4') @pytest.mark.version('>=3.0.4')
@pytest.mark.xfail def test_1(act_1: Action, capsys):
def test_1(db_1): with act_1.db.connect() as con:
pytest.fail("Test not IMPLEMENTED") c = con.cursor()
sql_expr = "select \r -- comment N1 \r 'foo' as msg \r from \r -- comment N2 \r rdb$database"
for line in sql_expr.split('\r'):
print(f'Query line: {line}')
c.execute(sql_expr)
for row in c:
print(f'Query result: {row[0]}')
#
act_1.expected_stdout = expected_stdout_1
act_1.stdout = capsys.readouterr().out
assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@ -2,7 +2,7 @@
# #
# id: bugs.core_5790 # id: bugs.core_5790
# title: User with DROP DATABASE privilege can't drop database # title: User with DROP DATABASE privilege can't drop database
# decription: # decription:
# Confirmed bug on 3.0.4.32924 # Confirmed bug on 3.0.4.32924
# Works fine on: # Works fine on:
# 3.0.4.32947: OK, 2.906s. # 3.0.4.32947: OK, 2.906s.
@ -11,14 +11,15 @@
# Checked on: # Checked on:
# 4.0.0.1421 CS, SC, SS # 4.0.0.1421 CS, SC, SS
# 3.0.5.33097 CS, SS # 3.0.5.33097 CS, SS
# #
# tracker_id: CORE-5790 # tracker_id: CORE-5790
# min_versions: ['3.0.4'] # min_versions: ['3.0.4']
# versions: 3.0.4 # versions: 3.0.4
# qmid: None # qmid: None
import pytest import pytest
from firebird.qa import db_factory, isql_act, Action from pathlib import Path
from firebird.qa import db_factory, python_act, Action, user_factory, User, temp_file
# version: 3.0.4 # version: 3.0.4
# resources: None # resources: None
@ -34,33 +35,33 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# import os # import os
# import time # import time
# import subprocess # import subprocess
# #
# os.environ["ISC_USER"] = user_name # os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password # os.environ["ISC_PASSWORD"] = user_password
# #
# thisdb=db_conn.database_name # thisdb=db_conn.database_name
# tmpfdb='$(DATABASE_LOCATION)'+'tmp_5790.tmp' # tmpfdb='$(DATABASE_LOCATION)'+'tmp_5790.tmp'
# tmpusr='tmp$c5790' # tmpusr='tmp$c5790'
# #
# db_conn.close() # db_conn.close()
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# def flush_and_close( file_handle ): # def flush_and_close( file_handle ):
# # https://docs.python.org/2/library/os.html#os.fsync # # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f, # # If you're starting with a Python file object f,
# # first do f.flush(), and # # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. # # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os # global os
# #
# file_handle.flush() # file_handle.flush()
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: # if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
# # otherwise: "OSError: [Errno 9] Bad file descriptor"! # # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno()) # os.fsync(file_handle.fileno())
# file_handle.close() # file_handle.close()
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# def cleanup( f_names_list ): # def cleanup( f_names_list ):
# global os # global os
# for i in range(len( f_names_list )): # for i in range(len( f_names_list )):
@ -71,13 +72,13 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# else: # else:
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.') # print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
# del_name = None # del_name = None
# #
# if del_name and os.path.isfile( del_name ): # if del_name and os.path.isfile( del_name ):
# os.remove( del_name ) # os.remove( del_name )
# #
# #-------------------------------------------- # #--------------------------------------------
# cleanup( (tmpfdb,) ) # cleanup( (tmpfdb,) )
# #
# sql_txt=''' # sql_txt='''
# create database 'localhost:%(tmpfdb)s'; # create database 'localhost:%(tmpfdb)s';
# alter database set linger to 0; # alter database set linger to 0;
@ -98,23 +99,23 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# ,r.rdb$field_name -- <null> # ,r.rdb$field_name -- <null>
# ,r.rdb$user_type -- 8 # ,r.rdb$user_type -- 8
# ,iif( r.rdb$object_type = decode( left(rdb$get_context('SYSTEM', 'ENGINE_VERSION'),1), '3',20, '4',21), 1, 0) "rdb_object_type_is_expected ?" # ,iif( r.rdb$object_type = decode( left(rdb$get_context('SYSTEM', 'ENGINE_VERSION'),1), '3',20, '4',21), 1, 0) "rdb_object_type_is_expected ?"
# from rdb$user_privileges r # from rdb$user_privileges r
# where r.rdb$user=upper('%(tmpusr)s'); # where r.rdb$user=upper('%(tmpusr)s');
# #
# -- this should NOT show any attachments: "Records affected: 0" must be shown here. # -- this should NOT show any attachments: "Records affected: 0" must be shown here.
# select * from mon$attachments where mon$attachment_id != current_connection; # select * from mon$attachments where mon$attachment_id != current_connection;
# commit; # commit;
# #
# drop database; # drop database;
# rollback; # rollback;
# #
# -- !!! 07.02.2019 only remote protocol must be used here !! # -- !!! 07.02.2019 only remote protocol must be used here !!
# -- Otherwise we will attempt to make local attach to security4.fdb # -- Otherwise we will attempt to make local attach to security4.fdb
# -- 335544344 : I/O error during "CreateFile (open)" operation for file "C:\\FB SS\\SECURITY4.FDB" # -- 335544344 : I/O error during "CreateFile (open)" operation for file "C:\\FB SS\\SECURITY4.FDB"
# -- 335544734 : Error while trying to open file # -- 335544734 : Error while trying to open file
# -- This is because securityN.fdb has by default linger = 60 seconds when we use SS, thus it is # -- This is because securityN.fdb has by default linger = 60 seconds when we use SS, thus it is
# -- stiil kept opened by FB server process. # -- stiil kept opened by FB server process.
# #
# connect 'localhost:%(thisdb)s'; -- OLD VERSION OF THIS TEST HAD ERROR HERE: connect '%(thisdb)s' # connect 'localhost:%(thisdb)s'; -- OLD VERSION OF THIS TEST HAD ERROR HERE: connect '%(thisdb)s'
# drop user %(tmpusr)s; # drop user %(tmpusr)s;
# commit; # commit;
@ -123,37 +124,38 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# --set echo on; # --set echo on;
# --select current_user, s.* from rdb$database left join sec$users s on s.sec$user_name not containing 'SYSDBA'; # --select current_user, s.* from rdb$database left join sec$users s on s.sec$user_name not containing 'SYSDBA';
# ''' % locals() # ''' % locals()
# #
# f_isql_cmd=open( os.path.join(context['temp_directory'],'tmp_5790.sql'), 'w') # f_isql_cmd=open( os.path.join(context['temp_directory'],'tmp_5790.sql'), 'w')
# f_isql_cmd.write(sql_txt) # f_isql_cmd.write(sql_txt)
# flush_and_close( f_isql_cmd ) # flush_and_close( f_isql_cmd )
# #
# f_isql_log=open( os.path.join(context['temp_directory'],'tmp_5790.log'), 'w') # f_isql_log=open( os.path.join(context['temp_directory'],'tmp_5790.log'), 'w')
# f_isql_err=open( os.path.join(context['temp_directory'],'tmp_5790.err'), 'w') # f_isql_err=open( os.path.join(context['temp_directory'],'tmp_5790.err'), 'w')
# subprocess.call( [ context['isql_path'], '-q', '-i', f_isql_cmd.name], stdout=f_isql_log, stderr=f_isql_err ) # subprocess.call( [ context['isql_path'], '-q', '-i', f_isql_cmd.name], stdout=f_isql_log, stderr=f_isql_err )
# flush_and_close( f_isql_log ) # flush_and_close( f_isql_log )
# flush_and_close( f_isql_err ) # flush_and_close( f_isql_err )
# #
# if os.path.isfile(tmpfdb): # if os.path.isfile(tmpfdb):
# print('### ERROR ### Database file was NOT deleted!') # print('### ERROR ### Database file was NOT deleted!')
# cleanup( tmpfdb, ) # cleanup( tmpfdb, )
# #
# with open(f_isql_log.name,'r') as f: # with open(f_isql_log.name,'r') as f:
# for line in f: # for line in f:
# print(line) # print(line)
# #
# with open(f_isql_err.name,'r') as f: # with open(f_isql_err.name,'r') as f:
# for line in f: # for line in f:
# print('UNEXPECTED STDERR: ' + line) # print('UNEXPECTED STDERR: ' + line)
# #
# # cleanup # # cleanup
# ######### # #########
# time.sleep(1) # time.sleep(1)
# f_list = (f_isql_log,f_isql_err,f_isql_cmd) # f_list = (f_isql_log,f_isql_err,f_isql_cmd)
# cleanup( f_list ) # cleanup( f_list )
# #
#--- #---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
expected_stdout_1 = """ expected_stdout_1 = """
RDB$USER TMP$C5790 RDB$USER TMP$C5790
@ -166,11 +168,40 @@ expected_stdout_1 = """
rdb_object_type_is_expected ? 1 rdb_object_type_is_expected ? 1
Records affected: 1 Records affected: 1
Records affected: 0 Records affected: 0
""" """
test_user = user_factory(name='tmp$c5790', password='123')
fdb_file = temp_file('tmp_5790.fdb')
@pytest.mark.version('>=3.0.4') @pytest.mark.version('>=3.0.4')
@pytest.mark.xfail def test_1(act_1: Action, test_user: User, fdb_file: Path):
def test_1(db_1): test_script = f"""
pytest.fail("Test not IMPLEMENTED") create database 'localhost:{fdb_file}';
alter database set linger to 0;
commit;
grant drop database to {test_user.name};
commit;
connect 'localhost:{fdb_file}' user {test_user.name} password '{test_user.password}';
set list on;
set count on;
select
r.rdb$user -- {test_user.name}
,r.rdb$grantor -- sysdba
,r.rdb$privilege -- o
,r.rdb$grant_option -- 0
,r.rdb$relation_name -- sql$database
,r.rdb$field_name -- <null>
,r.rdb$user_type -- 8
,iif( r.rdb$object_type = decode( left(rdb$get_context('SYSTEM', 'ENGINE_VERSION'),1), '3',20, '4',21), 1, 0) "rdb_object_type_is_expected ?"
from rdb$user_privileges r
where r.rdb$user=upper('{test_user.name}');
-- this should NOT show any attachments: "Records affected: 0" must be shown here.
select * from mon$attachments where mon$attachment_id != current_connection;
commit;
drop database;
rollback;
"""
act_1.isql(switches=['-q'], input=test_script)
assert not fdb_file.exists()

View File

@ -2,25 +2,25 @@
# #
# id: bugs.core_5793 # id: bugs.core_5793
# title: Error returned from DbCryptPlugin::setKey() is not shown # title: Error returned from DbCryptPlugin::setKey() is not shown
# decription: # decription:
# #
# Test database that is created by fbtest framework will be encrypted here using IBSurgeon Demo Encryption package # Test database that is created by fbtest framework will be encrypted here using IBSurgeon Demo Encryption package
# ( https://ib-aid.com/download-demo-firebird-encryption-plugin/ ; https://ib-aid.com/download/crypt/CryptTest.zip ) # ( https://ib-aid.com/download-demo-firebird-encryption-plugin/ ; https://ib-aid.com/download/crypt/CryptTest.zip )
# License file plugins\\dbcrypt.conf with unlimited expiration was provided by IBSurgeon to Firebird Foundation (FF). # License file plugins\\dbcrypt.conf with unlimited expiration was provided by IBSurgeon to Firebird Foundation (FF).
# This file was preliminary stored in FF Test machine. # This file was preliminary stored in FF Test machine.
# Test assumes that this file and all neccessary libraries already were stored into FB_HOME and %FB_HOME%\\plugins. # Test assumes that this file and all neccessary libraries already were stored into FB_HOME and %FB_HOME%\\plugins.
# #
# First, we try to encrypt DB with existing key and decrypt it aftee this - just to ensure that this mechanism works fine. # First, we try to encrypt DB with existing key and decrypt it aftee this - just to ensure that this mechanism works fine.
# Then we use statement 'alter database encrypt ...' with NON existing key and check parts of exception that will raise. # Then we use statement 'alter database encrypt ...' with NON existing key and check parts of exception that will raise.
# From these three parts (multi-string, int and bigint numbers) we check that 1st contains phrase about missed crypt key. # From these three parts (multi-string, int and bigint numbers) we check that 1st contains phrase about missed crypt key.
# ::: NOTE :::: # ::: NOTE ::::
# Text of messages differ in 3.0.5 vs 4.0.0: # Text of messages differ in 3.0.5 vs 4.0.0:
# 3.0.5: - Missing correct crypt key # 3.0.5: - Missing correct crypt key
# 4.0.0: - Missing database encryption key for your attachment # 4.0.0: - Missing database encryption key for your attachment
# - so we use regexp tool for check pattern matching. # - so we use regexp tool for check pattern matching.
# Because of different text related to missing plugin, this part is replaced with phrase: # Because of different text related to missing plugin, this part is replaced with phrase:
# <FOUND PATTERN-1 ABOUT MISSED ENCRYPTION KEY> -- both for 3.0.x and 4.0.x. # <FOUND PATTERN-1 ABOUT MISSED ENCRYPTION KEY> -- both for 3.0.x and 4.0.x.
# #
# Confirmed difference in error message (text decription, w/o sqlcode and gdscode): # Confirmed difference in error message (text decription, w/o sqlcode and gdscode):
# 1) 3.0.3.32900 # 1) 3.0.3.32900
# ===== # =====
@ -30,28 +30,28 @@
# - ALTER DATABASE failed # - ALTER DATABASE failed
# - Missing correct crypt key # - Missing correct crypt key
# ===== # =====
# #
# 2) 3.0.5.33139 - two lines were added: # 2) 3.0.5.33139 - two lines were added:
# ==== # ====
# - Plugin KeyHolder: # - Plugin KeyHolder:
# - Unknown key name FOO - key can't be found in KeyHolder.conf # - Unknown key name FOO - key can't be found in KeyHolder.conf
# ==== # ====
# #
# Checked on: # Checked on:
# 4.0.0.1524: OK, 4.674s. # 4.0.0.1524: OK, 4.674s.
# 3.0.5.33139: OK, 3.666s. # 3.0.5.33139: OK, 3.666s.
# #
# 15.04.2021. Adapted for run both on Windows and Linux. Checked on: # 15.04.2021. Adapted for run both on Windows and Linux. Checked on:
# Windows: 4.0.0.2416 # Windows: 4.0.0.2416
# Linux: 4.0.0.2416 # Linux: 4.0.0.2416
# #
# tracker_id: CORE-5793 # tracker_id: CORE-5793
# min_versions: ['3.0.4'] # min_versions: ['3.0.4']
# versions: 3.0.4 # versions: 3.0.4
# qmid: None # qmid: None
import pytest import pytest
from firebird.qa import db_factory, isql_act, Action from firebird.qa import db_factory, python_act, Action
# version: 3.0.4 # version: 3.0.4
# resources: None # resources: None
@ -64,14 +64,14 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1 # test_script_1
#--- #---
# #
# import os # import os
# import re # import re
# import time # import time
# #
# engine = db_conn.engine_version # engine = db_conn.engine_version
# #
# #
# # 14.04.2021. # # 14.04.2021.
# # Name of encryption plugin depends on OS: # # Name of encryption plugin depends on OS:
# # * for Windows we (currently) use plugin by IBSurgeon, its name is 'dbcrypt'; # # * for Windows we (currently) use plugin by IBSurgeon, its name is 'dbcrypt';
@ -80,11 +80,11 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# # ** 'fbSampleDbCrypt' for FB 4.x+ # # ** 'fbSampleDbCrypt' for FB 4.x+
# # # #
# PLUGIN_NAME = 'dbcrypt' if os.name == 'nt' else ( '"fbSampleDbCrypt"' if engine >= 4.0 else '"DbCrypt_example"') # PLUGIN_NAME = 'dbcrypt' if os.name == 'nt' else ( '"fbSampleDbCrypt"' if engine >= 4.0 else '"DbCrypt_example"')
# #
# cur = db_conn.cursor() # cur = db_conn.cursor()
# #
# print('1.1. Trying to encrypt with existing key.') # print('1.1. Trying to encrypt with existing key.')
# #
# ############################################## # ##############################################
# # WARNING! Do NOT use 'connection_obj.execute_immediate()' for ALTER DATABASE ENCRYPT... command! # # WARNING! Do NOT use 'connection_obj.execute_immediate()' for ALTER DATABASE ENCRYPT... command!
# # There is bug in FB driver which leads this command to fail with 'token unknown' message # # There is bug in FB driver which leads this command to fail with 'token unknown' message
@ -94,19 +94,19 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# # See letter from Pavel Cisar, 20.01.20 10:36 # # See letter from Pavel Cisar, 20.01.20 10:36
# ############################################## # ##############################################
# cur.execute('alter database encrypt with %(PLUGIN_NAME)s key Red' % locals()) # cur.execute('alter database encrypt with %(PLUGIN_NAME)s key Red' % locals())
# #
# db_conn.commit() # db_conn.commit()
# #
# time.sleep(2) # time.sleep(2)
# #
# print('1.2. Delay completed, DB now must be encrypted.') # print('1.2. Delay completed, DB now must be encrypted.')
# #
# print('2.1. Trying to decrypt.') # print('2.1. Trying to decrypt.')
# cur.execute('alter database decrypt') # cur.execute('alter database decrypt')
# db_conn.commit() # db_conn.commit()
# time.sleep(2) # time.sleep(2)
# print('2.2. Delay completed, DB now must be decrypted.') # print('2.2. Delay completed, DB now must be decrypted.')
# #
# print('3.1. Trying to encrypt with non-existing key') # print('3.1. Trying to encrypt with non-existing key')
# try: # try:
# cur.execute('alter database encrypt with %(PLUGIN_NAME)s key no_such_key_foo' % locals()) # cur.execute('alter database encrypt with %(PLUGIN_NAME)s key no_such_key_foo' % locals())
@ -125,14 +125,14 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# # Linux: # # Linux:
# # - Plugin CryptKeyHolder_example: # # - Plugin CryptKeyHolder_example:
# # Crypt key NO_SUCH_KEY_FOO not set # # Crypt key NO_SUCH_KEY_FOO not set
# #
# missed_key_ptn1 = re.compile('.*missing\\s+.*(crypt key|encryption key).*', re.IGNORECASE) # missed_key_ptn1 = re.compile('.*missing\\s+.*(crypt key|encryption key).*', re.IGNORECASE)
# missed_key_ptn2 = '' # missed_key_ptn2 = ''
# if os.name == 'nt': # if os.name == 'nt':
# missed_key_ptn2 = re.compile(".*Unknown\\s+key\\s+name.*key\\s+can't\\s+be\\s+found.*", re.IGNORECASE) # missed_key_ptn2 = re.compile(".*Unknown\\s+key\\s+name.*key\\s+can't\\s+be\\s+found.*", re.IGNORECASE)
# else: # else:
# missed_key_ptn2 = re.compile(".*Crypt\\s+key\\s+.*\\s+not\\s+set", re.IGNORECASE) # missed_key_ptn2 = re.compile(".*Crypt\\s+key\\s+.*\\s+not\\s+set", re.IGNORECASE)
# #
# for x in e.args: # for x in e.args:
# if isinstance( x, str): # if isinstance( x, str):
# for r in x.split('\\n'): # for r in x.split('\\n'):
@ -149,9 +149,10 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# finally: # finally:
# cur.close() # cur.close()
# db_conn.close() # db_conn.close()
# #
#--- #---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
expected_stdout_1 = """ expected_stdout_1 = """
1.1. Trying to encrypt with existing key. 1.1. Trying to encrypt with existing key.
@ -164,14 +165,13 @@ expected_stdout_1 = """
- unsuccessful metadata update - unsuccessful metadata update
- ALTER DATABASE failed - ALTER DATABASE failed
<FOUND PATTERN-1 ABOUT MISSED ENCRYPTION KEY> <FOUND PATTERN-1 ABOUT MISSED ENCRYPTION KEY>
<FOUND PATTERN-2 ABOUT MISSED ENCRYPTION KEY> <FOUND PATTERN-2 ABOUT MISSED ENCRYPTION KEY>
-607 -607
335544351 335544351
""" """
@pytest.mark.version('>=3.0.4') @pytest.mark.version('>=3.0.4')
@pytest.mark.xfail def test_1(act_1: Action):
def test_1(db_1): pytest.skip("Requires encryption plugin")
pytest.fail("Test not IMPLEMENTED")

View File

@ -2,17 +2,17 @@
# #
# id: bugs.core_5796 # id: bugs.core_5796
# title: gstat may produce faulty report about presence of some none-encrypted pages in database # title: gstat may produce faulty report about presence of some none-encrypted pages in database
# decription: # decription:
# We create new database ('tmp_core_5796.fdb') and try to encrypt it usng IBSurgeon Demo Encryption package # We create new database ('tmp_core_5796.fdb') and try to encrypt it usng IBSurgeon Demo Encryption package
# ( https://ib-aid.com/download-demo-firebird-encryption-plugin/ ; https://ib-aid.com/download/crypt/CryptTest.zip ) # ( https://ib-aid.com/download-demo-firebird-encryption-plugin/ ; https://ib-aid.com/download/crypt/CryptTest.zip )
# License file plugins\\dbcrypt.conf with unlimited expiration was provided by IBSurgeon to Firebird Foundation (FF). # License file plugins\\dbcrypt.conf with unlimited expiration was provided by IBSurgeon to Firebird Foundation (FF).
# This file was preliminary stored in FF Test machine. # This file was preliminary stored in FF Test machine.
# Test assumes that this file and all neccessary libraries already were stored into FB_HOME and %FB_HOME%\\plugins. # Test assumes that this file and all neccessary libraries already were stored into FB_HOME and %FB_HOME%\\plugins.
# #
# After test database will be created, we try to encrypt it using 'alter database encrypt with <plugin_name> ...' command # After test database will be created, we try to encrypt it using 'alter database encrypt with <plugin_name> ...' command
# (where <plugin_name> = dbcrypt - name of .dll in FB_HOME\\plugins\\ folder that implements encryption). # (where <plugin_name> = dbcrypt - name of .dll in FB_HOME\\plugins\\ folder that implements encryption).
# Then we allow engine to complete this job - take delay about 1..2 seconds BEFORE detach from database. # Then we allow engine to complete this job - take delay about 1..2 seconds BEFORE detach from database.
# #
# After this we detach from DB, run 'gstat -h' and filter its attributes and messages from 'Variable header' section. # After this we detach from DB, run 'gstat -h' and filter its attributes and messages from 'Variable header' section.
# In the output of gstat we check that its 'tail' will look like this: # In the output of gstat we check that its 'tail' will look like this:
# === # ===
@ -22,25 +22,27 @@
# Encryption key name: RED # Encryption key name: RED
# === # ===
# (concrete values for checksum and hash will be ignored - see 'substitutions' section). # (concrete values for checksum and hash will be ignored - see 'substitutions' section).
# #
# Finally, we change this temp DB statee to full shutdown in order to have 100% ability to drop this file. # Finally, we change this temp DB statee to full shutdown in order to have 100% ability to drop this file.
# #
# 15.04.2021. Adapted for run both on Windows and Linux. Checked on: # 15.04.2021. Adapted for run both on Windows and Linux. Checked on:
# Windows: 4.0.0.2416 # Windows: 4.0.0.2416
# Linux: 4.0.0.2416 # Linux: 4.0.0.2416
# #
# tracker_id: CORE-5796 # tracker_id: CORE-5796
# min_versions: ['3.0.4'] # min_versions: ['3.0.4']
# versions: 3.0.4 # versions: 3.0.4
# qmid: None # qmid: None
import pytest import pytest
from firebird.qa import db_factory, isql_act, Action from firebird.qa import db_factory, python_act, Action
# version: 3.0.4 # version: 3.0.4
# resources: None # resources: None
substitutions_1 = [('ATTRIBUTES .* ENCRYPTED, PLUGIN .*', 'ATTRIBUTES ENCRYPTED'), ('CRYPT CHECKSUM.*', 'CRYPT CHECKSUM'), ('KEY HASH.*', 'KEY HASH'), ('ENCRYPTION KEY NAME.*', 'ENCRYPTION KEY')] substitutions_1 = [('ATTRIBUTES .* ENCRYPTED, PLUGIN .*', 'ATTRIBUTES ENCRYPTED'),
('CRYPT CHECKSUM.*', 'CRYPT CHECKSUM'), ('KEY HASH.*', 'KEY HASH'),
('ENCRYPTION KEY NAME.*', 'ENCRYPTION KEY')]
init_script_1 = """""" init_script_1 = """"""
@ -48,35 +50,35 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1 # test_script_1
#--- #---
# #
# import os # import os
# import time # import time
# import subprocess # import subprocess
# import re # import re
# import fdb # import fdb
# #
# os.environ["ISC_USER"] = user_name # os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password # os.environ["ISC_PASSWORD"] = user_password
# engine = db_conn.engine_version # engine = db_conn.engine_version
# db_conn.close() # db_conn.close()
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# def flush_and_close( file_handle ): # def flush_and_close( file_handle ):
# # https://docs.python.org/2/library/os.html#os.fsync # # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f, # # If you're starting with a Python file object f,
# # first do f.flush(), and # # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. # # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os # global os
# #
# file_handle.flush() # file_handle.flush()
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: # if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
# # otherwise: "OSError: [Errno 9] Bad file descriptor"! # # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno()) # os.fsync(file_handle.fileno())
# file_handle.close() # file_handle.close()
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# def cleanup( f_names_list ): # def cleanup( f_names_list ):
# global os # global os
# for i in range(len( f_names_list )): # for i in range(len( f_names_list )):
@ -88,20 +90,20 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.') # print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
# print('type(f_names_list[i])=',type(f_names_list[i])) # print('type(f_names_list[i])=',type(f_names_list[i]))
# del_name = None # del_name = None
# #
# if del_name and os.path.isfile( del_name ): # if del_name and os.path.isfile( del_name ):
# os.remove( del_name ) # os.remove( del_name )
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# #
# tmpfdb='$(DATABASE_LOCATION)'+'tmp_core_5796.fdb' # tmpfdb='$(DATABASE_LOCATION)'+'tmp_core_5796.fdb'
# #
# cleanup( (tmpfdb,) ) # cleanup( (tmpfdb,) )
# #
# con = fdb.create_database( dsn = 'localhost:'+tmpfdb ) # con = fdb.create_database( dsn = 'localhost:'+tmpfdb )
# cur = con.cursor() # cur = con.cursor()
# #
# # 14.04.2021. # # 14.04.2021.
# # Name of encryption plugin depends on OS: # # Name of encryption plugin depends on OS:
# # * for Windows we (currently) use plugin by IBSurgeon, its name is 'dbcrypt'; # # * for Windows we (currently) use plugin by IBSurgeon, its name is 'dbcrypt';
@ -110,7 +112,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# # ** 'fbSampleDbCrypt' for FB 4.x+ # # ** 'fbSampleDbCrypt' for FB 4.x+
# # # #
# PLUGIN_NAME = 'dbcrypt' if os.name == 'nt' else ( '"fbSampleDbCrypt"' if engine >= 4.0 else '"DbCrypt_example"') # PLUGIN_NAME = 'dbcrypt' if os.name == 'nt' else ( '"fbSampleDbCrypt"' if engine >= 4.0 else '"DbCrypt_example"')
# #
# ############################################## # ##############################################
# # WARNING! Do NOT use 'connection_obj.execute_immediate()' for ALTER DATABASE ENCRYPT... command! # # WARNING! Do NOT use 'connection_obj.execute_immediate()' for ALTER DATABASE ENCRYPT... command!
# # There is bug in FB driver which leads this command to fail with 'token unknown' message # # There is bug in FB driver which leads this command to fail with 'token unknown' message
@ -119,64 +121,65 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# # One need to to use only cursor_obj.execute() for encryption! # # One need to to use only cursor_obj.execute() for encryption!
# # See letter from Pavel Cisar, 20.01.20 10:36 # # See letter from Pavel Cisar, 20.01.20 10:36
# ############################################## # ##############################################
# #
# cur.execute('alter database encrypt with %(PLUGIN_NAME)s key Red' % locals()) # cur.execute('alter database encrypt with %(PLUGIN_NAME)s key Red' % locals())
# con.commit() # con.commit()
# time.sleep(2) # time.sleep(2)
# # ^ # # ^
# # +-------- !! ALLOW BACKGROUND ENCRYPTION PROCESS TO COMPLETE ITS JOB !! # # +-------- !! ALLOW BACKGROUND ENCRYPTION PROCESS TO COMPLETE ITS JOB !!
# #
# con.close() # con.close()
# #
# #--------------------------------- get DB header info -------------------- # #--------------------------------- get DB header info --------------------
# #
# f_gstat_log = open( os.path.join(context['temp_directory'],'tmp_dbstat_5796.log'), 'w') # f_gstat_log = open( os.path.join(context['temp_directory'],'tmp_dbstat_5796.log'), 'w')
# f_gstat_err = open( os.path.join(context['temp_directory'],'tmp_dbstat_5796.err'), 'w') # f_gstat_err = open( os.path.join(context['temp_directory'],'tmp_dbstat_5796.err'), 'w')
# #
# subprocess.call( [ context['gstat_path'], "-e", "localhost:"+tmpfdb], # subprocess.call( [ context['gstat_path'], "-e", "localhost:"+tmpfdb],
# stdout = f_gstat_log, # stdout = f_gstat_log,
# stderr = f_gstat_err # stderr = f_gstat_err
# ) # )
# #
# #
# flush_and_close( f_gstat_log ) # flush_and_close( f_gstat_log )
# flush_and_close( f_gstat_err ) # flush_and_close( f_gstat_err )
# #
# #--------------------------------- shutdown temp DB -------------------- # #--------------------------------- shutdown temp DB --------------------
# #
# f_dbshut_log = open( os.path.join(context['temp_directory'],'tmp_dbshut_5796.log'), 'w') # f_dbshut_log = open( os.path.join(context['temp_directory'],'tmp_dbshut_5796.log'), 'w')
# subprocess.call( [ context['gfix_path'], 'localhost:'+tmpfdb, "-shut", "full", "-force", "0" ], # subprocess.call( [ context['gfix_path'], 'localhost:'+tmpfdb, "-shut", "full", "-force", "0" ],
# stdout = f_dbshut_log, # stdout = f_dbshut_log,
# stderr = subprocess.STDOUT # stderr = subprocess.STDOUT
# ) # )
# flush_and_close( f_dbshut_log ) # flush_and_close( f_dbshut_log )
# #
# allowed_patterns = ( # allowed_patterns = (
# re.compile( '\\s*Attributes\\.*', re.IGNORECASE) # re.compile( '\\s*Attributes\\.*', re.IGNORECASE)
# ,re.compile('crypt\\s+checksum:\\s+\\S+', re.IGNORECASE) # ,re.compile('crypt\\s+checksum:\\s+\\S+', re.IGNORECASE)
# ,re.compile('key\\s+hash:\\s+\\S+', re.IGNORECASE) # ,re.compile('key\\s+hash:\\s+\\S+', re.IGNORECASE)
# ,re.compile('encryption\\s+key\\s+name:\\s+\\S+', re.IGNORECASE) # ,re.compile('encryption\\s+key\\s+name:\\s+\\S+', re.IGNORECASE)
# ) # )
# #
# with open( f_gstat_log.name,'r') as f: # with open( f_gstat_log.name,'r') as f:
# for line in f: # for line in f:
# match2some = filter( None, [ p.search(line) for p in allowed_patterns ] ) # match2some = filter( None, [ p.search(line) for p in allowed_patterns ] )
# if match2some: # if match2some:
# print( (' '.join( line.split()).upper() ) ) # print( (' '.join( line.split()).upper() ) )
# #
# with open( f_gstat_err.name,'r') as f: # with open( f_gstat_err.name,'r') as f:
# for line in f: # for line in f:
# print("Unexpected STDERR: "+line) # print("Unexpected STDERR: "+line)
# #
# # cleanup: # # cleanup:
# ########## # ##########
# time.sleep(1) # time.sleep(1)
# cleanup( (f_gstat_log, f_gstat_err, f_dbshut_log, tmpfdb) ) # cleanup( (f_gstat_log, f_gstat_err, f_dbshut_log, tmpfdb) )
# #
# #
# #
#--- #---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
expected_stdout_1 = """ expected_stdout_1 = """
ATTRIBUTES FORCE WRITE, ENCRYPTED, PLUGIN DBCRYPT ATTRIBUTES FORCE WRITE, ENCRYPTED, PLUGIN DBCRYPT
@ -186,8 +189,7 @@ expected_stdout_1 = """
""" """
@pytest.mark.version('>=3.0.4') @pytest.mark.version('>=3.0.4')
@pytest.mark.xfail def test_1(act_1: Action):
def test_1(db_1): pytest.skip("Requires encryption plugin")
pytest.fail("Test not IMPLEMENTED")

View File

@ -2,39 +2,40 @@
# #
# id: bugs.core_5802 # id: bugs.core_5802
# title: Field name max length check wrongly if national characters specified # title: Field name max length check wrongly if national characters specified
# decription: # decription:
# Confirmed bug on 3.0.4.32972, got error: # Confirmed bug on 3.0.4.32972, got error:
# Statement failed, SQLSTATE = 22001 # Statement failed, SQLSTATE = 22001
# arithmetic exception, numeric overflow, or string truncation # arithmetic exception, numeric overflow, or string truncation
# -string right truncation # -string right truncation
# -expected length 31, actual 31 # -expected length 31, actual 31
# #
# Though this ticket was fixed only for FB 4.x, Adriano notes that error message # Though this ticket was fixed only for FB 4.x, Adriano notes that error message
# was corrected in FB 3.0.6. Thus we check both major versions but use different # was corrected in FB 3.0.6. Thus we check both major versions but use different
# length of columns: 32 and 64. # length of columns: 32 and 64.
# Checked on: # Checked on:
# 4.0.0.1753 SS: 1.630s. # 4.0.0.1753 SS: 1.630s.
# 3.0.6.33237 SS: 0.562s. # 3.0.6.33237 SS: 0.562s.
# #
# 03-mar-2021. Re-implemented in order to have ability to run this test on Linux. # 03-mar-2021. Re-implemented in order to have ability to run this test on Linux.
# Test encodes to UTF8 all needed statements (SET NAMES; CONNECT; DDL and DML) and stores this text in .sql file. # Test encodes to UTF8 all needed statements (SET NAMES; CONNECT; DDL and DML) and stores this text in .sql file.
# NOTE: 'SET NAMES' contain character set that must be used for reproducing problem (WIN1251 in this test). # NOTE: 'SET NAMES' contain character set that must be used for reproducing problem (WIN1251 in this test).
# Then ISQL is launched in separate (child) process which performs all necessary actions (using required charset). # Then ISQL is launched in separate (child) process which performs all necessary actions (using required charset).
# Result will be redirected to log(s) which will be opened further via codecs.open(...encoding='cp1251'). # Result will be redirected to log(s) which will be opened further via codecs.open(...encoding='cp1251').
# Finally, its content will be converted to UTF8 for showing in expected_stdout. # Finally, its content will be converted to UTF8 for showing in expected_stdout.
# #
# Checked on: # Checked on:
# * Windows: 4.0.0.2377, 3.0.8.33420 # * Windows: 4.0.0.2377, 3.0.8.33420
# * Linux: 4.0.0.2377, 3.0.8.33415 # * Linux: 4.0.0.2377, 3.0.8.33415
# #
# #
# tracker_id: CORE-5802 # tracker_id: CORE-5802
# min_versions: ['3.0.6'] # min_versions: ['3.0.6']
# versions: 3.0.6 # versions: 3.0.6
# qmid: None # qmid: None
import pytest import pytest
from firebird.qa import db_factory, isql_act, Action from pathlib import Path
from firebird.qa import db_factory, python_act, Action, temp_file
# version: 3.0.6 # version: 3.0.6
# resources: None # resources: None
@ -47,31 +48,31 @@ db_1 = db_factory(charset='WIN1251', sql_dialect=3, init=init_script_1)
# test_script_1 # test_script_1
#--- #---
# #
# import os # import os
# import codecs # import codecs
# import subprocess # import subprocess
# import time # import time
# engine = db_conn.engine_version # engine = db_conn.engine_version
# db_conn.close() # db_conn.close()
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# def flush_and_close( file_handle ): # def flush_and_close( file_handle ):
# # https://docs.python.org/2/library/os.html#os.fsync # # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f, # # If you're starting with a Python file object f,
# # first do f.flush(), and # # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. # # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os # global os
# #
# file_handle.flush() # file_handle.flush()
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: # if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
# # otherwise: "OSError: [Errno 9] Bad file descriptor"! # # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno()) # os.fsync(file_handle.fileno())
# file_handle.close() # file_handle.close()
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# def cleanup( f_names_list ): # def cleanup( f_names_list ):
# global os # global os
# for i in range(len( f_names_list )): # for i in range(len( f_names_list )):
@ -82,12 +83,12 @@ db_1 = db_factory(charset='WIN1251', sql_dialect=3, init=init_script_1)
# else: # else:
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.') # print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
# del_name = None # del_name = None
# #
# if del_name and os.path.isfile( del_name ): # if del_name and os.path.isfile( del_name ):
# os.remove( del_name ) # os.remove( del_name )
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# if engine < 4: # if engine < 4:
# # Maximal number of characters in the column for FB 3.x is 31. # # Maximal number of characters in the column for FB 3.x is 31.
# # Here we use name of 32 characters and this must raise error # # Here we use name of 32 characters and this must raise error
@ -100,14 +101,14 @@ db_1 = db_factory(charset='WIN1251', sql_dialect=3, init=init_script_1)
# # with text "Name longer than database column size": # # with text "Name longer than database column size":
# # # #
# column_title = 'СъешьЖеЕщёЭтихПрекрасныхФранкоБулокВместоДурацкихМорковныхКотлет' # column_title = 'СъешьЖеЕщёЭтихПрекрасныхФранкоБулокВместоДурацкихМорковныхКотлет'
# #
# # Code to be executed further in separate ISQL process: # # Code to be executed further in separate ISQL process:
# ############################# # #############################
# sql_txt=''' # sql_txt='''
# set bail on; # set bail on;
# set names win1251; # set names win1251;
# connect '%(dsn)s' user '%(user_name)s' password '%(user_password)s'; # connect '%(dsn)s' user '%(user_name)s' password '%(user_password)s';
# #
# set list on; # set list on;
# set sqlda_display on; # set sqlda_display on;
# -- Maximal number of characters in the column for FB 3.x is 31. # -- Maximal number of characters in the column for FB 3.x is 31.
@ -115,44 +116,65 @@ db_1 = db_factory(charset='WIN1251', sql_dialect=3, init=init_script_1)
# -- with text "Name longer than database column size": # -- with text "Name longer than database column size":
# select 1 as "%(column_title)s" from rdb$database; # select 1 as "%(column_title)s" from rdb$database;
# ''' % dict(globals(), **locals()) # ''' % dict(globals(), **locals())
# #
# f_run_sql = open( os.path.join(context['temp_directory'], 'tmp_5802_win1251.sql'), 'w' ) # f_run_sql = open( os.path.join(context['temp_directory'], 'tmp_5802_win1251.sql'), 'w' )
# f_run_sql.write( sql_txt.decode('utf8').encode('cp1251') ) # f_run_sql.write( sql_txt.decode('utf8').encode('cp1251') )
# flush_and_close( f_run_sql ) # flush_and_close( f_run_sql )
# #
# # result: file tmp_5802_win1251.sql is encoded in win1251 # # result: file tmp_5802_win1251.sql is encoded in win1251
# #
# f_run_log = open( os.path.splitext(f_run_sql.name)[0]+'.log', 'w') # f_run_log = open( os.path.splitext(f_run_sql.name)[0]+'.log', 'w')
# subprocess.call( [ context['isql_path'], '-q', '-i', f_run_sql.name ], # subprocess.call( [ context['isql_path'], '-q', '-i', f_run_sql.name ],
# stdout = f_run_log, # stdout = f_run_log,
# stderr = subprocess.STDOUT # stderr = subprocess.STDOUT
# ) # )
# flush_and_close( f_run_log ) # result: output will be encoded in win1251 # flush_and_close( f_run_log ) # result: output will be encoded in win1251
# #
# with codecs.open(f_run_log.name, 'r', encoding='cp1251' ) as f: # with codecs.open(f_run_log.name, 'r', encoding='cp1251' ) as f:
# result_in_win1251 = f.readlines() # result_in_win1251 = f.readlines()
# #
# for i in result_in_win1251: # for i in result_in_win1251:
# print( i.encode('utf8') ) # print( i.encode('utf8') )
# #
# # cleanup: # # cleanup:
# ########### # ###########
# cleanup( (f_run_sql, f_run_log) ) # cleanup( (f_run_sql, f_run_log) )
# #
# #
#--- #---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """ act_1 = python_act('db_1', substitutions=substitutions_1)
expected_stderr_1 = """
Statement failed, SQLSTATE = 42000 Statement failed, SQLSTATE = 42000
Dynamic SQL Error Dynamic SQL Error
-SQL error code = -104 -SQL error code = -104
-Name longer than database column size -Name longer than database column size
""" """
test_script = temp_file('test_script.sql')
@pytest.mark.version('>=3.0.6') @pytest.mark.version('>=3.0.6')
@pytest.mark.xfail def test_1(act_1: Action, test_script: Path):
def test_1(db_1): if act_1.is_version('<4'):
pytest.fail("Test not IMPLEMENTED") # Maximal number of characters in the column for FB 3.x is 31.
# Here we use name of 32 characters and this must raise error
# with text "Name longer than database column size":
column_title = 'СъешьЖеЕщёЭтихМягкихФранкоБулок'
else:
# Maximal number of characters in the column for FB 4.x is 63.
# Here we use name of 64 characters and this must raise error
# with text "Name longer than database column size":
column_title = 'СъешьЖеЕщёЭтихПрекрасныхФранкоБулокВместоДурацкихМорковныхКотлет'
# Code to be executed further in separate ISQL process:
test_script.write_text(f"""
set list on;
set sqlda_display on;
-- Maximal number of characters in the column for FB 3.x is 31.
-- Here we use name of 32 characters and this must raise error
-- with text "Name longer than database column size":
select 1 as "{column_title}" from rdb$database;
""", encoding='cp1251')
#
act_1.expected_stderr = expected_stderr_1
act_1.isql(switches=['-q'], input_file=test_script, charset='WIN1251')
assert act_1.clean_stderr == act_1.clean_expected_stderr

View File

@ -2,39 +2,39 @@
# #
# id: bugs.core_5808 # id: bugs.core_5808
# title: Support backup of encrypted databases # title: Support backup of encrypted databases
# decription: # decription:
# THIS TEST USES IBSurgeon Demo Encryption package # THIS TEST USES IBSurgeon Demo Encryption package
# ################################################ # ################################################
# ( https://ib-aid.com/download-demo-firebird-encryption-plugin/ ; https://ib-aid.com/download/crypt/CryptTest.zip ) # ( https://ib-aid.com/download-demo-firebird-encryption-plugin/ ; https://ib-aid.com/download/crypt/CryptTest.zip )
# License file plugins\\dbcrypt.conf with unlimited expiration was provided by IBSurgeon to Firebird Foundation (FF). # License file plugins\\dbcrypt.conf with unlimited expiration was provided by IBSurgeon to Firebird Foundation (FF).
# This file was preliminary stored in FF Test machine. # This file was preliminary stored in FF Test machine.
# Test assumes that this file and all neccessary libraries already were stored into FB_HOME and %FB_HOME%\\plugins. # Test assumes that this file and all neccessary libraries already were stored into FB_HOME and %FB_HOME%\\plugins.
# #
# After test database will be created, we try to encrypt it using 'alter database encrypt with <plugin_name> ...' command # After test database will be created, we try to encrypt it using 'alter database encrypt with <plugin_name> ...' command
# (where <plugin_name> = dbcrypt - name of .dll in FB_HOME\\plugins\\ folder that implements encryption). # (where <plugin_name> = dbcrypt - name of .dll in FB_HOME\\plugins\\ folder that implements encryption).
# Then we allow engine to complete this job - take delay about 1..2 seconds BEFORE detach from database. # Then we allow engine to complete this job - take delay about 1..2 seconds BEFORE detach from database.
# After this we make backup of encrypted database + restore. # After this we make backup of encrypted database + restore.
# #
# Then we make snapshot of firebird.log, run 'gfix -v -full' of restored database and once again take snapshot of firebird.log. # Then we make snapshot of firebird.log, run 'gfix -v -full' of restored database and once again take snapshot of firebird.log.
# Comparison of these two logs is result of validation. It should contain line about start and line with finish info. # Comparison of these two logs is result of validation. It should contain line about start and line with finish info.
# The latter must look like this: "Validation finished: 0 errors, 0 warnings, 0 fixed" # The latter must look like this: "Validation finished: 0 errors, 0 warnings, 0 fixed"
# #
# Checked on: # Checked on:
# 40sS, build 4.0.0.1487: OK, 6.552s. # 40sS, build 4.0.0.1487: OK, 6.552s.
# 40sC, build 4.0.0.1421: OK, 11.812s. # 40sC, build 4.0.0.1421: OK, 11.812s.
# 40Cs, build 4.0.0.1485: OK, 8.097s. # 40Cs, build 4.0.0.1485: OK, 8.097s.
# #
# 15.04.2021. Adapted for run both on Windows and Linux. Checked on: # 15.04.2021. Adapted for run both on Windows and Linux. Checked on:
# Windows: 4.0.0.2416 # Windows: 4.0.0.2416
# Linux: 4.0.0.2416 # Linux: 4.0.0.2416
# #
# tracker_id: CORE-5808 # tracker_id: CORE-5808
# min_versions: ['4.0'] # min_versions: ['4.0']
# versions: 4.0 # versions: 4.0
# qmid: None # qmid: None
import pytest import pytest
from firebird.qa import db_factory, isql_act, Action from firebird.qa import db_factory, python_act, Action
# version: 4.0 # version: 4.0
# resources: None # resources: None
@ -47,24 +47,24 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1 # test_script_1
#--- #---
# #
# import os # import os
# import time # import time
# import difflib # import difflib
# import subprocess # import subprocess
# import re # import re
# import fdb # import fdb
# #
# os.environ["ISC_USER"] = user_name # os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password # os.environ["ISC_PASSWORD"] = user_password
# db_conn.close() # db_conn.close()
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# def svc_get_fb_log( f_fb_log ): # def svc_get_fb_log( f_fb_log ):
# #
# global subprocess # global subprocess
# #
# subprocess.call( [ context['fbsvcmgr_path'], # subprocess.call( [ context['fbsvcmgr_path'],
# "localhost:service_mgr", # "localhost:service_mgr",
# "action_get_fb_log" # "action_get_fb_log"
@ -72,24 +72,24 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# stdout=f_fb_log, stderr=subprocess.STDOUT # stdout=f_fb_log, stderr=subprocess.STDOUT
# ) # )
# return # return
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# def flush_and_close( file_handle ): # def flush_and_close( file_handle ):
# # https://docs.python.org/2/library/os.html#os.fsync # # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f, # # If you're starting with a Python file object f,
# # first do f.flush(), and # # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. # # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os # global os
# #
# file_handle.flush() # file_handle.flush()
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: # if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
# # otherwise: "OSError: [Errno 9] Bad file descriptor"! # # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno()) # os.fsync(file_handle.fileno())
# file_handle.close() # file_handle.close()
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# def cleanup( f_names_list ): # def cleanup( f_names_list ):
# global os # global os
# for i in range(len( f_names_list )): # for i in range(len( f_names_list )):
@ -101,19 +101,19 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.') # print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
# print('type(f_names_list[i])=',type(f_names_list[i])) # print('type(f_names_list[i])=',type(f_names_list[i]))
# del_name = None # del_name = None
# #
# if del_name and os.path.isfile( del_name ): # if del_name and os.path.isfile( del_name ):
# os.remove( del_name ) # os.remove( del_name )
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# tmpfdb='$(DATABASE_LOCATION)'+'tmp_core_5808.fdb' # tmpfdb='$(DATABASE_LOCATION)'+'tmp_core_5808.fdb'
# tmpfbk='$(DATABASE_LOCATION)'+'tmp_core_5808.fbk' # tmpfbk='$(DATABASE_LOCATION)'+'tmp_core_5808.fbk'
# #
# f_list=( tmpfdb, tmpfbk ) # f_list=( tmpfdb, tmpfbk )
# cleanup( f_list ) # cleanup( f_list )
# #
# #
# # 14.04.2021. # # 14.04.2021.
# # Name of encryption plugin depends on OS: # # Name of encryption plugin depends on OS:
# # * for Windows we (currently) use plugin by IBSurgeon, its name is 'dbcrypt'; # # * for Windows we (currently) use plugin by IBSurgeon, its name is 'dbcrypt';
@ -122,10 +122,10 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# # ** 'fbSampleDbCrypt' for FB 4.x+ # # ** 'fbSampleDbCrypt' for FB 4.x+
# # # #
# PLUGIN_NAME = 'dbcrypt' if os.name == 'nt' else '"fbSampleDbCrypt"' # PLUGIN_NAME = 'dbcrypt' if os.name == 'nt' else '"fbSampleDbCrypt"'
# #
# con = fdb.create_database( dsn = 'localhost:'+tmpfdb ) # con = fdb.create_database( dsn = 'localhost:'+tmpfdb )
# cur = con.cursor() # cur = con.cursor()
# #
# ############################################## # ##############################################
# # WARNING! Do NOT use 'connection_obj.execute_immediate()' for ALTER DATABASE ENCRYPT... command! # # WARNING! Do NOT use 'connection_obj.execute_immediate()' for ALTER DATABASE ENCRYPT... command!
# # There is bug in FB driver which leads this command to fail with 'token unknown' message # # There is bug in FB driver which leads this command to fail with 'token unknown' message
@ -135,114 +135,112 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# # See letter from Pavel Cisar, 20.01.20 10:36 # # See letter from Pavel Cisar, 20.01.20 10:36
# ############################################## # ##############################################
# cur.execute('alter database encrypt with %(PLUGIN_NAME)s key Red' % locals()) # cur.execute('alter database encrypt with %(PLUGIN_NAME)s key Red' % locals())
# #
# con.commit() # con.commit()
# #
# time.sleep(2) # time.sleep(2)
# # ^ # # ^
# # +-------- !! ALLOW BACKGROUND ENCRYPTION PROCESS TO COMPLETE ITS JOB !! # # +-------- !! ALLOW BACKGROUND ENCRYPTION PROCESS TO COMPLETE ITS JOB !!
# #
# con.close() # con.close()
# #
# f_backup_log = open( os.path.join(context['temp_directory'],'tmp_backup_5808.log'), 'w') # f_backup_log = open( os.path.join(context['temp_directory'],'tmp_backup_5808.log'), 'w')
# f_backup_err = open( os.path.join(context['temp_directory'],'tmp_backup_5808.err'), 'w') # f_backup_err = open( os.path.join(context['temp_directory'],'tmp_backup_5808.err'), 'w')
# #
# subprocess.call( [ context['gbak_path'], "-v", "-b", 'localhost:' + tmpfdb, tmpfbk], # subprocess.call( [ context['gbak_path'], "-v", "-b", 'localhost:' + tmpfdb, tmpfbk],
# stdout = f_backup_log, # stdout = f_backup_log,
# stderr = f_backup_err # stderr = f_backup_err
# ) # )
# flush_and_close( f_backup_log ) # flush_and_close( f_backup_log )
# flush_and_close( f_backup_err ) # flush_and_close( f_backup_err )
# #
# #
# f_restore_log = open( os.path.join(context['temp_directory'],'tmp_restore_5808.log'), 'w') # f_restore_log = open( os.path.join(context['temp_directory'],'tmp_restore_5808.log'), 'w')
# f_restore_err = open( os.path.join(context['temp_directory'],'tmp_restore_5808.err'), 'w') # f_restore_err = open( os.path.join(context['temp_directory'],'tmp_restore_5808.err'), 'w')
# #
# subprocess.call( [ context['gbak_path'], "-v", "-rep", tmpfbk, 'localhost:'+tmpfdb], # subprocess.call( [ context['gbak_path'], "-v", "-rep", tmpfbk, 'localhost:'+tmpfdb],
# stdout = f_restore_log, # stdout = f_restore_log,
# stderr = f_restore_err # stderr = f_restore_err
# ) # )
# flush_and_close( f_restore_log ) # flush_and_close( f_restore_log )
# flush_and_close( f_restore_err ) # flush_and_close( f_restore_err )
# #
# f_fblog_before=open( os.path.join(context['temp_directory'],'tmp_5808_fblog_before.txt'), 'w') # f_fblog_before=open( os.path.join(context['temp_directory'],'tmp_5808_fblog_before.txt'), 'w')
# svc_get_fb_log( f_fblog_before ) # svc_get_fb_log( f_fblog_before )
# flush_and_close( f_fblog_before ) # flush_and_close( f_fblog_before )
# #
# #
# f_validate_log = open( os.path.join(context['temp_directory'],'tmp_validate_5808.log'), 'w') # f_validate_log = open( os.path.join(context['temp_directory'],'tmp_validate_5808.log'), 'w')
# f_validate_err = open( os.path.join(context['temp_directory'],'tmp_validate_5808.err'), 'w') # f_validate_err = open( os.path.join(context['temp_directory'],'tmp_validate_5808.err'), 'w')
# #
# subprocess.call( [ context['gfix_path'], "-v", "-full", tmpfdb ], # subprocess.call( [ context['gfix_path'], "-v", "-full", tmpfdb ],
# stdout = f_validate_log, # stdout = f_validate_log,
# stderr = f_validate_err # stderr = f_validate_err
# ) # )
# flush_and_close( f_validate_log ) # flush_and_close( f_validate_log )
# flush_and_close( f_validate_err ) # flush_and_close( f_validate_err )
# #
# f_fblog_after=open( os.path.join(context['temp_directory'],'tmp_5808_fblog_after.txt'), 'w') # f_fblog_after=open( os.path.join(context['temp_directory'],'tmp_5808_fblog_after.txt'), 'w')
# svc_get_fb_log( f_fblog_after ) # svc_get_fb_log( f_fblog_after )
# flush_and_close( f_fblog_after ) # flush_and_close( f_fblog_after )
# #
# #
# # Compare firebird.log versions BEFORE and AFTER this test: # # Compare firebird.log versions BEFORE and AFTER this test:
# ###################### # ######################
# #
# oldfb=open(f_fblog_before.name, 'r') # oldfb=open(f_fblog_before.name, 'r')
# newfb=open(f_fblog_after.name, 'r') # newfb=open(f_fblog_after.name, 'r')
# #
# difftext = ''.join(difflib.unified_diff( # difftext = ''.join(difflib.unified_diff(
# oldfb.readlines(), # oldfb.readlines(),
# newfb.readlines() # newfb.readlines()
# )) # ))
# oldfb.close() # oldfb.close()
# newfb.close() # newfb.close()
# #
# #
# with open( f_backup_err.name,'r') as f: # with open( f_backup_err.name,'r') as f:
# for line in f: # for line in f:
# print("UNEXPECTED PROBLEM ON BACKUP, STDERR: "+line) # print("UNEXPECTED PROBLEM ON BACKUP, STDERR: "+line)
# #
# with open( f_restore_err.name,'r') as f: # with open( f_restore_err.name,'r') as f:
# for line in f: # for line in f:
# print("UNEXPECTED PROBLEM ON RESTORE, STDERR: "+line) # print("UNEXPECTED PROBLEM ON RESTORE, STDERR: "+line)
# #
# #
# f_diff_txt=open( os.path.join(context['temp_directory'],'tmp_5808_diff.txt'), 'w') # f_diff_txt=open( os.path.join(context['temp_directory'],'tmp_5808_diff.txt'), 'w')
# f_diff_txt.write(difftext) # f_diff_txt.write(difftext)
# flush_and_close( f_diff_txt ) # flush_and_close( f_diff_txt )
# #
# allowed_patterns = ( # allowed_patterns = (
# re.compile( '\\+\\s+Validation\\s+started', re.IGNORECASE) # re.compile( '\\+\\s+Validation\\s+started', re.IGNORECASE)
# ,re.compile( '\\+\\s+Validation\\s+finished:\\s+0\\s+errors,\\s+0\\s+warnings,\\s+0\\s+fixed', re.IGNORECASE) # ,re.compile( '\\+\\s+Validation\\s+finished:\\s+0\\s+errors,\\s+0\\s+warnings,\\s+0\\s+fixed', re.IGNORECASE)
# ) # )
# #
# #
# with open( f_diff_txt.name,'r') as f: # with open( f_diff_txt.name,'r') as f:
# for line in f: # for line in f:
# match2some = filter( None, [ p.search(line) for p in allowed_patterns ] ) # match2some = filter( None, [ p.search(line) for p in allowed_patterns ] )
# if match2some: # if match2some:
# print( (' '.join( line.split()).upper() ) ) # print( (' '.join( line.split()).upper() ) )
# #
# # CLEANUP: # # CLEANUP:
# ########## # ##########
# # do NOT remove this pause otherwise some of logs will not be enable for deletion and test will finish with # # do NOT remove this pause otherwise some of logs will not be enable for deletion and test will finish with
# # Exception raised while executing Python test script. exception: WindowsError: 32 # # Exception raised while executing Python test script. exception: WindowsError: 32
# time.sleep(1) # time.sleep(1)
# cleanup( (f_backup_log, f_backup_err, f_restore_log, f_restore_err, f_validate_log, f_validate_err, f_fblog_before, f_fblog_after, f_diff_txt, tmpfdb, tmpfbk) ) # cleanup( (f_backup_log, f_backup_err, f_restore_log, f_restore_err, f_validate_log, f_validate_err, f_fblog_before, f_fblog_after, f_diff_txt, tmpfdb, tmpfbk) )
# #
# #
#--- #---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
expected_stdout_1 = """ expected_stdout_1 = """
+ VALIDATION STARTED + VALIDATION STARTED
+ VALIDATION FINISHED: 0 ERRORS, 0 WARNINGS, 0 FIXED + VALIDATION FINISHED: 0 ERRORS, 0 WARNINGS, 0 FIXED
""" """
@pytest.mark.version('>=4.0') @pytest.mark.version('>=4.0')
@pytest.mark.xfail def test_1(act_1: Action):
def test_1(db_1): pytest.skip("Requires encryption plugin")
pytest.fail("Test not IMPLEMENTED")

View File

@ -2,17 +2,17 @@
# #
# id: bugs.core_5831 # id: bugs.core_5831
# title: Not user friendly output of gstat at encrypted database # title: Not user friendly output of gstat at encrypted database
# decription: # decription:
# We create new database ('tmp_core_5831.fdb') and try to encrypt it usng IBSurgeon Demo Encryption package # We create new database ('tmp_core_5831.fdb') and try to encrypt it usng IBSurgeon Demo Encryption package
# ( https://ib-aid.com/download-demo-firebird-encryption-plugin/ ; https://ib-aid.com/download/crypt/CryptTest.zip ) # ( https://ib-aid.com/download-demo-firebird-encryption-plugin/ ; https://ib-aid.com/download/crypt/CryptTest.zip )
# License file plugins\\dbcrypt.conf with unlimited expiration was provided by IBSurgeon to Firebird Foundation (FF). # License file plugins\\dbcrypt.conf with unlimited expiration was provided by IBSurgeon to Firebird Foundation (FF).
# This file was preliminary stored in FF Test machine. # This file was preliminary stored in FF Test machine.
# Test assumes that this file and all neccessary libraries already were stored into FB_HOME and %FB_HOME%\\plugins. # Test assumes that this file and all neccessary libraries already were stored into FB_HOME and %FB_HOME%\\plugins.
# #
# After test database will be created, we try to encrypt it using 'alter database encrypt with <plugin_name> ...' command # After test database will be created, we try to encrypt it using 'alter database encrypt with <plugin_name> ...' command
# (where <plugin_name> = dbcrypt - name of .dll in FB_HOME\\plugins\\ folder that implements encryption). # (where <plugin_name> = dbcrypt - name of .dll in FB_HOME\\plugins\\ folder that implements encryption).
# Then we allow engine to complete this job - take delay about 1..2 seconds BEFORE detach from database. # Then we allow engine to complete this job - take delay about 1..2 seconds BEFORE detach from database.
# #
# After this we detach from DB, run 'gstat -h' and filter its attributes and messages from 'Variable header' section. # After this we detach from DB, run 'gstat -h' and filter its attributes and messages from 'Variable header' section.
# In the output of gstat we check that its 'tail' will look like this: # In the output of gstat we check that its 'tail' will look like this:
# === # ===
@ -22,31 +22,32 @@
# Encryption key name: RED # Encryption key name: RED
# === # ===
# (concrete values for checksum and hash will be ignored - see 'substitutions' section). # (concrete values for checksum and hash will be ignored - see 'substitutions' section).
# #
# Finally, we change this temp DB statee to full shutdown in order to have 100% ability to drop this file. # Finally, we change this temp DB statee to full shutdown in order to have 100% ability to drop this file.
# #
# Checked on: # Checked on:
# 40sS, build 4.0.0.1487: OK, 3.347s. # 40sS, build 4.0.0.1487: OK, 3.347s.
# 40Cs, build 4.0.0.1487: OK, 3.506s. # 40Cs, build 4.0.0.1487: OK, 3.506s.
# 30sS, build 3.0.5.33120: OK, 2.697s. # 30sS, build 3.0.5.33120: OK, 2.697s.
# 30Cs, build 3.0.5.33120: OK, 3.054s. # 30Cs, build 3.0.5.33120: OK, 3.054s.
# #
# 15.04.2021. Adapted for run both on Windows and Linux. Checked on: # 15.04.2021. Adapted for run both on Windows and Linux. Checked on:
# Windows: 4.0.0.2416 # Windows: 4.0.0.2416
# Linux: 4.0.0.2416 # Linux: 4.0.0.2416
# #
# tracker_id: CORE-5831 # tracker_id: CORE-5831
# min_versions: ['3.0.4'] # min_versions: ['3.0.4']
# versions: 3.0.4 # versions: 3.0.4
# qmid: None # qmid: None
import pytest import pytest
from firebird.qa import db_factory, isql_act, Action from firebird.qa import db_factory, python_act, Action
# version: 3.0.4 # version: 3.0.4
# resources: None # resources: None
substitutions_1 = [('ATTRIBUTES FORCE WRITE, ENCRYPTED, PLUGIN.*', 'ATTRIBUTES FORCE WRITE, ENCRYPTED'), ('CRYPT CHECKSUM.*', 'CRYPT CHECKSUM'), ('KEY HASH.*', 'KEY HASH')] substitutions_1 = [('ATTRIBUTES FORCE WRITE, ENCRYPTED, PLUGIN.*', 'ATTRIBUTES FORCE WRITE, ENCRYPTED'),
('CRYPT CHECKSUM.*', 'CRYPT CHECKSUM'), ('KEY HASH.*', 'KEY HASH')]
init_script_1 = """""" init_script_1 = """"""
@ -54,35 +55,35 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1 # test_script_1
#--- #---
# #
# import os # import os
# import time # import time
# import subprocess # import subprocess
# import re # import re
# import fdb # import fdb
# #
# os.environ["ISC_USER"] = user_name # os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password # os.environ["ISC_PASSWORD"] = user_password
# engine = db_conn.engine_version # engine = db_conn.engine_version
# db_conn.close() # db_conn.close()
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# def flush_and_close( file_handle ): # def flush_and_close( file_handle ):
# # https://docs.python.org/2/library/os.html#os.fsync # # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f, # # If you're starting with a Python file object f,
# # first do f.flush(), and # # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. # # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os # global os
# #
# file_handle.flush() # file_handle.flush()
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: # if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
# # otherwise: "OSError: [Errno 9] Bad file descriptor"! # # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno()) # os.fsync(file_handle.fileno())
# file_handle.close() # file_handle.close()
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# def cleanup( f_names_list ): # def cleanup( f_names_list ):
# global os # global os
# for i in range(len( f_names_list )): # for i in range(len( f_names_list )):
@ -94,20 +95,20 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.') # print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
# print('type(f_names_list[i])=',type(f_names_list[i])) # print('type(f_names_list[i])=',type(f_names_list[i]))
# del_name = None # del_name = None
# #
# if del_name and os.path.isfile( del_name ): # if del_name and os.path.isfile( del_name ):
# os.remove( del_name ) # os.remove( del_name )
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# #
# tmpfdb='$(DATABASE_LOCATION)'+'tmp_core_5831.fdb' # tmpfdb='$(DATABASE_LOCATION)'+'tmp_core_5831.fdb'
# #
# cleanup( (tmpfdb,) ) # cleanup( (tmpfdb,) )
# #
# con = fdb.create_database( dsn = 'localhost:'+tmpfdb ) # con = fdb.create_database( dsn = 'localhost:'+tmpfdb )
# cur = con.cursor() # cur = con.cursor()
# #
# # 14.04.2021. # # 14.04.2021.
# # Name of encryption plugin depends on OS: # # Name of encryption plugin depends on OS:
# # * for Windows we (currently) use plugin by IBSurgeon, its name is 'dbcrypt'; # # * for Windows we (currently) use plugin by IBSurgeon, its name is 'dbcrypt';
@ -116,7 +117,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# # ** 'fbSampleDbCrypt' for FB 4.x+ # # ** 'fbSampleDbCrypt' for FB 4.x+
# # # #
# PLUGIN_NAME = 'dbcrypt' if os.name == 'nt' else ( '"fbSampleDbCrypt"' if engine >= 4.0 else '"DbCrypt_example"') # PLUGIN_NAME = 'dbcrypt' if os.name == 'nt' else ( '"fbSampleDbCrypt"' if engine >= 4.0 else '"DbCrypt_example"')
# #
# ############################################## # ##############################################
# # WARNING! Do NOT use 'connection_obj.execute_immediate()' for ALTER DATABASE ENCRYPT... command! # # WARNING! Do NOT use 'connection_obj.execute_immediate()' for ALTER DATABASE ENCRYPT... command!
# # There is bug in FB driver which leads this command to fail with 'token unknown' message # # There is bug in FB driver which leads this command to fail with 'token unknown' message
@ -126,68 +127,68 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# # See letter from Pavel Cisar, 20.01.20 10:36 # # See letter from Pavel Cisar, 20.01.20 10:36
# ############################################## # ##############################################
# cur.execute('alter database encrypt with %(PLUGIN_NAME)s key Red' % locals()) # cur.execute('alter database encrypt with %(PLUGIN_NAME)s key Red' % locals())
# #
# con.commit() # con.commit()
# #
# time.sleep(2) # time.sleep(2)
# # ^ # # ^
# # +-------- !! ALLOW BACKGROUND ENCRYPTION PROCESS TO COMPLETE ITS JOB !! # # +-------- !! ALLOW BACKGROUND ENCRYPTION PROCESS TO COMPLETE ITS JOB !!
# # DO NOT set this delay less than 2 seconds otherwise "crypt process" will be in the output! # # DO NOT set this delay less than 2 seconds otherwise "crypt process" will be in the output!
# #
# con.close() # con.close()
# #
# f_gstat_log = open( os.path.join(context['temp_directory'],'tmp_shut_5831.log'), 'w') # f_gstat_log = open( os.path.join(context['temp_directory'],'tmp_shut_5831.log'), 'w')
# f_gstat_err = open( os.path.join(context['temp_directory'],'tmp_shut_5831.err'), 'w') # f_gstat_err = open( os.path.join(context['temp_directory'],'tmp_shut_5831.err'), 'w')
# #
# subprocess.call( [ context['gstat_path'], "-h", tmpfdb], # subprocess.call( [ context['gstat_path'], "-h", tmpfdb],
# stdout = f_gstat_log, # stdout = f_gstat_log,
# stderr = f_gstat_err # stderr = f_gstat_err
# ) # )
# #
# subprocess.call( [ context['gfix_path'], 'localhost:'+tmpfdb, "-shut", "full", "-force", "0" ], # subprocess.call( [ context['gfix_path'], 'localhost:'+tmpfdb, "-shut", "full", "-force", "0" ],
# stdout = f_gstat_log, # stdout = f_gstat_log,
# stderr = f_gstat_err # stderr = f_gstat_err
# ) # )
# #
# flush_and_close( f_gstat_log ) # flush_and_close( f_gstat_log )
# flush_and_close( f_gstat_err ) # flush_and_close( f_gstat_err )
# #
# allowed_patterns = ( # allowed_patterns = (
# re.compile( '\\s*Attributes\\.*', re.IGNORECASE) # re.compile( '\\s*Attributes\\.*', re.IGNORECASE)
# ,re.compile('crypt\\s+checksum:\\s+\\S+', re.IGNORECASE) # ,re.compile('crypt\\s+checksum:\\s+\\S+', re.IGNORECASE)
# ,re.compile('key\\s+hash:\\s+\\S+', re.IGNORECASE) # ,re.compile('key\\s+hash:\\s+\\S+', re.IGNORECASE)
# ,re.compile('encryption\\s+key\\s+name:\\s+\\S+', re.IGNORECASE) # ,re.compile('encryption\\s+key\\s+name:\\s+\\S+', re.IGNORECASE)
# ) # )
# #
# with open( f_gstat_log.name,'r') as f: # with open( f_gstat_log.name,'r') as f:
# for line in f: # for line in f:
# match2some = filter( None, [ p.search(line) for p in allowed_patterns ] ) # match2some = filter( None, [ p.search(line) for p in allowed_patterns ] )
# if match2some: # if match2some:
# print( (' '.join( line.split()).upper() ) ) # print( (' '.join( line.split()).upper() ) )
# #
# with open( f_gstat_err.name,'r') as f: # with open( f_gstat_err.name,'r') as f:
# for line in f: # for line in f:
# print("Unexpected STDERR: "+line) # print("Unexpected STDERR: "+line)
# #
# #
# # cleanuo: # # cleanuo:
# time.sleep(1) # time.sleep(1)
# cleanup( (f_gstat_log, f_gstat_err, tmpfdb) ) # cleanup( (f_gstat_log, f_gstat_err, tmpfdb) )
# #
# #
#--- #---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
expected_stdout_1 = """ expected_stdout_1 = """
ATTRIBUTES FORCE WRITE, ENCRYPTED, PLUGIN DBCRYPT ATTRIBUTES FORCE WRITE, ENCRYPTED, PLUGIN DBCRYPT
CRYPT CHECKSUM: MUB2NTJQCHH9RSHMP6XFAIIC2II= CRYPT CHECKSUM: MUB2NTJQCHH9RSHMP6XFAIIC2II=
KEY HASH: ASK88TFWBINVC6B1JVS9MFUH47C= KEY HASH: ASK88TFWBINVC6B1JVS9MFUH47C=
ENCRYPTION KEY NAME: RED ENCRYPTION KEY NAME: RED
""" """
@pytest.mark.version('>=3.0.4') @pytest.mark.version('>=3.0.4')
@pytest.mark.xfail def test_1(act_1: Action):
def test_1(db_1): pytest.skip("Requires encryption plugin")
pytest.fail("Test not IMPLEMENTED")

View File

@ -1,27 +1,29 @@
#coding:utf-8 #coding:utf-8
# #
# id: bugs.core_5833 # id: bugs.core_5833
# title: Server crashes on preparing empty query when trace is enabled # title: DDL triggers for some object types (views, exceptions, roles, indexes, domains) are lost in backup-restore process
# decription: # decription:
# We create DDL triggers for all cases that are enumerated in $FB_HOME/doc/sql.extensions/README.ddl_triggers.txt. # We create DDL triggers for all cases that are enumerated in $FB_HOME/doc/sql.extensions/README.ddl_triggers.txt.
# Then query to RDB$TRIGGERS table is applied to database and its results are stored in <log_file_1>. # Then query to RDB$TRIGGERS table is applied to database and its results are stored in <log_file_1>.
# After this we do backup and restore to new file, again apply query to RDB$TRIGGERS and store results to <log_file_2>. # After this we do backup and restore to new file, again apply query to RDB$TRIGGERS and store results to <log_file_2>.
# Finally we compare <log_file_1> and <log_file_2> but exclude from comparison lines which starts with to 'BLOB_ID' # Finally we compare <log_file_1> and <log_file_2> but exclude from comparison lines which starts with to 'BLOB_ID'
# (these are "prefixes" for RDB$TRIGGER_BLR and RDB$TRIGGER_SOURCE). # (these are "prefixes" for RDB$TRIGGER_BLR and RDB$TRIGGER_SOURCE).
# Difference should be empty. # Difference should be empty.
# #
# Confirmed bug on WI-T4.0.0.977 and WI-V3.0.4.32972. # Confirmed bug on WI-T4.0.0.977 and WI-V3.0.4.32972.
# Works fine on: # Works fine on:
# 30SS, build 3.0.4.32980: OK, 4.656s. # 30SS, build 3.0.4.32980: OK, 4.656s.
# 40SS, build 4.0.0.993: OK, 6.531s. # 40SS, build 4.0.0.993: OK, 6.531s.
# #
# tracker_id: CORE-5833 # tracker_id: CORE-5833
# min_versions: ['3.0'] # min_versions: ['3.0']
# versions: 3.0 # versions: 3.0
# qmid: None # qmid: None
import pytest import pytest
from firebird.qa import db_factory, isql_act, Action from difflib import unified_diff
from pathlib import Path
from firebird.qa import db_factory, python_act, Action, temp_file
# version: 3.0 # version: 3.0
# resources: None # resources: None
@ -34,34 +36,34 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1 # test_script_1
#--- #---
# #
# import os # import os
# import re # import re
# import subprocess # import subprocess
# import time # import time
# import difflib # import difflib
# #
# os.environ["ISC_USER"] = user_name # os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password # os.environ["ISC_PASSWORD"] = user_password
# db_conn.close() # db_conn.close()
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# def flush_and_close( file_handle ): # def flush_and_close( file_handle ):
# # https://docs.python.org/2/library/os.html#os.fsync # # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f, # # If you're starting with a Python file object f,
# # first do f.flush(), and # # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. # # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os # global os
# #
# file_handle.flush() # file_handle.flush()
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: # if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
# # otherwise: "OSError: [Errno 9] Bad file descriptor"! # # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno()) # os.fsync(file_handle.fileno())
# file_handle.close() # file_handle.close()
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# def cleanup( f_names_list ): # def cleanup( f_names_list ):
# global os # global os
# for i in range(len( f_names_list )): # for i in range(len( f_names_list )):
@ -72,12 +74,12 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# else: # else:
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.') # print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
# del_name = None # del_name = None
# #
# if del_name and os.path.isfile( del_name ): # if del_name and os.path.isfile( del_name ):
# os.remove( del_name ) # os.remove( del_name )
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# ddl_list = ''.join( # ddl_list = ''.join(
# ( # (
# 'CREATE TABLE,ALTER TABLE,DROP TABLE,CREATE PROCEDURE,ALTER PROCEDURE,DROP PROCEDURE' # 'CREATE TABLE,ALTER TABLE,DROP TABLE,CREATE PROCEDURE,ALTER PROCEDURE,DROP PROCEDURE'
@ -96,14 +98,14 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# #ddl_list += ',CREATE SEQUENCE,ALTER SEQUENCE,DROP SEQUENCE,CREATE USER,ALTER USER,DROP USER' # #ddl_list += ',CREATE SEQUENCE,ALTER SEQUENCE,DROP SEQUENCE,CREATE USER,ALTER USER,DROP USER'
# #ddl_list += ',CREATE INDEX,ALTER INDEX,DROP INDEX,CREATE COLLATION,DROP COLLATION,ALTER CHARACTER SET' # #ddl_list += ',CREATE INDEX,ALTER INDEX,DROP INDEX,CREATE COLLATION,DROP COLLATION,ALTER CHARACTER SET'
# #ddl_list += ',CREATE PACKAGE,ALTER PACKAGE,DROP PACKAGE,CREATE PACKAGE BODY,DROP PACKAGE BODY' # #ddl_list += ',CREATE PACKAGE,ALTER PACKAGE,DROP PACKAGE,CREATE PACKAGE BODY,DROP PACKAGE BODY'
# #
# #
# # Initial DDL: create all triggers # # Initial DDL: create all triggers
# ################################## # ##################################
# f_ddl_sql=open( os.path.join(context['temp_directory'],'tmp_ddl_triggers_5833.sql'), 'w') # f_ddl_sql=open( os.path.join(context['temp_directory'],'tmp_ddl_triggers_5833.sql'), 'w')
# f_ddl_sql.write('set bail on;\\n') # f_ddl_sql.write('set bail on;\\n')
# f_ddl_sql.write('set term ^;\\n') # f_ddl_sql.write('set term ^;\\n')
# #
# for i in ddl_list.split(','): # for i in ddl_list.split(','):
# for k in (1,2): # for k in (1,2):
# evt_time='before' if k==1 else 'after' # evt_time='before' if k==1 else 'after'
@ -113,14 +115,14 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# f_ddl_sql.write( " c = rdb$get_context('DDL_TRIGGER', 'OBJECT_NAME');\\n" ) # f_ddl_sql.write( " c = rdb$get_context('DDL_TRIGGER', 'OBJECT_NAME');\\n" )
# f_ddl_sql.write( 'end\\n^' ) # f_ddl_sql.write( 'end\\n^' )
# f_ddl_sql.write( '\\n' ) # f_ddl_sql.write( '\\n' )
# #
# #
# f_ddl_sql.write('set term ;^\\n') # f_ddl_sql.write('set term ;^\\n')
# f_ddl_sql.write('commit;\\n') # f_ddl_sql.write('commit;\\n')
# flush_and_close( f_ddl_sql ) # flush_and_close( f_ddl_sql )
# #
# runProgram('isql', [dsn, '-i', f_ddl_sql.name] ) # runProgram('isql', [dsn, '-i', f_ddl_sql.name] )
# #
# # Prepare check query: # # Prepare check query:
# ###################### # ######################
# sql_text=''' set blob all; # sql_text=''' set blob all;
@ -131,54 +133,54 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# where rdb$system_flag is distinct from 1 # where rdb$system_flag is distinct from 1
# order by 1; # order by 1;
# ''' # '''
# #
# f_chk_sql=open( os.path.join(context['temp_directory'],'tmp_check_trg_5833.sql'), 'w') # f_chk_sql=open( os.path.join(context['temp_directory'],'tmp_check_trg_5833.sql'), 'w')
# f_chk_sql.write( sql_text ) # f_chk_sql.write( sql_text )
# flush_and_close( f_chk_sql ) # flush_and_close( f_chk_sql )
# #
# # Query RDB$TRIGGERS before b/r: # # Query RDB$TRIGGERS before b/r:
# ################################ # ################################
# #
# f_xmeta1_log = open( os.path.join(context['temp_directory'],'tmp_xmeta1_5833.log'), 'w') # f_xmeta1_log = open( os.path.join(context['temp_directory'],'tmp_xmeta1_5833.log'), 'w')
# f_xmeta1_err = open( os.path.join(context['temp_directory'],'tmp_xmeta1_5833.err'), 'w') # f_xmeta1_err = open( os.path.join(context['temp_directory'],'tmp_xmeta1_5833.err'), 'w')
# #
# # Add to log result of query to rdb$triggers table: # # Add to log result of query to rdb$triggers table:
# subprocess.call( [context['isql_path'], dsn, "-i", f_chk_sql.name], # subprocess.call( [context['isql_path'], dsn, "-i", f_chk_sql.name],
# stdout = f_xmeta1_log, # stdout = f_xmeta1_log,
# stderr = f_xmeta1_err # stderr = f_xmeta1_err
# ) # )
# #
# flush_and_close( f_xmeta1_log ) # flush_and_close( f_xmeta1_log )
# flush_and_close( f_xmeta1_err ) # flush_and_close( f_xmeta1_err )
# #
# # Do backup and restore into temp file: # # Do backup and restore into temp file:
# ####################################### # #######################################
# tmp_bkup=os.path.join(context['temp_directory'],'tmp_backup_5833.fbk') # tmp_bkup=os.path.join(context['temp_directory'],'tmp_backup_5833.fbk')
# tmp_rest=os.path.join(context['temp_directory'],'tmp_restored_5833.fdb') # tmp_rest=os.path.join(context['temp_directory'],'tmp_restored_5833.fdb')
# if os.path.isfile(tmp_rest): # if os.path.isfile(tmp_rest):
# os.remove(tmp_rest) # os.remove(tmp_rest)
# #
# runProgram('gbak', ['-b', dsn, tmp_bkup ] ) # runProgram('gbak', ['-b', dsn, tmp_bkup ] )
# runProgram('gbak', ['-c', tmp_bkup, 'localhost:'+tmp_rest ] ) # runProgram('gbak', ['-c', tmp_bkup, 'localhost:'+tmp_rest ] )
# #
# #
# # Query RDB$TRIGGERS after b/r: # # Query RDB$TRIGGERS after b/r:
# ############################### # ###############################
# #
# f_xmeta2_log = open( os.path.join(context['temp_directory'],'tmp_xmeta2_5833.log'), 'w') # f_xmeta2_log = open( os.path.join(context['temp_directory'],'tmp_xmeta2_5833.log'), 'w')
# f_xmeta2_err = open( os.path.join(context['temp_directory'],'tmp_xmeta2_5833.err'), 'w') # f_xmeta2_err = open( os.path.join(context['temp_directory'],'tmp_xmeta2_5833.err'), 'w')
# #
# subprocess.call( [context['isql_path'], 'localhost:'+tmp_rest, "-i", f_chk_sql.name], # subprocess.call( [context['isql_path'], 'localhost:'+tmp_rest, "-i", f_chk_sql.name],
# stdout = f_xmeta2_log, # stdout = f_xmeta2_log,
# stderr = f_xmeta2_err # stderr = f_xmeta2_err
# ) # )
# #
# flush_and_close( f_xmeta2_log ) # flush_and_close( f_xmeta2_log )
# flush_and_close( f_xmeta2_err ) # flush_and_close( f_xmeta2_err )
# #
# # Every STDERR log should be EMPTY: # # Every STDERR log should be EMPTY:
# ################################### # ###################################
# #
# f_list = ( f_xmeta1_err, f_xmeta2_err ) # f_list = ( f_xmeta1_err, f_xmeta2_err )
# for i in range(len(f_list)): # for i in range(len(f_list)):
# f_name=f_list[i].name # f_name=f_list[i].name
@ -186,44 +188,96 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# with open( f_name,'r') as f: # with open( f_name,'r') as f:
# for line in f: # for line in f:
# print("Unexpected STDERR, file "+f_name+": "+line) # print("Unexpected STDERR, file "+f_name+": "+line)
# #
# #
# # DIFFERENCE between f_xmeta1_log and f_xmeta2_log should be EMPTY: # # DIFFERENCE between f_xmeta1_log and f_xmeta2_log should be EMPTY:
# #################### # ####################
# #
# old_rdb_triggers_data=open(f_xmeta1_log.name, 'r') # old_rdb_triggers_data=open(f_xmeta1_log.name, 'r')
# new_rdb_triggers_data=open(f_xmeta2_log.name, 'r') # new_rdb_triggers_data=open(f_xmeta2_log.name, 'r')
# #
# # NB: we should EXCLUDE from comparison lines which about to BLOB IDs for records: # # NB: we should EXCLUDE from comparison lines which about to BLOB IDs for records:
# # ~~~~~~~~~~~~~~~~~~~~~ # # ~~~~~~~~~~~~~~~~~~~~~
# #
# difftext = ''.join(difflib.unified_diff( # difftext = ''.join(difflib.unified_diff(
# [ line for line in old_rdb_triggers_data.readlines() if not line.startswith('BLOB_ID_FOR_TRG') ], # [ line for line in old_rdb_triggers_data.readlines() if not line.startswith('BLOB_ID_FOR_TRG') ],
# [ line for line in new_rdb_triggers_data.readlines() if not line.startswith('BLOB_ID_FOR_TRG') ] # [ line for line in new_rdb_triggers_data.readlines() if not line.startswith('BLOB_ID_FOR_TRG') ]
# )) # ))
# old_rdb_triggers_data.close() # old_rdb_triggers_data.close()
# new_rdb_triggers_data.close() # new_rdb_triggers_data.close()
# #
# f_diff_txt=open( os.path.join(context['temp_directory'],'tmp_5833_metadata_diff.txt'), 'w') # f_diff_txt=open( os.path.join(context['temp_directory'],'tmp_5833_metadata_diff.txt'), 'w')
# f_diff_txt.write(difftext) # f_diff_txt.write(difftext)
# flush_and_close( f_diff_txt ) # flush_and_close( f_diff_txt )
# #
# with open( f_diff_txt.name,'r') as f: # with open( f_diff_txt.name,'r') as f:
# for line in f: # for line in f:
# print("Unexpected DIFF in metadata: "+line) # print("Unexpected DIFF in metadata: "+line)
# #
# # CLEANUP # # CLEANUP
# ######### # #########
# time.sleep(1) # time.sleep(1)
# cleanup( (f_ddl_sql, f_chk_sql, f_xmeta1_log, f_xmeta1_err, f_xmeta2_log, f_xmeta2_err, f_diff_txt, tmp_bkup, tmp_rest) ) # cleanup( (f_ddl_sql, f_chk_sql, f_xmeta1_log, f_xmeta1_err, f_xmeta2_log, f_xmeta2_err, f_diff_txt, tmp_bkup, tmp_rest) )
# #
#--- #---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
ddl_list = ['CREATE TABLE', 'ALTER TABLE', 'DROP TABLE',
'CREATE PROCEDURE', 'ALTER PROCEDURE', 'DROP PROCEDURE',
'CREATE FUNCTION', 'ALTER FUNCTION', 'DROP FUNCTION',
'CREATE TRIGGER', 'ALTER TRIGGER', 'DROP TRIGGER',
'CREATE EXCEPTION', 'ALTER EXCEPTION', 'DROP EXCEPTION',
'CREATE VIEW', 'ALTER VIEW', 'DROP VIEW',
'CREATE DOMAIN', 'ALTER DOMAIN', 'DROP DOMAIN',
'CREATE ROLE', 'ALTER ROLE', 'DROP ROLE',
'CREATE SEQUENCE', 'ALTER SEQUENCE', 'DROP SEQUENCE',
'CREATE USER', 'ALTER USER', 'DROP USER',
'CREATE INDEX', 'ALTER INDEX', 'DROP INDEX',
'CREATE COLLATION', 'DROP COLLATION', 'ALTER CHARACTER SET',
'CREATE PACKAGE', 'ALTER PACKAGE', 'DROP PACKAGE',
'CREATE PACKAGE BODY', 'DROP PACKAGE BODY']
test_script_1 = """
set blob all;
set list on;
set count on;
select rdb$trigger_name, rdb$trigger_type, rdb$trigger_source as blob_id_for_trg_source, rdb$trigger_blr as blob_id_for_trg_blr
from rdb$triggers
where rdb$system_flag is distinct from 1
order by 1;
"""
fbk_file = temp_file('tmp_5833.fbk')
fdb_file = temp_file('tmp_5833.fdb')
@pytest.mark.version('>=3.0') @pytest.mark.version('>=3.0')
@pytest.mark.xfail def test_1(act_1: Action, fbk_file: Path, fdb_file: Path):
def test_1(db_1): script = ['set bail on;', 'set term ^;']
pytest.fail("Test not IMPLEMENTED") # Initial DDL: create all triggers
for item in ddl_list:
for evt_time in ['before', 'after']:
script.append(f"recreate trigger trg_{evt_time}_{item.replace(' ', '_').lower()} active {evt_time} {item.lower()} as")
script.append(" declare c rdb$field_name;")
script.append("begin")
script.append(" c = rdb$get_context('DDL_TRIGGER', 'OBJECT_NAME');")
script.append("end ^")
script.append("")
script.append("set term ;^")
script.append("commit;")
act_1.isql(switches=[], input='\n'.join(script))
# Query RDB$TRIGGERS before b/r:
act_1.reset()
act_1.isql(switches=[], input=test_script_1)
meta_before = [line for line in act_1.stdout.splitlines() if not line.startswith('BLOB_ID_FOR_TRG')]
# B/S
act_1.reset()
act_1.gbak(switches=['-b', act_1.db.dsn, str(fbk_file)])
act_1.reset()
act_1.gbak(switches=['-c', str(fbk_file), f'localhost:{fdb_file}'])
# Query RDB$TRIGGERS after b/r:
act_1.reset()
act_1.isql(switches=[f'localhost:{fdb_file}'], input=test_script_1, connect_db=False)
meta_after = [line for line in act_1.stdout.splitlines() if not line.startswith('BLOB_ID_FOR_TRG')]
# Check
assert list(unified_diff(meta_before, meta_after)) == []

View File

@ -2,19 +2,19 @@
# #
# id: bugs.core_5837 # id: bugs.core_5837
# title: Inconsistent results when working with GLOBAL TEMPORARY TABLE ON COMMIT PRESERVE ROWS # title: Inconsistent results when working with GLOBAL TEMPORARY TABLE ON COMMIT PRESERVE ROWS
# decription: # decription:
# Samples were provided by Vlad, privately. # Samples were provided by Vlad, privately.
# Confirmed bug on 3.0.4.32972, 4.0.0.955; SUPERSERVER only (see also note in the ticket) # Confirmed bug on 3.0.4.32972, 4.0.0.955; SUPERSERVER only (see also note in the ticket)
# Works fine on: # Works fine on:
# 3.0.4.32985, 4.0.0.1000 # 3.0.4.32985, 4.0.0.1000
# #
# tracker_id: CORE-5837 # tracker_id: CORE-5837
# min_versions: ['3.0.3'] # min_versions: ['3.0.3']
# versions: 3.0.3 # versions: 3.0.3
# qmid: None # qmid: None
import pytest import pytest
from firebird.qa import db_factory, isql_act, Action from firebird.qa import db_factory, python_act, Action
# version: 3.0.3 # version: 3.0.3
# resources: None # resources: None
@ -24,69 +24,87 @@ substitutions_1 = []
init_script_1 = """ init_script_1 = """
recreate global temporary table gtt(id int) on commit preserve rows; recreate global temporary table gtt(id int) on commit preserve rows;
commit; commit;
""" """
db_1 = db_factory(sql_dialect=3, init=init_script_1) db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1 # test_script_1
#--- #---
# #
# import os # import os
# import sys # import sys
# import subprocess # import subprocess
# os.environ["ISC_USER"] = user_name # os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password # os.environ["ISC_PASSWORD"] = user_password
# #
# db_conn.close() # db_conn.close()
# #
# con1=fdb.connect( dsn = dsn ) # con1=fdb.connect( dsn = dsn )
# con2=fdb.connect( dsn = dsn ) # con2=fdb.connect( dsn = dsn )
# #
# cur2=con2.cursor() # cur2=con2.cursor()
# #
# # Following 'select count' is MANDATORY for reproduce: # # Following 'select count' is MANDATORY for reproduce:
# ####################################### # #######################################
# cur2.execute('select count(*) from gtt'); # cur2.execute('select count(*) from gtt');
# for r in cur2: # for r in cur2:
# pass # pass
# #
# cur1=con1.cursor() # cur1=con1.cursor()
# cur1.execute('insert into gtt(id) values( ? )', (1,) ) # cur1.execute('insert into gtt(id) values( ? )', (1,) )
# cur1.execute('insert into gtt(id) values( ? )', (1,) ) # cur1.execute('insert into gtt(id) values( ? )', (1,) )
# #
# cur2.execute('insert into gtt(id) values( ? )', (2,) ) # cur2.execute('insert into gtt(id) values( ? )', (2,) )
# #
# con1.rollback() # con1.rollback()
# #
# #
# cur2.execute('insert into gtt(id) select 2 from rdb$types rows 200', (2,) ) # cur2.execute('insert into gtt(id) select 2 from rdb$types rows 200', (2,) )
# con2.commit() # con2.commit()
# #
# cur1.execute('insert into gtt(id) values( ? )', (11,) ) # cur1.execute('insert into gtt(id) values( ? )', (11,) )
# cur1.execute('insert into gtt(id) values( ? )', (11,) ) # cur1.execute('insert into gtt(id) values( ? )', (11,) )
# #
# print('con1.rollback: point before.') # print('con1.rollback: point before.')
# con1.rollback() # con1.rollback()
# print('con1.rollback: point after.') # print('con1.rollback: point after.')
# #
# #
# con1.close() # con1.close()
# con2.close() # con2.close()
# print('sample-2 finished OK.') # print('sample-2 finished OK.')
# #
# #
#--- #---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """ act_1 = python_act('db_1', substitutions=substitutions_1)
con1.rollback: point before.
con1.rollback: point after. #expected_stdout_1 = """
sample-2 finished OK. #con1.rollback: point before.
""" #con1.rollback: point after.
#sample-2 finished OK.
#"""
@pytest.mark.version('>=3.0.3') @pytest.mark.version('>=3.0.3')
@pytest.mark.xfail def test_1(act_1: Action):
def test_1(db_1): with act_1.db.connect() as con1, act_1.db.connect() as con2:
pytest.fail("Test not IMPLEMENTED") c2 = con2.cursor()
# Following 'select count' is MANDATORY for reproduce:
c2.execute('select count(*) from gtt').fetchall()
#
c1 = con1.cursor()
c1.execute('insert into gtt(id) values(?)', [1])
c1.execute('insert into gtt(id) values(?)', [1])
#
c2.execute('insert into gtt(id) values(?)', [2])
#
con1.rollback()
#
c2.execute('insert into gtt(id) select 2 from rdb$types rows 200', [2])
con2.commit()
#
c1.execute('insert into gtt(id) values(?)', [11])
c1.execute('insert into gtt(id) values(?)', [11])
#
con1.rollback()
# This test does not need to assert anything, it passes if we get here without error

View File

@ -2,51 +2,53 @@
# #
# id: bugs.core_5847 # id: bugs.core_5847
# title: "Malformed string" instead of key value in PK violation error message # title: "Malformed string" instead of key value in PK violation error message
# decription: # decription:
# Confirmed bug on: 3.0.4.32972, 4.0.0.955. # Confirmed bug on: 3.0.4.32972, 4.0.0.955.
# Works fine on: # Works fine on:
# FB25SC, build 2.5.9.27112: OK, 1.187s. # FB25SC, build 2.5.9.27112: OK, 1.187s.
# FB30SS, build 3.0.4.32992: OK, 1.485s. # FB30SS, build 3.0.4.32992: OK, 1.485s.
# FB40SS, build 4.0.0.1023: OK, 1.500s. # FB40SS, build 4.0.0.1023: OK, 1.500s.
# #
# tracker_id: CORE-5847 # tracker_id: CORE-5847
# min_versions: ['2.5.9'] # min_versions: ['2.5.9']
# versions: 2.5.9 # versions: 2.5.9
# qmid: None # qmid: None
import pytest import pytest
from firebird.qa import db_factory, isql_act, Action from firebird.qa import db_factory, python_act, Action
from firebird.driver import DatabaseError
# version: 2.5.9 # version: 2.5.9
# resources: None # resources: None
substitutions_1 = [('Problematic key value is .*', 'Problematic key value is')] #substitutions_1 = [('Problematic key value is .*', 'Problematic key value is')]
substitutions_1 = []
init_script_1 = """ init_script_1 = """
recreate table test( recreate table test(
uid char(16) character set octets, uid char(16) character set octets,
constraint test_uid_pk primary key(uid) using index test_uid_pk constraint test_uid_pk primary key(uid) using index test_uid_pk
); );
commit; commit;
insert into test values( gen_uuid() ); insert into test values( gen_uuid() );
commit; commit;
""" """
db_1 = db_factory(sql_dialect=3, init=init_script_1) db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1 # test_script_1
#--- #---
# #
# import os # import os
# import sys # import sys
# #
# os.environ["ISC_USER"] = user_name # os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password # os.environ["ISC_PASSWORD"] = user_password
# db_conn.close() # db_conn.close()
# #
# con1 = fdb.connect(dsn = dsn, charset = 'utf8') # con1 = fdb.connect(dsn = dsn, charset = 'utf8')
# con2 = fdb.connect(dsn = dsn) # con2 = fdb.connect(dsn = dsn)
# #
# sql_cmd='insert into test(uid) select uid from test rows 1' # sql_cmd='insert into test(uid) select uid from test rows 1'
# cur1=con1.cursor() # cur1=con1.cursor()
# cur2=con2.cursor() # cur2=con2.cursor()
@ -58,32 +60,36 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# for k,x in enumerate(e): # for k,x in enumerate(e):
# print(i,' ',k,':',x) # print(i,' ',k,':',x)
# i+=1 # i+=1
# #
# con1.close() # con1.close()
# con2.close() # con2.close()
# #
#--- #---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """ act_1 = python_act('db_1', substitutions=substitutions_1)
1 0 : Error while executing SQL statement:
- SQLCODE: -803
- violation of PRIMARY or UNIQUE KEY constraint "TEST_UID_PK" on table "TEST"
- Problematic key value is ("UID" = x'AA70F788EB634073AD328C284F775A3E')
1 1 : -803
1 2 : 335544665
2 0 : Error while executing SQL statement: #expected_stdout_1 = """
- SQLCODE: -803 #1 0 : Error while executing SQL statement:
- violation of PRIMARY or UNIQUE KEY constraint "TEST_UID_PK" on table "TEST" #- SQLCODE: -803
- Problematic key value is ("UID" = x'AA70F788EB634073AD328C284F775A3E') #- violation of PRIMARY or UNIQUE KEY constraint "TEST_UID_PK" on table "TEST"
2 1 : -803 #- Problematic key value is ("UID" = x'AA70F788EB634073AD328C284F775A3E')
2 2 : 335544665 #1 1 : -803
""" #1 2 : 335544665
#2 0 : Error while executing SQL statement:
#- SQLCODE: -803
#- violation of PRIMARY or UNIQUE KEY constraint "TEST_UID_PK" on table "TEST"
#- Problematic key value is ("UID" = x'AA70F788EB634073AD328C284F775A3E')
#2 1 : -803
#2 2 : 335544665
#"""
@pytest.mark.version('>=2.5.9') @pytest.mark.version('>=2.5.9')
@pytest.mark.xfail def test_1(act_1: Action):
def test_1(db_1): with act_1.db.connect(charset='utf8') as con1, act_1.db.connect() as con2:
pytest.fail("Test not IMPLEMENTED") c1 = con1.cursor()
c2 = con2.cursor()
cmd = 'insert into test(uid) select uid from test rows 1'
for c in [c1, c2]:
with pytest.raises(DatabaseError, match='.*Problematic key value is.*'):
c.execute(cmd)

View File

@ -2,7 +2,7 @@
# #
# id: bugs.core_5855 # id: bugs.core_5855
# title: Latest builds of Firebird 4.0 cannot backup DB with generators which contains space in the names # title: Latest builds of Firebird 4.0 cannot backup DB with generators which contains space in the names
# decription: # decription:
# Confirmed bug on 4.0.0.1036, got in STDERR: # Confirmed bug on 4.0.0.1036, got in STDERR:
# Dynamic SQL Error # Dynamic SQL Error
# -SQL error code = -104 # -SQL error code = -104
@ -13,14 +13,15 @@
# ::: NB::: # ::: NB:::
# As of nowadays, it is still possible to create sequence with name = single space character. # As of nowadays, it is still possible to create sequence with name = single space character.
# See note in ticket, 26/Jun/18 07:58 AM. # See note in ticket, 26/Jun/18 07:58 AM.
# #
# tracker_id: CORE-5855 # tracker_id: CORE-5855
# min_versions: ['3.0.0'] # min_versions: ['3.0.0']
# versions: 3.0 # versions: 3.0
# qmid: None # qmid: None
import pytest import pytest
from firebird.qa import db_factory, isql_act, Action from pathlib import Path
from firebird.qa import db_factory, python_act, Action, temp_file
# version: 3.0 # version: 3.0
# resources: None # resources: None
@ -32,43 +33,43 @@ init_script_1 = """
commit; commit;
comment on sequence "new sequence" is 'foo rio bar'; comment on sequence "new sequence" is 'foo rio bar';
commit; commit;
""" """
db_1 = db_factory(sql_dialect=3, init=init_script_1) db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1 # test_script_1
#--- #---
# #
# import os # import os
# import sys # import sys
# import subprocess # import subprocess
# import time # import time
# #
# os.environ["ISC_USER"] = user_name # os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password # os.environ["ISC_PASSWORD"] = user_password
# thisdb=db_conn.database_name # thisdb=db_conn.database_name
# tmpbkp='$(DATABASE_LOCATION)tmp_core_5855.fbk' # tmpbkp='$(DATABASE_LOCATION)tmp_core_5855.fbk'
# tmpres='$(DATABASE_LOCATION)tmp_core_5855.tmp' # tmpres='$(DATABASE_LOCATION)tmp_core_5855.tmp'
# #
# db_conn.close() # db_conn.close()
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# def flush_and_close( file_handle ): # def flush_and_close( file_handle ):
# # https://docs.python.org/2/library/os.html#os.fsync # # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f, # # If you're starting with a Python file object f,
# # first do f.flush(), and # # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. # # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os # global os
# #
# file_handle.flush() # file_handle.flush()
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: # if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
# # otherwise: "OSError: [Errno 9] Bad file descriptor"! # # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno()) # os.fsync(file_handle.fileno())
# file_handle.close() # file_handle.close()
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# def cleanup( f_names_list ): # def cleanup( f_names_list ):
# global os # global os
# for i in range(len( f_names_list )): # for i in range(len( f_names_list )):
@ -79,12 +80,12 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# else: # else:
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.') # print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
# del_name = None # del_name = None
# #
# if del_name and os.path.isfile( del_name ): # if del_name and os.path.isfile( del_name ):
# os.remove( del_name ) # os.remove( del_name )
# #
# #-------------------------------------------- # #--------------------------------------------
# #
# fn_bkp_log=open( os.path.join(context['temp_directory'],'tmp_5855_backup.log'), 'w') # fn_bkp_log=open( os.path.join(context['temp_directory'],'tmp_5855_backup.log'), 'w')
# fn_bkp_err=open( os.path.join(context['temp_directory'],'tmp_5855_backup.err'), 'w') # fn_bkp_err=open( os.path.join(context['temp_directory'],'tmp_5855_backup.err'), 'w')
# subprocess.call([context['fbsvcmgr_path'],"localhost:service_mgr", # subprocess.call([context['fbsvcmgr_path'],"localhost:service_mgr",
@ -92,15 +93,15 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# stdout=fn_bkp_log, stderr=fn_bkp_err) # stdout=fn_bkp_log, stderr=fn_bkp_err)
# flush_and_close( fn_bkp_log ) # flush_and_close( fn_bkp_log )
# flush_and_close( fn_bkp_err ) # flush_and_close( fn_bkp_err )
# #
# backup_error_flag=0 # backup_error_flag=0
# with open(fn_bkp_err.name,'r') as f: # with open(fn_bkp_err.name,'r') as f:
# for line in f: # for line in f:
# backup_error_flag=1 # backup_error_flag=1
# print('UNEXPECTED STDERR DURING BACKUP '+fn_bkp_err.name+': '+line) # print('UNEXPECTED STDERR DURING BACKUP '+fn_bkp_err.name+': '+line)
# #
# cleanup( (fn_bkp_err, fn_bkp_log ) ) # cleanup( (fn_bkp_err, fn_bkp_log ) )
# #
# if backup_error_flag==0: # if backup_error_flag==0:
# fn_res_log=open( os.path.join(context['temp_directory'],'tmp_5855_restore.log'), 'w') # fn_res_log=open( os.path.join(context['temp_directory'],'tmp_5855_restore.log'), 'w')
# fn_res_err=open( os.path.join(context['temp_directory'],'tmp_5855_restore.err'), 'w') # fn_res_err=open( os.path.join(context['temp_directory'],'tmp_5855_restore.err'), 'w')
@ -109,72 +110,93 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# stdout=fn_res_log, stderr=fn_res_err) # stdout=fn_res_log, stderr=fn_res_err)
# flush_and_close( fn_res_log ) # flush_and_close( fn_res_log )
# flush_and_close( fn_res_err ) # flush_and_close( fn_res_err )
# #
# sql_text=''' # sql_text='''
# set list on;
# set blob all;
# set list on; # set list on;
# select # set blob all;
# rdb$generator_name as seq_name, # set list on;
# rdb$initial_value as seq_init, # select
# rdb$generator_increment as seq_incr, # rdb$generator_name as seq_name,
# rdb$initial_value as seq_init,
# rdb$generator_increment as seq_incr,
# rdb$description as blob_id # rdb$description as blob_id
# from rdb$generators # from rdb$generators
# where rdb$system_flag is distinct from 1; # where rdb$system_flag is distinct from 1;
# ''' # '''
# #
# fn_sql_chk=open( os.path.join(context['temp_directory'],'tmp_5855_check.sql'), 'w') # fn_sql_chk=open( os.path.join(context['temp_directory'],'tmp_5855_check.sql'), 'w')
# fn_sql_chk.write(sql_text) # fn_sql_chk.write(sql_text)
# flush_and_close( fn_sql_chk ) # flush_and_close( fn_sql_chk )
# #
# fn_sql_log=open( os.path.join(context['temp_directory'],'tmp_5855_check.log'), 'w') # fn_sql_log=open( os.path.join(context['temp_directory'],'tmp_5855_check.log'), 'w')
# fn_sql_err=open( os.path.join(context['temp_directory'],'tmp_5855_check.err'), 'w') # fn_sql_err=open( os.path.join(context['temp_directory'],'tmp_5855_check.err'), 'w')
# subprocess.call( [ context['isql_path'], 'localhost:'+tmpres, "-i", fn_sql_chk.name ], # subprocess.call( [ context['isql_path'], 'localhost:'+tmpres, "-i", fn_sql_chk.name ],
# stdout=fn_sql_log, stderr=fn_sql_err # stdout=fn_sql_log, stderr=fn_sql_err
# ) # )
# #
# flush_and_close( fn_sql_log ) # flush_and_close( fn_sql_log )
# flush_and_close( fn_sql_err ) # flush_and_close( fn_sql_err )
# #
# for fe in ( fn_res_err, fn_sql_err ): # for fe in ( fn_res_err, fn_sql_err ):
# with open(fe.name,'r') as f: # with open(fe.name,'r') as f:
# for line in f: # for line in f:
# print('UNEXPECTED STDERR IN '+fe.name+': '+line) # print('UNEXPECTED STDERR IN '+fe.name+': '+line)
# #
# #
# with open(fn_res_log.name,'r') as f: # with open(fn_res_log.name,'r') as f:
# for line in f: # for line in f:
# # gbak: ERROR: # # gbak: ERROR:
# if 'ERROR:' in line: # if 'ERROR:' in line:
# print('UNEXPECTED ERROR IN '+fg.name+': '+line) # print('UNEXPECTED ERROR IN '+fg.name+': '+line)
# #
# with open(fn_sql_log.name,'r') as f: # with open(fn_sql_log.name,'r') as f:
# for line in f: # for line in f:
# print(line) # print(line)
# #
# # cleanup: # # cleanup:
# ########## # ##########
# time.sleep(1) # time.sleep(1)
# cleanup( (fn_res_err, fn_sql_err, fn_res_log, fn_sql_log, fn_sql_chk ) ) # cleanup( (fn_res_err, fn_sql_err, fn_res_log, fn_sql_log, fn_sql_chk ) )
# #
# ############################################################# # #############################################################
# #
# cleanup( (tmpbkp, tmpres) ) # cleanup( (tmpbkp, tmpres) )
# #
# #
#--- #---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
expected_stdout_1 = """ expected_stdout_1 = """
SEQ_NAME new sequence SEQ_NAME new sequence
SEQ_INIT 123 SEQ_INIT 123
SEQ_INCR -456 SEQ_INCR -456
foo rio bar foo rio bar
""" """
test_script_1 = """
set list on;
set blob all;
set list on;
select
rdb$generator_name as seq_name,
rdb$initial_value as seq_init,
rdb$generator_increment as seq_incr,
rdb$description as blob_id
from rdb$generators
where rdb$system_flag is distinct from 1;
"""
fbk_file = temp_file('tmp_core_5855.fbk')
fdb_file = temp_file('tmp_core_5855.fdb')
@pytest.mark.version('>=3.0') @pytest.mark.version('>=3.0')
@pytest.mark.xfail def test_1(act_1: Action, fbk_file: Path, fdb_file: Path):
def test_1(db_1): with act_1.connect_server() as srv:
pytest.fail("Test not IMPLEMENTED") srv.database.backup(database=act_1.db.db_path, backup=fbk_file)
srv.wait()
srv.database.restore(backup=fbk_file, database=fdb_file)
srv.wait()
act_1.expected_stdout = expected_stdout_1
act_1.isql(switches=[f'localhost:{fdb_file}'], input=test_script_1, connect_db=False)
assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@ -2,7 +2,7 @@
# #
# id: bugs.core_5892 # id: bugs.core_5892
# title: SQL SECURITY DEFINER context is not properly evaluated for monitoring tables # title: SQL SECURITY DEFINER context is not properly evaluated for monitoring tables
# decription: # decription:
# Test is based on ticket sample: we create non-privileged user and allow him to call TWO procedures. # Test is based on ticket sample: we create non-privileged user and allow him to call TWO procedures.
# First SP is declared with DEFINER rights (i.e. with rights of SYSDBA), second - with rights of INVOKER. # First SP is declared with DEFINER rights (i.e. with rights of SYSDBA), second - with rights of INVOKER.
# When first SP is called by this (non-privileged!) user then he should see two other connections: # When first SP is called by this (non-privileged!) user then he should see two other connections:
@ -10,16 +10,16 @@
# 2) that was done by SYSDBA. # 2) that was done by SYSDBA.
# When second SP is called then this user should see only ONE connection (first from previous list). # When second SP is called then this user should see only ONE connection (first from previous list).
# Also this test checks ability to work with new context variable 'EFFECTIVE_USER' from 'SYSTEM' namespace. # Also this test checks ability to work with new context variable 'EFFECTIVE_USER' from 'SYSTEM' namespace.
# #
# Checked on 4.0.0.1479: OK, 1.623s. # Checked on 4.0.0.1479: OK, 1.623s.
# #
# tracker_id: CORE-5892 # tracker_id: CORE-5892
# min_versions: ['4.0'] # min_versions: ['4.0']
# versions: 4.0 # versions: 4.0
# qmid: None # qmid: None
import pytest import pytest
from firebird.qa import db_factory, isql_act, Action from firebird.qa import db_factory, python_act, Action, user_factory, User
# version: 4.0 # version: 4.0
# resources: None # resources: None
@ -32,68 +32,68 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1 # test_script_1
#--- #---
# #
# import os # import os
# import fdb # import fdb
# #
# os.environ["ISC_USER"] = user_name # os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password # os.environ["ISC_PASSWORD"] = user_password
# db_conn.close() # db_conn.close()
# #
# con1=fdb.connect( dsn = dsn ) #, user = 'SYSDBA', password = 'masterkey' ) # con1=fdb.connect( dsn = dsn ) #, user = 'SYSDBA', password = 'masterkey' )
# con1.execute_immediate("create or alter user TMP$C5892 password '123' using plugin Srp") # con1.execute_immediate("create or alter user TMP$C5892 password '123' using plugin Srp")
# con1.commit() # con1.commit()
# #
# con2=fdb.connect( dsn = dsn, user = 'TMP$C5892', password = '123' ) # con2=fdb.connect( dsn = dsn, user = 'TMP$C5892', password = '123' )
# con3=fdb.connect( dsn = dsn, user = 'TMP$C5892', password = '123' ) # con3=fdb.connect( dsn = dsn, user = 'TMP$C5892', password = '123' )
# #
# sp_definer_ddl = ''' # sp_definer_ddl = '''
# create or alter procedure sp_test_definer returns( another_name varchar(31), another_conn_id int, execution_context varchar(31) ) SQL SECURITY DEFINER # create or alter procedure sp_test_definer returns( another_name varchar(31), another_conn_id int, execution_context varchar(31) ) SQL SECURITY DEFINER
# as # as
# begin # begin
# execution_context = rdb$get_context('SYSTEM', 'EFFECTIVE_USER'); # execution_context = rdb$get_context('SYSTEM', 'EFFECTIVE_USER');
# for # for
# select mon$user, mon$attachment_id # select mon$user, mon$attachment_id
# from mon$attachments a # from mon$attachments a
# where a.mon$system_flag is distinct from 1 and a.mon$attachment_id != current_connection # where a.mon$system_flag is distinct from 1 and a.mon$attachment_id != current_connection
# into # into
# another_name, # another_name,
# another_conn_id # another_conn_id
# do suspend; # do suspend;
# end # end
# ''' # '''
# #
# sp_invoker_ddl = ''' # sp_invoker_ddl = '''
# create or alter procedure sp_test_invoker returns( another_name varchar(31), another_conn_id int, execution_context varchar(31) ) SQL SECURITY INVOKER # create or alter procedure sp_test_invoker returns( another_name varchar(31), another_conn_id int, execution_context varchar(31) ) SQL SECURITY INVOKER
# as # as
# begin # begin
# execution_context = rdb$get_context('SYSTEM', 'EFFECTIVE_USER'); # execution_context = rdb$get_context('SYSTEM', 'EFFECTIVE_USER');
# for # for
# select mon$user, mon$attachment_id # select mon$user, mon$attachment_id
# from mon$attachments a # from mon$attachments a
# where # where
# a.mon$system_flag is distinct from 1 # a.mon$system_flag is distinct from 1
# and a.mon$attachment_id != current_connection # and a.mon$attachment_id != current_connection
# and a.mon$user = current_user # and a.mon$user = current_user
# into # into
# another_name, # another_name,
# another_conn_id # another_conn_id
# do suspend; # do suspend;
# end # end
# ''' # '''
# #
# con1.execute_immediate( sp_definer_ddl ) # con1.execute_immediate( sp_definer_ddl )
# con1.execute_immediate( sp_invoker_ddl ) # con1.execute_immediate( sp_invoker_ddl )
# con1.commit() # con1.commit()
# #
# con1.execute_immediate( 'grant execute on procedure sp_test_definer to public' ) # con1.execute_immediate( 'grant execute on procedure sp_test_definer to public' )
# con1.execute_immediate( 'grant execute on procedure sp_test_invoker to public' ) # con1.execute_immediate( 'grant execute on procedure sp_test_invoker to public' )
# con1.commit() # con1.commit()
# #
# sql_chk_definer='select current_user as "definer_-_who_am_i", d.another_name as "definer_-_who_else_here", d.execution_context as "definer_-_effective_user" from rdb$database r left join sp_test_definer d on 1=1' # sql_chk_definer='select current_user as "definer_-_who_am_i", d.another_name as "definer_-_who_else_here", d.execution_context as "definer_-_effective_user" from rdb$database r left join sp_test_definer d on 1=1'
# sql_chk_invoker='select current_user as "invoker_-_who_am_i", d.another_name as "invoker_-_who_else_here", d.execution_context as "invoker_-_effective_user" from rdb$database r left join sp_test_invoker d on 1=1' # sql_chk_invoker='select current_user as "invoker_-_who_am_i", d.another_name as "invoker_-_who_else_here", d.execution_context as "invoker_-_effective_user" from rdb$database r left join sp_test_invoker d on 1=1'
# #
# #
# #--------------------------------- # #---------------------------------
# #print('=== result of call SP with DEFINER security ===') # #print('=== result of call SP with DEFINER security ===')
# cur2a=con2.cursor() # cur2a=con2.cursor()
@ -103,9 +103,9 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# for i in range(0,len(c2col)): # for i in range(0,len(c2col)):
# print( c2col[i][0],':', r[i] ) # print( c2col[i][0],':', r[i] )
# cur2a.close() # cur2a.close()
# #
# #--------------------------------- # #---------------------------------
# #
# #print('') # #print('')
# #print('=== result of call SP with INVOKER security ===') # #print('=== result of call SP with INVOKER security ===')
# cur2b=con2.cursor() # cur2b=con2.cursor()
@ -115,36 +115,94 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# for i in range(0,len(c2col)): # for i in range(0,len(c2col)):
# print( c2col[i][0],':', r[i] ) # print( c2col[i][0],':', r[i] )
# cur2b.close() # cur2b.close()
# #
# #--------------------------------- # #---------------------------------
# #
# con2.close() # con2.close()
# con3.close() # con3.close()
# #
# con1.execute_immediate('drop user TMP$C5892 using plugin Srp') # con1.execute_immediate('drop user TMP$C5892 using plugin Srp')
# con1.close() # con1.close()
# #
# #
#--- #---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
# Test action over the db_1 fixture; substitutions_1 (defined earlier in this
# file) is applied when captured output is compared against expected_stdout_1.
act_1 = python_act('db_1', substitutions=substitutions_1)
# Expected output of test_1. The DEFINER procedure reports SYSDBA as the
# effective user and sees both other attachments (SYSDBA's and the second
# TMP$C5892 one); the INVOKER procedure runs as the caller and sees only the
# attachment of the same user.
# NOTE(review): this text was reconstructed from a corrupted two-column diff
# (the old version carried " : " separators, the new one does not) — verify
# the exact field/value separator against the committed file.
expected_stdout_1 = """
definer_-_who_am_i TMP$C5892
definer_-_who_else_here SYSDBA
definer_-_effective_user SYSDBA
definer_-_who_am_i TMP$C5892
definer_-_who_else_here TMP$C5892
definer_-_effective_user SYSDBA
invoker_-_who_am_i TMP$C5892
invoker_-_who_else_here TMP$C5892
invoker_-_effective_user TMP$C5892
"""
# DDL for a procedure declared with SQL SECURITY DEFINER: when an ordinary
# user calls it, EFFECTIVE_USER inside the body must be the definer (SYSDBA),
# so the mon$attachments scan is performed with the definer's privileges and
# returns attachments of other users as well.
sp_definer_ddl = """
create or alter procedure sp_test_definer returns( another_name varchar(31), another_conn_id int, execution_context varchar(31) ) SQL SECURITY DEFINER
as
begin
execution_context = rdb$get_context('SYSTEM', 'EFFECTIVE_USER');
for
select mon$user, mon$attachment_id
from mon$attachments a
where a.mon$system_flag is distinct from 1 and a.mon$attachment_id != current_connection
into
another_name,
another_conn_id
do suspend;
end
"""
# DDL for the counterpart procedure declared with SQL SECURITY INVOKER: here
# EFFECTIVE_USER must be the caller, and the extra `mon$user = current_user`
# predicate restricts the result to attachments of that same caller.
sp_invoker_ddl = """
create or alter procedure sp_test_invoker returns( another_name varchar(31), another_conn_id int, execution_context varchar(31) ) SQL SECURITY INVOKER
as
begin
execution_context = rdb$get_context('SYSTEM', 'EFFECTIVE_USER');
for
select mon$user, mon$attachment_id
from mon$attachments a
where
a.mon$system_flag is distinct from 1
and a.mon$attachment_id != current_connection
and a.mon$user = current_user
into
another_name,
another_conn_id
do suspend;
end
"""
# Temporary login used by test_1; the user_factory fixture presumably creates
# the user before the test and drops it afterwards — confirm against
# firebird.qa.user_factory (the legacy script dropped TMP$C5892 manually).
test_user = user_factory(name='TMP$C5892', password='123')
@pytest.mark.version('>=4.0')
def test_1(act_1: Action, test_user: User, capsys):
    """CORE-5892 check: a procedure declared with SQL SECURITY DEFINER must
    execute with the definer's (SYSDBA) effective privileges, while an
    INVOKER procedure executes with the caller's privileges.
    """
    # Each query joins the caller's CURRENT_USER with the rows produced inside
    # the procedure, so both the invoker and the effective user are visible.
    sql_chk_definer = 'select current_user as "definer_-_who_am_i", d.another_name as "definer_-_who_else_here", d.execution_context as "definer_-_effective_user" from rdb$database r left join sp_test_definer d on 1=1'
    sql_chk_invoker = 'select current_user as "invoker_-_who_am_i", d.another_name as "invoker_-_who_else_here", d.execution_context as "invoker_-_effective_user" from rdb$database r left join sp_test_invoker d on 1=1'
    # con1: SYSDBA attachment; con2/con3: two attachments of the test user.
    # con3 is opened only so the procedures have another non-system attachment
    # to report via mon$attachments.
    with act_1.db.connect() as con1, \
         act_1.db.connect(user=test_user.name, password=test_user.password) as con2, \
         act_1.db.connect(user=test_user.name, password=test_user.password) as con3:
        # Create both procedures as SYSDBA and allow everyone to execute them.
        con1.execute_immediate(sp_definer_ddl)
        con1.execute_immediate(sp_invoker_ddl)
        con1.commit()
        con1.execute_immediate('grant execute on procedure sp_test_definer to public')
        con1.execute_immediate('grant execute on procedure sp_test_invoker to public')
        con1.commit()
        # Run the DEFINER procedure as the ordinary user.
        with con2.cursor() as c2:
            c2.execute(sql_chk_definer)
            act_1.print_data_list(c2)
        # Run the INVOKER procedure as the same ordinary user.
        with con2.cursor() as c2:
            c2.execute(sql_chk_invoker)
            act_1.print_data_list(c2)
    # Compare the printed output (captured by pytest) with the expectations.
    act_1.expected_stdout = expected_stdout_1
    act_1.stdout = capsys.readouterr().out
    assert act_1.clean_stdout == act_1.clean_expected_stdout