6
0
mirror of https://github.com/FirebirdSQL/firebird-qa.git synced 2025-01-22 21:43:06 +01:00

Merge branch 'FirebirdSQL:master' into master

This commit is contained in:
Anton Zuev 2024-11-12 15:17:51 +03:00 committed by GitHub
commit 4b5d84aba7
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
129 changed files with 11763 additions and 1642 deletions

View File

@ -68,7 +68,7 @@ MaxIdentifierByteLength = 252
MaxIdentifierCharLength = 63
WireCryptPlugin = ChaCha, Arc4
StatementTimeout = 300
StatementTimeout = 600
ConnectionIdleTimeout = 0
ClearGTTAtRetaining = 0

View File

@ -70,7 +70,7 @@ MaxIdentifierByteLength = 252
MaxIdentifierCharLength = 63
WireCryptPlugin = ChaCha, Arc4
StatementTimeout = 300
StatementTimeout = 600
ConnectionIdleTimeout = 0
ClearGTTAtRetaining = 0
@ -78,6 +78,9 @@ ClientBatchBuffer = 131072
SnapshotsMemSize = 64K
TipCacheBlockSize = 4M
# Added 03-apr-2023:
ParallelWorkers = 1
# ParallelWorkers must be GREATER than 1 since 28.09.2024, see bugs/gh_8263_test.py
ParallelWorkers = 2
MaxParallelWorkers = 8
# SubQueryConversion = false
# OptimizeForFirstRows = false

View File

@ -70,7 +70,7 @@ MaxIdentifierByteLength = 252
MaxIdentifierCharLength = 63
WireCryptPlugin = ChaCha, Arc4
StatementTimeout = 300
StatementTimeout = 600
ConnectionIdleTimeout = 0
ClearGTTAtRetaining = 0
@ -78,6 +78,8 @@ ClientBatchBuffer = 131072
SnapshotsMemSize = 64K
TipCacheBlockSize = 4M
# Added 03-apr-2023:
ParallelWorkers = 1
# ParallelWorkers must be GREATER than 1 since 28.09.2024, see bugs/gh_8263_test.py
ParallelWorkers = 2
MaxParallelWorkers = 8
#OptimizeForFirstRows = false

View File

@ -21,5 +21,5 @@ pytest \
--md-report-output /qa-out/md_report.md \
--ignore=tests/functional/replication \
--ignore=tests/functional/basic/isql/test_08.py \
-m "not replication" \
-m "not replication and not encryption" \
"$@"

BIN
files/gh_7269.zip Normal file

Binary file not shown.

BIN
files/gh_7398.zip Normal file

Binary file not shown.

BIN
files/gh_8115.zip Normal file

Binary file not shown.

View File

@ -262,6 +262,24 @@ tmp_gh_8194_alias = $(dir_sampleDb)/qa/tmp_gh_8194.fdb
DefaultDbCachePages = 128
}
# https://github.com/FirebirdSQL/firebird/commit/fd0fa8a3a58fbfe7fdc0641b4e48258643d72127
# Let include file name into error message when creation of temp file failed
tmp_fd0fa8a3_alias = $(dir_sampleDb)/qa/tmp_fd0fa8a3.fdb
{
TempTableDirectory = <>
}
tmp_gh_6416_alias = $(dir_sampleDb)/qa/tmp_gh_6416.fdb
{
DataTypeCompatibility = 3.0
}
# https://github.com/FirebirdSQL/firebird/issues/8253
tmp_gh_8253_alias = $(dir_sampleDb)/qa/tmp_gh_8253.fdb
{
SecurityDatabase = tmp_gh_8253_alias
}
# Databases for replication tests:
#

View File

@ -3,6 +3,7 @@ console_output_style = count
# testpaths = tests
# addopts = --server local --install-terminal
markers =
intl: mark a test as dealing with non-ascii characters
scroll_cur: mark a test as dealing with scrollable cursor
es_eds: mark a test as dealing with ES/EDS mechanism
trace: mark a test as dealing with trace

View File

@ -274,6 +274,7 @@ expected_stdout_2 = """
Records affected: 0
"""
@pytest.mark.intl
@pytest.mark.version('>=4.0')
def test_2(act_2: Action):
act_2.expected_stdout = expected_stdout_2

View File

@ -8,18 +8,26 @@ DESCRIPTION:
JIRA: CORE-857
FBTEST: bugs.core_0857
NOTES:
[06.10.2022] pzotov
Could not complete adjusting for LINUX in new-qa.
DEFERRED.
[31.10.2024] pzotov
Bug was fixed for too old FB (2.0 RC4 / 2.1 Alpha 1), firebird-driver and/or QA-plugin
will not able to run on this version in order to reproduce problem.
Checked on 6.0.0.511 (Windows/Linux); 5.0.2.1550; 4.0.6.3165; 3.0.2.32670, 3,0,1,32609
"""
from pathlib import Path
import platform
import pytest
from firebird.qa import *
init_script = """
set echo on;
db = db_factory(charset='WIN1252')
act = isql_act('db', substitutions=[('[ \\t]+', ' ')])
tmp_sql = temp_file('tmp_core_0857.sql')
@pytest.mark.intl
@pytest.mark.version('>=3.0.0')
def test_1(act: Action, tmp_sql: Path):
test_script = """
set bail on;
create collation test_coll_ci_ai for win1252 from WIN_PTBR
case insensitive
@ -37,20 +45,8 @@ set bail on;
commit;
create view v_test as
select octet_length(t.f01) - octet_length(replace(t.f01, 'ß', '')) as "octet_length diff:" from test t;
"""
commit;
db = db_factory(charset='WIN1252', init=init_script)
expected_stdout = """
CONNECTION_CSET WIN1252
test_1 result: <null>
test_2 result: 1
ci_ai result: 1
between result: 1
octet_length diff: 1
"""
test_script = """
set list on;
select c.rdb$character_set_name as connection_cset
from mon$attachments a
@ -64,13 +60,19 @@ test_script = """
select * from v_test;
"""
act = isql_act('db', test_script)
# ::: NB :::
# For proper output of test, input script must be encoded in cp1252 rather than in UTF-8.
#
tmp_sql.write_text(test_script, encoding = 'cp1252')
@pytest.mark.skipif(platform.system() != 'Windows', reason='FIXME: see notes')
@pytest.mark.version('>=3')
def test_1(act: Action):
act.expected_stdout = expected_stdout
act.execute()
act.expected_stdout = """
CONNECTION_CSET WIN1252
test_1 result: <null>
test_2 result: 1
ci_ai result: 1
between result: 1
octet_length diff: 1
"""
act.isql(switches = ['-q'], input_file = tmp_sql, charset = 'win1252', combine_output = True)
assert act.clean_stdout == act.clean_expected_stdout

View File

@ -22,6 +22,7 @@ db = db_factory(init=init_script)
act = python_act('db')
@pytest.mark.intl
@pytest.mark.version('>=2.1')
def test_1(act: Action):
with act.db.connect() as con:

View File

@ -2,14 +2,14 @@
"""
ID: issue-1393
ISSUE: 1393
ISSUE: https://github.com/FirebirdSQL/firebird/issues/1393
TITLE: Non-ASCII quoted identifiers are not converted to metadata (UNICODE_FSS) charset
DESCRIPTION:
JIRA: CORE-986
FBTEST: bugs.core_0986
NOTES:
[25.11.2023] pzotov
Writing code requires more care since 6.0.0.150: ISQL does not allow specifying duplicate delimiters without any statements between them (two semicolon, two carets etc).
Writing code requires more care since 6.0.0.150: ISQL does not allow specifying duplicate delimiters without any statements between them (two semicolon, two carets etc)
"""
import pytest
@ -62,8 +62,7 @@ non_ascii_ddl='''
create role "манагер";
create role "начсклд";
-- TEMPLY COMMENTED UNTIL CORE-5209 IS OPEN:
-- ISQL -X ignores connection charset for text of EXCEPTION message (restoring it in initial charset when exception was created)
-- enabled since CORE-5209 was fixed:
recreate exception "Невзлет" 'Запись обломалась, ваши не пляшут. Но не стесняйтесь и обязательно заходите еще, мы всегда рады видеть вас. До скорой встречи, товарищ!';
commit;
@ -171,8 +170,6 @@ non_ascii_ddl='''
';
--------------------------------------------------
commit;
--/*
--TEMPLY COMMENTED UNTIL CORE-5221 IS OPEN:
set echo on;
show collation;
show domain;
@ -183,27 +180,29 @@ non_ascii_ddl='''
show view;
show procedure;
show role;
--*/
set list on;
set echo off;
select 'Metadata created OK.' as msg from rdb$database;
'''
tmp_file = temp_file('non_ascii_ddl.sql')
tmp_file = temp_file('tmp_0986_non_ascii_ddl.sql')
@pytest.mark.intl
@pytest.mark.version('>=3.0')
def test_1(act: Action, tmp_file: Path):
tmp_file.write_bytes(non_ascii_ddl.encode('cp1251'))
# run without specifying charset
################################
act.expected_stdout = expected_stdout_a
act.expected_stderr = expected_stderr_a_40 if act.is_version('>=4.0') else expected_stderr_a_30
act.isql(switches=['-q'], input_file=tmp_file, charset=None, io_enc='cp1251')
assert (act.clean_stdout == act.clean_expected_stdout and
act.clean_stderr == act.clean_expected_stderr)
# run with charset
# run _with_ charset
####################
act.reset()
act.isql(switches=['-q'], input_file=tmp_file, charset='win1251', io_enc='cp1251')
assert act.clean_stdout.endswith('Metadata created OK.')

View File

@ -261,6 +261,7 @@ expected_stdout = """
Records affected: 30
"""
@pytest.mark.intl
@pytest.mark.version('>=4.0')
def test_1(act: Action):
act.expected_stdout = expected_stdout

View File

@ -2,54 +2,106 @@
"""
ID: issue-1678
ISSUE: 1678
ISSUE: https://github.com/FirebirdSQL/firebird/issues/1678
TITLE: Problem with DISTINCT and insensitive collations
DESCRIPTION:
DESCRIPTION: See https://github.com/FirebirdSQL/firebird/issues/2965
JIRA: CORE-1254
FBTEST: bugs.core_1254
"""
import pytest
from firebird.qa import *
init_script = """CREATE TABLE TEST
(GROUP_ID VARCHAR(1) CHARACTER SET UTF8 COLLATE UNICODE_CI,
QUESTION INTEGER,
SCORE INTEGER);
COMMIT;
INSERT INTO TEST (GROUP_ID,QUESTION,SCORE) VALUES ('a',1,1);
INSERT INTO TEST (GROUP_ID,QUESTION,SCORE) VALUES ('a',2,1);
INSERT INTO TEST (GROUP_ID,QUESTION,SCORE) VALUES ('a',3,1);
INSERT INTO TEST (GROUP_ID,QUESTION,SCORE) VALUES ('A',1,1);
INSERT INTO TEST (GROUP_ID,QUESTION,SCORE) VALUES ('A',2,1);
INSERT INTO TEST (GROUP_ID,QUESTION,SCORE) VALUES ('A',3,1);
COMMIT;
"""
db = db_factory(charset='UTF8', init=init_script)
test_script = """SELECT GROUP_ID, QUESTION, SUM(SCORE) FROM TEST GROUP BY 1,2;
SELECT DISTINCT GROUP_ID, QUESTION FROM TEST;"""
act = isql_act('db', test_script)
expected_stdout = """
GROUP_ID QUESTION SUM
======== ============ =====================
a 1 2
a 2 2
a 3 2
NOTES:
1. Confirmed problem on 2.1.3.18185
Both queries: 'select ... group by ...' and 'select distinct ...' issued six rows:
GROUP_ID QUESTION
======== ============
a 1
a 2
a 3
A 1
A 2
A 3
(instead of expected three rows with 'a' or 'A' in the 1st column).
The only correct result issued when index was used.
2. Values in 1st column can vary if OptimizeForFirstRows = true (FB 5.x+).
Because of this, we have to check only COUNT of letters in this column
that are unique being compared using case SENSITIVE collation.
In all cases (for queries and with/without index) this count must be 1.
"""
import pytest
from firebird.qa import *
init_script = """
create table test(
group_id varchar(1) character set utf8 collate unicode_ci,
question integer,
score integer
);
commit;
insert into test (group_id,question,score) values ('a',1,11);
insert into test (group_id,question,score) values ('a',3,13);
insert into test (group_id,question,score) values ('A',1,14);
insert into test (group_id,question,score) values ('a',2,12);
insert into test (group_id,question,score) values ('A',2,15);
insert into test (group_id,question,score) values ('A',3,16);
commit;
-- See https://github.com/FirebirdSQL/firebird/issues/2965#issue-866882047
-- GROUP BY will use an index on multi-byte or insensitive collation only
-- when this index is: 1) UNIQUE and 2) ASCENDING.
create UNIQUE index test_gr_que_score on test(group_id, question, score);
commit;
"""
db = db_factory(charset='UTF8', init=init_script)
test_script = """
--set explain on;
--set plan on;
set list on;
alter index test_gr_que_score inactive;
commit;
select count(
distinct cast( group_id as varchar(1)
-- Check count of unique values in 1st column using
-- case SENSITIVE collation:
-- #########################
character set ascii
)
) as "case_SENSITIVE_distinct_gr_1"
from (
select group_id, question from test group by 1,2
);
select count( distinct cast(group_id as varchar(1) character set ascii)) as "case_SENSITIVE_distinct_gr_2"
from (
select distinct group_id, question from test
);
alter index test_gr_que_score active;
commit;
select count( distinct cast(group_id as varchar(1) character set ascii)) as "case_SENSITIVE_distinct_gr_3"
from (
select group_id, question from test group by 1,2
);
select count( distinct cast(group_id as varchar(1) character set ascii)) as "case_SENSITIVE_distinct_gr_4"
from (
select distinct group_id, question from test
);
"""
act = isql_act('db', test_script)
expected_stdout = """
case_SENSITIVE_distinct_gr_1 1
case_SENSITIVE_distinct_gr_2 1
case_SENSITIVE_distinct_gr_3 1
case_SENSITIVE_distinct_gr_4 1
"""
@pytest.mark.intl
@pytest.mark.version('>=3')
def test_1(act: Action):
act.expected_stdout = expected_stdout

View File

@ -7,56 +7,51 @@ TITLE: Can't create table using long username and UTF8 as attachment chars
DESCRIPTION:
JIRA: CORE-1292
FBTEST: bugs.core_1292
NOTES:
[23.08.2024] pzotov
1. Removed LIST() from initial query because it displays tokens in unpredictable order.
This can cause fail if we change OptimizeForFirstRows = true config parameter.
2. Found oddities when try to use non-ascii user name and substitute it using f-notation:
at least REVOKE and GRANT commands reflect this user name in the trace as encoded
in cp1251 instead of utf8. This causes:
335544321 : arithmetic exception, numeric overflow, or string truncation
335544565 : Cannot transliterate character between character sets
To be investigated further.
"""
import locale
import pytest
from firebird.qa import *
db = db_factory(charset='UTF8')
db = db_factory(charset = 'utf8')
test_script = """
act = python_act('db', substitutions = [ ('[ \t]+', ' '), ])
tmp_user = user_factory('db', name='Nebuchadnezzar_The_Babylon_Lord', password='123', plugin = 'Srp')
#tmp_user = user_factory('db', name='"НавохудоносорВластелинВавилона2"', password='123', plugin = 'Srp')
@pytest.mark.version('>=3')
def test_1(act: Action, tmp_user: User, capsys):
test_sql = f"""
set bail on;
set list on;
set wng off;
-- Drop old account if it remains from prevoius run:
set term ^;
execute block as
begin
begin
execute statement 'drop user Nebuchadnezzar2_King_of_Babylon' with autonomous transaction;
when any do begin end
end
end^
set term ;^
connect '{act.db.dsn}' user {act.db.user} password '{act.db.password}';
revoke all on all from {tmp_user.name};
grant create table to {tmp_user.name};
commit;
connect '{act.db.dsn}' user {tmp_user.name} password '{tmp_user.password}';
create user Nebuchadnezzar2_King_of_Babylon password 'guinness'; -- revoke admin role;
-- 1234567890123456789012345678901
-- 1 2 3
commit;
revoke all on all from Nebuchadnezzar2_King_of_Babylon;
set term ^;
execute block as
begin
if ( rdb$get_context('SYSTEM', 'ENGINE_VERSION') not starting with '2.5' ) then
begin
execute statement 'grant create table to Nebuchadnezzar2_King_of_Babylon';
end
end
^
set term ;^
commit;
connect '$(DSN)' user 'Nebuchadnezzar2_King_of_Babylon' password 'guinness';
select a.mon$user as who_am_i, c.rdb$character_set_name as my_connection_charset
from mon$attachments a
join rdb$character_sets c on a.mon$character_set_id = c.rdb$character_set_id
where a.mon$attachment_id = current_connection;
create table test(n int);
commit;
connect '$(DSN)' user 'SYSDBA' password 'masterkey';
connect '{act.db.dsn}' user {act.db.user} password '{act.db.password}';
set list on;
select usr_name, grantor, can_grant, tab_name,usr_type,obj_type, list(priv) priv_list
from (
select
p.rdb$user usr_name
,p.rdb$grantor grantor
@ -66,31 +61,58 @@ test_script = """
,p.rdb$object_type obj_type
,trim(p.rdb$privilege) priv
from rdb$user_privileges p
where upper(trim(p.rdb$relation_name)) = upper('test')
order by priv
)
group by usr_name, grantor, can_grant, tab_name,usr_type,obj_type;
commit;
drop user Nebuchadnezzar2_King_of_Babylon;
where
upper(trim(p.rdb$relation_name)) = upper('test')
and p.rdb$user = _utf8 '{tmp_user.name}' collate unicode_ci
order by priv;
commit;
"""
act = isql_act('db', test_script, substitutions=[('PRIV_LIST.*', '')])
expected_stdout = f"""
WHO_AM_I {tmp_user.name.upper()}
MY_CONNECTION_CHARSET UTF8
expected_stdout = """
USR_NAME NEBUCHADNEZZAR2_KING_OF_BABYLON
GRANTOR NEBUCHADNEZZAR2_KING_OF_BABYLON
USR_NAME {tmp_user.name.upper()}
GRANTOR {tmp_user.name.upper()}
CAN_GRANT 1
TAB_NAME TEST
USR_TYPE 8
OBJ_TYPE 0
D,I,R,S,U
PRIV D
USR_NAME {tmp_user.name.upper()}
GRANTOR {tmp_user.name.upper()}
CAN_GRANT 1
TAB_NAME TEST
USR_TYPE 8
OBJ_TYPE 0
PRIV I
USR_NAME {tmp_user.name.upper()}
GRANTOR {tmp_user.name.upper()}
CAN_GRANT 1
TAB_NAME TEST
USR_TYPE 8
OBJ_TYPE 0
PRIV R
USR_NAME {tmp_user.name.upper()}
GRANTOR {tmp_user.name.upper()}
CAN_GRANT 1
TAB_NAME TEST
USR_TYPE 8
OBJ_TYPE 0
PRIV S
USR_NAME {tmp_user.name.upper()}
GRANTOR {tmp_user.name.upper()}
CAN_GRANT 1
TAB_NAME TEST
USR_TYPE 8
OBJ_TYPE 0
PRIV U
"""
@pytest.mark.version('>=3')
def test_1(act: Action):
act.expected_stdout = expected_stdout
act.execute()
act.isql(switches = ['-q'], input = test_sql, charset = 'utf8', connect_db=False, credentials = False, combine_output = True, io_enc = locale.getpreferredencoding())
assert act.clean_stdout == act.clean_expected_stdout

View File

@ -48,6 +48,7 @@ expected_stdout = """
EXECUTE_STTM_SELECT милан
"""
@pytest.mark.intl
@pytest.mark.version('>=3.0')
def test_1(act: Action):
act.expected_stdout = expected_stdout

View File

@ -57,6 +57,7 @@ where pr.rdb$procedure_source containing '1'
and pr.rdb$procedure_name = upper('sp_test');
'''
@pytest.mark.intl
@pytest.mark.version('>=3')
def test_1(act: Action, tmp_file: Path):
tmp_file.write_bytes(sql_txt.encode('cp1251'))

View File

@ -4,19 +4,30 @@
ID: issue-1784
ISSUE: 1784
TITLE: French insensitive collation FR_FR_CI_AI
DESCRIPTION:
DESCRIPTION: Add French case-/accent-insensitive collation.
JIRA: CORE-1366
FBTEST: bugs.core_1366
NOTES:
[06.10.2022] pzotov
Could not complete adjusting for LINUX in new-qa.
DEFERRED.
[31.10.2024] pzotov
Bug was fixed for too old FB (2.1.8), firebird-driver and/or QA-plugin
will not able to run on this version in order to reproduce problem.
Checked on 6.0.0.511 (Windows/Linux); 5.0.2.1550; 4.0.6.3165; 3.0.2.32670, 3,0,1,32609
"""
import platform
from pathlib import Path
import pytest
from firebird.qa import *
init_script = """
db = db_factory(charset='ISO8859_1')
act = isql_act('db', substitutions=[('=.*', ''), ('[ \\t]+', ' ')])
tmp_sql = temp_file('tmp_core_1366.sql')
@pytest.mark.intl
@pytest.mark.version('>=3.0.0')
def test_1(act: Action, tmp_sql: Path):
test_script = f"""
recreate table test(id int);
commit;
@ -39,10 +50,6 @@ init_script = """
-- http://french.about.com/od/pronunciation/a/accents.htm
-- ### ONCE AGAIN ###
-- 1) for checking this under ISQL following must be encoded in ISO8859_1
-- 2) for running under fbt_run utility following must be encoded in UTF8.
-- (cedilla) is found only on the letter "C":
insert into test(id, cf) values( 1010, 'ç');
@ -76,21 +83,18 @@ init_script = """
insert into noac(id, nf) values( 1190, 'O');
insert into noac(id, nf) values( 1200, 'U');
commit;
"""
db = db_factory(charset='ISO8859_1', init=init_script)
test_script = """
select n.id n_id, n.nf, t.cf, t.id t_id
from noac n
left join test t on n.nf is not distinct from t.cf
order by n_id, t_id;
"""
act = isql_act('db', test_script, substitutions=[('=.*', ''), ('[ \t]+', ' ')])
expected_stdout = """
# https://github.com/FirebirdSQL/firebird/issues/1784#issuecomment-826188088
# ::: NB :::
# For proper output of test, input script must be encoded in ISO8859_1 rather than in UTF-8.
#
tmp_sql.write_text(test_script, encoding='iso8859_1')
act.expected_stdout = """
N_ID NF CF T_ID
============ ========== ========== ============
1150 A à 1030
@ -107,11 +111,5 @@ expected_stdout = """
1200 U ü 1080
1200 U û 1130
"""
@pytest.mark.skipif(platform.system() != 'Windows', reason='FIXME: see notes')
@pytest.mark.version('>=3')
def test_1(act: Action):
act.expected_stdout = expected_stdout
act.execute()
act.isql(switches = ['-q'], input_file = tmp_sql, charset = 'iso8859_1', combine_output = True)
assert act.clean_stdout == act.clean_expected_stdout

View File

@ -88,6 +88,7 @@ expected_stdout = """
RDB$COLLATION_NAME вид прописи
"""
@pytest.mark.intl
@pytest.mark.version('>=3')
def test_1(act: Action, tmp_file: Path):
tmp_file.write_bytes(sql_txt.encode('cp1251'))

View File

@ -7,45 +7,45 @@ TITLE: Allow usage of functions in LIST delimiter parameter
DESCRIPTION:
JIRA: CORE-1443
FBTEST: bugs.core_1453
NOTES:
[23.08.2024] pzotov
Reimplemented: we have to avoid to show result of LIST() call because unpredictable order of its tokens.
This can cause fail if we change OptimizeForFirstRows = true config parameter.
Instead, test apply char_len() to the result of list(<...>, <func>).
"""
import pytest
from firebird.qa import *
init_script = """CREATE TABLE T1 (ID INTEGER, NAME CHAR(20));
COMMIT;
INSERT INTO T1 (ID,NAME) VALUES (1,'ORANGE');
INSERT INTO T1 (ID,NAME) VALUES (1,'APPLE');
INSERT INTO T1 (ID,NAME) VALUES (1,'LEMON');
INSERT INTO T1 (ID,NAME) VALUES (2,'ORANGE');
INSERT INTO T1 (ID,NAME) VALUES (2,'APPLE');
INSERT INTO T1 (ID,NAME) VALUES (2,'PEAR');
COMMIT;
init_script = """
create table t1 (id integer, name char(20));
commit;
insert into t1 (id,name) values (1,'orange');
insert into t1 (id,name) values (1,'apple');
insert into t1 (id,name) values (1,'lemon');
insert into t1 (id,name) values (2,'orange');
insert into t1 (id,name) values (2,'apple');
insert into t1 (id,name) values (2,'pear');
commit;
"""
db = db_factory(init=init_script)
test_script = """select ID, LIST( trim(NAME), ASCII_CHAR(35) )
from T1
group by 1;
test_script = """
set list on;
select id, char_length(list( trim(name), ascii_char(35) )) chr_len
from t1
group by id
order by id;
"""
act = isql_act('db', test_script)
act = isql_act('db', test_script, substitutions = [ ('[ \t]+', ' '), ])
expected_stdout = """
ID LIST
============ =================
1 0:1
==============================================================================
LIST:
ORANGE#LEMON#APPLE
==============================================================================
2 0:2
==============================================================================
LIST:
PEAR#ORANGE#APPLE
==============================================================================
ID 1
CHR_LEN 18
ID 2
CHR_LEN 17
"""
@pytest.mark.version('>=2.5.0')

View File

@ -5,17 +5,57 @@ ID: issue-2149
ISSUE: 2149
TITLE: Unable to restore a database with inactive indices if any SP/trigger contains an explicit plan
DESCRIPTION:
We create table and indices for it.
Then we create trigger for this table, view, procedure, function and package - and all of them have DDL
which explicitly uses 'TEST ORDER <index>' in execution plan.
Such database then backed up and restored with command switch '-i(nactive)'.
Restore is logged and we check that this log does not contain 'gbak:error' message (and eventually completes OK).
Restored database must contain all created DB objects, i.e. we must have ability to explicitly specify them in SQL.
Table trigger (containing explicit PLAN clause in its DDL) also must exist and remain active.
Before this bug was fixed:
1) log of restore contained:
gbak: ERROR:Error while parsing function FN_WORKER's BLR
gbak: ERROR: index TEST_X cannot be used in the specified plan
2) restored database had NO indices that were explicitly specified in any DDL and any attempt to use appropriate
DB object failed with SQLSTATE = 42S02/39000/42000 ('Table/Procedure/Function} unknown').
JIRA: CORE-1725
FBTEST: bugs.core_1725
NOTES:
[28.10.2024] pzotov
1. Test fully re-implemented.
We do NOT extract metadata before and after restore (in order to compare it):
in FB 6.x 'gbak -i' leads to 'create INACTIVE index ...' statements in generated SQL
(see https://github.com/FirebirdSQL/firebird/issues/8091 - "Ability to create an inactive index").
Comparison of metadata that was before and after restore has no much sense.
Rather, we have to check SQL/DML that attempt to use DB object which DDL contain
explicitly specified execution plan.
All such actions must raise error related to invalid BLR, but *not* error about missing DB object.
BTW: it looks strange that such messages contain "-there is no index TEST_X for table TEST".
Such index definitely DOES exist but it is inactive.
2. Bug existed up to 17-jan-2019.
It was fixed by commits related to other issues, namely:
3.x: a74130019af89012cc1e04ba18bbc9c4a69e1a5d // 17.01.2019
4.x: fea7c61d9741dc142fa020bf3aa93af7e52e2002 // 17.01.2019
5.x: fea7c61d9741dc142fa020bf3aa93af7e52e2002 // 18.01.2019
("Attempted to fix CORE-2440, CORE-5118 and CORE-5900 together (expression indices contain NULL keys after restore).")
Checked on:
6.0.0.511-c4bc943; 5.0.2.1547-1e08f5e; 4.0.0.1384-fea7c61 (17-jan-2019, just after fix); 3.0.13.33793-3e62713
"""
import locale
import re
from collections import defaultdict
from difflib import unified_diff
from pathlib import Path
import pytest
from firebird.qa import *
from firebird.driver import SrvRestoreFlag, SrvRepairFlag
from io import BytesIO
from difflib import unified_diff
substitutions_1 = [('[ \t]+', ' ')]
init_script = """
set bail on;
@ -28,7 +68,6 @@ create or alter function fn_init returns int as begin end;
create or alter function fn_main returns int as begin end;
create or alter function fn_worker returns int as begin end;
create table test(id int primary key, x int, y int);
create index test_x on test(x);
create descending index test_y on test(y);
@ -155,8 +194,7 @@ begin
end
^
create or alter trigger trg_attach active on connect position 0 as
create or alter trigger test_bi for test active before insert position 0 as
declare c int;
begin
if ( rdb$get_context('USER_SESSION','INITIAL_DDL') is null ) then
@ -177,41 +215,180 @@ set term ;^
commit;
"""
db = db_factory(init=init_script)
substitutions = [('[ \t]+', ' '), ('(-)?invalid request BLR at offset \\d+', 'invalid request BLR at offset')]
act = python_act('db')
db = db_factory(init = init_script)
act = python_act('db', substitutions = substitutions)
tmp_fbk= temp_file('tmp_core_1725.fbk')
tmp_fdb = temp_file('tmp_core_1725.fdb')
@pytest.mark.version('>=3.0.6')
def test_1(act: Action):
# Extract metadata from initial DB
act.isql(switches=['-nod', '-x'])
meta_1 = act.stdout
def test_1(act: Action, tmp_fbk: Path, tmp_fdb: Path, capsys):
outcomes_map = defaultdict(str)
act.gbak(switches=['-b', act.db.dsn, str(tmp_fbk)])
# restore _WITHOUT_ building indices:
act.gbak(switches=['-rep', '-i', '-v', str(tmp_fbk), str(tmp_fdb) ], combine_output = True, io_enc = locale.getpreferredencoding())
watching_patterns = [re.compile(x, re.IGNORECASE) for x in (r'gbak:\s?ERROR(:)?\s?', r'gbak:finis.*\s+going home', r'gbak:adjust.*\s+flags')]
for line in act.clean_stdout.splitlines():
for p in watching_patterns:
if p.search(line):
outcomes_map['restore_log'] += line+'\n'
act.reset()
# backup + restore _WITHOUT_ building indices:
backup = BytesIO()
with act.connect_server() as srv:
srv.database.local_backup(database=act.db.db_path, backup_stream=backup)
backup.seek(0)
srv.database.local_restore(backup_stream=backup, database=act.db.db_path,
flags=SrvRestoreFlag.DEACTIVATE_IDX | SrvRestoreFlag.REPLACE)
# Get FB log before validation, run validation and get FB log after it:
log_before = act.get_firebird_log()
srv.database.repair(database=act.db.db_path, flags=SrvRepairFlag.CORRUPTION_CHECK)
#act.gfix(switches=['-v', '-full', act.db.dsn])
log_after = act.get_firebird_log()
# Extract metadata from restored DB
act.isql(switches=['-nod', '-x'])
meta_2 = act.stdout
###########################################################################
check_metadata = """
set list on;
set count on;
select ri.rdb$index_name, ri.rdb$index_inactive from rdb$indices ri where ri.rdb$relation_name = upper('test') and ri.rdb$index_name starting with upper('test');
select p.rdb$package_name, p.rdb$procedure_name as sp_name, p.rdb$valid_blr as sp_valid_blr
from rdb$procedures p
where p.rdb$system_flag is distinct from 1
order by p.rdb$package_name, p.rdb$procedure_name
;
select f.rdb$package_name, f.rdb$function_name as fn_name, f.rdb$valid_blr as fn_valid_blr
from rdb$functions f
where f.rdb$system_flag is distinct from 1
order by f.rdb$package_name, f.rdb$function_name
;
select rt.rdb$trigger_name, rt.rdb$trigger_inactive, rt.rdb$valid_blr as tg_valid_blr
from rdb$triggers rt
where
rt.rdb$system_flag is distinct from 1 and
rt.rdb$relation_name = upper('test')
;
set count off;
"""
act.isql(switches=['-nod', '-q', str(tmp_fdb)], input = check_metadata, credentials = True, charset = 'utf8', connect_db = False, combine_output = True, io_enc = locale.getpreferredencoding())
for line in act.clean_stdout.splitlines():
outcomes_map['check_metadata'] += line+'\n'
act.reset()
###########################################################################
check_avail_db_objects = """
set list on;
set echo on;
select * from v_worker;
execute procedure sp_main;
select fn_main() from rdb$database;
execute procedure pg_test.pg_sp_worker;
select pg_test.pg_fn_worker() from rdb$database;
insert into test(id, x, y) values(-1, -1, -1) returning id, x, y;
"""
act.isql(switches=['-nod', '-q', str(tmp_fdb)], input = check_avail_db_objects, credentials = True, charset = 'utf8', connect_db = False, combine_output = True, io_enc = locale.getpreferredencoding())
for line in act.clean_stdout.splitlines():
outcomes_map['check_avail_db_objects'] += line+'\n'
act.reset()
for k,v in outcomes_map.items():
print(k)
for p in v.splitlines():
print(p)
print('')
###########################################################################
act.expected_stdout = """
restore_log
gbak:finishing, closing, and going home
gbak:adjusting the ONLINE and FORCED WRITES flags
check_metadata
RDB$INDEX_NAME TEST_X
RDB$INDEX_INACTIVE 1
RDB$INDEX_NAME TEST_Y
RDB$INDEX_INACTIVE 1
Records affected: 2
RDB$PACKAGE_NAME <null>
SP_NAME SP_INIT
SP_VALID_BLR 1
RDB$PACKAGE_NAME <null>
SP_NAME SP_MAIN
SP_VALID_BLR 1
RDB$PACKAGE_NAME <null>
SP_NAME SP_WORKER
SP_VALID_BLR 1
RDB$PACKAGE_NAME PG_TEST
SP_NAME PG_SP_WORKER
SP_VALID_BLR 1
Records affected: 4
RDB$PACKAGE_NAME <null>
FN_NAME FN_INIT
FN_VALID_BLR 1
RDB$PACKAGE_NAME <null>
FN_NAME FN_MAIN
FN_VALID_BLR 1
RDB$PACKAGE_NAME <null>
FN_NAME FN_WORKER
FN_VALID_BLR 1
RDB$PACKAGE_NAME PG_TEST
FN_NAME PG_FN_WORKER
FN_VALID_BLR 1
Records affected: 4
RDB$TRIGGER_NAME TEST_BI
RDB$TRIGGER_INACTIVE 0
TG_VALID_BLR 1
Records affected: 1
check_avail_db_objects
select * from v_worker;
Statement failed, SQLSTATE = 42000
invalid request BLR at offset 35
-there is no index TEST_Y for table TEST
execute procedure sp_main;
Statement failed, SQLSTATE = 2F000
Error while parsing procedure SP_MAIN's BLR
-Error while parsing procedure SP_WORKER's BLR
-invalid request BLR at offset 66
-there is no index TEST_X for table TEST
select fn_main() from rdb$database;
Statement failed, SQLSTATE = 2F000
Error while parsing function FN_MAIN's BLR
-Error while parsing function FN_WORKER's BLR
-invalid request BLR at offset 72
-there is no index TEST_X for table TEST
execute procedure pg_test.pg_sp_worker;
Statement failed, SQLSTATE = 2F000
Error while parsing procedure PG_TEST.PG_SP_WORKER's BLR
-invalid request BLR at offset 66
-there is no index TEST_X for table TEST
select pg_test.pg_fn_worker() from rdb$database;
Statement failed, SQLSTATE = 2F000
Error while parsing function PG_TEST.PG_FN_WORKER's BLR
-invalid request BLR at offset 72
-there is no index TEST_X for table TEST
insert into test(id, x, y) values(-1, -1, -1) returning id, x, y;
Statement failed, SQLSTATE = 42000
invalid request BLR at offset
-there is no index TEST_X for table TEST
"""
act.stdout = capsys.readouterr().out
assert act.clean_stdout == act.clean_expected_stdout
act.reset()
# Restore with indices. This is necessary to drop the database safely otherwise connect
# to drop will fail in test treadown as connect trigger referes to index tat was not activated
with act.connect_server() as srv:
backup.seek(0)
srv.database.local_restore(backup_stream=backup, database=act.db.db_path,
flags=SrvRestoreFlag.REPLACE)
#
diff_meta = ''.join(unified_diff(meta_1.splitlines(), meta_2.splitlines()))
diff_log = [line for line in unified_diff(log_before, log_after) if line.startswith('+') and 'Validation finished:' in line]
# Checks
assert diff_meta == ''
assert diff_log == ['+\tValidation finished: 0 errors, 0 warnings, 0 fixed\n']

View File

@ -7,6 +7,10 @@ TITLE: Don't work subquery in COALESCE
DESCRIPTION:
JIRA: CORE-2051
FBTEST: bugs.core_2051
NOTES:
[12.09.2024] pzotov
Removed execution plan from expected output.
Requested by dimitr, letters with subj 'core_2051_test', since 11.09.2024 17:16.
"""
import pytest
@ -28,16 +32,13 @@ test_script = """
insert into test2 values(2);
commit;
set plan on;
set list on;
select coalesce((select t2.id from test2 t2 where t2.id = t1.id), 0) id2 from test1 t1 order by t1.id;
"""
act = isql_act('db', test_script)
act = isql_act('db', test_script, substitutions = [ ('[ \t]+',' ') ])
expected_stdout = """
PLAN (T2 INDEX (TEST2_PK))
PLAN (T1 ORDER TEST1_PK)
ID2 1
ID2 2
ID2 0
@ -46,6 +47,6 @@ expected_stdout = """
@pytest.mark.version('>=3')
def test_1(act: Action):
act.expected_stdout = expected_stdout
act.execute()
act.execute(combine_output = True)
assert act.clean_stdout == act.clean_expected_stdout

View File

@ -8,44 +8,76 @@ DESCRIPTION:
JIRA: CORE-2227
FBTEST: bugs.core_2227
NOTES:
[25.1.2022] pcisar
For yet unknown reason, ISQL gets malformed stdin from act.execute() although it was passed
correctly encoded in iso8859_1. Test changed to use script file writen in iso8859_1
which works fine.
[06.10.2022] pzotov
Could not complete adjusting for LINUX in new-qa.
DEFERRED.
[31.10.2024] pzotov
Bug was fixed for too old FB (2.1.2; 2.5 Beta1) so firebird-driver and/or QA-plugin
will not able to run on this version in order to reproduce problem.
Source for this test was taken from ticket almost w/o changes. Only aux view has been added ('v_conn_cset') for
showing current connection protocol and character set - we make query to this view two twice: one for TCP and then
for local protocol.
Checked on 6.0.0.511 (Windows/Linux); 5.0.2.1550; 4.0.6.3165; 3.0.2.32670, 3,0,1,32609
"""
import platform
import pytest
from pathlib import Path
import pytest
from firebird.qa import *
init_script = """
RECREATE TABLE TESTING (
"CÓDIGO" INTEGER
db = db_factory(charset='ISO8859_1')
act = isql_act('db', substitutions = [ ('[ \\t]+', ' '), ('TCPv(4|6)', 'TCP') ])
tmp_sql = temp_file('tmp_core_2227.sql')
@pytest.mark.intl
@pytest.mark.version('>=3.0.0')
def test_1(act: Action, tmp_sql: Path):
test_script = f"""
set bail on;
set list on;
recreate table testing (
"CÓDIGO" integer
);
"""
db = db_factory(charset='ISO8859_1', init=init_script)
test_script = """
SET TERM ^;
CREATE TRIGGER TESTING_I FOR TESTING
ACTIVE BEFORE INSERT POSITION 0
AS
BEGIN
NEW."CÓDIGO" = 1;
END
commit;
set term ^;
create trigger testing_i for testing active before insert position 0 as
begin
new."CÓDIGO" = 1;
end
^
set term ;^
commit;
create view v_conn_cset as
select
rdb$get_context('SYSTEM', 'NETWORK_PROTOCOL') as conn_protocol
,c.rdb$character_set_name as connection_cset
,r.rdb$character_set_name as db_default_cset
from mon$attachments a
join rdb$character_sets c on a.mon$character_set_id = c.rdb$character_set_id
cross join rdb$database r where a.mon$attachment_id=current_connection;
commit;
connect '{act.db.dsn}';
select * from v_conn_cset;
insert into testing default values returning "CÓDIGO";
rollback;
connect '{act.db.db_path}';
select * from v_conn_cset;
insert into testing default values returning "CÓDIGO";
"""
act = isql_act('db', test_script)
tmp_sql.write_text(test_script, encoding='iso8859_1')
act.expected_stdout = """
CONN_PROTOCOL TCPv4
CONNECTION_CSET ISO8859_1
DB_DEFAULT_CSET ISO8859_1
CÓDIGO 1
script_file = temp_file('test_script.sql')
@pytest.mark.skipif(platform.system() != 'Windows', reason='FIXME: see notes')
@pytest.mark.version('>=3')
def test_1(act: Action, script_file: Path):
script_file.write_text(test_script, encoding='iso8859_1')
act.isql(switches=[], input_file=script_file)
CONN_PROTOCOL <null>
CONNECTION_CSET ISO8859_1
DB_DEFAULT_CSET ISO8859_1
CÓDIGO 1
"""
act.isql(switches = ['-q'], input_file = tmp_sql, charset = 'iso8859_1', combine_output = True)
assert act.clean_stdout == act.clean_expected_stdout

View File

@ -45,6 +45,7 @@ expected_stdout = """
Records affected: 1
"""
@pytest.mark.intl
@pytest.mark.version('>=3')
def test_1(act: Action):
non_ascii_query = "select 'gång' as non_ascii_literal from rdb$database"

View File

@ -50,6 +50,7 @@ expected_stdout_b = """
VARC1_ASCII A.
"""
@pytest.mark.intl
@pytest.mark.version('>=3.0')
def test_1(act: Action):
act.script = test_script

File diff suppressed because it is too large Load Diff

View File

@ -22,6 +22,7 @@ db = db_factory(init=init_script)
act = python_act('db')
@pytest.mark.intl
@pytest.mark.version('>=3')
def test_1(act: Action):
with act.db.connect(charset='CP943C') as con:

View File

@ -3,29 +3,31 @@
"""
ID: issue-3296
ISSUE: 3296
TITLE: Exception when upper casing string with lowercase y trema (code 0xFF in ISO8859_1)
TITLE: Exception when upper casing string with 'ÿ' (lowercase y trema, code 0xFF in ISO8859_1)
DESCRIPTION:
Test creates table and fills it with non-ascii characters in init_script, using charset = UTF8.
Then it generates .sql script for running it in separae ISQL process.
This script makes connection to test DB using charset = ISO8859_1 and perform several queries.
Result will be redirected to .log and .err files (they will be encoded, of course, also in ISO8859_1).
Finally, we open .log file (using codecs package), convert its content to UTF8 and show in expected_stdout.
JIRA: CORE-2912
FBTEST: bugs.core_2912
NOTES:
[16.11.2021] pcisar
This test fails as UPPER('ÿ') does not work properly
[16.09.2022] pzotov
Trouble with 'ÿ' raises only on LINUX. All fine on Windows.
Mark for running on Windows was *temporary* added to this test. Problem will be investigated.
[31.10.2024] pzotov
Bug was fixed for too old FB (2.1.6; 2.5.3; 3.0 Alpha 1), firebird-driver and/or QA-plugin
will not able to run on this version in order to reproduce problem.
Checked on Windows: 3.0.8.33535, 4.0.1.2692, 5.0.0.730
Checked on 6.0.0.511 (Windows/Linux); 5.0.2.1551; 4.0.6.3165; 3.0.13.33794
"""
from pathlib import Path
import pytest
from firebird.qa import *
init_script = """
db = db_factory(charset='ISO8859_1')
act = isql_act('db', substitutions=[('[ \\t]+', ' ')])
tmp_sql = temp_file('tmp_core_2912.sql')
@pytest.mark.version('>=3.0.0')
def test_1(act: Action, tmp_sql: Path):
test_script = """
create table test(c varchar(10));
commit;
insert into test(c) values('ÿ');
@ -33,13 +35,6 @@ init_script = """
commit;
create index test_cu on test computed by (upper (c collate iso8859_1));
commit;
"""
db = db_factory(charset='ISO8859_1', init=init_script)
act = python_act('db')
test_script = """set names ISO8859_1;
set list on;
select upper('aÿb') au from rdb$database;
select c, upper(c) cu from test where c starting with upper('ÿ');
@ -55,7 +50,12 @@ test_script = """set names ISO8859_1;
select c, upper(c) cu from test where upper (c collate iso8859_1) starting with upper('Faÿ');
"""
expected_stdout = """
# ::: NB :::
# For proper output of test, input script must be encoded in iso8859_1.
#
tmp_sql.write_text(test_script, encoding = 'iso8859_1')
act.expected_stdout = """
AU AÿB
C ÿ
CU ÿ
@ -74,9 +74,6 @@ expected_stdout = """
CU FAÿ
"""
@pytest.mark.platform('Windows')
@pytest.mark.version('>=3')
def test_1(act: Action):
act.expected_stdout = expected_stdout
act.isql(switches=['-q'], charset='ISO8859_1', input=test_script)
act.isql(switches = ['-q'], input_file = tmp_sql, charset = 'iso8859_1', combine_output = True)
assert act.clean_stdout == act.clean_expected_stdout

View File

@ -41,21 +41,19 @@ test_script = """
join rdb$types rt on
rd.rdb$depended_on_type = rt.rdb$type
and rt.rdb$type_name containing upper('COLLATION')
order by 1;
order by dep_name, dep_on
;
"""
act = isql_act('db', test_script)
expected_stdout = """
DEP_NAME P1
DEP_ON WIN1250
DEP_ON_TYPE COLLATION
DEP_NAME P1
DEP_ON UTF8
DEP_ON_TYPE COLLATION
DEP_NAME P1
DEP_ON WIN1250
DEP_ON_TYPE COLLATION
DEP_NAME P2
DEP_ON WIN1250
DEP_ON_TYPE COLLATION

View File

@ -2267,6 +2267,7 @@ expected_stdout_2 = """
S_WHITE_SPACE 0
"""
@pytest.mark.intl
@pytest.mark.version('>=4.0')
def test_2(act_2: Action):
act_2.expected_stdout = expected_stdout_2

View File

@ -32,6 +32,7 @@ ASCII_VAL
"""
@pytest.mark.intl
@pytest.mark.version('>=3')
def test_1(act: Action):
act.expected_stdout = expected_stdout

View File

@ -203,6 +203,7 @@ expected_stdout = """
TRIMMED_CHAR_LEN 11
"""
@pytest.mark.intl
@pytest.mark.version('>=3.0')
def test_1(act: Action):
act.expected_stdout = expected_stdout

View File

@ -164,6 +164,7 @@ expected_stdout = """
EQUAL 1
"""
@pytest.mark.intl
@pytest.mark.version('>=3.0')
def test_1(act: Action):
act.expected_stdout = expected_stdout

View File

@ -48,6 +48,7 @@ After line 4 in file /tmp/pytest-of-pcisar/pytest-559/test_10/test_script.sql
script_file = temp_file('test_script.sql')
@pytest.mark.intl
@pytest.mark.version('>=3')
def test_1(act: Action, script_file: Path):
script_file.write_text("""

View File

@ -8,16 +8,65 @@ DESCRIPTION:
JIRA: CORE-3489
FBTEST: bugs.core_3489
NOTES:
[06.10.2022] pzotov
Could not complete adjusting for LINUX in new-qa.
DEFERRED.
[30.10.2024] pzotov
Bug was fixed for too old FB (3.0 Alpha 1), firebird-driver and/or QA-plugin
will not able to run on this version in order to reproduce problem.
Source for this test was taken from ticket almost w/o changes. Only aux view has been added ('v_conn_cset') for
showing current connection protocol and character set - we make query to this view two twice: one for TCP and then
for local protocol.
Checked on 6.0.0.511 (Windows/Linux); 5.0.2.1550; 4.0.6.3165; 3.0.2.32670, 3,0,1,32609
"""
import platform
import pytest
import locale
from pathlib import Path
import pytest
from firebird.qa import *
init_script = """
db = db_factory(charset='WIN1251')
act = python_act('db', substitutions=[('MSG_BLOB_ID.*', ''), ('TCPv(4|6)', 'TCP')])
expected_stdout = """
CONN_PROTOCOL TCP
CONNECTION_CSET WIN1251
DB_DEFAULT_CSET WIN1251
Records affected: 1
Это проверка на вывод строки "Йцукёнг"
Это проверка на вывод строки "Йцукёнг"
Records affected: 2
CONN_PROTOCOL <null>
CONNECTION_CSET WIN1251
DB_DEFAULT_CSET WIN1251
Records affected: 1
Это проверка на вывод строки "Йцукёнг"
Это проверка на вывод строки "Йцукёнг"
Records affected: 2
"""
tmp_sql = temp_file('tmp_core_3489.sql')
@pytest.mark.intl
@pytest.mark.version('>=3.0.0')
def test_1(act: Action, tmp_sql: Path):
tmp_sql.write_text(
f"""
set bail on;
set list on;
set blob all;
set count on;
set names win1251;
connect '{act.db.dsn}';
create view v_conn_cset as
select
rdb$get_context('SYSTEM', 'NETWORK_PROTOCOL') as conn_protocol
,c.rdb$character_set_name as connection_cset
,r.rdb$character_set_name as db_default_cset
from mon$attachments a
join rdb$character_sets c on a.mon$character_set_id = c.rdb$character_set_id
cross join rdb$database r where a.mon$attachment_id=current_connection;
set term ^;
create or alter procedure sp_test
returns (
@ -30,37 +79,25 @@ init_script = """
^
set term ;^
commit;
"""
db = db_factory(charset='WIN1251', init=init_script)
act = python_act('db', substitutions=[('MSG_BLOB_ID.*', '')])
expected_stdout = """
Это проверка на вывод строки "Йцукёнг"
Это проверка на вывод строки "Йцукёнг"
Records affected: 2
"""
script_file = temp_file('test_script.sql')
@pytest.mark.skipif(platform.system() != 'Windows', reason='FIXME: see notes')
@pytest.mark.version('>=3')
def test_1(act: Action, script_file: Path):
script_file.write_text("""
set list on;
set blob all;
set count on;
set list on;
--------------------------
connect '{act.db.dsn}'; -- check TCP protocol
select * from v_conn_cset;
select msg_blob_id
from sp_test
union
select msg_blob_id
from sp_test;
""", encoding='cp1251')
commit;
--------------------------
connect '{act.db.db_path}'; -- check local protocol
select * from v_conn_cset;
select msg_blob_id
from sp_test
union
select msg_blob_id
from sp_test;
"""
,encoding='cp1251')
act.expected_stdout = expected_stdout
act.isql(switches=[], input_file=script_file, charset='WIN1251')
act.isql(switches = ['-q'], input_file = tmp_sql, charset = 'WIN1251', combine_output = True, connect_db = False)
assert act.clean_stdout == act.clean_expected_stdout

View File

@ -37,6 +37,7 @@ tmp_role2 = role_factory('db', name = '"Groß"')
act = python_act('db')
@pytest.mark.intl
@pytest.mark.version('>=3.0')
def test_1(act: Action, tmp_user: User, tmp_role1: Role, tmp_role2: Role, capsys):
init_script = f"""

View File

@ -83,6 +83,7 @@ expected_stderr = """
- Russian: Новый остаток будет меньше нуля (-6)
"""
@pytest.mark.intl
@pytest.mark.version('>=3.0')
def test_1(act: Action):
act.expected_stderr = expected_stderr

View File

@ -7,6 +7,9 @@ TITLE: Non-ASCII data in SEC$USERS is not read correctly
DESCRIPTION:
JIRA: CORE-4301
FBTEST: bugs.core_4301
NOTES:
[04.09.2024] pzotov
Added 'using plugin Srp' into 'CREATE USER' statements, in order to check 'COMMENT ON USER' with non-ascii text.
"""
import pytest
@ -20,31 +23,46 @@ user_a = user_factory('db', name='u30a', password='u30a', do_not_create=True)
user_b = user_factory('db', name='u30b', password='u30b', do_not_create=True)
test_script = """
-- Note: this test differs from ticket: instead of add COMMENTS to users
-- it only defines their `firstname` attribute, because sec$users.sec$description
-- can be displayed only when plugin UserManager = Srp.
-- Field `firstname` is defined as:
-- VARCHAR(32) CHARACTER SET UNICODE_FSS COLLATE UNICODE_FSS
-- we can put in it max 16 non-ascii characters
create or alter user u30a password 'u30a' firstname 'Полиграф Шариков';
create or alter user u30b password 'u30b' firstname 'Léopold Frédéric';
create or alter user u30a password 'u30a' firstname 'Полиграф Шариков' using plugin Srp;
create or alter user u30b password 'u30b' firstname 'Léopold Frédéric' using plugin Srp;
commit;
comment on user u30a is 'это кто-то из наших';
comment on user u30b is 'é alguém do Brasil';
commit;
/*
show domain rdb$user;
show domain SEC$NAME_PART;
show table sec$users;
*/
set list on;
select u.sec$user_name, u.sec$first_name
select
-- 3.x: CHAR(31) CHARACTER SET UNICODE_FSS Nullable
-- 4.x, 5.x: (RDB$USER) CHAR(63) Nullable
-- FB 6.x: (RDB$USER) CHAR(63) CHARACTER SET UTF8 Nullable
u.sec$user_name
,u.sec$first_name -- (SEC$NAME_PART) VARCHAR(32) Nullable
,u.sec$description as descr_blob_id -- (RDB$DESCRIPTION) BLOB segment 80, subtype TEXT Nullable
from sec$users u
where upper(u.sec$user_name) in (upper('u30a'), upper('u30b'));
commit;
"""
act = isql_act('db', test_script)
act = isql_act('db', test_script, substitutions = [ ('DESCR_BLOB_ID.*',''),('[ \t]+',' ') ] )
expected_stdout = """
SEC$USER_NAME U30A
SEC$FIRST_NAME Полиграф Шариков
это кто-то из наших
SEC$USER_NAME U30B
SEC$FIRST_NAME Léopold Frédéric
é alguém do Brasil
"""
@pytest.mark.intl
@pytest.mark.version('>=3.0')
def test_1(act: Action, user_a: User, user_b: User):
act.expected_stdout = expected_stdout

View File

@ -101,7 +101,7 @@ def test_1(act: Action):
"""
# Case 1: Trace functions enabled
with act.trace(db_events=trace):
act.isql(switches=['-n', '-q'], input=func_script % (123, 456))
act.isql(switches=['-n', '-q'], input=func_script % (123, 456), combine_output = True)
#
for line in act.trace_log:
if (func_start_ptn.search(line)
@ -112,7 +112,7 @@ def test_1(act: Action):
# Case 2: Trace functions disabled
act.trace_log.clear()
with act.trace(db_events=trace[:-2]):
act.isql(switches=['-n', '-q'], input=func_script % (789, 987))
act.isql(switches=['-n', '-q'], input=func_script % (789, 987), combine_output = True)
#
for line in act.trace_log:
if (func_start_ptn.search(line)

View File

@ -21,8 +21,20 @@ NOTES:
See 'tx2.commit()' in the code. If we replace it with 'con2.commit()' then Tx2 will be
*silently* rolled back (!!despite that we issued con.commit() !!) and we will not get any
error messages. I'm not sure whether this correct or no.
Checked on 3.0.8.33535 (SS/CS), 4.0.1.2692 (SS/CS), 5.0.0.730
[22.08.2024] pzotov
* Changed DDL because of SubQueryConversion config parameter appearance.
We have to AVOID usage of queries which have plan that can be changed when firebird.conf has
SubQueryConversion = true. In that case some index can be excluded from plan and thus
it can be dropped on first iteration of 'for x_isol in tx_isol_lst' loop. This causes unexpected
error 'index not found' for subsequent checks.
* Added check for error message when we try to drop standalone function.
* Assert moved out to the point after loop in order to show whole result in case of some error
(rather than only one message block for some particular x_isol).
* Excluded check of FB 3.x (this version no more changed).
Checked on 6.0.0.442, 5.0.2.1479, 4.0.6.3142
"""
import pytest
@ -46,17 +58,24 @@ ddl_script = """
create index test1_id on test1(id);
commit;
create descending index test2_id_x_desc on test2(id,x);
create descending index test2_x on test2(x);
commit;
create or alter view v_test as select id,x from test1 where id between 15 and 30;
commit;
set term ^;
create or alter function fn_worker(a_x int) returns int as
declare v_id int;
begin
execute statement ('select max(b.id) from test2 b where b.x >= ?') (:a_x) into v_id;
return v_id;
end
^
create or alter procedure sp_worker(a_id int) returns(x int) as
begin
for
execute statement ('select v.x from v_test v where v.id = ? and exists(select * from test2 b where b.id = v.id)') (:a_id)
execute statement ('select v.x from v_test v where v.id = ? and v.id >= fn_worker(v.x)') (:a_id)
into x
do
suspend;
@ -84,48 +103,18 @@ ddl_script = """
commit;
"""
expected_stdout = """
lock conflict on no wait transaction
-unsuccessful metadata update
-object PROCEDURE "SP_TEST" is in use
(335544345, 335544351, 335544453)
lock conflict on no wait transaction
-unsuccessful metadata update
-object PROCEDURE "SP_WORKER" is in use
(335544345, 335544351, 335544453)
lock conflict on no wait transaction
-unsuccessful metadata update
-object VIEW "V_TEST" is in use
(335544345, 335544351, 335544453)
lock conflict on no wait transaction
-unsuccessful metadata update
-object TABLE "TEST2" is in use
(335544345, 335544351, 335544453)
lock conflict on no wait transaction
-unsuccessful metadata update
-object INDEX "TEST1_ID" is in use
(335544345, 335544351, 335544453)
lock conflict on no wait transaction
-unsuccessful metadata update
-object INDEX "TEST2_ID_X_DESC" is in use
(335544345, 335544351, 335544453)
"""
@pytest.mark.version('>=3.0.6')
@pytest.mark.version('>=4.0')
def test_1(act: Action, capsys):
act.isql(switches=[], input=ddl_script)
drop_commands = [ 'drop procedure sp_test',
'drop procedure sp_worker',
'drop function fn_worker',
'drop view v_test',
'drop table test2',
'drop index test1_id',
'drop index test2_id_x_desc']
'drop index test2_x'
]
tx_isol_lst = [ Isolation.READ_COMMITTED_NO_RECORD_VERSION,
Isolation.READ_COMMITTED_RECORD_VERSION,
@ -147,6 +136,7 @@ def test_1(act: Action, capsys):
for cmd in drop_commands:
with act.db.connect() as con2:
custom_tpb = tpb(isolation = x_isol, lock_timeout=0)
print(x_isol.name, cmd)
tx2 = con2.transaction_manager(custom_tpb)
tx2.begin()
cur2 = tx2.cursor()
@ -164,7 +154,218 @@ def test_1(act: Action, capsys):
print(e.__str__())
print(e.gds_codes)
act.expected_stdout = expected_stdout
act.expected_stdout = f"""
READ_COMMITTED_NO_RECORD_VERSION drop procedure sp_test
lock conflict on no wait transaction
-unsuccessful metadata update
-object PROCEDURE "SP_TEST" is in use
(335544345, 335544351, 335544453)
READ_COMMITTED_NO_RECORD_VERSION drop procedure sp_worker
lock conflict on no wait transaction
-unsuccessful metadata update
-object PROCEDURE "SP_WORKER" is in use
(335544345, 335544351, 335544453)
READ_COMMITTED_NO_RECORD_VERSION drop function fn_worker
lock conflict on no wait transaction
-unsuccessful metadata update
-object FUNCTION "FN_WORKER" is in use
(335544345, 335544351, 335544453)
READ_COMMITTED_NO_RECORD_VERSION drop view v_test
lock conflict on no wait transaction
-unsuccessful metadata update
-object VIEW "V_TEST" is in use
(335544345, 335544351, 335544453)
READ_COMMITTED_NO_RECORD_VERSION drop table test2
lock conflict on no wait transaction
-unsuccessful metadata update
-object TABLE "TEST2" is in use
(335544345, 335544351, 335544453)
READ_COMMITTED_NO_RECORD_VERSION drop index test1_id
lock conflict on no wait transaction
-unsuccessful metadata update
-object INDEX "TEST1_ID" is in use
(335544345, 335544351, 335544453)
READ_COMMITTED_NO_RECORD_VERSION drop index test2_x
lock conflict on no wait transaction
-unsuccessful metadata update
-object INDEX "TEST2_X" is in use
(335544345, 335544351, 335544453)
READ_COMMITTED_RECORD_VERSION drop procedure sp_test
lock conflict on no wait transaction
-unsuccessful metadata update
-object PROCEDURE "SP_TEST" is in use
(335544345, 335544351, 335544453)
READ_COMMITTED_RECORD_VERSION drop procedure sp_worker
lock conflict on no wait transaction
-unsuccessful metadata update
-object PROCEDURE "SP_WORKER" is in use
(335544345, 335544351, 335544453)
READ_COMMITTED_RECORD_VERSION drop function fn_worker
lock conflict on no wait transaction
-unsuccessful metadata update
-object FUNCTION "FN_WORKER" is in use
(335544345, 335544351, 335544453)
READ_COMMITTED_RECORD_VERSION drop view v_test
lock conflict on no wait transaction
-unsuccessful metadata update
-object VIEW "V_TEST" is in use
(335544345, 335544351, 335544453)
READ_COMMITTED_RECORD_VERSION drop table test2
lock conflict on no wait transaction
-unsuccessful metadata update
-object TABLE "TEST2" is in use
(335544345, 335544351, 335544453)
READ_COMMITTED_RECORD_VERSION drop index test1_id
lock conflict on no wait transaction
-unsuccessful metadata update
-object INDEX "TEST1_ID" is in use
(335544345, 335544351, 335544453)
READ_COMMITTED_RECORD_VERSION drop index test2_x
lock conflict on no wait transaction
-unsuccessful metadata update
-object INDEX "TEST2_X" is in use
(335544345, 335544351, 335544453)
SNAPSHOT drop procedure sp_test
lock conflict on no wait transaction
-unsuccessful metadata update
-object PROCEDURE "SP_TEST" is in use
(335544345, 335544351, 335544453)
SNAPSHOT drop procedure sp_worker
lock conflict on no wait transaction
-unsuccessful metadata update
-object PROCEDURE "SP_WORKER" is in use
(335544345, 335544351, 335544453)
SNAPSHOT drop function fn_worker
lock conflict on no wait transaction
-unsuccessful metadata update
-object FUNCTION "FN_WORKER" is in use
(335544345, 335544351, 335544453)
SNAPSHOT drop view v_test
lock conflict on no wait transaction
-unsuccessful metadata update
-object VIEW "V_TEST" is in use
(335544345, 335544351, 335544453)
SNAPSHOT drop table test2
lock conflict on no wait transaction
-unsuccessful metadata update
-object TABLE "TEST2" is in use
(335544345, 335544351, 335544453)
SNAPSHOT drop index test1_id
lock conflict on no wait transaction
-unsuccessful metadata update
-object INDEX "TEST1_ID" is in use
(335544345, 335544351, 335544453)
SNAPSHOT drop index test2_x
lock conflict on no wait transaction
-unsuccessful metadata update
-object INDEX "TEST2_X" is in use
(335544345, 335544351, 335544453)
SERIALIZABLE drop procedure sp_test
lock conflict on no wait transaction
-unsuccessful metadata update
-object PROCEDURE "SP_TEST" is in use
(335544345, 335544351, 335544453)
SERIALIZABLE drop procedure sp_worker
lock conflict on no wait transaction
-unsuccessful metadata update
-object PROCEDURE "SP_WORKER" is in use
(335544345, 335544351, 335544453)
SERIALIZABLE drop function fn_worker
lock conflict on no wait transaction
-unsuccessful metadata update
-object FUNCTION "FN_WORKER" is in use
(335544345, 335544351, 335544453)
SERIALIZABLE drop view v_test
lock conflict on no wait transaction
-unsuccessful metadata update
-object VIEW "V_TEST" is in use
(335544345, 335544351, 335544453)
SERIALIZABLE drop table test2
lock conflict on no wait transaction
-unsuccessful metadata update
-object TABLE "TEST2" is in use
(335544345, 335544351, 335544453)
SERIALIZABLE drop index test1_id
lock conflict on no wait transaction
-unsuccessful metadata update
-object INDEX "TEST1_ID" is in use
(335544345, 335544351, 335544453)
SERIALIZABLE drop index test2_x
lock conflict on no wait transaction
-unsuccessful metadata update
-object INDEX "TEST2_X" is in use
(335544345, 335544351, 335544453)
READ_COMMITTED_READ_CONSISTENCY drop procedure sp_test
lock conflict on no wait transaction
-unsuccessful metadata update
-object PROCEDURE "SP_TEST" is in use
(335544345, 335544351, 335544453)
READ_COMMITTED_READ_CONSISTENCY drop procedure sp_worker
lock conflict on no wait transaction
-unsuccessful metadata update
-object PROCEDURE "SP_WORKER" is in use
(335544345, 335544351, 335544453)
READ_COMMITTED_READ_CONSISTENCY drop function fn_worker
lock conflict on no wait transaction
-unsuccessful metadata update
-object FUNCTION "FN_WORKER" is in use
(335544345, 335544351, 335544453)
READ_COMMITTED_READ_CONSISTENCY drop view v_test
lock conflict on no wait transaction
-unsuccessful metadata update
-object VIEW "V_TEST" is in use
(335544345, 335544351, 335544453)
READ_COMMITTED_READ_CONSISTENCY drop table test2
lock conflict on no wait transaction
-unsuccessful metadata update
-object TABLE "TEST2" is in use
(335544345, 335544351, 335544453)
READ_COMMITTED_READ_CONSISTENCY drop index test1_id
lock conflict on no wait transaction
-unsuccessful metadata update
-object INDEX "TEST1_ID" is in use
(335544345, 335544351, 335544453)
READ_COMMITTED_READ_CONSISTENCY drop index test2_x
lock conflict on no wait transaction
-unsuccessful metadata update
-object INDEX "TEST2_X" is in use
(335544345, 335544351, 335544453)
"""
act.stdout = capsys.readouterr().out
assert act.clean_stdout == act.clean_expected_stdout
act.reset()

View File

@ -835,6 +835,7 @@ expected_stdout = """
OVERLAY_UTF8_TO_ASCII_LO 1
"""
@pytest.mark.intl
@pytest.mark.version('>=3')
def test_1(act: Action):
act.expected_stdout = expected_stdout

View File

@ -63,6 +63,7 @@ expected_stdout = """
Records affected: 0
"""
@pytest.mark.intl
@pytest.mark.version('>=4.0')
def test_1(act: Action):
act.expected_stdout = expected_stdout

View File

@ -167,6 +167,7 @@ expected_stdout = """
privilege:exec : YES
"""
@pytest.mark.intl
@pytest.mark.version('>=4.0')
def test_1(act: Action, non_acii_user: User, test_role: Role, capsys):
act.isql(switches=['-b', '-q'], input=ddl_script)

View File

@ -39,6 +39,7 @@ expected_stdout=f"""
WHOAMI : {NON_ASCII_NAME}
"""
@pytest.mark.intl
@pytest.mark.version('>=4.0')
def test_1(act: Action, non_ascii_user: User, capsys):
with act.db.connect(user=non_ascii_user.name, password=non_ascii_user.password) as con:

View File

@ -34,6 +34,7 @@ expected_stdout = """
C_LEN_UTF8_MIXED 16383
"""
@pytest.mark.intl
@pytest.mark.version('>=3.0')
def test_1(act: Action):
script_file = Path(act.files_dir / 'core_4881.zip', at='core_4881_script.sql')

View File

@ -7,6 +7,17 @@ TITLE: Indices on computed fields are broken after restore (all keys are N
DESCRIPTION:
JIRA: CORE-5118
FBTEST: bugs.core_5118
NOTES:
[12.09.2024] pzotov
Replaced test query so that it does not use index navigation ('plan order') but still checks indexed access.
Three separate queries with 'PLAN ... INDEX' are used instead of one with 'where <comp_field> IN <literals_list>'.
This is because of optimizer changed in 5.x and issues plan with only *one* occurrence of 'INDEX' for such cases.
See: https://github.com/FirebirdSQL/firebird/pull/7707 - "Better processing and optimization if IN <list>".
Commit: https://github.com/FirebirdSQL/firebird/commit/0493422c9f729e27be0112ab60f77e753fabcb5b, 04-sep-2023.
Requested by dimitr, letters with subj 'core_5118_test', since 11.09.2024 17:26.
Checked on 6.0.0.452, 5.0.2.1493, 4.0.5.3136, 3.0.13.33789.
"""
import pytest
@ -34,27 +45,29 @@ init_script = """
db = db_factory(init = init_script)
act = python_act('db')
act = python_act('db', substitutions = [ ('[ \t]+',' ') ])
expected_stdout = """
PLAN (TEST ORDER TEST_CONCAT_TEXT)
ID 1
X nom1
Y prenom1
PLAN (TEST INDEX (TEST_CONCAT_TEXT))
CONCAT_TEXT nom1 prenom1
Records affected: 1
ID 2
X nom2
Y prenom2
PLAN (TEST INDEX (TEST_CONCAT_TEXT))
CONCAT_TEXT nom2 prenom2
Records affected: 1
ID 3
X nom3
Y prenom3
PLAN (TEST INDEX (TEST_CONCAT_TEXT))
CONCAT_TEXT nom3 prenom3
Records affected: 1
"""
Records affected: 3
test_sql = """
set list on;
set plan on;
set count on;
select concat_text from test where concat_text = 'nom1 prenom1';
select concat_text from test where concat_text = 'nom2 prenom2';
select concat_text from test where concat_text = 'nom3 prenom3';
"""
@pytest.mark.version('>=3.0')
@ -63,9 +76,7 @@ def test_1(act: Action):
backup = BytesIO()
srv.database.local_backup(database = act.db.db_path, backup_stream = backup)
backup.seek(0)
srv.database.local_restore(database=act.db.db_path, backup_stream=backup,
flags=SrvRestoreFlag.REPLACE)
srv.database.local_restore(database = act.db.db_path, backup_stream=backup, flags = SrvRestoreFlag.REPLACE)
act.expected_stdout = expected_stdout
act.isql(switches=['-q'],
input='set list on; set plan on; set count on; select * from test order by concat_text;')
act.isql(switches=['-q'], input = test_sql, combine_output = True)
assert act.clean_stdout == act.clean_expected_stdout

View File

@ -62,6 +62,7 @@ remove_metadata = """
r.rdb$system_flag is distinct from 1;
"""
@pytest.mark.intl
@pytest.mark.version('>=3.0')
def test_1(act: Action):
#

View File

@ -20,6 +20,18 @@ NOTES:
External Data Source provider 'inet6://[' not found
========
It was fixed in gh-8156.
3. On Windows there is no way to make IPv6 'fully disabled': address '::1' remains active.
According to https://learn.microsoft.com/en-us/troubleshoot/windows-server/networking/configure-ipv6-in-windows
"You cannot completely disable IPv6 as IPv6 is used internally on the system for many TCPIP tasks.
For example, you will still be able to run ping ::1 after configuring this setting"
We can turn off listening of '::1' by FB server if do following:
* run PowerShell and type there: Enable-NetAdapterBinding -Name "*" -ComponentID ms_tcpip6
* chcp 65001, then: ipconfig /all | findstr /i /r /c:" IPv6.*(preferred)"
* save somewhere IPv6 address from previous command (e.g. 'fe80::f53c:9ecf:aad:4761%14')
* change in firebird.conf: RemoteBindAddress = fe80::f53c:9ecf:aad:4761
But this requires RESTART of FB server thus cannot be used in QA.
Discussed with Vlad 13-jun-2024.
[14.06.2024] pzotov
Checked "on external 'inet6://[::1]/{act.db.db_path}'" after fixed GH-8156, builds:
3.0.12.33757, 4.0.5.3112, 5.0.1.1416, 6.0.0.374

View File

@ -2,7 +2,7 @@
"""
ID: issue-5510
ISSUE: 5510
ISSUE: https://github.com/FirebirdSQL/firebird/issues/5510
TITLE: EXECUTE STATEMENT: BLR error if more than 256 output parameters exist
DESCRIPTION:
We define here number of output args for which one need to made test - see var 'sp_args_count'.
@ -71,5 +71,5 @@ def build_script(ddl_script: Path):
@pytest.mark.version('>=3.0')
def test_1(act: Action, ddl_script: Path):
build_script(ddl_script)
act.isql(switches=[], input_file=ddl_script, charset='NONE')
act.isql(switches=[], input_file=ddl_script, charset='NONE', combine_output = True)
assert act.clean_stdout == act.clean_expected_stdout

File diff suppressed because it is too large Load Diff

View File

@ -45,6 +45,7 @@ test_script = """
script_file = temp_file('test-script.sql')
@pytest.mark.intl
@pytest.mark.version('>=4.0')
def test_1(act: Action, script_file: Path):
script_file.write_text(test_script, encoding='cp1251')

View File

@ -5,66 +5,107 @@ ID: issue-5734
ISSUE: 5734
TITLE: AV in fbclient when reading blob stored in incompatible encoding
DESCRIPTION:
Domain description contains non-ascii text in Latvian
and is created using charset = win1257.
Subsequent connect which tries to get this description uses cp1253 (Greek).
Commit that fixed ticket: 0fab1a85597baa5054a34cae437f5da6096580b0 (20.01.2017 00:43)
JIRA: CORE-5464
FBTEST: bugs.core_5464
NOTES:
[06.10.2022] pzotov
Could not complete adjusting for LINUX in new-qa.
DEFERRED.
[30.10.2024] pzotov
Crash *not* occurs but one may note different behaviour of snapshots before and after fix.
Snapshot before fix (e.g. 90a46fa3, 06-jan-2017) for query to rdb$fields (see view v_domain_descr)
behave differently depending on connection protocol:
* for TCP is does not return any record for query to view 'v_conn_cset';
* for LOCAL protocol its returns weird 'RDB$SYSTEM_FLAG 18775' and error 'SQLSTATE = 42000 / invalid BLOB ID'.
Also, error message for query to view 'v_domain_descr' (before fix) was:
Statement failed, SQLSTATE = HY000
Cannot transliterate character between character sets
request synchronization error
Discussed with Vlad, letters date: 29-oct-2024.
Checked on 6.0.0.511 (Windows/Linux); 5.0.2.1550; 4.0.6.3165; 3.0.13.33793; 3.0.2.32670-0fab1a8.
"""
import platform
import locale
from pathlib import Path
import pytest
from firebird.qa import *
init_script = """
create domain d_int int;
comment on domain d_int is
'*Лев Николаевич Толстой * *Анна Каренина * /Мне отмщение, и аз воздам/ *ЧАСТЬ ПЕРВАЯ* *I *
Все счастливые семьи похожи друг на друга, каждая несчастливая
семья несчастлива по-своему.
Все смешалось в доме Облонских. Жена узнала, что муж был в связи
с бывшею в их доме француженкою-гувернанткой, и объявила мужу, что
не может жить с ним в одном доме. Положение это продолжалось уже
третий день и мучительно чувствовалось и самими супругами, и всеми
членами семьи, и домочадцами. Все члены семьи и домочадцы
чувствовали, что нет смысла в их сожительстве и что на каждом
п1
';
commit;
db = db_factory(charset='win1257')
act = isql_act('db', substitutions = [('TCPv(4|6)', 'TCP')])
tmp_sql = temp_file('tmp_core_5464.sql')
@pytest.mark.intl
@pytest.mark.version('>=3.0.1')
def test_1(act: Action, tmp_sql: Path, capsys):
non_ascii_txt = """
Oblonsku mājā viss bija sajaukts.
Sieva uzzināja, ka viņas vīram ir attiecības ar franču guvernanti,
kas atradās viņu mājā, un paziņoja vīram, ka nevar dzīvot ar viņu vienā mājā.
"""
db_1 = db_factory(charset='WIN1251', init=init_script)
test_script = """
set blob all;
set list on;
select c.rdb$character_set_name as connection_cset, r.rdb$character_set_name as db_default_cset
init_script = f"""
create domain dm_test int;
comment on domain dm_test is '{non_ascii_txt}';
commit;
create view v_conn_cset as
select
rdb$get_context('SYSTEM', 'NETWORK_PROTOCOL') as conn_protocol
,c.rdb$character_set_name as connection_cset
,r.rdb$character_set_name as db_default_cset
from mon$attachments a
join rdb$character_sets c on a.mon$character_set_id = c.rdb$character_set_id
cross join rdb$database r where a.mon$attachment_id=current_connection;
select rdb$field_name, rdb$system_flag, rdb$description
from rdb$fields where rdb$description is not null;
create view v_domain_descr as
select f.rdb$field_name, f.rdb$system_flag, f.rdb$description
from rdb$database d
left join rdb$fields f on f.rdb$description is not null;
commit;
"""
tmp_sql.write_bytes(init_script.encode('cp1257'))
act.isql(switches=['-q'], input_file = tmp_sql, charset='win1257', combine_output = True, io_enc = locale.getpreferredencoding())
assert act.return_code == 0
act = isql_act('db_1', test_script)
test_sql = f"""
set blob all;
set list on;
set count on;
connect '{act.db.dsn}';
select v1.* from v_conn_cset as v1;
select v2.* from v_domain_descr as v2;
commit;
expected_stdout = """
CONNECTION_CSET WIN1250
DB_DEFAULT_CSET WIN1251
connect '{act.db.db_path}';
select v3.* from v_conn_cset as v3;
select v4.* from v_domain_descr as v4;
commit;
"""
act.isql(switches=['-q'], connect_db = False, input = test_sql, charset='win1253', combine_output = True, io_enc = locale.getpreferredencoding())
act.expected_stdout = """
CONN_PROTOCOL TCP
CONNECTION_CSET WIN1253
DB_DEFAULT_CSET WIN1257
Records affected: 1
expected_stderr = """
Statement failed, SQLSTATE = 22018
Cannot transliterate character between character sets
"""
Records affected: 0
@pytest.mark.skipif(platform.system() != 'Windows', reason='FIXME: see notes')
@pytest.mark.version('>=3.0.2')
def test_1(act: Action):
act.expected_stderr = expected_stderr
act.expected_stdout = expected_stdout
act.isql(switches=['-q'], input=test_script, charset='WIN1250')
assert (act.clean_stderr == act.clean_expected_stderr and
act.clean_stdout == act.clean_expected_stdout)
CONN_PROTOCOL <null>
CONNECTION_CSET WIN1253
DB_DEFAULT_CSET WIN1257
Records affected: 1
Statement failed, SQLSTATE = 22018
Cannot transliterate character between character sets
Records affected: 0
"""
assert act.clean_stdout == act.clean_expected_stdout

View File

@ -77,6 +77,7 @@ expected_stderr_isql = """
expected_stdout_trace = test_sql.replace('set list on;', '').replace(';','')
@pytest.mark.intl
@pytest.mark.trace
@pytest.mark.version('>=3.0.6')
@pytest.mark.platform('Windows')

View File

@ -56,6 +56,7 @@ expected_stdout = """
N06 ||
"""
@pytest.mark.intl
@pytest.mark.version('>=4.0')
def test_1(act: Action):
act.expected_stdout = expected_stdout

View File

@ -44,6 +44,7 @@ expected_stdout = """
RES 1
"""
@pytest.mark.intl
@pytest.mark.version('>=3.0.3')
def test_1(act: Action):
act.expected_stdout = expected_stdout

View File

@ -32,6 +32,7 @@ expected_stdout = """
test_script = temp_file('test_script.sql')
@pytest.mark.intl
@pytest.mark.version('>=3.0.4')
def test_1(act: Action, test_script: Path):
if act.is_version('<4'):

View File

@ -76,6 +76,7 @@ test_script = """
rollback;
"""
@pytest.mark.intl
@pytest.mark.version('>=4.0')
def test_1(act: Action):
act.expected_stdout = expected_stdout

View File

@ -36,6 +36,7 @@ ddl_script = """
select rdb$role_name as r_name from rdb$roles where rdb$system_flag is distinct from 1;
"""
@pytest.mark.intl
@pytest.mark.version('>=4.0')
def test_1(act: Action):
act.isql(switches=[], input=ddl_script)

View File

@ -2,11 +2,14 @@
"""
ID: issue-6483
ISSUE: 6483
ISSUE: https://github.com/FirebirdSQL/firebird/issues/6483
TITLE: Procedures and EXECUTE BLOCK without RETURNS should not be allowed to use SUSPEND
DESCRIPTION:
JIRA: CORE-6239
FBTEST: bugs.core_6239
NOTES:
Fix was done by commit https://github.com/FirebirdSQL/firebird/commit/b2b5f9a87cea26a9f12fa231804dba9d0426d3fa
(can be checked by 4.0.0.1763+, date of build since 05-feb-2020).
"""
import pytest

View File

@ -3,8 +3,7 @@
"""
ID: issue-6652
ISSUE: 6652
TITLE: Error message "expected length N, actual M" contains wrong value of M when
charset UTF8 is used in the field declaration of a table
TITLE: Error message "expected length N, actual M" contains wrong value of M when charset UTF8 is used in the field declaration of a table
DESCRIPTION:
All attempts to create/alter table with not-null column with size that not enough space to fit default value must fail.
Length of such column can be declared either directly or via domain - and both of these ways must fail.
@ -133,6 +132,7 @@ expected_stderr = """
-expected length 1, actual 8
"""
@pytest.mark.intl
@pytest.mark.version('>=4.0')
def test_1(act: Action):
act.expected_stderr = expected_stderr

View File

@ -67,6 +67,7 @@ expected_stdout = """
Records affected: 1
"""
@pytest.mark.intl
@pytest.mark.version('>=4.0')
def test_1(act: Action):
act.expected_stdout = expected_stdout

View File

@ -3,8 +3,7 @@
"""
ID: issue-6717
ISSUE: 6717
TITLE: FETCH ABSOLUTE and RELATIVE beyond bounds of cursor should always position
immediately before-first or after-last
TITLE: FETCH ABSOLUTE and RELATIVE beyond bounds of cursor should always position immediately before-first or after-last
DESCRIPTION:
JIRA: CORE-6487
FBTEST: bugs.core_6487

100
tests/bugs/gh_0731_test.py Normal file
View File

@ -0,0 +1,100 @@
#coding:utf-8
"""
ID: issue-731
ISSUE: https://github.com/FirebirdSQL/firebird/issues/731
TITLE: coalesce fails with data type varchar and a non ascii value [CORE388]
DESCRIPTION:
NOTES:
[04.09.2024] pzotov
The issue seems to be fixed long ago.
Old FB versions can not be checked on current firebird QA.
ISQL must work with charset = utf8. Otherwise 'Expected end of statement, encountered EOF' will raise on Linux.
Checked on all recent 3.x ... 6.x -- all fine.
"""
import pytest
from firebird.qa import *
init_sql = """
recreate table trans_table
(
tcode smallint
not null,
code smallint
not null,
name varchar(10),
constraint trans_table_primarykey primary key
(tcode, code)
);
recreate table class1
(
class_name varchar(10)
not null,
class_num smallint
not null,
teacher_id integer,
constraint pk_class1 primary key (class_name, class_num)
);
recreate table class2
(
class_name varchar(10)
not null,
class_num smallint
not null,
teacher_id integer,
constraint pk_class2 primary key (class_name, class_num)
);
set term ^;
create trigger class1_bi for class1 active before insert position 0 as
declare name varchar(10);
begin
select name from trans_table c where c.tcode=2 and c.code=new.class_name
into :name;
new.class_name = case when :name is null then new.class_name else :name end;
-- new.class_name = coalesce(:name, new.class_name);
end
^
create trigger class2_bi for class2 active before insert position 0 as
declare name varchar(10);
begin
select name from trans_table c where c.tcode=2 and c.code=new.class_name
into :name;
-- new.class_name = case when :name is null then new.class_name else :name end;
new.class_name = coalesce(:name, new.class_name);
end
^
set term ;^
commit;
"""
db = db_factory(init = init_sql, charset='win1252')
test_script = """
set bail on;
set list on;
insert into trans_table(tcode, code, name) values (2, 1, 'à');
-- passed
insert into class1(class_name, class_num, teacher_id) values (1, 1, null);
-- failed
insert into class2(class_name, class_num, teacher_id) values (1, 1, null);
select 'Passed' as msg from rdb$database;
"""
act = isql_act('db', test_script, substitutions = [ ('[ \t]+',' ') ])
expected_stdout = """
MSG Passed
"""
@pytest.mark.intl
@pytest.mark.version('>=3.0')
def test_1(act: Action):
    # Run the reproduction script with client charset utf8: both triggers
    # (the CASE-based one on CLASS1 and the COALESCE-based one on CLASS2)
    # must fire without transliteration errors, so the final 'Passed'
    # marker row must be returned.
    act.expected_stdout = expected_stdout
    act.execute(charset = 'utf8')
    assert act.clean_stdout == act.clean_expected_stdout

View File

@ -0,0 +1,57 @@
#coding:utf-8
"""
ID: issue-2292
ISSUE: https://github.com/FirebirdSQL/firebird/issues/2292
TITLE: Isql's extracted script is unusable with interdependent selectable procedures in FB 2.1 [CORE1862]
DESCRIPTION:
Test creates SP which has output parameter *and* SUSPEND clause.
Then we extract metadata and check whether this procedure header contains 'SUSPEND'.
On FB 2.0.7.13318 extracted metadata contains 'EXIT':
CREATE PROCEDURE SP_TEST RETURNS (O INTEGER)
AS
BEGIN EXIT; END ^
Although such code can be compiled, this SP could be called (and returned empty resultset) only in 2.0.7 and before.
Since 2.1 attempt to call such SP will raise:
Statement failed, SQLSTATE = 42000
...
-invalid request BLR at offset ...
-Procedure SP_TEST is not selectable (it does not contain a SUSPEND statement)
"""
import re
import pytest
from firebird.qa import *
init_sql = """
set term ^ ;
create or alter procedure sp_test returns(out_value int) as
begin
out_value = 1;
suspend;
end
^
set term ;^
commit;
"""
db = db_factory(init = init_sql)
act = python_act('db')
@pytest.mark.version('>=3.0.0')
def test_1(act: Action, capsys):
    """Extract DB metadata and verify the selectable SP header keeps SUSPEND (not EXIT)."""
    meta_sql = act.extract_meta()
    EXPECTED_MSG = 'OK'
    # The extracted script must contain the procedure body with an explicit
    # SUSPEND statement. Historic versions emitted 'EXIT' instead, which made
    # the procedure non-selectable when the extracted script was replayed on
    # FB 2.1+ ("Procedure ... is not selectable").
    p = re.compile(r'SP_TEST\s+RETURNS \(OUT_VALUE INTEGER\)\s+AS\s+BEGIN\s+SUSPEND;\s+END\s?\^', re.IGNORECASE)
    if p.search(meta_sql):
        print(EXPECTED_MSG)
    else:
        # Dump the whole metadata script to make a mismatch diagnosable.
        print(f'Could not find pattern "{p.pattern}" in extracted metadata.')
        print(meta_sql)
    act.expected_stdout = EXPECTED_MSG
    act.stdout = capsys.readouterr().out
    assert act.clean_stdout == act.clean_expected_stdout

View File

@ -6,11 +6,16 @@ ISSUE: https://github.com/FirebirdSQL/firebird/issues/3357
TITLE: Bad execution plan if some stream depends on multiple streams via a function [CORE2975]
NOTES:
[04.03.2023] pzotov
Discussed with dimitr, letters 01-mar-2023 18:37 and 04-mar-2023 10:38.
1. Discussed with dimitr, letters 01-mar-2023 18:37 and 04-mar-2023 10:38.
Test must verify that execution plan uses NESTED LOOPS rather than HASH JOIN.
Because of this, tables must be filled with approximately equal volume of data.
Confirmed bug on 3.0.9.33548 (28-dec-2021), plan was:
PLAN HASH (JOIN (T1 INDEX (T1_COL), T2 INDEX (T2_ID)), T3 NATURAL)
2. Commit related to this test:
https://github.com/FirebirdSQL/firebird/commit/1b192404d43a15d403b5ff92760bc5df9d3c89c3
(13.09.2022 19:17, "More complete solution for #3357 and #7118")
One more test that attempts to verify this commit: bugs/gh_7398_test.py
Checked on 5.0.0.970, 4.0.3.2904, 3.0.11.33665.
"""

122
tests/bugs/gh_4314_test.py Normal file
View File

@ -0,0 +1,122 @@
#coding:utf-8
"""
ID: issue-4314
ISSUE: https://github.com/FirebirdSQL/firebird/issues/4314
TITLE: Sub-optimal predicate checking while selecting from a view [CORE3981]
DESCRIPTION:
NOTES:
[20.08.2024] pzotov
Checked on 6.0.0.438, 5.0.2.1479, 4.0.6.3142, 3.0.12.33784.
"""
import locale
import re
import pytest
from firebird.qa import *
init_sql = """
recreate table rr(
rel_name varchar(63)
,id int
,fid int
);
recreate table rf(
rel_name varchar(63)
,fid int
,fnm varchar(63)
);
insert into rr
select a.rdb$relation_name, a.rdb$relation_id, a.rdb$field_id
from rdb$relations a
;
insert into rf select f.rdb$relation_name, f.rdb$field_id, f.rdb$field_name
from rdb$relation_fields f
;
commit;
alter table rr add constraint rr_rel_name_unq unique (rel_name);
create index rr_id on rr (id);
alter table rf add constraint rf_fnm_rel_name_unq unique(fnm, rel_name);
create index rf_rel_name on rf(rel_name);
recreate view v as
select r.rel_name, abs(r.id) as id
from rr r
left
join rf on r.rel_name = rf.rel_name and r.fid = rf.fid
where r.id < 128;
set statistics index rr_rel_name_unq;
set statistics index rr_id;
set statistics index rf_fnm_rel_name_unq;
set statistics index rf_rel_name;
commit;
"""
db = db_factory(init = init_sql)
act = python_act('db')
#-----------------------------------------------------------
def replace_leading(source, char="."):
    """Return *source* with every leading whitespace character replaced by *char*.

    Used to make execution-plan indentation survive whitespace-normalizing
    output substitutions.
    """
    trimmed = source.lstrip()
    pad_width = len(source) - len(trimmed)
    return f"{char * pad_width}{trimmed}"
#-----------------------------------------------------------
@pytest.mark.version('>=3.0')
def test_1(act: Action, capsys):
    """
    Prepare two semantically identical queries -- one selecting from view V,
    one from an equivalent inline derived table -- and print their detailed
    execution plans. Both plans must be identical and index-driven (RR_ID
    range scan feeding a nested loop join), i.e. the `id = 0` predicate must
    be evaluated inside the view/derived-table stream, not after a full scan.
    """
    query_from_view = """
        select /* trace_tag: VIEW */ v.rel_name as v_rel_name, v.id as v_id from v
        where id = 0
    """
    query_from_dt = """
        select /* trace_tag: DERIVED TABLE */ d.rel_name as d_rel_name, d.id as d_id
        from (
            select r.rel_name, abs(r.id) as id
            from rr r
            left
            join rf on r.rel_name = rf.rel_name and r.fid = rf.fid
            where r.id < 128
        ) d
        where d.id = 0
    """
    with act.db.connect() as con:
        cur = con.cursor()
        for test_sql in (query_from_view, query_from_dt):
            # NOTE(review): the prepared statement is not freed explicitly;
            # presumably it is released when the connection closes -- confirm.
            ps = cur.prepare(test_sql)
            # Leading spaces are replaced with dots so that the plan
            # indentation survives whitespace-normalizing substitutions.
            print( '\n'.join([replace_leading(s) for s in ps.detailed_plan.split('\n')]) )

    expected_stdout = """
        Select Expression
        ....-> Filter
        ........-> Nested Loop Join (outer)
        ............-> Filter
        ................-> Table "RR" as "V R" Access By ID
        ....................-> Bitmap
        ........................-> Index "RR_ID" Range Scan (upper bound: 1/1)
        ............-> Filter
        ................-> Table "RF" as "V RF" Access By ID
        ....................-> Bitmap
        ........................-> Index "RF_REL_NAME" Range Scan (full match)
        Select Expression
        ....-> Filter
        ........-> Nested Loop Join (outer)
        ............-> Filter
        ................-> Table "RR" as "D R" Access By ID
        ....................-> Bitmap
        ........................-> Index "RR_ID" Range Scan (upper bound: 1/1)
        ............-> Filter
        ................-> Table "RF" as "D RF" Access By ID
        ....................-> Bitmap
        ........................-> Index "RF_REL_NAME" Range Scan (full match)
    """
    act.expected_stdout = expected_stdout
    act.stdout = capsys.readouterr().out
    assert act.clean_stdout == act.clean_expected_stdout

150
tests/bugs/gh_4954_test.py Normal file
View File

@ -0,0 +1,150 @@
#coding:utf-8
"""
ID: issue-4954
ISSUE: https://github.com/FirebirdSQL/firebird/issues/4954
TITLE: subselect losing the index when where clause includes coalesce() [CORE4640]
DESCRIPTION:
NOTES:
[21.08.2024] pzotov
Confirmed bug on 2.1.7.18553. No such problem on 2.5.927156.
Checked on 6.0.0.438, 5.0.2.1479, 4.0.6.3142, 3.0.12.33784.
"""
import pytest
from firebird.qa import *
init_sql = """
recreate table t1 (
id int not null,
vc1 varchar(1) not null,
i1 int not null,
i2 int not null,
constraint t1_pk primary key (id) using descending index t1_pk,
constraint t1_uk1 unique (i1, i2, vc1)
);
recreate table t2 (
id int not null,
vc1 varchar(1) not null,
i1 int not null,
i2 int not null,
constraint t2_pk primary key (id) using descending index t2_pk,
constraint t2_uk1 unique (i1, i2, vc1)
);
recreate table t3 (
id int not null,
i1_1 int,
i1_2 int
);
create view v1 (ID, VC1, I1, I2) as
select t1.id, t1.vc1, t1.i1, t1.i2
from t1
union all
select t2.id, t2.vc1, t2.i1, t2.i2
from t2;
commit;
insert into t1 (id, vc1, i1, i2) values (9, 'a', 1009, 1000);
insert into t1 (id, vc1, i1, i2) values (8, 'a', 1008, 1000);
insert into t1 (id, vc1, i1, i2) values (7, 'a', 1007, 1000);
insert into t1 (id, vc1, i1, i2) values (6, 'a', 1006, 1000);
insert into t1 (id, vc1, i1, i2) values (5, 'a', 1005, 1000);
insert into t1 (id, vc1, i1, i2) values (4, 'a', 1004, 1000);
insert into t1 (id, vc1, i1, i2) values (3, 'a', 1003, 1000);
insert into t1 (id, vc1, i1, i2) values (2, 'a', 1002, 1000);
insert into t1 (id, vc1, i1, i2) values (1, 'a', 1001, 1000);
insert into t2 (id, vc1, i1, i2) values (19, 'a', 1019, 1000);
insert into t2 (id, vc1, i1, i2) values (18, 'a', 1018, 1000);
insert into t2 (id, vc1, i1, i2) values (17, 'a', 1017, 1000);
insert into t2 (id, vc1, i1, i2) values (16, 'a', 1016, 1000);
insert into t2 (id, vc1, i1, i2) values (15, 'a', 1015, 1000);
insert into t2 (id, vc1, i1, i2) values (14, 'a', 1014, 1000);
insert into t2 (id, vc1, i1, i2) values (13, 'a', 1013, 1000);
insert into t2 (id, vc1, i1, i2) values (12, 'a', 1012, 1000);
insert into t2 (id, vc1, i1, i2) values (11, 'a', 1011, 1000);
insert into t2 (id, vc1, i1, i2) values (10, 'a', 1010, 1000);
insert into t3 (id, i1_1, i1_2) values (100000, null, 1010);
insert into t3 (id, i1_1, i1_2) values (100001, 1012, null);
commit;
set statistics index t1_pk;
set statistics index t2_pk;
set statistics index t1_uk1;
set statistics index t2_uk1;
commit;
"""
db = db_factory(init = init_sql)
act = python_act('db')
#-----------------------------------------------------------
def replace_leading(source, char="."):
    """Mask the leading whitespace of *source* with repetitions of *char*.

    Keeps plan indentation visible after whitespace-normalizing substitutions.
    """
    body = source.lstrip()
    pad = len(source) - len(body)
    return "".join([char] * pad) + body
#-----------------------------------------------------------
@pytest.mark.version('>=3.0')
def test_1(act: Action, capsys):
    """
    Prepare a query whose subselect filters the union-view V1 with
    `v1.i1 = coalesce(t3.i1_1, t3.i1_2)` and print the detailed plan.
    The COALESCE over outer-stream columns must NOT prevent index usage:
    both union branches must be driven by the unique indices T1_UK1/T2_UK1,
    not by full scans.
    """
    test_sql = """
        select t3.id,
        (select first 1 v1.id
        from v1
        where
        v1.vc1 = 'A'
        and v1.i2 = 1000
        and v1.i1 = coalesce(t3.i1_1, t3.i1_2)
        )
        from t3
    """
    with act.db.connect() as con:
        cur = con.cursor()
        # NOTE(review): prepared statement is not freed explicitly;
        # presumably released on connection close -- confirm.
        ps = cur.prepare(test_sql)
        # Leading spaces -> dots, so plan indentation survives substitutions.
        print( '\n'.join([replace_leading(s) for s in ps.detailed_plan.split('\n')]) )

    # FB 4.x labels the subquery plan 'Select Expression'; 5.x+ prints a
    # dedicated 'Sub-query' node with an extra Filter level.
    expected_plan_4x = """
        Select Expression
        ....-> Singularity Check
        ........-> First N Records
        ............-> Filter
        ................-> Union
        ....................-> Filter
        ........................-> Table "T1" as "V1 T1" Access By ID
        ............................-> Bitmap
        ................................-> Index "T1_UK1" Unique Scan
        ....................-> Filter
        ........................-> Table "T2" as "V1 T2" Access By ID
        ............................-> Bitmap
        ................................-> Index "T2_UK1" Unique Scan
        Select Expression
        ....-> Table "T3" Full Scan
    """
    expected_plan_5x = """
        Sub-query
        ....-> Singularity Check
        ........-> First N Records
        ............-> Filter
        ................-> Filter
        ....................-> Union
        ........................-> Filter
        ............................-> Table "T1" as "V1 T1" Access By ID
        ................................-> Bitmap
        ....................................-> Index "T1_UK1" Unique Scan
        ........................-> Filter
        ............................-> Table "T2" as "V1 T2" Access By ID
        ................................-> Bitmap
        ....................................-> Index "T2_UK1" Unique Scan
        Select Expression
        ....-> Table "T3" Full Scan
    """
    act.expected_stdout = expected_plan_4x if act.is_version('<5') else expected_plan_5x
    act.stdout = capsys.readouterr().out
    assert act.clean_stdout == act.clean_expected_stdout

103
tests/bugs/gh_5009_test.py Normal file
View File

@ -0,0 +1,103 @@
#coding:utf-8
"""
ID: issue-5009
ISSUE: 5009
TITLE: Index and blob garbage collection doesn't take into accout data in undo log [CORE4701]
DESCRIPTION:
JIRA: CORE-4701
NOTES:
[02.11.2024] pzotov
Confirmed bug on 3.0.13.33794.
Checked on 4.0.6.3165, 5.0.2.1551, 6.0.0.415
"""
import pytest
from firebird.qa import *
db = db_factory()
test_script = """
create table g_test (f integer);
create index g_ind on g_test (f);
insert into g_test values (1);
commit;
update g_test set f=2;
savepoint a;
update g_test set f=3;
savepoint b;
update g_test set f=3;
savepoint c;
update g_test set f=4;
savepoint d;
update g_test set f=4;
release savepoint b only;
rollback to savepoint c;
commit;
set list on;
set count on;
set plan on;
select g.f as f_natreads from g_test g;
select g.f as f_idxreads from g_test g where g.f between 1 and 4;
"""
act = isql_act('db', test_script, substitutions = [ ('[ \t]+', ' '), ])
@pytest.mark.version('>=4.0.0')
def test_1(act: Action, capsys):
    """
    Verify that garbage collection takes undo-log data into account.

    Two independent outcomes are checked:
      1. ISQL output: both the natural-scan and the index-driven reads must
         see exactly one record with f = 3 (record versions restored from
         the undo log after 'release savepoint' / 'rollback to savepoint');
      2. online validation must report no errors (index/blob garbage was
         collected correctly).
    Both results are printed and compared against expected text in a single
    final assertion, so a failing run shows which part went wrong.
    """
    act.execute(combine_output = True)
    act.expected_stdout = """
        PLAN (G NATURAL)
        F_NATREADS 3
        Records affected: 1
        PLAN (G INDEX (G_IND))
        F_IDXREADS 3
        Records affected: 1
    """

    # Run online validation and collect any output lines mentioning errors.
    with act.connect_server() as srv:
        srv.database.validate(database = act.db.db_path)
        validate_err = '\n'.join( [line for line in srv if 'ERROR' in line.upper()] )

    expected_isql = 'ISQL output check: PASSED.'
    # BUGFIX: this label is printed on the *success* path below (when
    # validation found no errors), so it must say PASSED; it previously
    # said 'Online validation: FAILED.' which made a green run report a
    # misleading message.
    expected_onlv = 'Online validation: PASSED.'

    if act.clean_stdout == act.clean_expected_stdout:
        print(expected_isql)
    else:
        print(
            f"""
            ISQL output check: FAILED.
            Actual:
            {act.clean_stdout}
            Expected:
            {act.expected_stdout}
            """
        )

    if not validate_err:
        print(expected_onlv)
    else:
        # Typo fix: 'Epsected' -> 'Expected' in the failure report.
        print(
            f"""
            Online validation: FAILED.
            Actual:
            {validate_err}
            Expected:
            <empty string>
            """
        )

    act.reset()
    act.expected_stdout = f"""
        {expected_isql}
        {expected_onlv}
    """
    act.stdout = capsys.readouterr().out
    assert act.clean_stdout == act.clean_expected_stdout

View File

@ -0,0 +1,38 @@
#coding:utf-8
"""
ID: issue-5537
ISSUE: https://github.com/FirebirdSQL/firebird/issues/5537
TITLE: Non US-ASCII field names treated as unicode, although charset non-unicode, lowering max field length [CORE5258]
DESCRIPTION:
NOTES:
[09.11.2024] pzotov
FB-3.x must raise "Name longer than database column size", all others must work fine.
Checked on 3.0.13.33794, 4.0.6.3165, 5.0.2.1553, 6.0.0.520
"""
from pathlib import Path
import pytest
from firebird.qa import *
db = db_factory()
act = python_act('db', substitutions = [(r'After line \d+ .*', '')])
tmp_file = temp_file('tmp_5537.sql')
@pytest.mark.intl
@pytest.mark.version('>=3')
def test_1(act: Action, tmp_file: Path):
    """
    Select a column aliased by a 19-character win1251 (non-ASCII) identifier.
    FB 3.x treats non-ASCII names as unicode and must reject it with
    "Name longer than database column size"; FB 4.x+ must accept it and
    echo the alias back.
    """
    # 19 Cyrillic characters -> 19 bytes in win1251 but would exceed the
    # 3.x identifier limit when (mis)counted as unicode.
    NON_ASCII_TXT = 'Поле в 26 символов!'
    # Write the script in cp1251 bytes so the connection charset matches it.
    tmp_file.write_bytes(f"""set list on; select '' as "{NON_ASCII_TXT}" from rdb$database;""".encode('cp1251'))
    expected_3x = """
        Statement failed, SQLSTATE = 42000
        Dynamic SQL Error
        -SQL error code = -104
        -Name longer than database column size
    """
    expected_4x = f"{NON_ASCII_TXT}"
    act.expected_stdout = expected_3x if act.is_version('<4') else expected_4x
    act.isql(switches = ['-q'], input_file = tmp_file, charset = 'win1251', io_enc = 'cp1251', combine_output = True)
    assert act.clean_stdout == act.clean_expected_stdout

View File

@ -2,8 +2,8 @@
"""
ID: issue-5588
ISSUE: 5588
TITLE: upport full SQL standard binary string literal syntax
ISSUE: https://github.com/FirebirdSQL/firebird/issues/5588
TITLE: Support full SQL standard binary string literal syntax [CORE5311]
DESCRIPTION:
JIRA: CORE-5311
FBTEST: bugs.gh_5588

View File

@ -0,0 +1,58 @@
#coding:utf-8
"""
ID: issue-5589
ISSUE: https://github.com/FirebirdSQL/firebird/issues/5589
TITLE: Support full SQL standard character string literal syntax [CORE5312]
DESCRIPTION:
JIRA: CORE-5312
NOTES:
[15.09.2024] pzotov
Commit (13.05.2021):
https://github.com/FirebirdSQL/firebird/commit/8a7927aac4fef3740e54b7941146b6d044b864b1
Checked on 6.0.0.457, 5.0.2.1499
"""
import pytest
from firebird.qa import *
db = db_factory()
test_script = """
set blob all;
set list on;
select 'ab' 'cd' 'ef' as good_chr_01 from rdb$database;
select 'ab'/*comment*/ 'cd' /**/ 'ef' as good_chr_02 from rdb$database;
select 'ab'/* foo
bar */'cd'
''
/*
*/
'ef' as good_chr_03 from rdb$database;
select 'ab' -- foo
'cd' -- bar
'ef' as good_chr_04 from rdb$database;
select'ab'
'cd'
'ef' as good_chr_05 from rdb$database;
"""
act = isql_act('db', test_script, substitutions=[('[ \t]+', ' ')])
expected_stdout = """
GOOD_CHR_01 abcdef
GOOD_CHR_02 abcdef
GOOD_CHR_03 abcdef
GOOD_CHR_04 abcdef
GOOD_CHR_05 abcdef
"""
@pytest.mark.version('>=5.0')
def test_1(act: Action):
    # Every sequence of adjacent string literals (separated by comments,
    # blank lines or nothing at all) must be concatenated into a single
    # 'abcdef' value, per the SQL-standard literal syntax.
    act.expected_stdout = expected_stdout
    act.execute(combine_output = True)
    assert act.clean_stdout == act.clean_expected_stdout

View File

@ -5,7 +5,15 @@ ID: issue-5978
ISSUE: https://github.com/FirebirdSQL/firebird/issues/5978
TITLE: Access to the name of DB encryption key [CORE5712]
DESCRIPTION:
Test creates temporary user with system privilege GET_DBCRYPT_INFO in order to allow him to obtain encryption info.
Test uses a Firebird built-in encryption plugin which actually does encryption using a trivial algorithm.
Before running this test following prerequisites must be met:
1. Files fbSampleKeyHolder.conf, fbSampleKeyHolder.dll, fbSampleDbCrypt.conf and fbSampleDbCrypt.dll
must be copied from $FB_HOME/examples/prebuilt/plugins/ to $FB_HOME/plugins/
(on Linux name of binaries are: libfbSampleDbCrypt.so and libfbSampleKeyHolder.so)
2. File fbSampleKeyHolder.conf must contain lines: Auto = true and KeyRed = <any number>
3. File $QA_HOME/pytest.ini must contain line with 'encryption' marker declaration.
We create temporary user with system privilege GET_DBCRYPT_INFO in order to allow him to obtain encryption info.
Then we run following:
1) encrypt DB using plugin 'fbSampleDbCrypt' provided in every FB 4.x+ snapshot;
2) make connection as SYSDBA and ask DB-crypt info (DbInfoCode.CRYPT_PLUGIN and DbInfoCode.CRYPT_KEY)
@ -16,10 +24,14 @@ NOTES:
[08.05.2024] pzotov
### ACHTUNG ### TEST REQUIRES FIREBIRD-DRIVER VERSION 1.10.4+ (date: 07-may-2024).
See reply from pcisar, letters with subj: "fb_info_crypt_key: how it can be obtained using firebird-driver ? // GH-5978, 2018"
Thanks to pcisar for explanation of DbInfoCode usage.
See letters with subj "fb_info_crypt_key: how it can be obtained using firebird-driver ? // GH-5978, 2018" (27.04.2024 14:55).
Firebird 3.x can not be checked. Exception:
raise NotSupportedError(f"Info code {info_code} not supported by engine version {self.__engine_version}")
firebird.driver.types.NotSupportedError: Info code 138 not supported by engine version 3.0
Checked on 4.0.5.3092, 5.0.1.1395, 6.0.0.346.
FB 3.x is not checked.
"""
import os
import locale

View File

@ -20,6 +20,13 @@ NOTES:
or
AttributeError: 'iUtil_v2' object has no attribute 'decode_timestamp_tz'
[01.09.2024]
On Linux argument of tzfile is shown with prefix ("/usr/share/zoneinfo/"), so we have to remove it:
<class 'dateutil.tz.tz.tzfile'>:
Windows = tzfile('Indian/Cocos')
Linux = tzfile('/usr/share/zoneinfo/Indian/Cocos')
This is done by extracting '_timezone_' property of this instance.
Checked on 6.0.0.396, 5.0.1.1440, 4.0.5.3127
"""
import pytest
@ -104,7 +111,7 @@ def test_1(act: Action, capsys):
cur = con.cursor()
cur.execute('select current_timestamp from rdb$database')
for r in cur:
print(r[0].tzinfo)
print(r[0].tzinfo._timezone_)
cur.close()
# The value set through the DPB should survive an `alter session reset`
@ -114,11 +121,14 @@ def test_1(act: Action, capsys):
cur = con.cursor()
cur.execute('select current_timestamp from rdb$database')
for r in cur:
print(r[0].tzinfo)
# class 'dateutil.zoneinfo.tzfile'
tzfile_nfo = r[0].tzinfo # <class 'dateutil.tz.tz.tzfile'>: Windows = tzfile('Indian/Cocos'); Linux = tzfile('/usr/share/zoneinfo/Indian/Cocos')
# tzfile_arg = tzfile_nfo._filename # <class 'str'>: Windows = 'Indian/Cocos'; Linux = '/usr/share/zoneinfo/Indian/Cocos'
print(tzfile_nfo._timezone_) # Windows: 'Indian/Cocos'; Linux: 'Indian/Cocos'
act.expected_stdout = f"""
tzfile('{SELECTED_TIMEZONE}')
tzfile('{SELECTED_TIMEZONE}')
{SELECTED_TIMEZONE}
{SELECTED_TIMEZONE}
"""
act.stdout = capsys.readouterr().out
assert act.clean_stdout == act.clean_expected_stdout

143
tests/bugs/gh_6416_test.py Normal file
View File

@ -0,0 +1,143 @@
#coding:utf-8
"""
ID: issue-6416
ISSUE: https://github.com/FirebirdSQL/firebird/issues/6416
TITLE: Engine cant determine datatype in SQL: Unknown SQL Data type (32752) [CORE6168]
DESCRIPTION:
Test creates table with columns belonging to "new datatypes" family: int128, decfloat and time[stamp] with time zone.
Also, one record is added into this table with values which are valid for numeric types in FB 4.x+ (time zone fields
can remain null or arbitrary).
This DB is them copied to another DB (using file-level call of shutil.copy2()).
Another DB filename must match to the specified in the databases.conf (alias defined by 'REQUIRED_ALIAS' variable).
Its alias has special value for DataTypeCompatibility parameter. Connection to this DB and query to a table with 'new datatypes'
must return SQLDA with *old* types which are known for FB versions prior 4.x.
Then we repeat same query to 'initial' test DB and must get SQLDA with actual values for all new columns (known since FB 4.x).
NOTES:
[18.08.2022] pzotov
1. One need to be sure that firebird.conf does NOT contain DatabaseAccess = None.
2. Database file for REQUIRED_ALIAS must NOT exist in the QA_root/files/qa/ subdirectory: it will be created here.
Content of databases.conf must be taken from $QA_ROOT/files/qa-databases.conf (one need to replace
it before every test session).
Discussed with pcisar, letters since 30-may-2022 13:48, subject:
"new qa, core_4964_test.py: strange outcome when use... shutil.copy() // comparing to shutil.copy2()"
3. Value of REQUIRED_ALIAS must be EXACTLY the same as alias specified in the pre-created databases.conf
(for LINUX this equality is case-sensitive, even when aliases are compared!)
Checked on 6.0.0.438, 5.0.2.1479, 4.0.6.3142.
"""
import re
from pathlib import Path
import shutil
import pytest
from firebird.qa import *
# Pre-defined alias for test DB in the QA_root/files/qa-databases.conf.
# This file (qa-databases.conf) must be copied manually to each testing
# FB home folder, with replacing databases.conf there:
#
REQUIRED_ALIAS = 'tmp_gh_6416_alias'
init_sql = f'''
set bail on;
recreate table test(
f_sml smallint default -32768
,f_int int default -2147483648
,f_big bigint default -9223372036854775808
,f_128 int128 default -170141183460469231731687303715884105728
,f_num numeric(38) default -170141183460469231731687303715884105728
,f_dec decfloat default -9.999999999999999999999999999999999E+6144
,f_tz time with time zone default '01:02:03 Indian/Cocos'
,f_tsz timestamp with time zone default '22.09.2023 01:02:03 Indian/Cocos'
);
insert into test default values;
commit;
'''
db = db_factory(init = init_sql)
substitutions = [('^((?!(SQLSTATE|error|Floating-point overflow|sqltype)).)*$', ''), ('[ \t]+', ' ')]
act = python_act('db', substitutions = substitutions)
@pytest.mark.version('>=4.0')
def test_1(act: Action, capsys):
    """
    Check DataTypeCompatibility mapping of 'new datatypes' SQLDA entries.

    The test DB is copied under the pre-configured alias REQUIRED_ALIAS whose
    databases.conf entry sets a special DataTypeCompatibility value; a query
    through that alias must report *legacy* SQL types (INT64/DOUBLE/TIME/...),
    while the same query against the original DB must report the native 4.x+
    types (INT128, DECFLOAT, TIME/TIMESTAMP WITH TIME ZONE).
    """
    # Scan line-by-line through databases.conf, find line starting with REQUIRED_ALIAS and extract name of file that
    # must be created in the $(dir_sampleDb)/qa/ folder. This name will be used further as target database (tmp_db_for_3x_client).
    # NOTE: we have to SKIP lines which are commented out, i.e. if they start with '#':
    p_required_alias_ptn = re.compile( '^(?!#)((^|\\s+)' + REQUIRED_ALIAS + ')\\s*=\\s*\\$\\(dir_sampleDb\\)/qa/', re.IGNORECASE )
    fname_in_dbconf = None
    with open(act.home_dir/'databases.conf', 'r') as f:
        for line in f:
            if p_required_alias_ptn.search(line):
                # If databases.conf contains line like this:
                # tmp_6416_alias = $(dir_sampleDb)/qa/tmp_gh_6416.fdb
                # - then we extract filename: 'tmp_gh_6416.fdb' (see below):
                fname_in_dbconf = Path(line.split('=')[1].strip()).name
                break

    # If 'fname_in_dbconf' remains undefined here then probably REQUIRED_ALIAS
    # does not equal the alias specified in databases.conf!
    #
    assert fname_in_dbconf

    # Full path + filename of database to which we will try to connect:
    #
    tmp_db_for_3x_client = Path( act.vars['sample_dir'], 'qa', fname_in_dbconf )
    # copy2 preserves file metadata (mtime etc.), unlike plain copy().
    shutil.copy2(act.db.db_path, tmp_db_for_3x_client)

    test_sql = f'''
        set bail on;
        set list on;
        connect '{REQUIRED_ALIAS}' user {act.db.user};
        -- select mon$database_name from mon$database;
        set sqlda_display on;
        select *
        from test;
    '''
    # Legacy type codes are expected; DECFLOAT mapped to DOUBLE cannot hold
    # the stored extreme value, hence the floating-point overflow error.
    act.expected_stdout = f"""
        01: sqltype: 500 SHORT Nullable scale: 0 subtype: 0 len: 2
        02: sqltype: 496 LONG Nullable scale: 0 subtype: 0 len: 4
        03: sqltype: 580 INT64 Nullable scale: 0 subtype: 0 len: 8
        04: sqltype: 580 INT64 Nullable scale: 0 subtype: 0 len: 8
        05: sqltype: 580 INT64 Nullable scale: 0 subtype: 1 len: 8
        06: sqltype: 480 DOUBLE Nullable scale: 0 subtype: 0 len: 8
        07: sqltype: 560 TIME Nullable scale: 0 subtype: 0 len: 4
        08: sqltype: 510 TIMESTAMP Nullable scale: 0 subtype: 0 len: 8
        Statement failed, SQLSTATE = 22003
        -SQL error code = -303
        -Floating-point overflow. The exponent of a floating-point operation is greater than the magnitude allowed.
    """
    act.isql(switches = ['-q'], input = test_sql, combine_output = True, credentials = False, connect_db = False)
    assert act.clean_stdout == act.clean_expected_stdout
    act.reset()
    tmp_db_for_3x_client.unlink()

    #-------------------------------------------------------------

    # Same query against the original DB: native 4.x+ type codes must appear.
    test_sql = f'''
        set bail on;
        set list on;
        set sqlda_display on;
        select *
        from test;
    '''
    act.expected_stdout = """
        01: sqltype: 500 SHORT Nullable scale: 0 subtype: 0 len: 2
        02: sqltype: 496 LONG Nullable scale: 0 subtype: 0 len: 4
        03: sqltype: 580 INT64 Nullable scale: 0 subtype: 0 len: 8
        04: sqltype: 32752 INT128 Nullable scale: 0 subtype: 0 len: 16
        05: sqltype: 32752 INT128 Nullable scale: 0 subtype: 1 len: 16
        06: sqltype: 32762 DECFLOAT(34) Nullable scale: 0 subtype: 0 len: 16
        07: sqltype: 32756 TIME WITH TIME ZONE Nullable scale: 0 subtype: 0 len: 8
        08: sqltype: 32754 TIMESTAMP WITH TIME ZONE Nullable scale: 0 subtype: 0 len: 12
    """
    act.isql(switches = ['-q'], input = test_sql, combine_output = True)
    assert act.clean_stdout == act.clean_expected_stdout
    act.reset()

689
tests/bugs/gh_6545_test.py Normal file
View File

@ -0,0 +1,689 @@
#coding:utf-8
"""
ID: issue-6545
ISSUE: https://github.com/FirebirdSQL/firebird/issues/6545
TITLE: Error writing to TIMESTAMP/TIME WITH TIME ZONE array
DESCRIPTION:
Test generates values which will be inserted into ARRAY columns defined as 'time with time zone' and 'timestamp with time zone'.
We process time zones defined in the FB_HOME/include/firebird/TimeZones.h (except several, see notes below),
and use every time zone value as argument to 'get_timezone()' in datetime.datetime / datetime.time calls.
Then we run DML which tries to insert tuple of generated data into appropriate ARRAY columns. This must not raise error.
Finally, we run query to get just inserted data and compare its result with input argument that was used in previous step.
NOTES:
[15.08.2024] pzotov
1. ### ACHTUNG ### TEST REQUIRES FIREBIRD-DRIVER VERSION 1.10.6+ (date: 15-aug-2024).
See also addition in firebird-driver doc:
https://firebird-driver.readthedocs.io/en/latest/usage-guide.html#working-with-time-timestamp-with-timezone
2. Following timezones present in $FB_HOME/include/firebird/TimeZones.h
and in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
but are absent in pytz.all_timezones list:
America/Ciudad_Juarez
Europe/Kyiv
Pacific/Kanton
We have to SKIP them from handling.
Checked on 4.0.0.436, 5.0.2.1478, 4.0.6.3142
"""
import datetime
from firebird.driver import get_timezone
import random
import pytest
from firebird.qa import *
# DDL fixture: a table with two 3-element ARRAY columns of WITH TIME ZONE types,
# the subject of the gh-6545 write-error check.
init_script = """
create table test (
arr_tmtz time with time zone [0:2]
,arr_tstz timestamp with time zone [0:2]
);
"""
db = db_factory(init = init_script)
act = python_act('db')
@pytest.mark.version('>=4.0')
def test_1(act: Action, capsys):
fb_time_zones = [
'Africa/Abidjan'
,'Africa/Accra'
,'Africa/Addis_Ababa'
,'Africa/Algiers'
,'Africa/Asmara'
,'Africa/Asmera'
,'Africa/Bamako'
,'Africa/Bangui'
,'Africa/Banjul'
,'Africa/Bissau'
,'Africa/Blantyre'
,'Africa/Brazzaville'
,'Africa/Bujumbura'
,'Africa/Cairo'
,'Africa/Casablanca'
,'Africa/Ceuta'
,'Africa/Conakry'
,'Africa/Dakar'
,'Africa/Dar_es_Salaam'
,'Africa/Djibouti'
,'Africa/Douala'
,'Africa/El_Aaiun'
,'Africa/Freetown'
,'Africa/Gaborone'
,'Africa/Harare'
,'Africa/Johannesburg'
,'Africa/Juba'
,'Africa/Kampala'
,'Africa/Khartoum'
,'Africa/Kigali'
,'Africa/Kinshasa'
,'Africa/Lagos'
,'Africa/Libreville'
,'Africa/Lome'
,'Africa/Luanda'
,'Africa/Lubumbashi'
,'Africa/Lusaka'
,'Africa/Malabo'
,'Africa/Maputo'
,'Africa/Maseru'
,'Africa/Mbabane'
,'Africa/Mogadishu'
,'Africa/Monrovia'
,'Africa/Nairobi'
,'Africa/Ndjamena'
,'Africa/Niamey'
,'Africa/Nouakchott'
,'Africa/Ouagadougou'
,'Africa/Porto-Novo'
,'Africa/Sao_Tome'
,'Africa/Timbuktu'
,'Africa/Tripoli'
,'Africa/Tunis'
,'Africa/Windhoek'
,'America/Adak'
,'America/Anchorage'
,'America/Anguilla'
,'America/Antigua'
,'America/Araguaina'
,'America/Argentina/Buenos_Aires'
,'America/Argentina/Catamarca'
,'America/Argentina/ComodRivadavia'
,'America/Argentina/Cordoba'
,'America/Argentina/Jujuy'
,'America/Argentina/La_Rioja'
,'America/Argentina/Mendoza'
,'America/Argentina/Rio_Gallegos'
,'America/Argentina/Salta'
,'America/Argentina/San_Juan'
,'America/Argentina/San_Luis'
,'America/Argentina/Tucuman'
,'America/Argentina/Ushuaia'
,'America/Aruba'
,'America/Asuncion'
,'America/Atikokan'
,'America/Atka'
,'America/Bahia'
,'America/Bahia_Banderas'
,'America/Barbados'
,'America/Belem'
,'America/Belize'
,'America/Blanc-Sablon'
,'America/Boa_Vista'
,'America/Bogota'
,'America/Boise'
,'America/Buenos_Aires'
,'America/Cambridge_Bay'
,'America/Campo_Grande'
,'America/Cancun'
,'America/Caracas'
,'America/Catamarca'
,'America/Cayenne'
,'America/Cayman'
,'America/Chicago'
,'America/Chihuahua'
# -- !! -- ,'America/Ciudad_Juarez'
,'America/Coral_Harbour'
,'America/Cordoba'
,'America/Costa_Rica'
,'America/Creston'
,'America/Cuiaba'
,'America/Curacao'
,'America/Danmarkshavn'
,'America/Dawson'
,'America/Dawson_Creek'
,'America/Denver'
,'America/Detroit'
,'America/Dominica'
,'America/Edmonton'
,'America/Eirunepe'
,'America/El_Salvador'
,'America/Ensenada'
,'America/Fort_Nelson'
,'America/Fort_Wayne'
,'America/Fortaleza'
,'America/Glace_Bay'
,'America/Godthab'
,'America/Goose_Bay'
,'America/Grand_Turk'
,'America/Grenada'
,'America/Guadeloupe'
,'America/Guatemala'
,'America/Guayaquil'
,'America/Guyana'
,'America/Halifax'
,'America/Havana'
,'America/Hermosillo'
,'America/Indiana/Indianapolis'
,'America/Indiana/Knox'
,'America/Indiana/Marengo'
,'America/Indiana/Petersburg'
,'America/Indiana/Tell_City'
,'America/Indiana/Vevay'
,'America/Indiana/Vincennes'
,'America/Indiana/Winamac'
,'America/Indianapolis'
,'America/Inuvik'
,'America/Iqaluit'
,'America/Jamaica'
,'America/Jujuy'
,'America/Juneau'
,'America/Kentucky/Louisville'
,'America/Kentucky/Monticello'
,'America/Knox_IN'
,'America/Kralendijk'
,'America/La_Paz'
,'America/Lima'
,'America/Los_Angeles'
,'America/Louisville'
,'America/Lower_Princes'
,'America/Maceio'
,'America/Managua'
,'America/Manaus'
,'America/Marigot'
,'America/Martinique'
,'America/Matamoros'
,'America/Mazatlan'
,'America/Mendoza'
,'America/Menominee'
,'America/Merida'
,'America/Metlakatla'
,'America/Mexico_City'
,'America/Miquelon'
,'America/Moncton'
,'America/Monterrey'
,'America/Montevideo'
,'America/Montreal'
,'America/Montserrat'
,'America/Nassau'
,'America/New_York'
,'America/Nipigon'
,'America/Nome'
,'America/Noronha'
,'America/North_Dakota/Beulah'
,'America/North_Dakota/Center'
,'America/North_Dakota/New_Salem'
,'America/Nuuk'
,'America/Ojinaga'
,'America/Panama'
,'America/Pangnirtung'
,'America/Paramaribo'
,'America/Phoenix'
,'America/Port-au-Prince'
,'America/Port_of_Spain'
,'America/Porto_Acre'
,'America/Porto_Velho'
,'America/Puerto_Rico'
,'America/Punta_Arenas'
,'America/Rainy_River'
,'America/Rankin_Inlet'
,'America/Recife'
,'America/Regina'
,'America/Resolute'
,'America/Rio_Branco'
,'America/Rosario'
,'America/Santa_Isabel'
,'America/Santarem'
,'America/Santiago'
,'America/Santo_Domingo'
,'America/Sao_Paulo'
,'America/Scoresbysund'
,'America/Shiprock'
,'America/Sitka'
,'America/St_Barthelemy'
,'America/St_Johns'
,'America/St_Kitts'
,'America/St_Lucia'
,'America/St_Thomas'
,'America/St_Vincent'
,'America/Swift_Current'
,'America/Tegucigalpa'
,'America/Thule'
,'America/Thunder_Bay'
,'America/Tijuana'
,'America/Toronto'
,'America/Tortola'
,'America/Vancouver'
,'America/Virgin'
,'America/Whitehorse'
,'America/Winnipeg'
,'America/Yakutat'
,'America/Yellowknife'
,'Antarctica/Casey'
,'Antarctica/Davis'
,'Antarctica/DumontDUrville'
,'Antarctica/Macquarie'
,'Antarctica/Mawson'
,'Antarctica/McMurdo'
,'Antarctica/Palmer'
,'Antarctica/Rothera'
,'Antarctica/South_Pole'
,'Antarctica/Syowa'
,'Antarctica/Troll'
,'Antarctica/Vostok'
,'Arctic/Longyearbyen'
,'Asia/Aden'
,'Asia/Almaty'
,'Asia/Amman'
,'Asia/Anadyr'
,'Asia/Aqtau'
,'Asia/Aqtobe'
,'Asia/Ashgabat'
,'Asia/Ashkhabad'
,'Asia/Atyrau'
,'Asia/Baghdad'
,'Asia/Bahrain'
,'Asia/Baku'
,'Asia/Bangkok'
,'Asia/Barnaul'
,'Asia/Beirut'
,'Asia/Bishkek'
,'Asia/Brunei'
,'Asia/Calcutta'
,'Asia/Chita'
,'Asia/Choibalsan'
,'Asia/Chongqing'
,'Asia/Chungking'
,'Asia/Colombo'
,'Asia/Dacca'
,'Asia/Damascus'
,'Asia/Dhaka'
,'Asia/Dili'
,'Asia/Dubai'
,'Asia/Dushanbe'
,'Asia/Famagusta'
,'Asia/Gaza'
,'Asia/Harbin'
,'Asia/Hebron'
,'Asia/Ho_Chi_Minh'
,'Asia/Hong_Kong'
,'Asia/Hovd'
,'Asia/Irkutsk'
,'Asia/Istanbul'
,'Asia/Jakarta'
,'Asia/Jayapura'
,'Asia/Jerusalem'
,'Asia/Kabul'
,'Asia/Kamchatka'
,'Asia/Karachi'
,'Asia/Kashgar'
,'Asia/Kathmandu'
,'Asia/Katmandu'
,'Asia/Khandyga'
,'Asia/Kolkata'
,'Asia/Krasnoyarsk'
,'Asia/Kuala_Lumpur'
,'Asia/Kuching'
,'Asia/Kuwait'
,'Asia/Macao'
,'Asia/Macau'
,'Asia/Magadan'
,'Asia/Makassar'
,'Asia/Manila'
,'Asia/Muscat'
,'Asia/Nicosia'
,'Asia/Novokuznetsk'
,'Asia/Novosibirsk'
,'Asia/Omsk'
,'Asia/Oral'
,'Asia/Phnom_Penh'
,'Asia/Pontianak'
,'Asia/Pyongyang'
,'Asia/Qatar'
,'Asia/Qostanay'
,'Asia/Qyzylorda'
,'Asia/Rangoon'
,'Asia/Riyadh'
,'Asia/Saigon'
,'Asia/Sakhalin'
,'Asia/Samarkand'
,'Asia/Seoul'
,'Asia/Shanghai'
,'Asia/Singapore'
,'Asia/Srednekolymsk'
,'Asia/Taipei'
,'Asia/Tashkent'
,'Asia/Tbilisi'
,'Asia/Tehran'
,'Asia/Tel_Aviv'
,'Asia/Thimbu'
,'Asia/Thimphu'
,'Asia/Tokyo'
,'Asia/Tomsk'
,'Asia/Ujung_Pandang'
,'Asia/Ulaanbaatar'
,'Asia/Ulan_Bator'
,'Asia/Urumqi'
,'Asia/Ust-Nera'
,'Asia/Vientiane'
,'Asia/Vladivostok'
,'Asia/Yakutsk'
,'Asia/Yangon'
,'Asia/Yekaterinburg'
,'Asia/Yerevan'
,'Atlantic/Azores'
,'Atlantic/Bermuda'
,'Atlantic/Canary'
,'Atlantic/Cape_Verde'
,'Atlantic/Faeroe'
,'Atlantic/Faroe'
,'Atlantic/Jan_Mayen'
,'Atlantic/Madeira'
,'Atlantic/Reykjavik'
,'Atlantic/South_Georgia'
,'Atlantic/St_Helena'
,'Atlantic/Stanley'
,'Australia/ACT'
,'Australia/Adelaide'
,'Australia/Brisbane'
,'Australia/Broken_Hill'
,'Australia/Canberra'
,'Australia/Currie'
,'Australia/Darwin'
,'Australia/Eucla'
,'Australia/Hobart'
,'Australia/LHI'
,'Australia/Lindeman'
,'Australia/Lord_Howe'
,'Australia/Melbourne'
,'Australia/NSW'
,'Australia/North'
,'Australia/Perth'
,'Australia/Queensland'
,'Australia/South'
,'Australia/Sydney'
,'Australia/Tasmania'
,'Australia/Victoria'
,'Australia/West'
,'Australia/Yancowinna'
,'Brazil/Acre'
,'Brazil/DeNoronha'
,'Brazil/East'
,'Brazil/West'
,'CET'
,'CST6CDT'
,'Canada/Atlantic'
,'Canada/Central'
,'Canada/Eastern'
,'Canada/Mountain'
,'Canada/Newfoundland'
,'Canada/Pacific'
,'Canada/Saskatchewan'
,'Canada/Yukon'
,'Chile/Continental'
,'Chile/EasterIsland'
,'Cuba'
,'EET'
,'EST'
,'EST5EDT'
,'Egypt'
,'Eire'
,'Etc/GMT'
,'Etc/GMT+0'
,'Etc/GMT+1'
,'Etc/GMT+10'
,'Etc/GMT+11'
,'Etc/GMT+12'
,'Etc/GMT+2'
,'Etc/GMT+3'
,'Etc/GMT+4'
,'Etc/GMT+5'
,'Etc/GMT+6'
,'Etc/GMT+7'
,'Etc/GMT+8'
,'Etc/GMT+9'
,'Etc/GMT-0'
,'Etc/GMT-1'
,'Etc/GMT-10'
,'Etc/GMT-11'
,'Etc/GMT-12'
,'Etc/GMT-13'
,'Etc/GMT-14'
,'Etc/GMT-2'
,'Etc/GMT-3'
,'Etc/GMT-4'
,'Etc/GMT-5'
,'Etc/GMT-6'
,'Etc/GMT-7'
,'Etc/GMT-8'
,'Etc/GMT-9'
,'Etc/GMT0'
,'Etc/Greenwich'
,'Etc/UCT'
,'Etc/UTC'
,'Etc/Universal'
,'Etc/Zulu'
,'Europe/Amsterdam'
,'Europe/Andorra'
,'Europe/Astrakhan'
,'Europe/Athens'
,'Europe/Belfast'
,'Europe/Belgrade'
,'Europe/Berlin'
,'Europe/Bratislava'
,'Europe/Brussels'
,'Europe/Bucharest'
,'Europe/Budapest'
,'Europe/Busingen'
,'Europe/Chisinau'
,'Europe/Copenhagen'
,'Europe/Dublin'
,'Europe/Gibraltar'
,'Europe/Guernsey'
,'Europe/Helsinki'
,'Europe/Isle_of_Man'
,'Europe/Istanbul'
,'Europe/Jersey'
,'Europe/Kaliningrad'
,'Europe/Kiev'
,'Europe/Kirov'
# -- !! -- ,'Europe/Kyiv'
,'Europe/Lisbon'
,'Europe/Ljubljana'
,'Europe/London'
,'Europe/Luxembourg'
,'Europe/Madrid'
,'Europe/Malta'
,'Europe/Mariehamn'
,'Europe/Minsk'
,'Europe/Monaco'
,'Europe/Moscow'
,'Europe/Nicosia'
,'Europe/Oslo'
,'Europe/Paris'
,'Europe/Podgorica'
,'Europe/Prague'
,'Europe/Riga'
,'Europe/Rome'
,'Europe/Samara'
,'Europe/San_Marino'
,'Europe/Sarajevo'
,'Europe/Saratov'
,'Europe/Simferopol'
,'Europe/Skopje'
,'Europe/Sofia'
,'Europe/Stockholm'
,'Europe/Tallinn'
,'Europe/Tirane'
,'Europe/Tiraspol'
,'Europe/Ulyanovsk'
,'Europe/Uzhgorod'
,'Europe/Vaduz'
,'Europe/Vatican'
,'Europe/Vienna'
,'Europe/Vilnius'
,'Europe/Volgograd'
,'Europe/Warsaw'
,'Europe/Zagreb'
,'Europe/Zaporozhye'
,'Europe/Zurich'
,'Factory'
,'GB'
,'GB-Eire'
,'GMT+0'
,'GMT-0'
,'GMT0'
,'Greenwich'
,'HST'
,'Hongkong'
,'Iceland'
,'Indian/Antananarivo'
,'Indian/Chagos'
,'Indian/Christmas'
,'Indian/Cocos'
,'Indian/Comoro'
,'Indian/Kerguelen'
,'Indian/Mahe'
,'Indian/Maldives'
,'Indian/Mauritius'
,'Indian/Mayotte'
,'Indian/Reunion'
,'Iran'
,'Israel'
,'Jamaica'
,'Japan'
,'Kwajalein'
,'Libya'
,'MET'
,'MST'
,'MST7MDT'
,'Mexico/BajaNorte'
,'Mexico/BajaSur'
,'Mexico/General'
,'NZ'
,'NZ-CHAT'
,'Navajo'
,'PRC'
,'PST8PDT'
,'Pacific/Apia'
,'Pacific/Auckland'
,'Pacific/Bougainville'
,'Pacific/Chatham'
,'Pacific/Chuuk'
,'Pacific/Easter'
,'Pacific/Efate'
,'Pacific/Enderbury'
,'Pacific/Fakaofo'
,'Pacific/Fiji'
,'Pacific/Funafuti'
,'Pacific/Galapagos'
,'Pacific/Gambier'
,'Pacific/Guadalcanal'
,'Pacific/Guam'
,'Pacific/Honolulu'
,'Pacific/Johnston'
# -- !! -- ,'Pacific/Kanton'
,'Pacific/Kiritimati'
,'Pacific/Kosrae'
,'Pacific/Kwajalein'
,'Pacific/Majuro'
,'Pacific/Marquesas'
,'Pacific/Midway'
,'Pacific/Nauru'
,'Pacific/Niue'
,'Pacific/Norfolk'
,'Pacific/Noumea'
,'Pacific/Pago_Pago'
,'Pacific/Palau'
,'Pacific/Pitcairn'
,'Pacific/Pohnpei'
,'Pacific/Ponape'
,'Pacific/Port_Moresby'
,'Pacific/Rarotonga'
,'Pacific/Saipan'
,'Pacific/Samoa'
,'Pacific/Tahiti'
,'Pacific/Tarawa'
,'Pacific/Tongatapu'
,'Pacific/Truk'
,'Pacific/Wake'
,'Pacific/Wallis'
,'Pacific/Yap'
,'Poland'
,'Portugal'
,'ROC'
,'ROK'
,'Singapore'
,'Turkey'
,'UCT'
,'US/Alaska'
,'US/Aleutian'
,'US/Arizona'
,'US/Central'
,'US/East-Indiana'
,'US/Eastern'
,'US/Hawaii'
,'US/Indiana-Starke'
,'US/Michigan'
,'US/Mountain'
,'US/Pacific'
,'US/Samoa'
,'UTC'
,'Universal'
,'W-SU'
,'WET'
,'Zulu'
]
problematic_timezones_map = {}
with act.db.connect() as con:
cur = con.cursor()
# random.choice(fb_time_zones)
for tz_name in fb_time_zones:
try:
tz_info = get_timezone(tz_name)
# print(tz_name)
tm_region = (
datetime.time(11, 22, 33, 561400, get_timezone(tz_name))
,datetime.time(12, 23, 34, 672400, get_timezone(tz_name))
,datetime.time(13, 24, 35, 783400, get_timezone(tz_name))
)
ts_region = (
datetime.datetime(2020, 10, 20, 11, 22, 33, 561400, get_timezone(tz_name))
,datetime.datetime(2021, 11, 21, 12, 23, 34, 672400, get_timezone(tz_name))
,datetime.datetime(2022, 12, 22, 13, 24, 35, 783400, get_timezone(tz_name))
)
#------------------------------------------------
cur.execute("insert into test(arr_tmtz, arr_tstz) values (?, ?) returning arr_tmtz,arr_tstz", ( tm_region, ts_region ) )
inserted_tmtz_array, inserted_tstz_array = cur.fetchone()[:2]
if set(inserted_tmtz_array) == set(tm_region) and set(inserted_tstz_array) == set(ts_region):
pass
else:
print('MISMATCH detected between input data and stored result:')
for i,x in enumerate(inserted_tmtz_array):
print(i, f'Input element (TIME WITH TIME ZONE): {tm_region[i]}', f'; stored data: {x}')
for i,x in enumerate(inserted_tstz_array):
print(i, f'Input element (TIMESTAMP WITH TIME ZONE): {ts_region[i]}', f'; stored data: {x}')
cur.execute("delete from test")
except Exception as e:
problematic_timezones_map[tz_name] = e.__str__()
if problematic_timezones_map:
print('Problems detected with time zone(s):')
for k,v in problematic_timezones_map.items():
print(k,v)
act.stdout = capsys.readouterr().out
assert act.clean_stdout == ''
act.reset()

108
tests/bugs/gh_6706_test.py Normal file
View File

@ -0,0 +1,108 @@
#coding:utf-8
"""
ID: issue-6706
ISSUE: https://github.com/FirebirdSQL/firebird/issues/6706
TITLE: Memory leak when running EXECUTE STATEMENT with named parameters [CORE6475]
DESCRIPTION:
We create stored procedure with PARAMS_COUNT input parameters.
Then EXECUTE BLOCK is generated with call of this SP via EXECUTE STATEMENT which applies EXCESS modifier to all arguments.
Value of memory_info().rss is obtained (for appropriate server process), then run execute block MEASURES_COUNT times
and after this - again get memory_info().rss value.
Ratio between current and initial values of memory_info().rss must be less than MAX_RATIO.
NOTES:
[17.08.2024] pzotov
1. Problem did exist in FB 4.x up to snapshot 4.0.0.2336.
Commit: https://github.com/FirebirdSQL/firebird/commit/4dfb30a45b767994c074bbfcbb8494b8ada19b33 (23-jan-2021, 15:26)
Before this commit ratio for SS was about 5..6 for SS and about 8..9 for CS.
Since 4.0.0.2341 memory consumption was reduced to ~1.6 ... 1.9
2. Database must be created with FW = ON otherwise ratio for all snapshots is about 1.5 (and this seems weird).
3. Test duration is about 35s.
Checked on 6.0.0.438, 5.0.2.1478, 4.0.6.3142; 4.0.0.2336, 4.0.0.2341.
"""
import psutil
import pytest
from firebird.qa import *
import time
###########################
### S E T T I N G S ###
###########################
# How many input parameters must have procedure:
PARAMS_COUNT = 1000
# How many times we call procedures:
MEASURES_COUNT = 1000
# Maximal value for ratio between
# new and initial memory_info().rss values:
#
MAX_RATIO = 3
#############
# async_write = False means database is created with Forced Writes = ON
# (per the file notes above, the memory-growth ratio is only meaningful with FW = ON).
db = db_factory(async_write = False)
act = python_act('db')
#--------------------------------------------------------------------
def get_server_pid(con):
    """Return the OS process id of the Firebird server process serving connection `con`."""
    pid_query = 'select mon$server_pid as p from mon$attachments where mon$attachment_id = current_connection'
    with con.cursor() as pid_cursor:
        pid_cursor.execute(pid_query)
        row = pid_cursor.fetchone()
    return int(row[0])
#--------------------------------------------------------------------
@pytest.mark.version('>=4.0.0')
def test_1(act: Action, capsys):
with act.db.connect() as con:
sp_ddl = """
create or alter procedure sp_test(
"""
params_lst = '\n'.join( [ (',' if i else '') +f'p_{i} int' for i in range(PARAMS_COUNT) ] )
sp_ddl = '\n'.join( ("create or alter procedure sp_test(", params_lst, ") returns(x int) as begin x = 1; suspend; end") )
con.execute_immediate(sp_ddl)
con.commit()
server_process = psutil.Process(get_server_pid(con))
params_lst = ','.join( [ f':p_{i}' for i in range(PARAMS_COUNT) ] )
passed_args = ','.join( [ f'excess p_{i} := 1' for i in range(PARAMS_COUNT) ] )
srv_memo_rss_init = int(server_process.memory_info().rss / 1024)
srv_memo_vms_init = int(server_process.memory_info().vms / 1024)
cur = con.cursor()
for k in range(MEASURES_COUNT):
es_sql = f"""
execute block returns(x int) as
begin
execute statement ('select p.x * {k} from sp_test({params_lst}) p') ({passed_args})
into x;
suspend;
end
"""
cur.execute(es_sql)
for r in cur:
pass
srv_memo_rss_curr = int(server_process.memory_info().rss / 1024)
srv_memo_vms_curr = int(server_process.memory_info().vms / 1024)
memo_ratio = srv_memo_rss_curr / srv_memo_rss_init
SUCCESS_MSG = 'Ratio between memory values measured before and after loop: acceptable'
if memo_ratio < MAX_RATIO:
print(SUCCESS_MSG)
else:
print( 'Ratio: /* perf_issue_tag */ POOR: %s, more than threshold: %s' % ( '{:.2f}'.format(memo_ratio), '{:.2f}'.format(MAX_RATIO) ) )
act.expected_stdout = SUCCESS_MSG
act.stdout = capsys.readouterr().out
assert act.clean_stdout == act.clean_expected_stdout

View File

@ -99,6 +99,7 @@ expected_stderr = """
-Integer overflow. The result of an integer operation caused the most significant bit of the result to carry.
"""
@pytest.mark.intl
@pytest.mark.version('>=5.0')
def test_1(act: Action):
act.expected_stdout = expected_stdout

View File

@ -25,6 +25,7 @@ db = db_factory()
act = python_act('db')
@pytest.mark.intl
@pytest.mark.version('>=5.0')
def test_1(act: Action, capsys):
with act.db.connect(charset='iso8859_1') as con:

View File

@ -13,15 +13,20 @@ DESCRIPTION:
We do these measures <N_MEASURES> times for each SP, and each result is added to the list
which, in turn, is the source for median evaluation.
Finally, we get ratio between minimal and maximal medians (see 'median_ratio')
On Windows 8.1 usually this ratio is about 7 (before fix it was more than 100).
This ratio is about:
* Windows: 0.6 ... 0.7
* Linux: 0.5 ... 0.6
Before fix it was more than 10.
Test is considered as passed if median_ratio less than threshold <MAX_RATIO>.
NOTES:
Number of iterations for loops differ: we have to perform 'sp_empty_loop' at least 1E6 times
in order to get a measurable difference between CPU user time counters and use it as denominator.
Procedure 'sp_ctime_loop' must be called for 10x times LESS than 'sp_empty_loop'.
Confirmed problem on:
4.0.1.2699 (01-jan-2022): median ratio was 109 ... 110 (3.40 vs 0.03)
5.0.0.362 (01-jan-2022): median ratio was 111 ... 113 (3.51 vs 0.03)
5.0.0.362, 4.0.1.2699 (bith snapshots have date 01-jan-2022)
Checked on 6.0.0.195, 5.0.0.1305, 4.0.5.3049.
Scope of median ratio values: 4.33 ... 7.00
"""
import psutil
@ -43,12 +48,13 @@ def median(lst):
N_MEASURES = 15
# How many iterations must be done:
N_COUNT_PER_MEASURE = 100000
N_COUNT_TIME_LOOP = 100000
N_COUNT_EMPTY_LOOP = 1000000
# Maximal value for ratio between maximal and minimal medians
#
MAX_RATIO = 15
##############
MAX_RATIO = 1.5
###############
init_script = \
f'''
@ -60,12 +66,12 @@ f'''
begin
while (n < a_limit) do
begin
n = n + 1;
d = current_time;
n = n + 1;
end
end
^
create procedure sp_dummy_loop(a_limit int)
create procedure sp_empty_loop(a_limit int)
as
declare n int = 1;
begin
@ -79,7 +85,7 @@ f'''
^
'''
db = db_factory(init = init_script)
db = db_factory(init = init_script, charset = 'win1251')
act = python_act('db')
expected_stdout = """
@ -96,24 +102,25 @@ def test_1(act: Action, capsys):
sp_time = {}
for i in range(0, N_MEASURES):
for sp_name in ('sp_ctime_loop', 'sp_dummy_loop'):
for sp_name in ('sp_ctime_loop', 'sp_empty_loop'):
n_count = N_COUNT_TIME_LOOP if sp_name == 'sp_ctime_loop' else N_COUNT_EMPTY_LOOP
fb_info_init = psutil.Process(fb_pid).cpu_times()
cur.callproc( sp_name, (N_COUNT_PER_MEASURE,) )
cur.callproc( sp_name, (n_count,) )
fb_info_curr = psutil.Process(fb_pid).cpu_times()
sp_time[ sp_name, i ] = max(fb_info_curr.user - fb_info_init.user, 0.000001)
sp_ctime_median = median([v for k,v in sp_time.items() if k[0] == 'sp_ctime_loop'])
sp_dummy_median = median([v for k,v in sp_time.items() if k[0] == 'sp_dummy_loop'])
sp_dummy_median = median([v for k,v in sp_time.items() if k[0] == 'sp_empty_loop'])
#----------------------------------
median_ratio = sp_ctime_median / sp_dummy_median
print( 'Medians ratio: ' + ('acceptable' if median_ratio <= MAX_RATIO else '/* perf_issue_tag */ POOR: %s, more than threshold: %s' % ( '{:9g}'.format(median_ratio), '{:9g}'.format(MAX_RATIO) ) ) )
if median_ratio > MAX_RATIO:
print('CPU times for each of {N_MEASURES} measures:')
print(f'CPU times for each of {N_MEASURES} measures:')
for k,v in sp_time.items():
print(k,':::',v)
print(f'Median cpu time for {N_MEASURES} measures using loops for {N_COUNT_PER_MEASURE} iterations in each SP call:')
print(f'Median cpu time for {N_MEASURES} measures:')
print('sp_ctime_median:',sp_ctime_median)
print('sp_dummy_median:',sp_dummy_median)
print('median_ratio:',median_ratio)

View File

@ -6,6 +6,11 @@ ISSUE: https://github.com/FirebirdSQL/firebird/issues/7118
TITLE: Chained JOIN .. USING across the same column names may be optimized badly
NOTES:
[01.03.2023] pzotov
Commit related to this test:
https://github.com/FirebirdSQL/firebird/commit/1b192404d43a15d403b5ff92760bc5df9d3c89c3
(13.09.2022 19:17, "More complete solution for #3357 and #7118")
One more test that attempts to verify this commit: bugs/gh_7398_test.py
Checked on 3.0.11.33665, 4.0.3.2904, 5.0.0.964
"""

View File

@ -0,0 +1,87 @@
#coding:utf-8
"""
ID: issue-7128
ISSUE: https://github.com/FirebirdSQL/firebird/issues/7128
TITLE: Incorrect error message with isc_sql_interprete()
DESCRIPTION:
NOTES:
[28.03.2024] pzotov
Bug caused crash of FB up to 5.0.0.890 (10-jan-2023).
Since 5.0.0.905 (11-jan-2023) following error raises:
Invalid resultset interface
-901
335545049
[03.09.2024] pzotov
1. Warning is issued:
$PYTHON_HOME/Lib/site-packages/firebird/driver/interfaces.py:710: FirebirdWarning: Invalid resultset interface
self._check()
It was decided to suppress warning by using 'warnings' package.
2. Result for snapshots with date = 09-feb-2022:
3.0.9.33560:
Exception ignored in: <function Cursor.__del__ at 0x000001DD87EA49A0>
Traceback (most recent call last):
File "$PYTHON_HOME/Lib/site-packages/firebird/driver/core.py", line 3047, in __del__
File "$PYTHON_HOME/Lib/site-packages/firebird/driver/core.py", line 3788, in close
File "$PYTHON_HOME/Lib/site-packages/firebird/driver/core.py", line 3655, in _clear
File "$PYTHON_HOME/Lib/site-packages/firebird/driver/interfaces.py", line 709, in close
OSError: exception: access violation writing 0x0000000000000024
4.0.1.2175: passed.
5.0.0.393: crashed,
> raise self.__report(DatabaseError, self.status.get_errors())
E firebird.driver.types.DatabaseError: Error writing data to the connection.
E -send_packet/send
3. Version 3.0.13.33793 raises:
> raise self.__report(DatabaseError, self.status.get_errors())
E firebird.driver.types.DatabaseError: Invalid resultset interface
(and this exception is not caught for some reason).
Checked on 6.0.0.447, 5.0.2.1487, 4.0.6.3142
"""
import pytest
from firebird.qa import *
from firebird.driver import tpb, Isolation, TraLockResolution, TraAccessMode, DatabaseError, FirebirdWarning
import time
import warnings
# Empty database fixture; the test only needs a connectable database and rdb$types.
db = db_factory()
act = python_act('db')
@pytest.mark.version('>=4.0')
def test_1(act: Action, capsys):
# NOTE(review): leading indentation was lost in this rendering; the lines below form the body of test_1,
# and the exact loop/after-loop boundary of the trailing assertions cannot be recovered here — verify
# against the repository copy before relying on it.
# Purpose: fetching from a cursor whose transaction was committed behind its back must raise
# 'Invalid resultset interface' (gds 335545049, sqlcode -901) for every isolation level, not crash.
tpb_isol_set = (Isolation.SERIALIZABLE, Isolation.SNAPSHOT, Isolation.READ_COMMITTED_READ_CONSISTENCY, Isolation.READ_COMMITTED_RECORD_VERSION, Isolation.READ_COMMITTED_NO_RECORD_VERSION)
with act.db.connect() as con:
for x_isol in tpb_isol_set:
custom_tpb = tpb(isolation = x_isol, lock_timeout = 0)
tx = con.transaction_manager(custom_tpb)
cur = tx.cursor()
tx.begin()
# Suppress FirebirdWarning 'Invalid resultset interface' emitted by the driver (see file notes).
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
try:
print(x_isol.name)
cur.execute('select 0 from rdb$types rows 2')
cur.fetchone()
# Detach the cursor from the transaction so commit() does not close it;
# the subsequent fetchone() then hits an already-committed resultset.
tx._cursors = []
tx.commit()
cur.fetchone()
except DatabaseError as e:
print(e.__str__())
print(e.sqlcode)
for g in e.gds_codes:
print(g)
finally:
cur.close()
act.expected_stdout = f"""
{x_isol.name}
Invalid resultset interface
-901
335545049
"""
act.stdout = capsys.readouterr().out
assert act.clean_stdout == act.clean_expected_stdout

View File

@ -0,0 +1,68 @@
#coding:utf-8
"""
ID: issue-7269
ISSUE: https://github.com/FirebirdSQL/firebird/issues/7269
TITLE: Database restore must make every effort on activating deferred indexes
DESCRIPTION:
Test uses unrecoverable .fbk that was provided in the ticket and tries to restore it using '-verbose' option.
After restore finish, we check its log. It must contain SEVERAL errors related to indices (PK and two FK),
and also it must have messages about FINAL point of restore (regardless error that follows after this):
gbak:finishing, closing, and going home
gbak:adjusting the ONLINE and FORCED WRITES flags
NOTES:
[02.11.2024] pzotov
Checked on 5.0.2.1551, 6.0.0.415.
"""
import subprocess
from pathlib import Path
import zipfile
import locale
import re
import pytest
from firebird.qa import *
from firebird.driver import SrvRestoreFlag
# The fixture database itself is unused; restore goes to a temporary file (tmp_fdb).
db = db_factory()
act = python_act('db')
# Unpacked copy of the unrecoverable .fbk from the ticket, and the restore target.
tmp_fbk = temp_file('gh_7269.tmp.fbk')
tmp_fdb = temp_file('gh_7269.tmp.fdb')
@pytest.mark.version('>=5.0.2')
def test_1(act: Action, tmp_fbk: Path, tmp_fdb: Path, capsys):
# NOTE(review): leading indentation was lost in this rendering; the lines below form the body of test_1.
# Purpose: gbak -rep -v of a deliberately unrecoverable backup must report index-related
# errors BUT still reach the final stage ('finishing, closing, and going home').
zipped_fbk_file = zipfile.Path(act.files_dir / 'gh_7269.zip', at = 'gh-7269-unrecoverable.fbk')
tmp_fbk.write_bytes(zipped_fbk_file.read_bytes())
# Only gbak ERROR lines and the two 'final stage' messages are of interest in the verbose log:
allowed_patterns = \
(
r'gbak:(\s+)?ERROR(:)?'
,r'gbak:(\s+)?finishing, closing, and going home'
,r'gbak:(\s+)?adjusting the ONLINE and FORCED WRITES flags'
)
allowed_patterns = [ re.compile(p, re.IGNORECASE) for p in allowed_patterns ]
# io_enc = preferred locale encoding: gbak messages may be localized on non-English hosts.
act.gbak(switches = ['-rep', '-v', str(tmp_fbk), str(tmp_fdb)], combine_output = True, io_enc = locale.getpreferredencoding())
for line in act.stdout.splitlines():
if act.match_any(line.strip(), allowed_patterns):
print(line)
expected_stdout = """
gbak: ERROR:violation of PRIMARY or UNIQUE KEY constraint "PK_A3" on table "A3"
gbak: ERROR: Problematic key value is ("ID" = 9)
gbak: ERROR:violation of PRIMARY or UNIQUE KEY constraint "PK_A1" on table "A1"
gbak: ERROR: Problematic key value is ("ID" = 5)
gbak: ERROR:Cannot create foreign key constraint FK_A1. Partner index does not exist or is inactive.
gbak: ERROR:violation of FOREIGN KEY constraint "FK_A2" on table "B2"
gbak: ERROR: Foreign key reference target does not exist
gbak: ERROR: Problematic key value is ("A2_ID" = 5)
gbak: ERROR:Cannot create foreign key constraint FK_A3. Partner index does not exist or is inactive.
gbak:finishing, closing, and going home
gbak:adjusting the ONLINE and FORCED WRITES flags
gbak: ERROR:Database is not online due to failure to activate one or more indices.
gbak: ERROR: Run gfix -online to bring database online without active indices.
"""
act.expected_stdout = expected_stdout
act.stdout = capsys.readouterr().out
assert act.clean_stdout == act.clean_expected_stdout

218
tests/bugs/gh_7304_test.py Normal file
View File

@ -0,0 +1,218 @@
#coding:utf-8
"""
ID: issue-7304
ISSUE: 7304
TITLE: Events in system attachments (like garbage collector) are not traced
DESCRIPTION:
Test changes sweep interval to some low value (see SWEEP_GAP) and runs TX_COUNT transactions which
cause the difference between OST and OIT to exceed the given sweep interval. These transactions are performed
by ISQL which is launched as child process. SQL script uses table with record that is locked at the
beginning of script and execute block with loop of TX_COUNT statements which insert new records.
After this loop finish, we make ISQL to hang by forcing it to update first record (see LOCKED_ROW).
Then we change DB state to full shutdown and wait until ISQL will be terminated.
At this point database has sweep gap that is enough to run auto sweep at first connection to DB.
Finally, we bring DB online and start trace with log_sweep = true and log_transactions = true.
Doing connection and wait about 2..3 seconds cause auto sweep to be started and completed.
This must be reflected in the trace.
If ServerMode = 'Super' and ParallelWorkers >= 2 and MaxParallelWorkers >= ParallelWorkers
then the trace log will contain the following five lines related to worker(s) activity:
<TIMESTAMP> (...) START_TRANSACTION
<DBFILE> (ATT_..., <Worker>, NONE, <internal>) --------------------- [ 1 ]
(TRA_..., READ_COMMITTED | REC_VERSION | WAIT | READ_ONLY)
<TIMESTAMP> (...) COMMIT_TRANSACTION
<DBFILE> (ATT_..., <Worker>, NONE, <internal>) --------------------- [ 2 ]
This is the only difference that can be observed for snapshots before and after fix
(i.e. BEFORE fix trace had no such lines but all other data about sweep *did* present).
Test checks that trace log contains TWO lines with '<worker>', see above [ 1 ] and [ 2 ].
JIRA: CORE-2668
FBTEST: bugs.core_2668
NOTES:
[07.11.2024] pzotov
Confirmed absence of lines marked as '<worker' in the trace log for snapshot 5.0.0.731 (15.09.2022).
Checked on 5.0.0.733 (16.09.2022); 5.0.2.1553, 6.0.0.515
"""
import time
import subprocess
from datetime import datetime as dt
import re
from pathlib import Path
from difflib import unified_diff
from firebird.driver import DatabaseError, tpb, Isolation, TraLockResolution, DbWriteMode, ShutdownMode, ShutdownMethod
import pytest
from firebird.qa import *
db = db_factory()
# Attachment ids vary from run to run; normalize '(ATT_<nn>' to '(ATT_N' in trace output.
act = python_act('db', substitutions = [('\\(ATT_\\d+', '(ATT_N')])
################
### SETTINGS ###
################
# Sweep interval to set (low, so TX_COUNT transactions exceed it and auto-sweep fires):
SWEEP_GAP = 100
# Number of autonomous transactions to run in the child ISQL:
TX_COUNT = 150
# Key of the record that the ISQL script locks to make itself hang:
LOCKED_ROW = -1
MAX_WAIT_FOR_ISQL_PID_APPEARS_MS = 5000
# Trace lines produced by sweep worker attachments; exactly these prove the fix (see file notes).
WATCH_FOR_PTN = re.compile( r'\(ATT_\d+,\s+<Worker>,\s+NONE,\s+<internal>\)', re.IGNORECASE)
################
tmp_sql = temp_file('tmp_2668.sql')
tmp_log = temp_file('tmp_2668.log')
@pytest.mark.es_eds
@pytest.mark.version('>=5.0.0')
def test_1(act: Action, tmp_sql: Path, tmp_log: Path, capsys):
    """
    Create a sweep gap by making many rolled-back transactions while one
    attachment holds a row lock, force a FULL shutdown, bring the database
    back online and verify via trace that auto-sweep used <Worker> attachments.
    """
    # Worker attachments for parallel sweep are a SuperServer-only feature.
    if act.vars['server-arch'] != 'SuperServer':
        pytest.skip("Applies only to SuperServer")

    with act.db.connect() as con:
        cur = con.cursor()
        # Read the effective ParallelWorkers / MaxParallelWorkers values from rdb$config:
        sql = """
            select
                cast(max(iif(g.rdb$config_name = 'ParallelWorkers', g.rdb$config_value, null)) as int) as cfg_par_workers
                ,cast(max(iif(g.rdb$config_name = 'MaxParallelWorkers', g.rdb$config_value, null)) as int) as cfg_max_par_workers
            from rdb$database
            left join rdb$config g on g.rdb$config_name in ('ParallelWorkers', 'MaxParallelWorkers')
        """
        cur.execute(sql)
        cfg_par_workers, cfg_max_par_workers = cur.fetchone()

    assert cfg_par_workers >=2 and cfg_max_par_workers >= cfg_par_workers, "Server must be configured for parallel work. Check values of ParallelWorkers and MaxParallelWorkers"

    # ISQL script: the first attachment locks LOCKED_ROW and then, via ES/EDS,
    # spawns transactions that fail to update the same row, producing the
    # rolled-back transactions that create the sweep gap.
    test_script = f"""
        set echo on;
        set bail on;
        connect '{act.db.dsn}' user {act.db.user} password '{act.db.password}';
        recreate table test(id int primary key);
        insert into test(id) values({LOCKED_ROW});
        commit;
        set transaction read committed WAIT;
        update test set id = id where id = {LOCKED_ROW};
        set term ^;
        execute block as
            declare n int = {TX_COUNT};
            declare v_role varchar(31);
        begin
            while (n > 0) do
            in autonomous transaction do
            insert into test(id) values(:n)
            returning :n-1 into n;
            v_role = left(replace( uuid_to_char(gen_uuid()), '-', ''), 31);
            begin
                execute statement ('update test /* ' || ascii_char(65) || ' */ set id = id where id = ?') ({LOCKED_ROW})
                on external
                    'localhost:' || rdb$get_context('SYSTEM', 'DB_NAME')
                as user '{act.db.user}' password '{act.db.password}' role v_role
                with autonomous transaction;
                when any do
                begin
                end
            end
        end
        ^
        set term ;^
        set heading off;
        select '-- shutdown me now --' from rdb$database;
    """
    tmp_sql.write_text(test_script)

    with act.connect_server() as srv:
        #############################
        ### reduce SWEEP interval ###
        #############################
        srv.database.set_sweep_interval(database = act.db.db_path, interval = SWEEP_GAP)
        # Async writes speed up the mass of inserts/rollbacks that create the gap.
        srv.database.set_write_mode(database=act.db.db_path, mode=DbWriteMode.ASYNC)

        with open(tmp_log,'w') as f_log:
            # Launch ISQL in background; it stays connected holding the row lock.
            p_work_sql = subprocess.Popen([act.vars['isql'], '-q', '-i', str(tmp_sql)], stdout = f_log, stderr = subprocess.STDOUT)

            # Detect that the background ISQL reached its workload: its statement
            # (marked with '/* A */') must become visible in mon$statements.
            chk_mon_sql = """
                select 1
                from mon$attachments a
                join mon$statements s
                    using (mon$attachment_id)
                where
                    a.mon$attachment_id <> current_connection
                    and cast(s.mon$sql_text as varchar(8192)) containing '/* A */'
            """

            found_in_mon_tables = False
            with act.db.connect() as con_watcher:
                custom_tpb = tpb(isolation = Isolation.SNAPSHOT, lock_timeout = -1)
                tx_watcher = con_watcher.transaction_manager(custom_tpb)
                cur_watcher = tx_watcher.cursor()
                ps = cur_watcher.prepare(chk_mon_sql)
                i = 0
                da = dt.now()
                while True:
                    cur_watcher.execute(ps)
                    mon_result = -1
                    for r in cur_watcher:
                        mon_result = r[0]
                    # Commit so that the next poll sees fresh mon$ data.
                    tx_watcher.commit()
                    db = dt.now()
                    diff_ms = (db-da).seconds*1000 + (db-da).microseconds//1000
                    if mon_result == 1:
                        found_in_mon_tables = True
                        break
                    elif diff_ms > MAX_WAIT_FOR_ISQL_PID_APPEARS_MS:
                        # Give up after the configured timeout; asserted below.
                        break
                    time.sleep(0.1)
                ps.free()

            assert found_in_mon_tables, f'Could not find attachment in mon$ tables for {MAX_WAIT_FOR_ISQL_PID_APPEARS_MS} ms.'

            try:
                ##############################################
                ###   f u l l    s h u t d o w n    D B    ###
                ##############################################
                srv.database.shutdown(database=act.db.db_path, mode=ShutdownMode.FULL,
                                      method=ShutdownMethod.FORCED, timeout=0)
            finally:
                # Ensure the background ISQL does not outlive the test.
                p_work_sql.terminate()
        # < with open(tmp_log,'w') as f_log

        srv.database.bring_online(database=act.db.db_path)

    trace_options = \
        [
            'time_threshold = 0'
            ,'log_initfini = false'
            ,'log_connections = true'
            ,'log_transactions = true'
            ,'log_errors = true'
            ,'log_sweep = true'
        ]

    with act.trace(db_events = trace_options, encoding='utf8', encoding_errors='utf8'):
        # First attachment after bring-online triggers auto-sweep; give it time to finish.
        with act.db.connect() as con_for_sweep_start:
            time.sleep(2)

    # Only the '<Worker>' attachment lines matter; everything else is ignored.
    for line in act.trace_log:
        if WATCH_FOR_PTN.search(line):
            print(WATCH_FOR_PTN.search(line).group())

    act.expected_stdout = """
        (ATT_N, <Worker>, NONE, <internal>)
        (ATT_N, <Worker>, NONE, <internal>)
    """
    act.stdout = capsys.readouterr().out
    assert act.clean_stdout == act.clean_expected_stdout

155
tests/bugs/gh_7398_test.py Normal file
View File

@ -0,0 +1,155 @@
#coding:utf-8
"""
ID: issue-7398
ISSUE: https://github.com/FirebirdSQL/firebird/issues/7398
TITLE: Worst plan sort created to execute an indexed tables
DESCRIPTION:
NOTES:
[29.09.2024] pzotov
1. Ineffective execution plan was up to 4.0.3.2840.
Since 4.0.3.2843 plan changed and is the same for all subsequent FB-4.x snapshots.
Commit: https://github.com/FirebirdSQL/firebird/commit/1b192404d43a15d403b5ff92760bc5df9d3c89c3
(13.09.2022 19:17, "More complete solution for #3357 and #7118")
2. Database provided in the ticket has too big size (~335 Mb).
Test uses much smaller DB that was created on basis of original one by
extraction of small portions of data from tables PCP_TIN_REC_MAT and INV_ETQ_MAT.
These tables in original DB have 114115 and 1351211 rows.
In DB that is used here these tables have 15000 and 30000 rows corresp.
NOT all constraints are used in the test DB. Particularly, following DDL were abandoned:
ALTER TABLE PCP_TIN_REC ADD CONSTRAINT FK_PCP_TIN_REC_EMP FOREIGN KEY (ID_EMP) REFERENCES SYS_EMP (ID_EMP);
ALTER TABLE PCP_TIN_REC ADD CONSTRAINT FK_PCP_TIN_REC_OP FOREIGN KEY (ID_OP) REFERENCES PCP_OP (ID_OP);
ALTER TABLE PCP_TIN_REC_MAT ADD CONSTRAINT FK_PCP_TIN_REC_MAT_MAT FOREIGN KEY (ID_MAT) REFERENCES INV_MAT (ID_MAT);
Test database have been backed up using 4.0.3.2840 and compressed to .zip file.
3. Because of missed valuable part of source data, I'm not sure that this test verifies exactly ticket issue.
But in any case, using this test one may see difference in execution plan that is produced in 4.0.3.2840 and 4.0.3.2843.
And such difference also can be seen on original DB (although plans there differ from those which are in test DB).
Checked on 6.0.0.471, 5.0.2.1519, 4.0.6.3157.
"""
import locale
import re
import zipfile
from pathlib import Path
from firebird.driver import SrvRestoreFlag
import time
import pytest
from firebird.qa import *
db = db_factory()
act = python_act('db')
check_sql = """
select r.id_op, r.id_rec, sum(m.q_mat * cus.cus_med)
from pcp_tin_rec r
join pcp_tin_rec_mat m on r.id_rec = m.id_rec
join inv_etq_mat cus on cus.id_mat = m.id_mat and cus.anomes = r.am_bai
join inv_etq_nat nat on nat.id_nat = cus.id_nat
where
nat.cml_stat = 1 and r.id_op = 216262
group by r.id_op, r.id_rec
"""
fbk_file = temp_file('gh_7398.tmp.fbk')
#-----------------------------------------------------------
def replace_leading(source, char="."):
    """Return *source* with every leading whitespace character replaced by *char*."""
    body = source.lstrip()
    pad_len = len(source) - len(body)
    return char * pad_len + body
#-----------------------------------------------------------
expected_out_4x = """
Select Expression
....-> Aggregate
........-> Sort (record length: 148, key length: 16)
............-> Nested Loop Join (inner)
................-> Filter
....................-> Table "PCP_TIN_REC" as "R" Full Scan
................-> Filter
....................-> Table "PCP_TIN_REC_MAT" as "M" Access By ID
........................-> Bitmap
............................-> Index "FK_PCP_TIN_REC_MAT_REC" Range Scan (full match)
................-> Filter
....................-> Table "INV_ETQ_MAT" as "CUS" Access By ID
........................-> Bitmap
............................-> Index "IDX_INV_ETQ_MAT_ANOMES" Range Scan (full match)
................-> Filter
....................-> Table "INV_ETQ_NAT" as "NAT" Access By ID
........................-> Bitmap
............................-> Index "PK_INV_ETQ_NAT" Unique Scan
"""
expected_out_5x = """
Select Expression
....-> Aggregate
........-> Sort (record length: 148, key length: 16)
............-> Filter
................-> Hash Join (inner)
....................-> Nested Loop Join (inner)
........................-> Filter
............................-> Table "PCP_TIN_REC" as "R" Full Scan
........................-> Filter
............................-> Table "PCP_TIN_REC_MAT" as "M" Access By ID
................................-> Bitmap
....................................-> Index "FK_PCP_TIN_REC_MAT_REC" Range Scan (full match)
........................-> Filter
............................-> Table "INV_ETQ_MAT" as "CUS" Access By ID
................................-> Bitmap
....................................-> Index "IDX_INV_ETQ_MAT_ANOMES" Range Scan (full match)
....................-> Record Buffer (record length: 33)
........................-> Filter
............................-> Table "INV_ETQ_NAT" as "NAT" Access By ID
................................-> Bitmap
....................................-> Index "IDX_INV_ETQ_NAT_CML_STAT" Range Scan (full match)
"""
expected_out_6x = """
Select Expression
....-> Aggregate
........-> Sort (record length: 148, key length: 16)
............-> Filter
................-> Hash Join (inner) (keys: 1, total key length: 4)
....................-> Nested Loop Join (inner)
........................-> Filter
............................-> Table "PCP_TIN_REC" as "R" Full Scan
........................-> Filter
............................-> Table "PCP_TIN_REC_MAT" as "M" Access By ID
................................-> Bitmap
....................................-> Index "FK_PCP_TIN_REC_MAT_REC" Range Scan (full match)
........................-> Filter
............................-> Table "INV_ETQ_MAT" as "CUS" Access By ID
................................-> Bitmap
....................................-> Index "IDX_INV_ETQ_MAT_ANOMES" Range Scan (full match)
....................-> Record Buffer (record length: 33)
........................-> Filter
............................-> Table "INV_ETQ_NAT" as "NAT" Access By ID
................................-> Bitmap
....................................-> Index "IDX_INV_ETQ_NAT_CML_STAT" Range Scan (full match)
"""
@pytest.mark.version('>=4.0')
def test_1(act: Action, fbk_file: Path, capsys):
    """
    Restore the pre-built database from files/gh_7398.zip and verify that the
    explained plan for the ticket query matches the version-specific expectation.
    """
    # Extract the .fbk from the zip and restore it over the test database:
    zipped_fbk_file = zipfile.Path(act.files_dir / 'gh_7398.zip', at = 'gh_7398.fbk')
    fbk_file.write_bytes(zipped_fbk_file.read_bytes())
    with act.connect_server(encoding=locale.getpreferredencoding()) as srv:
        srv.database.restore(database = act.db.db_path, backup = fbk_file, flags = SrvRestoreFlag.REPLACE)
        restore_log = srv.readlines()
        # Restore must complete silently:
        assert restore_log == []

    with act.db.connect() as con:
        cur = con.cursor()
        # NOTE: dead local 'chk_sql' (querying a nonexistent 'test' table) removed.
        ps = cur.prepare(check_sql)
        try:
            # Print explained plan with padding each line by dots in order to see indentations:
            print( '\n'.join([replace_leading(s) for s in ps.detailed_plan.split('\n')]) )
        finally:
            # Release the prepared statement before the connection closes
            # (consistent with ps.free() usage in sibling tests).
            ps.free()

    act.expected_stdout = expected_out_4x if act.is_version('<5') else expected_out_5x if act.is_version('<6') else expected_out_6x
    act.stdout = capsys.readouterr().out
    assert act.clean_stdout == act.clean_expected_stdout

View File

@ -13,6 +13,7 @@ DESCRIPTION:
NOTES:
[25.11.2023] pzotov
Checked on 6.0.0.150.
See also: gh_8249_test.py
"""
import pytest

171
tests/bugs/gh_7767_test.py Normal file
View File

@ -0,0 +1,171 @@
#coding:utf-8
"""
ID: issue-7767
ISSUE: https://github.com/FirebirdSQL/firebird/issues/7767
TITLE: Slow drop trigger command execution under FB5.0
DESCRIPTION:
The issued problem can NOT be stably reproduced if we compare time ratio between 'DROP TRIGGER' vs 'DROP PROCEDURE' statements.
The ratio between execution times differed by too small a value (e.g. 7.2 before the fix and 6.9 after it).
But regression can be noted if we check ratio between CPU time spent for 'DROP TRIGGER' and some code that does not relate
to any DB operations and makes some evaluation. Such code can be single call to CRYPT_HASH function (doing in loop many times).
This function must be called EVAL_CRYPT_HASH_COUNT times.
Result of evaluating of CRYPT_HASH is stored in var. 'eval_crypt_hash_time' and serves further as "etalone" value.
Test database is initialized by creation of PSQL_OBJECTS_COUNT triggers and is copied to backup FDB (see 'tmp_fdb').
Then we call 'DROP TRIGGER' using 'for select ... from rdb$triggers' cursor (so their count is also PSQL_OBJECTS_COUNT).
We repeat this in loop for MEASURES_COUNT iterations, doing restore from DB copy before every iteration (copy 'tmp_fdb' to act.db).
Median of ratios between CPU times obtained in this loop and eval_crypt_hash_time must be less than MAX_RATIO.
Duration is measured as difference between psutil.Process(fb_pid).cpu_times() counters.
NOTES:
[14.08.2024] pzotov
Problem did exist in FB 5.x until commit "Fix #7759 - Routine calling overhead increased by factor 6 vs Firebird 4.0.0."
https://github.com/FirebirdSQL/firebird/commit/d621ffbe0cf2d43e13480628992180c28a5044fe (03-oct-2023 13:32).
Before this commit (up to 5.0.0.1236) median of ratios was more than 6.5.
After fix it was reduced to ~3.5 ... 4.0 (5.0.0.1237 and above).
This ratio seems to be same on Windows and Linux.
Built-in function CRYPT_HASH appeared in 4.0.0.2180, 27-aug-2020, commit:
https://github.com/FirebirdSQL/firebird/commit/e9f3eb360db41ddff27fa419b908876be0d2daa5
("Moved cryptographic hashes to separate function crypt_hash(), crc32 - into function hash()")
Test duration time: about 50s.
Checked on 6.0.0.436, 5.0.2.1478, 4.0.6.3142 (all SS/CS; both Windows and Linux).
"""
import shutil
from pathlib import Path
import psutil
import pytest
from firebird.qa import *
import time
###########################
### S E T T I N G S ###
###########################
# How many times to generate crypt_hash:
EVAL_CRYPT_HASH_COUNT=5000
# How many times we call procedures:
MEASURES_COUNT = 11
# How many procedures and triggers must be created:
PSQL_OBJECTS_COUNT = 500
# Maximal value for ratio between maximal and minimal medians
#
MAX_RATIO = 6
#############
init_sql = """
set bail on;
alter database set linger to 0;
create sequence g;
create table test(id int);
commit;
set term ^;
"""
init_sql = '\n'.join(
( init_sql
,'\n'.join( [ f'create trigger tg_{i} for test before insert as declare v int; begin v = gen_id(g,1); end ^' for i in range(PSQL_OBJECTS_COUNT) ] )
,'^ set term ;^'
,'commit;'
)
)
db = db_factory(init = init_sql)
act = python_act('db')
tmp_fdb = temp_file('tmp_gh_7767_copy.tmp')
expected_stdout = """
Medians ratio: acceptable
"""
eval_crypt_code = f"""
execute block as
declare v_hash varbinary(64);
declare n int = {EVAL_CRYPT_HASH_COUNT};
begin
while (n > 0) do begin
v_hash = crypt_hash(lpad('', 32765, uuid_to_char(gen_uuid())) using SHA512);
n = n - 1;
end
end
"""
drop_trg_code = """
execute block as
declare trg_drop type of column rdb$triggers.rdb$trigger_name;
begin
for select 'DROP TRIGGER '||trim(rdb$trigger_name)
from rdb$triggers
where rdb$system_flag=0
into :trg_drop do
begin
in autonomous transaction do
begin
execute statement :trg_drop;
end
end
end
"""
#--------------------------------------------------------------------
def median(lst):
    """Return the statistical median of *lst*, or None for an empty list."""
    if not lst:
        return None
    ordered = sorted(lst)
    mid = len(ordered) // 2
    if len(ordered) % 2:
        # Odd count: the middle element.
        return ordered[mid]
    # Even count: mean of the two middle elements.
    return sum(ordered[mid - 1:mid + 1]) / 2.0
#--------------------------------------------------------------------
def get_server_pid(con):
    """Return the OS PID of the Firebird server process serving connection *con*."""
    sql = 'select mon$server_pid as p from mon$attachments where mon$attachment_id = current_connection'
    with con.cursor() as cur:
        cur.execute(sql)
        row = cur.fetchone()
    return int(row[0])
#--------------------------------------------------------------------
@pytest.mark.version('>=4.0.0')
def test_1(act: Action, tmp_fdb: Path, capsys):
    """
    Measure the CPU cost of mass 'DROP TRIGGER' relative to a CPU-only 'etalone'
    workload (repeated CRYPT_HASH); the median of the ratios must stay below MAX_RATIO.
    """
    # Keep a pristine copy of the database (with PSQL_OBJECTS_COUNT triggers):
    shutil.copy2(act.db.db_path, tmp_fdb)

    with act.db.connect() as con:
        fb_pid = get_server_pid(con)
        fb_info_init = psutil.Process(fb_pid).cpu_times()
        con.execute_immediate( eval_crypt_code )
        fb_info_curr = psutil.Process(fb_pid).cpu_times()

    # Etalone: user-mode CPU time of pure evaluation; floor guards against zero division.
    eval_crypt_hash_time = max(fb_info_curr.user - fb_info_init.user, 0.000001)

    ddl_time = {}
    for iter in range(MEASURES_COUNT):
        with act.db.connect() as con:
            fb_pid = get_server_pid(con)
            fb_info_init = psutil.Process(fb_pid).cpu_times()
            con.execute_immediate( drop_trg_code )
            fb_info_curr = psutil.Process(fb_pid).cpu_times()
            ddl_time[ 'tg', iter ] = max(fb_info_curr.user - fb_info_init.user, 0.000001)

        # Quick jump back to database with PSQL_OBJECTS_COUNT triggers that we made on init phase:
        shutil.copy2(tmp_fdb, act.db.db_path)

    ratios = [ ddl_time['tg',iter] / eval_crypt_hash_time for iter in range(MEASURES_COUNT) ]
    median_ratio = median(ratios)

    SUCCESS_MSG = 'Medians ratio: acceptable'
    if median_ratio < MAX_RATIO:
        print(SUCCESS_MSG)
    else:
        # Emit diagnostics so a failure can be analyzed from the captured log:
        print( 'Medians ratio: /* perf_issue_tag */ POOR: %s, more than threshold: %s' % ( '{:.2f}'.format(median_ratio), '{:.2f}'.format(MAX_RATIO) ) )
        print('ratios:',['{:.2f}'.format(r) for r in ratios])
        print('CPU times:')
        for k,v in ddl_time.items():
            print(k,':::','{:.2f}'.format(v))

    act.expected_stdout = SUCCESS_MSG
    act.stdout = capsys.readouterr().out
    assert act.clean_stdout == act.clean_expected_stdout

190
tests/bugs/gh_7846_test.py Normal file
View File

@ -0,0 +1,190 @@
#coding:utf-8
"""
ID: issue-7846
ISSUE: https://github.com/FirebirdSQL/firebird/issues/7846
TITLE: FB4 can't backup/restore int128-array
DESCRIPTION:
Test checks ability to make b/r of DB with table that has array-based columns of following types:
smallint; integer; bigint; int128; double.
NOTES:
[16.09.2024] pzotov
1. Confirmed problem with b/r for INT128 on 4.0.4.3021 (dob: 17-nov-2023).
Got after restore: [Decimal('0'), Decimal('0')] (although no errors were during insert origin data).
Expected values: [Decimal('170141183460469231731687303715884105727'), Decimal('-170141183460469231731687303715884105728')].
Confirmed fix on 4.0.4.3022 (dob: 19-nov-2023).
2. Fix for #8100 ("The isc_array_lookup_bounds function returns invalid values...") required in order
this test can pass on FB 5.x and 6.x.
See commits (07-may-2024):
* 5.x: https://github.com/FirebirdSQL/firebird/commit/26ca064202169c0558359fc9ab06b70e827466f0
* 6.x: https://github.com/FirebirdSQL/firebird/commit/17b007d14f8ccc6cfba0d63a3b2f21622ced20d0
FB 4.x was not affected by that bug.
3. INT128 type requires as argument only INTEGER values (from scope -2**127 ... +2**127-1) but NOT decimals.
Rather, NUMERIC / DECIMAL columns allows only Decimal instances.
(letter from Alex, 20.08.2024 16:11, subj: "gh-6544 ("Error writing an array of NUMERIC(24,6)" ) <...>"
4. DECFLOAT type had a problem in firebird-driver, fixed in v 1.10.5 (26-JUL-2024)
5. Some features must be implemented in engine and/or in firebird-driver for proper support of NUMERIC datatype
which have big values and use int128 as underlying storage.
Discussed with pcisar, see subj "firebird-driver and its support for FB datatypes", letters since 21-jul-2024.
See also: https://github.com/FirebirdSQL/firebird/issues/6544#issuecomment-2294778138
Checked on 6.0.0.457; 5.0.2.1499; 4.0.5.3136.
"""
import pytest
from firebird.qa import *
from io import BytesIO
from firebird.driver import SrvRestoreFlag, DatabaseError, InterfaceError
from decimal import Decimal
import traceback
import time
init_script = """
recreate table test_arr(
id int generated by default as identity constraint test_pk primary key
,v_smallint smallint[2]
,v_integer int[2]
,v_bigint bigint[2]
,v_int128 int128[2]
,v_double double precision[2]
,v_decfloat decfloat[2]
);
"""
db = db_factory(init = init_script)
act = python_act('db')
#--------------------------------------
def try_insert(con, cur, fld, data):
    """Insert array values into column *fld* of TEST_ARR and echo what was stored.

    Any exception is caught and its traceback printed, so a failure for one
    datatype does not abort the checks for the remaining columns.
    """
    print(f'\nTrying to add array in {fld}')
    try:
        print(f'Data: {data}')
        insert_sql = f"insert into test_arr({fld}) values (?)"
        with cur.prepare(insert_sql) as ps:
            for arr in data:
                cur.execute(ps, (arr,))
        # Re-read the most recently inserted row and show each element with its type:
        cur.execute(f'select {fld} from test_arr order by id desc rows 1')
        for row in cur:
            for item in row[0]:
                print(item, type(item))
        con.commit()
        print('Success.')
    except Exception:
        for line in traceback.format_exc().split('\n'):
            print(' ', line)
#--------------------------------------
@pytest.mark.version('>=4.0.5')
def test_1(act: Action, capsys):
    """
    Insert boundary values into array columns of several datatypes, then verify
    that backup/restore preserves them (notably INT128-based arrays, see ticket).
    """
    with act.db.connect() as con:
        cur = con.cursor()
        # ------------ smallint -------------
        data = [ [32767, -32768] ]
        try_insert(con, cur, 'v_smallint', data)
        # ------------ int -------------
        data = [ [2147483647, -2147483648] ]
        try_insert(con, cur, 'v_integer', data)
        # ------------ bigint -------------
        data = [ [9223372036854775807, -9223372036854775808] ]
        try_insert(con, cur, 'v_bigint', data)
        # ------------ int128 -------------
        # sqltype: 32752 INT128 Nullable scale: 0 subtype: 0 len: 16
        # ValueError: Incorrect ARRAY field value.
        # !! WRONG!! >>> data = [ [Decimal('170141183460469231731687303715884105727'), Decimal('-170141183460469231731687303715884105728')] ]
        # Only INTEGERS must be specified as arguments:
        data = [ [170141183460469231731687303715884105727, -170141183460469231731687303715884105728] ]
        try_insert(con, cur, 'v_int128', data)
        # ------------ double -------------
        # NOTE: the literal ...158e+308 rounds to the nearest double (DBL_MAX),
        # whose repr is 1.7976931348623157e+308 -- see expected output below.
        data = [ [-2.2250738585072014e-308, 1.7976931348623158e+308] ]
        try_insert(con, cur, 'v_double', data)
        # ------------ decfloat -------------
        # ValueError: Incorrect ARRAY field value.
        # data = [ [Decimal('-1.0E-6143'), Decimal('9.999999999999999999999999999999999E6144')] ]
        # data = [ [-1.0E-6143, 9.999999999999999999999999999999999E6144] ]
        data = [ [Decimal('-9.999999999999999999999999999999999E+6144'), Decimal('9.999999999999999999999999999999999E+6144')] ]
        try_insert(con, cur, 'v_decfloat', data)

    # In-memory backup followed by restore-with-replace:
    backup = BytesIO()
    with act.connect_server() as srv:
        srv.database.local_backup(database=act.db.db_path, backup_stream=backup)
        backup.seek(0)
        srv.database.local_restore(backup_stream=backup, database=act.db.db_path, flags = SrvRestoreFlag.REPLACE)

    # Re-read every array column from the restored database:
    with act.db.connect() as con:
        cur = con.cursor()
        for fld_name in ('v_smallint','v_integer','v_bigint','v_int128','v_double', 'v_decfloat'):
            cur.execute(f'select {fld_name} from test_arr')
            for r in cur:
                # type(r): <class 'tuple'>
                if any(r):
                    print(f'Result after restore for column {fld_name}:')
                    for p in r:
                        print(p)

    act.expected_stdout = """
        Trying to add array in v_smallint
        Data: [[32767, -32768]]
        32767 <class 'int'>
        -32768 <class 'int'>
        Success.
        Trying to add array in v_integer
        Data: [[2147483647, -2147483648]]
        2147483647 <class 'int'>
        -2147483648 <class 'int'>
        Success.
        Trying to add array in v_bigint
        Data: [[9223372036854775807, -9223372036854775808]]
        9223372036854775807 <class 'int'>
        -9223372036854775808 <class 'int'>
        Success.
        Trying to add array in v_int128
        Data: [[170141183460469231731687303715884105727, -170141183460469231731687303715884105728]]
        170141183460469231731687303715884105727 <class 'decimal.Decimal'>
        -170141183460469231731687303715884105728 <class 'decimal.Decimal'>
        Success.
        Trying to add array in v_double
        Data: [[-2.2250738585072014e-308, 1.7976931348623157e+308]]
        -2.2250738585072014e-308 <class 'float'>
        1.7976931348623157e+308 <class 'float'>
        Success.
        Trying to add array in v_decfloat
        Data: [[Decimal('-9.999999999999999999999999999999999E+6144'), Decimal('9.999999999999999999999999999999999E+6144')]]
        -9.999999999999999999999999999999999E+6144 <class 'decimal.Decimal'>
        9.999999999999999999999999999999999E+6144 <class 'decimal.Decimal'>
        Success.
        Result after restore for column v_smallint:
        [32767, -32768]
        Result after restore for column v_integer:
        [2147483647, -2147483648]
        Result after restore for column v_bigint:
        [9223372036854775807, -9223372036854775808]
        Result after restore for column v_int128:
        [Decimal('170141183460469231731687303715884105727'), Decimal('-170141183460469231731687303715884105728')]
        Result after restore for column v_double:
        [-2.2250738585072014e-308, 1.7976931348623157e+308]
        Result after restore for column v_decfloat:
        [Decimal('-9.999999999999999999999999999999999E+6144'), Decimal('9.999999999999999999999999999999999E+6144')]
    """
    act.stdout = capsys.readouterr().out
    assert act.clean_stdout == act.clean_expected_stdout

View File

@ -8,6 +8,14 @@ DESCRIPTION:
NOTES:
Confirmed problem on 5.0.0.1291 (for UMOWA_ROWS = 700K number of fetches = 6059386, elapsed time = 9.609s)
Checked on 5.0.0.1303, 6.0.0.180 (for UMOWA_ROWS = 700K number of fetches = 270208, elapsed time = 0.741s)
[24.09.2024] pzotov
Changed substitutions: one need to suppress '(keys: N, total key length: M)' in FB 6.x (and ONLY there),
otherwise actual and expected output become differ.
Commit: https://github.com/FirebirdSQL/firebird/commit/c50b0aa652014ce3610a1890017c9dd436388c43
("Add key info to the hash join plan output", 23.09.2024 18:26)
Discussed with dimitr.
Checked on 6.0.0.467-cc183f5, 5.0.2.1513
"""
import pytest
@ -16,6 +24,7 @@ from firebird.qa import *
UMOWA_ROWS = 7000
ROZL_MULTIPLIER = 10
init_sql = f"""
set bail on;
@ -178,9 +187,20 @@ init_sql = f"""
commit;
"""
#-----------------------------------------------------------
db = db_factory(init = init_sql)
substitutions = \
[
( r'\(record length: \d+, key length: \d+\)', '' ) # (record length: 132, key length: 16)
,( r'\(keys: \d+, total key length: \d+\)', '' ) # (keys: 1, total key length: 2)
]
act = python_act('db', substitutions = substitutions)
#-----------------------------------------------------------
query_lst = [
# Query from https://github.com/FirebirdSQL/firebird/issues/7904:
"""
@ -207,9 +227,6 @@ query_lst = [
""",
]
substitutions = [ ('record length: \\d+.*', 'record length'), ('key length: \\d+.*', 'key length') ]
act = python_act('db', substitutions = substitutions)
#---------------------------------------------------------
def replace_leading(source, char="."):
stripped = source.lstrip()

View File

@ -161,6 +161,7 @@ expected_stdout = """
ánoc
"""
@pytest.mark.intl
@pytest.mark.version('>=6.0.0')
def test_1(act: Action):
act.expected_stdout = expected_stdout

144
tests/bugs/gh_8091_test.py Normal file
View File

@ -0,0 +1,144 @@
#coding:utf-8
"""
ID: issue-8091
ISSUE: https://github.com/FirebirdSQL/firebird/issues/8091
TITLE: Ability to create an inactive index
DESCRIPTION:
Test creates a table with several indices, all of them are specified as INACTIVE.
Then we check that these indices actually can *not* be used: explained plans for any query
to this table that could relate to indexed columns must now contain 'Full Scan'.
After this we extract metadata (with saving it to 'init_meta' variable) and drop test table.
Applying of metada to the test database (which is empty now) must pass without errors and,
more important, all indices must remain inactive after that.
Finally, we change DB dialect to 1, make b/r and again do same actions.
Result must be the same as for iteration with default dialect = 3.
NOTES:
[25.10.2024] pzotov
Checked on 6.0.0.508-67d8e39 (intermediate build).
"""
import time
from io import BytesIO
from firebird.driver import SrvRestoreFlag
import pytest
from firebird.qa import *
init_sql = """
set bail on;
recreate table test(id int generated by default as identity, x int, y int, z int);
set term ^;
execute block as
declare n int = 100000;
declare i int = 0;
begin
while (i < n) do
begin
insert into test(x, y, z) values( :i, null, :i);
i = i + 1;
end
end^
set term ;^
commit;
create unique ascending index test_x_asc inactive on test(x);
create descending index test_y_desc inactive on test(y);
create unique descending index test_x_plus_y inactive on test computed by (x+y);
create index test_z_partial inactive on test(z) where mod(id,2) = 0;
create unique index test_x_minus_y_partial inactive on test computed by (x-y) where mod(id,3) <= 1;
commit;
"""
db = db_factory(init = init_sql)
act = python_act('db')
#-----------------------------------------------------------
def replace_leading(source, char="."):
    """Return *source* with every leading whitespace character replaced by *char*."""
    body = source.lstrip()
    pad_len = len(source) - len(body)
    return char * pad_len + body
#-----------------------------------------------------------
def check_indices_inactive(act, qry_map, nr_block, capsys):
    """Check that every query in *qry_map* runs as a full table scan.

    Prints each query followed by its dot-padded explained plan, then compares
    the captured output against the queries interleaved with *nr_block*.
    """
    with act.db.connect() as con:
        cur = con.cursor()
        for query in qry_map.values():
            ps = cur.prepare(query)
            print(query)
            # Pad each plan line with dots so the indentation survives whitespace trimming:
            plan_lines = ps.detailed_plan.split('\n')
            print('\n'.join(replace_leading(s) for s in plan_lines))
            print('')
            ps.free()
    act.expected_stdout = '\n'.join(qry_map[i] + '\n' + nr_block for i in range(len(qry_map)))
    act.stdout = capsys.readouterr().out
    assert act.clean_stdout == act.clean_expected_stdout
    act.reset()
#-----------------------------------------------------------
@pytest.mark.version('>=6.0')
def test_1(act: Action, capsys):
    """
    Verify that indices created with the INACTIVE clause are never used by the
    optimizer, that extracted metadata keeps them inactive after re-applying,
    and that the same holds after switching to dialect 1 plus backup/restore.
    """
    # Queries that could make use of each of the five inactive indices:
    qry_map = {
        0 : 'select count(*) from test where x is null'
        ,1 : 'select count(*) from test where y is null'
        ,2 : 'select count(*) from test where x+y is null'
        ,3 : 'select count(*) from test where z is null and mod(id,2) = 0'
        ,4 : 'select count(*) from test where x-y is null and mod(id,3) <= 1'
        ,5 : 'select count(*) from test where x is not distinct from null'
        ,6 : 'select count(*) from test where y is not distinct from null'
        ,7 : 'select count(*) from test where x+y is not distinct from null'
        ,8 : 'select count(*) from test where z is not distinct from null and mod(id,2) = 0'
        ,9 : 'select count(*) from test where x-y is not distinct from null and mod(id,3) <= 1'
    }

    # Explained-plan fragment that must follow EVERY query: full scan only.
    nr_block = """
        Select Expression
        ....-> Aggregate
        ........-> Filter
        ............-> Table "TEST" Full Scan
    """

    # iter 0: default dialect 3; iter 1: repeated after dialect-1 switch + b/r.
    for iter in range(2):
        # check-1: ensure that all indices actually are INACTIVE, i.e. all queries will use full scan.
        ##########
        check_indices_inactive(act, qry_map, nr_block, capsys)

        # check-2: extract metadata, drop table and apply metadata which now contains 'INACTIVE' clause for indices.
        ##########
        act.isql(switches=['-x'])
        init_meta = '\n'.join( ('set bail on;', act.stdout) )
        with act.db.connect() as con:
            con.execute_immediate('drop table test')
            con.commit()

        # Apply metadata to main test database.
        act.isql(switches = [], input = init_meta)
        # NO errors must occur now:
        assert act.clean_stdout == ''
        act.reset()

        # check-3: ensure that all indices REMAIN INACTIVE, i.e. all queries will use full scan.
        ##########
        check_indices_inactive(act, qry_map, nr_block, capsys)

        if iter == 0:
            # change dialect to 1, make backup / restore and repeat all prev actions.
            act.gfix(switches = ['-sql_dialect', '1', act.db.dsn], combine_output = True)
            assert act.clean_stdout == ''
            act.reset()
            backup = BytesIO()
            with act.connect_server() as srv:
                srv.database.local_backup(database = act.db.db_path, backup_stream = backup)
                backup.seek(0)
                srv.database.local_restore(backup_stream = backup, database = act.db.db_path, flags = SrvRestoreFlag.REPLACE)

View File

@ -0,0 +1,99 @@
#coding:utf-8
"""
ID: issue-8115
ISSUE: https://github.com/FirebirdSQL/firebird/issues/8115
TITLE: Avoid reading/hashing the inner stream(s) if the leader stream is empty
DESCRIPTION:
Original title: "FB 5.0.0.1306 - unexpected results using LEFT JOIN with When "
NOTES:
[16.09.2024] pzotov
Confirmed bug in 5.0.1.1369-8c31082 (17.03.2024)
Bug was fixed in 5.0.1.1369-bbd35ab (20.03.2024)
Commit:
https://github.com/FirebirdSQL/firebird/commit/bbd35ab07c129e9735f081fcd29172a8187aa8ab
Avoid reading/hashing the inner stream(s) if the leader stream is empty
Checked on 6.0.0.457, 5.0.2.1499
"""
import zipfile
from pathlib import Path
import locale
import re
from firebird.driver import DatabaseError
import pytest
from firebird.qa import *
db = db_factory()
substitutions = [('INDEX_\\d+', 'INDEX_nn'),]
act = python_act('db', substitutions = substitutions)
tmp_fbk = temp_file('gh_8115.tmp.fbk')
#-----------------------------------------------------------
def replace_leading(source, char="."):
    """Return *source* with every leading whitespace character replaced by *char*."""
    body = source.lstrip()
    pad_len = len(source) - len(body)
    return char * pad_len + body
#-----------------------------------------------------------
@pytest.mark.version('>=5.0.1')
def test_1(act: Action, tmp_fbk: Path, capsys):
    """
    Restore the pre-built database from files/gh_8115.zip and check both the
    explained plan and the resultset of a LEFT JOIN query whose WHERE clause
    makes the join effectively inner (see ticket).
    """
    zipped_fbk_file = zipfile.Path(act.files_dir / 'gh_8115.zip', at = 'gh_8115.fbk')
    tmp_fbk.write_bytes(zipped_fbk_file.read_bytes())
    act.gbak(switches = ['-rep', str(tmp_fbk), act.db.db_path], combine_output = True, io_enc = locale.getpreferredencoding())
    print(act.stdout) # must be empty

    test_sql = """
        select aa.id, ab.CNP_USER, ab.ID_USER
        from sal_inperioada2('7DC51501-0DF2-45BE-93E5-382A541505DE', '15.05.2024') aa
        left join user_cnp(aa.cnp, '15.05.2024') ab on ab.CNP_USER = aa.cnp
        where ab.ID_USER = '04B23787-2C7F-451A-A12C-309F79D6F13A'
    """

    with act.db.connect() as con:
        cur = con.cursor()
        # NOTE(review): 'ps' is never freed explicitly; presumably released on
        # connection close -- confirm against firebird-driver behavior.
        ps = cur.prepare(test_sql)
        # Print explained plan, padding each line with dots to preserve indentation:
        print( '\n'.join([replace_leading(s) for s in ps.detailed_plan.split('\n')]) )
        try:
            cur.execute(ps)
            for r in cur:
                for p in r:
                    print(p)
        except DatabaseError as e:
            # On failure, show the message and gds codes instead of rows:
            print(e.__str__())
            print(e.gds_codes)

    expected_stdout_5x = """
        Select Expression
        ....-> Nested Loop Join (inner)
        ........-> Procedure "SAL_INPERIOADA2" as "AA" Scan
        ........-> Filter
        ............-> Filter
        ................-> Procedure "USER_CNP" as "AB" Scan
        000DD4E1-B4D0-4D6E-9D9F-DE9A7D0D6492
        E574F734-CECB-4A8F-B9BE-FAF51BC61FAD
        04B23787-2C7F-451A-A12C-309F79D6F13A
    """

    expected_stdout_6x = """
        Select Expression
        ....-> Filter
        ........-> Nested Loop Join (inner)
        ............-> Procedure "SAL_INPERIOADA2" as "AA" Scan
        ............-> Filter
        ................-> Procedure "USER_CNP" as "AB" Scan
        000DD4E1-B4D0-4D6E-9D9F-DE9A7D0D6492
        E574F734-CECB-4A8F-B9BE-FAF51BC61FAD
        04B23787-2C7F-451A-A12C-309F79D6F13A
    """

    act.expected_stdout = expected_stdout_5x if act.is_version('<6') else expected_stdout_6x
    act.stdout = capsys.readouterr().out
    assert act.clean_stdout == act.clean_expected_stdout

View File

@ -19,6 +19,13 @@ NOTES:
Confirmed bug (regression) on 3.0.12.33735 (date of build: 09-mar-2024).
Checked on 3.0.12.33764, 4.0.5.3112, 5.0.1.1416, 6.0.0.374.
[31.10.2024] pzotov
Adjusted expected_out discuss with dimitr: explained plan for FB 6.x became identical to FB 5.x and earlier after
https://github.com/FirebirdSQL/firebird/commit/e7e9e01fa9d7c13d8513fcadca102d23ad7c5e2a
("Rework fix for #8290: Unique scan is incorrectly reported in the explained plan for unique index and IS NULL predicate")
Checked on 3.0.13.33794, 4.0.6.3165, 5.0.2.1551, 6.0.0.515
"""
import zipfile
from pathlib import Path
@ -87,17 +94,18 @@ def test_1(act: Action, tmp_fbk: Path, capsys):
................-> Filter
....................-> Table "RDB$DEPENDENCIES" as "DEP" Access By ID
........................-> Bitmap
............................-> Index "RDB$INDEX_28" Range Scan (full match)
............................-> Index "RDB$INDEX_nn" Range Scan (full match)
................-> Filter
....................-> Table "RDB$PROCEDURES" as "PRC" Access By ID
........................-> Bitmap
............................-> Index "RDB$INDEX_21" Unique Scan
............................-> Index "RDB$INDEX_nn" Unique Scan
Select Expression
....-> Filter
........-> Table "RDB$RELATION_FIELDS" as "X" Access By ID
............-> Bitmap
................-> Index "RDB$INDEX_3" Range Scan (full match)
................-> Index "RDB$INDEX_nn" Range Scan (full match)
"""
act.expected_stdout = expected_stdout
act.stdout = capsys.readouterr().out
assert act.clean_stdout == act.clean_expected_stdout

346
tests/bugs/gh_8185_test.py Normal file
View File

@ -0,0 +1,346 @@
#coding:utf-8
"""
ID: issue-8185
ISSUE: https://github.com/FirebirdSQL/firebird/issues/8185
TITLE: SIGSEGV in Firebird 5.0.0.1306 embedded during update on cursor
DESCRIPTION:
Test implements sequence of actions described by Dimitry Sibiryakov in the ticket,
see: https://github.com/FirebirdSQL/firebird/issues/8185#issuecomment-2258598579
NOTES:
[01.11.2024] pzotov
1. Bug was fixed on following commits:
5.x: 27.07.2024 11:48, 08dc25f8c45342a73c786bc60571c8a5f2c8c6e3
("Simplest fix for #8185: SIGSEGV in Firebird 5.0.0.1306 embedded during update on cursor - disallow caching for positioned updates/deletes")
6.x: 29.07.2024 00:53, a7d10a40147d326e56540498b50e40b2da0e5850
("Fix #8185 - SIGSEGV with WHERE CURRENT OF statement with statement cache turned on")
2. In current version of firebird-driver we can *not* set cursor name without executing it first.
But such execution leads to 'update conflict / deadlock' for subsequent UPDATE statement.
Kind of 'hack' is used to solve this problem: ps1._istmt.set_cursor_name(CURSOR_NAME)
3. GREAT thanks to:
* Vlad for providing workaround and explanation of problem with AV for code like this:
with connect(f'localhost:{DB_NAME}', user = DBA_USER, password = DBA_PSWD) as con:
cur1 = con.cursor()
ps1 = cur1.prepare('update test set id = -id rows 0 returning id')
cur1.execute(ps1)
ps1.free()
It is mandatory to store result of cur1.eecute in some variable, i.e. rs1 = cur1.execute(ps1),
and call then rs1.close() __BEFORE__ ps1.free().
Discussed 26.10.2024, subj:
"Oddities when using instance of selectable Statement // related to interfaces, VTable, iResultSet, iVersioned , CLOOP"
* Dimitry Sibiryakov for describe the 'step-by-step' algorithm for reproducing problem and providing working example in .cpp
Confirmed problem on 5.0.1.1452-b056f5b (last snapshot before it was fixed).
Checked on 5.0.1.1452-08dc25f (27.07.2024 11:50); 6.0.0.401-a7d10a4 (29.07.2024 01:33) -- all OK.
"""
import pytest
from firebird.qa import *
from firebird.driver import driver_config, connect, tpb, TraAccessMode, Isolation, DatabaseError
init_sql = """
set bail on;
recreate table test(id int, f01 int);
commit;
insert into test(id, f01) select row_number()over(), row_number()over() * 10 from rdb$types rows 3;
commit;
"""
db = db_factory(init = init_sql)
act = python_act('db')
@pytest.mark.version('>=5.0.0')
def test_1(act: Action, capsys):
    """Reproduce the statement-cache SIGSEGV scenario from gh-8185.

    Follows the step-by-step recipe given by Dimitry Sibiryakov in the ticket
    (steps are marked '[ticket, DS]' below): prepare a SELECT ... FOR UPDATE,
    name its cursor, prepare a positioned UPDATE against that cursor name,
    release both, then re-prepare the UPDATE so it is taken from the statement
    cache while a *different* statement now owns the cursor name.
    Expected outcome: the positioned update works and returns 'Changed ID: -1'.
    """
    # Per-database config with a non-zero statement cache is essential here.
    srv_cfg = driver_config.register_server(name = 'test_srv_gh_8185', config = '')
    db_cfg_name = f'db_cfg_8185'
    db_cfg_object = driver_config.register_database(name = db_cfg_name)
    db_cfg_object.server.value = srv_cfg.name
    db_cfg_object.database.value = str(act.db.db_path)
    db_cfg_object.config.value = f"""
        MaxStatementCacheSize = 1M
    """

    # Pre-check: the connection must be embedded/local and the cache enabled,
    # otherwise the scenario does not exercise the buggy code path.
    with connect(db_cfg_name, user = act.db.user, password = act.db.password) as con:
        cur = con.cursor()
        cur.execute("select a.mon$remote_protocol, g.rdb$config_value from mon$attachments a left join rdb$config g on g.rdb$config_name = 'MaxStatementCacheSize' where a.mon$attachment_id = current_connection")
        for r in cur:
            conn_protocol = r[0]
            db_sttm_cache_size = int(r[1])
        assert conn_protocol is None, "Test must use LOCAL protocol."
        assert db_sttm_cache_size > 0, "Parameter 'MaxStatementCacheSize' (per-database) must be greater than zero for this test."

    #---------------------------------------------

    CURSOR_NAME = 'k1'
    SELECT_STTM = 'select /* ps-1*/ id, f01 from test where id > 0 for update'
    UPDATE_STTM = f'update /* ps-2 */ test set id = -id where current of {CURSOR_NAME} returning id'

    # Separate R/C transaction with short lock_timeout for the positioned update.
    update_tpb = tpb( access_mode = TraAccessMode.WRITE,
                      isolation = Isolation.READ_COMMITTED_RECORD_VERSION,
                      lock_timeout = 1)

    with connect(db_cfg_name, user = act.db.user, password = act.db.password) as con:
        tx2 = con.transaction_manager(update_tpb)
        tx2.begin()
        with con.cursor() as cur1, tx2.cursor() as cur2, con.cursor() as cur3:
            ps1, rs1, ps2, rs2, ps3, rs3 = None, None, None, None, None, None
            try:
                ps1 = cur1.prepare(SELECT_STTM)         # 1. [ticket, DS] Prepare statement 1 "select ... for update"
                ps1._istmt.set_cursor_name(CURSOR_NAME) # 2. [ticket, DS] Set cursor name for statement 1 // ~hack.
                # DO NOT use it because subsequent update statement will get 'deadlock / update conflict' and not able to start:
                #rs1 = cur1.execute(ps1)
                #cur1.set_cursor_name(CURSOR_NAME)

                # DS example: "// Prepare positioned update statement"
                ps2 = cur2.prepare(UPDATE_STTM)         # 3. [ticket, DS] Prepare statement 2 "update ... where current of <cursor name from step 2>"

                # DS .cpp: // fetch records from cursor and print them
                rs1 = cur1.execute(ps1)
                rs1.fetchall()

                # DS .cpp: // IStatement* stmt2 = att->prepare(&status, tra, 0, "select * from pos where a > 1 for update",
                ps3 = cur3.prepare(SELECT_STTM)         # 4. [ticket, DS] Prepare statement 3 similar to statement 1

                # NOTE: rs1.close() must run __BEFORE__ ps1.free() (hvlad recipe, 26.10.2024).
                rs1.close()                             # 5. [ticket, DS] Release statement 1 // see hvlad recipe, 26.10.2024
                ps1.free()

                # DS .cpp: updStmt->free(&status);
                ps2.free()                              # 6. [ticket, DS] Release statement 2 // see hvlad recipe, 26.10.2024

                # DS .cpp: stmt = stmt2
                ps3._istmt.set_cursor_name(CURSOR_NAME) # 7. [ticket, DS] Set cursor name to statement 3 as in step 2

                ps2 = cur2.prepare(UPDATE_STTM)         # 8. [ticket, DS] Prepare statement 2 again (it will be got from cache keeping reference to statement 1)

                rs3 = cur3.execute(ps3)
                rs3.fetchone()                          # 9. [ticket, DS] Run statement 3 and fetch one record

                # At step 10 you can get "Invalid handle" error or a crash if you swap steps 5 and 6.
                rs2 = cur2.execute(ps2)                 # 10. [ticket, DS] Execute statement 2
                data2 = rs2.fetchone()
                print('Changed ID:', data2[0])
                # print(f'{rs2.rowcount=}')
            except DatabaseError as e:
                print(e.__str__())
                print('gds codes:')
                for i in e.gds_codes:
                    print(i)
            finally:
                # Release in result-set-before-statement order for each pair.
                if rs1:
                    rs1.close()
                if ps1:
                    ps1.free()
                if rs2:
                    rs2.close()
                if ps2:
                    ps2.free()
                if rs3:
                    rs3.close()
                if ps3:
                    ps3.free()

    #---------------------------------------------

    act.expected_stdout = 'Changed ID: -1'
    act.stdout = capsys.readouterr().out
    assert act.clean_stdout == act.clean_expected_stdout
# Example in .cpp (provided by Dimitry Sibiryakov):
###################################################
#
# #include <memory>
# #include "ifaceExamples.h"
# static IMaster* master = fb_get_master_interface();
#
# int main()
# {
# int rc = 0;
#
# // status vector and main dispatcher
# ThrowStatusWrapper status(master->getStatus());
# IProvider* prov = master->getDispatcher();
# IUtil* utl = master->getUtilInterface();
#
# // declare pointers to required interfaces
# IAttachment* att = NULL;
# ITransaction* tra = NULL;
# IStatement* stmt = NULL;
# IMessageMetadata* meta = NULL;
# IMetadataBuilder* builder = NULL;
# IXpbBuilder* tpb = NULL;
#
# // Interface provides access to data returned by SELECT statement
# IResultSet* curs = NULL;
#
# try
# {
# // IXpbBuilder is used to access various parameters blocks used in API
# IXpbBuilder* dpb = NULL;
#
# // create DPB - use non-default page size 4Kb
# dpb = utl->getXpbBuilder(&status, IXpbBuilder::DPB, NULL, 0);
# dpb->insertString(&status, isc_dpb_user_name, "sysdba");
# dpb->insertString(&status, isc_dpb_password, "masterkey");
#
# // create empty database
# att = prov->attachDatabase(&status, "ctest", dpb->getBufferLength(&status),
# dpb->getBuffer(&status));
#
# dpb->dispose();
#
# printf("database attached.\n");
#
# att->execute(&status, nullptr, 0, "set debug option dsql_keep_blr = true", SAMPLES_DIALECT, nullptr, nullptr, nullptr, nullptr);
# // start read only transaction
# tpb = utl->getXpbBuilder(&status, IXpbBuilder::TPB, NULL, 0);
# tpb->insertTag(&status, isc_tpb_read_committed);
# tpb->insertTag(&status, isc_tpb_no_rec_version);
# tpb->insertTag(&status, isc_tpb_wait);
# tra = att->startTransaction(&status, tpb->getBufferLength(&status), tpb->getBuffer(&status));
#
# // prepare statement
# stmt = att->prepare(&status, tra, 0, "select * from pos where a > 1 for update",
# SAMPLES_DIALECT, IStatement::PREPARE_PREFETCH_METADATA);
#
# // get list of columns
# meta = stmt->getOutputMetadata(&status);
# unsigned cols = meta->getCount(&status);
# unsigned messageLength = meta->getMessageLength(&status);
#
# std::unique_ptr<char[]> buffer(new char[messageLength]);
#
# stmt->setCursorName(&status, "abc");
#
# // open cursor
# printf("Opening cursor...\n");
# curs = stmt->openCursor(&status, tra, NULL, NULL, meta, 0);
#
# // Prepare positioned update statement
# printf("Preparing update statement...\n");
# IStatement* updStmt = att->prepare(&status, tra, 0, "update pos set b=b+1 where current of abc",
# SAMPLES_DIALECT, 0);
#
# const unsigned char items[] = {isc_info_sql_exec_path_blr_text, isc_info_sql_explain_plan};
# unsigned char infoBuffer[32000];
# updStmt->getInfo(&status, sizeof items, items, sizeof infoBuffer, infoBuffer);
#
# IXpbBuilder* pb = utl->getXpbBuilder(&status, IXpbBuilder::INFO_RESPONSE, infoBuffer, sizeof infoBuffer);
# for (pb->rewind(&status); !pb->isEof(&status); pb->moveNext(&status))
# {
# switch (pb->getTag(&status))
# {
# case isc_info_sql_exec_path_blr_text:
# printf("BLR:\n%s\n", pb->getString(&status));
# break;
# case isc_info_sql_explain_plan:
# printf("Plan:\n%s\n", pb->getString(&status));
# break;
# case isc_info_truncated:
# printf(" truncated\n");
# // fall down...
# case isc_info_end:
# break;
# default:
# printf("Unexpected item %d\n", pb->getTag(&status));
# }
# }
# pb->dispose();
#
# // fetch records from cursor and print them
# for (int line = 0; curs->fetchNext(&status, buffer.get()) == IStatus::RESULT_OK; ++line)
# {
# printf("Fetched record %d\n", line);
# updStmt->execute(&status, tra, nullptr, nullptr, nullptr, nullptr);
# printf("Update executed\n");
# }
#
# IStatement* stmt2 = att->prepare(&status, tra, 0, "select * from pos where a > 1 for update",
# SAMPLES_DIALECT, IStatement::PREPARE_PREFETCH_METADATA);
#
# // close interfaces
# curs->close(&status);
# curs = NULL;
#
# stmt->free(&status);
# stmt = NULL;
#
# updStmt->free(&status);
#
# stmt = stmt2;
# stmt->setCursorName(&status, "abc");
#
# // open cursor
# printf("Opening cursor2...\n");
# curs = stmt->openCursor(&status, tra, NULL, NULL, meta, 0);
#
# // Prepare positioned update statement
# printf("Preparing update statement again...\n");
# updStmt = att->prepare(&status, tra, 0, "update pos set b=b+1 where current of abc",
# SAMPLES_DIALECT, 0);
#
# // fetch records from cursor and print them
# for (int line = 0; curs->fetchNext(&status, buffer.get()) == IStatus::RESULT_OK; ++line)
# {
# printf("Fetched record %d\n", line);
# updStmt->execute(&status, tra, nullptr, nullptr, nullptr, nullptr);
# printf("Update executed\n");
# }
#
# curs->close(&status);
# curs = NULL;
#
# stmt->free(&status);
# stmt = NULL;
#
# updStmt->free(&status);
#
# meta->release();
# meta = NULL;
#
# tra->commit(&status);
# tra = NULL;
#
# att->detach(&status);
# att = NULL;
# }
# catch (const FbException& error)
# {
# // handle error
# rc = 1;
#
# char buf[256];
# master->getUtilInterface()->formatStatus(buf, sizeof(buf), error.getStatus());
# fprintf(stderr, "%s\n", buf);
# }
#
# // release interfaces after error caught
# if (meta)
# meta->release();
# if (builder)
# builder->release();
# if (curs)
# curs->release();
# if (stmt)
# stmt->release();
# if (tra)
# tra->release();
# if (att)
# att->release();
# if (tpb)
# tpb->dispose();
#
# prov->release();
# status.dispose();
#
# return rc;
# }

239
tests/bugs/gh_8203_test.py Normal file
View File

@ -0,0 +1,239 @@
#coding:utf-8
"""
ID: issue-8203
ISSUE: https://github.com/FirebirdSQL/firebird/issues/8203
TITLE: MAKE_DBKEY can raise 'malformed string' for some table names
DESCRIPTION:
Test verifies ability to create table with random name for each of Unicode ranges
defined in https://jrgraphix.net/r/Unicode/, except following:
(0xD800, 0xDB7F), # High Surrogates
(0xDB80, 0xDBFF), # High Private Use Surrogates
(0xDC00, 0xDFFF), # Low Surrogates
Random name is generated for each range, with random length from scope NAME_MIN_LEN ... NAME_MAX_LEN scope.
Then we create table with such name and stored procedure that attempts to use make_dbkey() with 1st argument
equals to just created table.
This action is repeated REPEAT_CHECKS_FOR_SELECTED_UNICODE_RANGE times for each Unicode range.
Some characters from 'Basic Latin' are NOT included in any table names - see CHARS_TO_SKIP.
No error must raise for any of checked Unicode scopes.
Example of output when problem does exist:
iter=4 of REPEAT_CHECKS_FOR_SELECTED_UNICODE_RANGE=5: SUCCESS
range_name='Basic Latin', ..., table_random_unicode_name='}JIry@frnWdzb]5[:A=IomGozwyM*rmJ'
Error while parsing procedure SP_CHK's BLR
-Malformed string
err.gds_codes=(335544876, 335544849)
err.sqlcode=-901
err.sqlstate='2F000'
NOTES:
[11.08.2024] pzotov
Confirmed bug on 6.0.0.421, 5.0.1.1469
Checked on 6.0.0.423, 5.0.2.1477
"""
import pytest
from firebird.qa import *
from io import BytesIO
from firebird.driver import SrvRestoreFlag, DatabaseError
import locale
import random
db = db_factory()
act = python_act('db', substitutions=[('[ \t]+', ' ')])

#########################
### s e t t i n g s ###
#########################
# Characters excluded from generated table names (stripped out in test_1).
CHARS_TO_SKIP = set('<>|"\'^')
# Random name length is drawn from [NAME_MIN_LEN, NAME_MAX_LEN].
NAME_MIN_LEN = 32
NAME_MAX_LEN = 63
REPEAT_CHECKS_FOR_SELECTED_UNICODE_RANGE = 5 # duration: ~60"
#------------------------------------------------
def get_random_unicode(length, bound_points):
    """Return a random string of *length* characters from a Unicode range.

    Parameters:
        length: number of characters to generate (0 yields '').
        bound_points: (lo, hi) pair of code points; characters are drawn from
            the half-open range [lo, hi), matching the original range() usage.

    Based on: https://stackoverflow.com/questions/1477294/generate-random-utf-8-string-in-python
    """
    # Python-2 'unichr' fallback removed: on Python 3 (the only supported
    # runtime of this suite) chr() already yields Unicode characters, so the
    # old try/except NameError dance was dead code.
    alphabet = [chr(code_point) for code_point in range(bound_points[0], bound_points[1])]
    # random.choices does all k draws in a single call instead of a per-char loop.
    return ''.join(random.choices(alphabet, k=length))
#------------------------------------------------
@pytest.mark.version('>=5.0.2')
def test_1(act: Action, capsys):
    """For every Unicode range below: create a table with a random name from
    that range plus a procedure calling make_dbkey('<that name>', 0), repeated
    REPEAT_CHECKS_FOR_SELECTED_UNICODE_RANGE times. No DatabaseError ('malformed
    string' while parsing the procedure BLR) must ever be raised.
    """
    # https://jrgraphix.net/r/Unicode/
    # NOTE: surrogate ranges (0xD800-0xDFFF) are deliberately absent, see module docstring.
    UNICODE_RANGES_MAP = {
        (0x0020, 0x007F) : 'Basic Latin',
        (0x00A0, 0x00FF) : 'Latin-1 Supplement',
        (0x0100, 0x017F) : 'Latin Extended-A',
        (0x0180, 0x024F) : 'Latin Extended-B',
        (0x0400, 0x04FF) : 'Cyrillic',
        (0x0500, 0x052F) : 'Cyrillic Supplementary',
        (0x0300, 0x036F) : 'Combining Diacritical Marks',
        (0x0250, 0x02AF) : 'IPA Extensions',
        (0x0370, 0x03FF) : 'Greek and Coptic',
        (0x0530, 0x058F) : 'Armenian',
        (0x02B0, 0x02FF) : 'Spacing Modifier Letters',
        (0x0590, 0x05FF) : 'Hebrew',
        (0x0600, 0x06FF) : 'Arabic',
        (0x0700, 0x074F) : 'Syriac',
        (0x0780, 0x07BF) : 'Thaana',
        (0x0900, 0x097F) : 'Devanagari',
        (0x0980, 0x09FF) : 'Bengali',
        (0x0A00, 0x0A7F) : 'Gurmukhi',
        (0x0A80, 0x0AFF) : 'Gujarati',
        (0x0B00, 0x0B7F) : 'Oriya',
        (0x0B80, 0x0BFF) : 'Tamil',
        (0x0C00, 0x0C7F) : 'Telugu',
        (0x0C80, 0x0CFF) : 'Kannada',
        (0x0D00, 0x0D7F) : 'Malayalam',
        (0x0D80, 0x0DFF) : 'Sinhala',
        (0x0E00, 0x0E7F) : 'Thai',
        (0x0E80, 0x0EFF) : 'Lao',
        (0x0F00, 0x0FFF) : 'Tibetan',
        (0x1000, 0x109F) : 'Myanmar',
        (0x10A0, 0x10FF) : 'Georgian',
        (0x1100, 0x11FF) : 'Hangul Jamo',
        (0x1200, 0x137F) : 'Ethiopic',
        (0x13A0, 0x13FF) : 'Cherokee',
        (0x1400, 0x167F) : 'Unified Canadian Aboriginal Syllabics',
        (0x1680, 0x169F) : 'Ogham',
        (0x16A0, 0x16FF) : 'Runic',
        (0x1700, 0x171F) : 'Tagalog',
        (0x1720, 0x173F) : 'Hanunoo',
        (0x1740, 0x175F) : 'Buhid',
        (0x1760, 0x177F) : 'Tagbanwa',
        (0x1780, 0x17FF) : 'Khmer',
        (0x1800, 0x18AF) : 'Mongolian',
        (0x1900, 0x194F) : 'Limbu',
        (0x1950, 0x197F) : 'Tai Le',
        (0x19E0, 0x19FF) : 'Khmer Symbols',
        (0x1D00, 0x1D7F) : 'Phonetic Extensions',
        (0x1E00, 0x1EFF) : 'Latin Extended Additional',
        (0x1F00, 0x1FFF) : 'Greek Extended',
        (0x2000, 0x206F) : 'General Punctuation',
        (0x2070, 0x209F) : 'Superscripts and Subscripts',
        (0x20A0, 0x20CF) : 'Currency Symbols',
        (0x20D0, 0x20FF) : 'Combining Diacritical Marks for Symbols',
        (0x2100, 0x214F) : 'Letterlike Symbols',
        (0x2150, 0x218F) : 'Number Forms',
        (0x2190, 0x21FF) : 'Arrows',
        (0x2200, 0x22FF) : 'Mathematical Operators',
        (0x2300, 0x23FF) : 'Miscellaneous Technical',
        (0x2400, 0x243F) : 'Control Pictures',
        (0x2440, 0x245F) : 'Optical Character Recognition',
        (0x2460, 0x24FF) : 'Enclosed Alphanumerics',
        (0x2500, 0x257F) : 'Box Drawing',
        (0x2580, 0x259F) : 'Block Elements',
        (0x25A0, 0x25FF) : 'Geometric Shapes',
        (0x2600, 0x26FF) : 'Miscellaneous Symbols',
        (0x2700, 0x27BF) : 'Dingbats',
        (0x27C0, 0x27EF) : 'Miscellaneous Mathematical Symbols-A',
        (0x27F0, 0x27FF) : 'Supplemental Arrows-A',
        (0x2800, 0x28FF) : 'Braille Patterns',
        (0x2900, 0x297F) : 'Supplemental Arrows-B',
        (0x2980, 0x29FF) : 'Miscellaneous Mathematical Symbols-B',
        (0x2A00, 0x2AFF) : 'Supplemental Mathematical Operators',
        (0x2B00, 0x2BFF) : 'Miscellaneous Symbols and Arrows',
        (0x2E80, 0x2EFF) : 'CJK Radicals Supplement',
        (0x2F00, 0x2FDF) : 'Kangxi Radicals',
        (0x2FF0, 0x2FFF) : 'Ideographic Description Characters',
        (0x3000, 0x303F) : 'CJK Symbols and Punctuation',
        (0x3040, 0x309F) : 'Hiragana',
        (0x30A0, 0x30FF) : 'Katakana',
        (0x3100, 0x312F) : 'Bopomofo',
        (0x3130, 0x318F) : 'Hangul Compatibility Jamo',
        (0x3190, 0x319F) : 'Kanbun',
        (0x31A0, 0x31BF) : 'Bopomofo Extended',
        (0x31F0, 0x31FF) : 'Katakana Phonetic Extensions',
        (0x3200, 0x32FF) : 'Enclosed CJK Letters and Months',
        (0x3300, 0x33FF) : 'CJK Compatibility',
        (0x3400, 0x4DBF) : 'CJK Unified Ideographs Extension A',
        (0x4DC0, 0x4DFF) : 'Yijing Hexagram Symbols',
        (0x4E00, 0x9FFF) : 'CJK Unified Ideographs',
        (0xA000, 0xA48F) : 'Yi Syllables',
        (0xA490, 0xA4CF) : 'Yi Radicals',
        (0xAC00, 0xD7AF) : 'Hangul Syllables',
        (0xE000, 0xF8FF) : 'Private Use Area',
        (0xF900, 0xFAFF) : 'CJK Compatibility Ideographs',
        (0xFB00, 0xFB4F) : 'Alphabetic Presentation Forms',
        (0xFB50, 0xFDFF) : 'Arabic Presentation Forms-A',
        (0xFE00, 0xFE0F) : 'Variation Selectors',
        (0xFE20, 0xFE2F) : 'Combining Half Marks',
        (0xFE30, 0xFE4F) : 'CJK Compatibility Forms',
        (0xFE50, 0xFE6F) : 'Small Form Variants',
        (0xFE70, 0xFEFF) : 'Arabic Presentation Forms-B',
        (0xFF00, 0xFFEF) : 'Halfwidth and Fullwidth Forms',
        (0xFFF0, 0xFFFF) : 'Specials',
        (0x10000, 0x1007F) : 'Linear B Syllabary',
        (0x10080, 0x100FF) : 'Linear B Ideograms',
        (0x10100, 0x1013F) : 'Aegean Numbers',
        (0x10300, 0x1032F) : 'Old Italic',
        (0x10330, 0x1034F) : 'Gothic',
        (0x10380, 0x1039F) : 'Ugaritic',
        (0x10400, 0x1044F) : 'Deseret',
        (0x10450, 0x1047F) : 'Shavian',
        (0x10480, 0x104AF) : 'Osmanya',
        (0x10800, 0x1083F) : 'Cypriot Syllabary',
        (0x1D000, 0x1D0FF) : 'Byzantine Musical Symbols',
        (0x1D100, 0x1D1FF) : 'Musical Symbols',
        (0x1D300, 0x1D35F) : 'Tai Xuan Jing Symbols',
        (0x1D400, 0x1D7FF) : 'Mathematical Alphanumeric Symbols',
        (0x20000, 0x2A6DF) : 'CJK Unified Ideographs Extension B',
        (0x2F800, 0x2FA1F) : 'CJK Compatibility Ideographs Supplement',
        (0xE0000, 0xE007F) : 'Tags',
    }

    for bound_points, range_name in UNICODE_RANGES_MAP.items():
        # NOTE(review): loop variable 'iter' shadows the builtin iter(); harmless here.
        for iter in range(1,REPEAT_CHECKS_FOR_SELECTED_UNICODE_RANGE+1):
            table_random_unicode_name = get_random_unicode( random.randint(NAME_MIN_LEN, NAME_MAX_LEN), bound_points )
            # Remove characters listed in CHARS_TO_SKIP from the generated name.
            table_random_unicode_name = ''.join(c for c in table_random_unicode_name if c not in CHARS_TO_SKIP)
            # Statements are separated by '^'; double quotes inside the name are escaped by doubling.
            test_sql = f"""
                recreate table "{table_random_unicode_name.replace('"','""')}"(id int)
                ^
                create or alter procedure sp_chk as
                    declare id1 int;
                begin
                    select /* {range_name=} {iter=} */ id from "{table_random_unicode_name.replace('"','""')}" where rdb$db_key = make_dbkey('{table_random_unicode_name}', 0) into id1;
                end
                ^
            """
            # select id from "{table_random_unicode_name.replace('"','""')}" where rdb$db_key = make_dbkey('{table_random_unicode_name.replace("'","''")}', 0) into id1;
            expected_txt = f'{iter=} of {REPEAT_CHECKS_FOR_SELECTED_UNICODE_RANGE=}: SUCCESS'
            with act.db.connect(charset = 'utf-8') as con:
                try:
                    for line in test_sql.split('^'):
                        if (expr := line.strip()):
                            # NOTE(review): after split('^') a chunk can never equal '^',
                            # so the 'else: con.commit()' branch below is effectively dead;
                            # the final con.commit() after the loop does the real commit.
                            if expr != '^':
                                con.execute_immediate(expr)
                            else:
                                con.commit()
                    con.commit()
                    print(expected_txt)
                except DatabaseError as err:
                    # On failure, report enough context to reproduce: range, iteration, name.
                    print(f'{range_name=}, {iter=} of {REPEAT_CHECKS_FOR_SELECTED_UNICODE_RANGE=}, {table_random_unicode_name=}')
                    print(err)
                    print(f'{err.gds_codes=}')
                    print(f'{err.sqlcode=}')
                    print(f'{err.sqlstate=}')

            # Commented-out backup/restore cycle kept for reference:
            '''
            backup = BytesIO()
            with act.connect_server() as srv:
                srv.database.local_backup(database=act.db.db_path, backup_stream=backup)
                backup.seek(0)
                srv.database.local_restore(backup_stream=backup, database=act.db.db_path, flags = SrvRestoreFlag.REPLACE)
            '''

            # Per-iteration check: stdout must contain only the SUCCESS line.
            act.expected_stdout = expected_txt
            act.stdout = capsys.readouterr().out
            assert act.clean_stdout == act.clean_expected_stdout
            act.reset()

View File

@ -0,0 +1,50 @@
#coding:utf-8
"""
ID: issue-8211
ISSUE: https://github.com/FirebirdSQL/firebird/issues/8211
TITLE: DATEADD truncates milliseconds for month and year deltas.
DESCRIPTION:
NOTES:
[11.08.2024] pzotov
Confirmed bug on 6.0.0.423
Checked on intermediate snapshots: 6.0.0.431-16bb157; 5.0.2.1477-c71eb20; 4.0.6.3141
"""
import pytest
from firebird.qa import *
db = db_factory()
test_script = """
set list on;
select
''||dateadd(0 millisecond to cast('01.01.2001 01:01:01.1111' as timestamp)) a_millisecond,
''||dateadd(0 second to cast('01.01.2001 01:01:01.1111' as timestamp)) a_second,
''||dateadd(0 minute to cast('01.01.2001 01:01:01.1111' as timestamp)) a_minute,
''||dateadd(0 hour to cast('01.01.2001 01:01:01.1111' as timestamp)) a_hour,
''||dateadd(0 day to cast('01.01.2001 01:01:01.1111' as timestamp)) a_day,
''||dateadd(0 week to cast('01.01.2001 01:01:01.1111' as timestamp)) a_week,
''||dateadd(0 month to cast('01.01.2001 01:01:01.1111' as timestamp)) a_month,
''||dateadd(0 year to cast('01.01.2001 01:01:01.1111' as timestamp)) a_year
from rdb$database;
"""
act = isql_act('db', test_script, substitutions=[('[ \t]+', ' ')])
expected_stdout = """
A_MILLISECOND 2001-01-01 01:01:01.1111
A_SECOND 2001-01-01 01:01:01.1111
A_MINUTE 2001-01-01 01:01:01.1111
A_HOUR 2001-01-01 01:01:01.1111
A_DAY 2001-01-01 01:01:01.1111
A_WEEK 2001-01-01 01:01:01.1111
A_MONTH 2001-01-01 01:01:01.1111
A_YEAR 2001-01-01 01:01:01.1111
"""
@pytest.mark.version('>=4.0.6')
def test_1(act: Action):
    """Run the DATEADD script via isql and compare full (combined) output."""
    act.expected_stdout = expected_stdout
    act.execute(combine_output = True)
    assert act.clean_stdout == act.clean_expected_stdout

276
tests/bugs/gh_8213_test.py Normal file
View File

@ -0,0 +1,276 @@
#coding:utf-8
"""
ID: issue-8213
ISSUE: https://github.com/FirebirdSQL/firebird/issues/8213
TITLE: WHEN NOT MATCHED BY SOURCE - does not work with a direct table as source
DESCRIPTION:
NOTES:
[20.08.2024] pzotov
Checked on 6.0.0.438-d40d01b, 5.0.2.1479-47aa3b1
"""
import pytest
from firebird.qa import *
db = db_factory()
test_script = """
set list on;
recreate procedure sp_main as begin end;
recreate table test (id smallint);
recreate generator g;
recreate table test (
id smallint primary key,
typ smallint,
cat smallint
);
commit;
set term ^ ;
create or alter trigger test_bi0 for test active before insert position 0 as
begin
new.id = coalesce(new.id, gen_id(g, 1));
end
^
set term ; ^
commit;
insert into test(typ, cat) values(1, 10);
insert into test(typ, cat) values(1, 20);
insert into test(typ, cat) values(2, 10);
insert into test(typ, cat) values(2, 30);
commit;
set term ^;
recreate procedure sp_main (
a_insert_using_sp boolean,
a_delete_using_sp boolean,
a_source_typ smallint,
a_target_typ smallint
) as
declare procedure inner_sp_data_for_source_typ
returns (
id smallint,
typ smallint,
cat smallint
) as
begin
for select t.id,
t.typ,
t.cat
from test t
where t.typ = :a_source_typ
into :id,
:typ,
:cat
do
begin
suspend;
end
end
begin
if ( a_insert_using_sp or :a_delete_using_sp ) then
begin
if (a_insert_using_sp) then
merge into test t
using inner_sp_data_for_source_typ s
on t.typ = :a_target_typ and
t.cat = s.cat
when not matched by target then
insert (typ, cat) values (:a_target_typ, s.cat);
else
merge into test t
using test s
on t.typ = :a_target_typ and
t.cat = s.cat
when not matched by target then
insert (typ, cat) values (:a_target_typ, s.cat);
if (a_delete_using_sp) then
merge into test t
using inner_sp_data_for_source_typ s on t.cat = s.cat
when not matched by source and t.typ = :a_target_typ then
delete;
else
merge into test t
using test s on t.cat = s.cat
when not matched by source and t.typ = :a_target_typ then
delete;
end
else
begin
-- works as expected
merge into test t
using ( select t.id,
t.typ,
t.cat
from test t
where t.typ = :a_source_typ
) s
on t.typ = :a_target_typ and
t.cat = s.cat
when not matched by target then
insert (typ, cat) values (:a_target_typ, s.cat);
merge into test t
using ( select t.id,
t.typ,
t.cat
from test t
where t.typ = :a_source_typ
) s
on t.cat = s.cat
when not matched by source and t.typ = :a_target_typ then
delete;
end
end
^
set term ;^
commit;
-- select * from test;
set count on;
alter sequence g restart with 1000;
execute procedure sp_main(true, true, 1, 10);
select 'INS:SP, DEL:SP' msg, t.id, t.typ, t.cat from test t order by id;
rollback;
alter sequence g restart with 1000;
execute procedure sp_main(true, false, 1, 10);
select 'INS:SP, DEL:TAB' msg, t.id, t.typ, t.cat from test t order by id;
rollback;
alter sequence g restart with 1000;
execute procedure sp_main(false, true, 1, 10);
select 'INS:TAB, DEL:SP' msg, t.id, t.typ, t.cat from test t order by id;
rollback;
alter sequence g restart with 1000;
execute procedure sp_main(false, false, 1, 10);
select 'INS:TAB, DEL:TAB' msg, t.id, t.typ, t.cat from test t order by id;
rollback;
"""
act = isql_act('db', test_script)
expected_stdout = """
MSG INS:SP, DEL:SP
ID 1
TYP 1
CAT 10
MSG INS:SP, DEL:SP
ID 2
TYP 1
CAT 20
MSG INS:SP, DEL:SP
ID 3
TYP 2
CAT 10
MSG INS:SP, DEL:SP
ID 4
TYP 2
CAT 30
MSG INS:SP, DEL:SP
ID 1000
TYP 10
CAT 10
MSG INS:SP, DEL:SP
ID 1001
TYP 10
CAT 20
Records affected: 6
MSG INS:SP, DEL:TAB
ID 1
TYP 1
CAT 10
MSG INS:SP, DEL:TAB
ID 2
TYP 1
CAT 20
MSG INS:SP, DEL:TAB
ID 3
TYP 2
CAT 10
MSG INS:SP, DEL:TAB
ID 4
TYP 2
CAT 30
MSG INS:SP, DEL:TAB
ID 1000
TYP 10
CAT 10
MSG INS:SP, DEL:TAB
ID 1001
TYP 10
CAT 20
Records affected: 6
MSG INS:TAB, DEL:SP
ID 1
TYP 1
CAT 10
MSG INS:TAB, DEL:SP
ID 2
TYP 1
CAT 20
MSG INS:TAB, DEL:SP
ID 3
TYP 2
CAT 10
MSG INS:TAB, DEL:SP
ID 4
TYP 2
CAT 30
MSG INS:TAB, DEL:SP
ID 1000
TYP 10
CAT 10
MSG INS:TAB, DEL:SP
ID 1001
TYP 10
CAT 20
MSG INS:TAB, DEL:SP
ID 1002
TYP 10
CAT 10
Records affected: 7
MSG INS:TAB, DEL:TAB
ID 1
TYP 1
CAT 10
MSG INS:TAB, DEL:TAB
ID 2
TYP 1
CAT 20
MSG INS:TAB, DEL:TAB
ID 3
TYP 2
CAT 10
MSG INS:TAB, DEL:TAB
ID 4
TYP 2
CAT 30
MSG INS:TAB, DEL:TAB
ID 1000
TYP 10
CAT 10
MSG INS:TAB, DEL:TAB
ID 1001
TYP 10
CAT 20
Records affected: 6
"""
@pytest.mark.version('>=5.0.2')
def test_1(act: Action):
    """Run the MERGE ... WHEN NOT MATCHED BY SOURCE script and compare full output."""
    act.expected_stdout = expected_stdout
    act.execute(combine_output = True)
    assert act.clean_stdout == act.clean_expected_stdout

441
tests/bugs/gh_8214_test.py Normal file
View File

@ -0,0 +1,441 @@
#coding:utf-8
"""
ID: issue-8214
ISSUE: 8214
TITLE: Incorrect result of index list scan for a composite index, the second segment of which is a text field with COLLATE UNICODE_CI
DESCRIPTION:
Test adds check for:
* collation with attributes 'case insensitive accent insensitive';
* null values of some records (the must not appear in any query);
* non-ascii values;
* both asc and desc indices - results must be identical;
* miscelaneous predicates
NOTES:
[31.10.2024] pzotov
Confirmed bug on 5.0.2.1547.
Checked on 5.0.2.1551, 6.0.0.515.
"""
import pytest
from firebird.qa import *
db = db_factory(charset = 'utf8')
test_script = """
set bail on;
set list on;
create collation txt_coll_ci for utf8 from unicode case insensitive;
create collation txt_coll_ci_ai for utf8 from unicode case insensitive accent insensitive;
recreate table mans (
id bigint not null,
code_sex smallint not null,
name_1 varchar(50) collate txt_coll_ci,
name_2 varchar(50) collate txt_coll_ci_ai,
constraint pk_mans primary key(id)
);
commit;
insert into mans (id, code_sex, name_1, name_2) values (1, 1, 'BoB', 'BØb');
insert into mans (id, code_sex, name_1, name_2) values (2, 1, 'jOhN', 'jŐhŇ');
insert into mans (id, code_sex, name_1, name_2) values (3, 2, 'BArbArA', 'BÄŔBĄŕă');
insert into mans (id, code_sex, name_1, name_2) values (4, 2, 'aNNA', 'âŃŃÁ');
insert into mans (id, code_sex, name_1, name_2) values (5, 1, null, null);
insert into mans (id, code_sex, name_1, name_2) values (6, 2, null, null);
insert into mans (id, code_sex, name_1, name_2) values (7, 1, 'danIEL', 'ĐÁniel');
insert into mans (id, code_sex, name_1, name_2) values (8, 2, 'debora', 'ĐeborÁ');
commit;
create index mans_sex_name_1_asc on mans(code_sex, name_1);
create index mans_sex_name_2_asc on mans(code_sex, name_2);
create view v_test_1 as
select msg, id, name_1
from (
select 'chk-a' as msg, id, code_sex, name_1
from mans where code_sex between 1 and 2 and name_1 starts 'b'
UNION ALL
select 'chk-b' as msg, id, code_sex, name_1
from mans where code_sex > 0 and code_sex < 3 and name_1 starts 'b'
UNION ALL
select 'chk-c' as msg, id, code_sex, name_1
from mans where (code_sex =1 or code_sex =2) and name_1 starts 'b'
UNION ALL
select 'chk-d' as msg, id, code_sex, name_1
from mans where code_sex in(1,2) and name_1 starts 'b'
UNION ALL
select 'chk-e' as msg, id, code_sex, name_1
from mans where code_sex in(1,2) and name_1 like 'b%'
UNION ALL
select 'chk-f' as msg, id, code_sex, name_1
from mans where code_sex in(1,2) and name_1 similar to 'b%'
UNION ALL
select 'chk-g' as msg, id, code_sex, name_1
from mans where code_sex in(1,2) and name_1 in ('boB', 'barbarA')
UNION ALL
select 'chk-h' as msg, id, code_sex, name_1
from mans where code_sex in(1,2) and (name_1 is not distinct from 'boB' or name_1 is not distinct from 'barbarA')
UNION ALL
select 'chk-i' as msg, id, code_sex, name_1
from mans where code_sex in(1,2) and (name_1 >= 'D' and name_1 <= 'E')
)
order by msg, id
;
create view v_test_2 as
select msg, id, name_2
from (
select 'chk-a' as msg, id, code_sex, name_2
from mans where code_sex between 1 and 2 and name_2 starts 'b'
UNION ALL
select 'chk-b' as msg, id, code_sex, name_2
from mans where code_sex > 0 and code_sex < 3 and name_2 starts 'b'
UNION ALL
select 'chk-c' as msg, id, code_sex, name_2
from mans where (code_sex =1 or code_sex =2) and name_2 starts 'b'
UNION ALL
select 'chk-d' as msg, id, code_sex, name_2
from mans where code_sex in(1,2) and name_2 starts 'b'
UNION ALL
select 'chk-e' as msg, id, code_sex, name_2
from mans where code_sex in(1,2) and name_2 like 'b%'
UNION ALL
select 'chk-f' as msg, id, code_sex, name_2
from mans where code_sex in(1,2) and name_2 similar to 'b%'
UNION ALL
select 'chk-g' as msg, id, code_sex, name_2
from mans where code_sex in(1,2) and name_2 in ('boB', 'barbarA')
UNION ALL
select 'chk-h' as msg, id, code_sex, name_2
from mans where code_sex in(1,2) and (name_2 is not distinct from 'boB' or name_2 is not distinct from 'barbarA')
UNION ALL
select 'chk-i' as msg, id, code_sex, name_2
from mans where code_sex in(1,2) and (name_2 >= 'D' and name_2 <= 'E')
)
order by msg, id
;
select * from v_test_1;
select * from v_test_2;
commit;
-----------------------------------------------------------
alter index mans_sex_name_1_asc inactive;
alter index mans_sex_name_2_asc inactive;
create descending index mans_sex_name_1_dec on mans(code_sex, name_1);
create descending index mans_sex_name_2_dec on mans(code_sex, name_2);
commit;
select * from v_test_1;
select * from v_test_2;
"""
# Collapse runs of spaces/tabs so layout differences do not affect the comparison.
act = isql_act('db', test_script, substitutions = [ ('[ \t]+',' ') ])
expected_stdout = """
MSG chk-a
ID 1
NAME_1 BoB
MSG chk-a
ID 3
NAME_1 BArbArA
MSG chk-b
ID 1
NAME_1 BoB
MSG chk-b
ID 3
NAME_1 BArbArA
MSG chk-c
ID 1
NAME_1 BoB
MSG chk-c
ID 3
NAME_1 BArbArA
MSG chk-d
ID 1
NAME_1 BoB
MSG chk-d
ID 3
NAME_1 BArbArA
MSG chk-e
ID 1
NAME_1 BoB
MSG chk-e
ID 3
NAME_1 BArbArA
MSG chk-f
ID 1
NAME_1 BoB
MSG chk-f
ID 3
NAME_1 BArbArA
MSG chk-g
ID 1
NAME_1 BoB
MSG chk-g
ID 3
NAME_1 BArbArA
MSG chk-h
ID 1
NAME_1 BoB
MSG chk-h
ID 3
NAME_1 BArbArA
MSG chk-i
ID 7
NAME_1 danIEL
MSG chk-i
ID 8
NAME_1 debora
MSG chk-a
ID 1
NAME_2 BØb
MSG chk-a
ID 3
NAME_2 BÄŔBĄŕă
MSG chk-b
ID 1
NAME_2 BØb
MSG chk-b
ID 3
NAME_2 BÄŔBĄŕă
MSG chk-c
ID 1
NAME_2 BØb
MSG chk-c
ID 3
NAME_2 BÄŔBĄŕă
MSG chk-d
ID 1
NAME_2 BØb
MSG chk-d
ID 3
NAME_2 BÄŔBĄŕă
MSG chk-e
ID 1
NAME_2 BØb
MSG chk-e
ID 3
NAME_2 BÄŔBĄŕă
MSG chk-f
ID 1
NAME_2 BØb
MSG chk-f
ID 3
NAME_2 BÄŔBĄŕă
MSG chk-g
ID 1
NAME_2 BØb
MSG chk-g
ID 3
NAME_2 BÄŔBĄŕă
MSG chk-h
ID 1
NAME_2 BØb
MSG chk-h
ID 3
NAME_2 BÄŔBĄŕă
MSG chk-i
ID 7
NAME_2 ĐÁniel
MSG chk-i
ID 8
NAME_2 ĐeborÁ
MSG chk-a
ID 1
NAME_1 BoB
MSG chk-a
ID 3
NAME_1 BArbArA
MSG chk-b
ID 1
NAME_1 BoB
MSG chk-b
ID 3
NAME_1 BArbArA
MSG chk-c
ID 1
NAME_1 BoB
MSG chk-c
ID 3
NAME_1 BArbArA
MSG chk-d
ID 1
NAME_1 BoB
MSG chk-d
ID 3
NAME_1 BArbArA
MSG chk-e
ID 1
NAME_1 BoB
MSG chk-e
ID 3
NAME_1 BArbArA
MSG chk-f
ID 1
NAME_1 BoB
MSG chk-f
ID 3
NAME_1 BArbArA
MSG chk-g
ID 1
NAME_1 BoB
MSG chk-g
ID 3
NAME_1 BArbArA
MSG chk-h
ID 1
NAME_1 BoB
MSG chk-h
ID 3
NAME_1 BArbArA
MSG chk-i
ID 7
NAME_1 danIEL
MSG chk-i
ID 8
NAME_1 debora
MSG chk-a
ID 1
NAME_2 BØb
MSG chk-a
ID 3
NAME_2 BÄŔBĄŕă
MSG chk-b
ID 1
NAME_2 BØb
MSG chk-b
ID 3
NAME_2 BÄŔBĄŕă
MSG chk-c
ID 1
NAME_2 BØb
MSG chk-c
ID 3
NAME_2 BÄŔBĄŕă
MSG chk-d
ID 1
NAME_2 BØb
MSG chk-d
ID 3
NAME_2 BÄŔBĄŕă
MSG chk-e
ID 1
NAME_2 BØb
MSG chk-e
ID 3
NAME_2 BÄŔBĄŕă
MSG chk-f
ID 1
NAME_2 BØb
MSG chk-f
ID 3
NAME_2 BÄŔBĄŕă
MSG chk-g
ID 1
NAME_2 BØb
MSG chk-g
ID 3
NAME_2 BÄŔBĄŕă
MSG chk-h
ID 1
NAME_2 BØb
MSG chk-h
ID 3
NAME_2 BÄŔBĄŕă
MSG chk-i
ID 7
NAME_2 ĐÁniel
MSG chk-i
ID 8
NAME_2 ĐeborÁ
"""
@pytest.mark.version('>=5.0.2')
def test_1(act: Action):
    """Run the test script and verify combined stdout+stderr equals the expected text."""
    act.expected_stdout = expected_stdout
    # combine_output = True: stderr is merged into stdout so error messages are compared too.
    act.execute(combine_output = True)
    assert act.clean_stdout == act.clean_expected_stdout

145
tests/bugs/gh_8219_test.py Normal file
View File

@ -0,0 +1,145 @@
#coding:utf-8
"""
ID: issue-8219
ISSUE: https://github.com/FirebirdSQL/firebird/issues/8219
TITLE: Database creation in 3.0.12, 4.0.5 and 5.0.1 slower than in previous releases
DESCRIPTION:
We can estimate performance by comparing the time spent to create a DB vs the result of some crypt function.
Function crypt_hash(<string> using SHA512) has been selected for that because of its notable CPU consumption.
Stored procedure SP_GEN_HASH is created for evaluation of the crypt hash; it will run a loop N_HASH_EVALUATE_COUNT times.
Duration for each measure is difference between psutil.Process(fb_pid).cpu_times() counters.
We do <N_MEASURES> times call of SP and create_database(), with adding results to map.
Finally, we get ratio between medians of these measures (see 'median_ratio')
Test is considered as passed if median_ratio less than threshold <MAX_RATIO>.
NOTES:
[05.09.2024] pzotov.
1. Confirmed problem on snapshots before 20-aug-2024.
Medians ratio on Windows:
1. Before fix:
6.0.0.423: 0.39; 6.0.0.436: 0.39; 6.0.0.437: 0.35;
5.0.1.1464: 0.42; 5.0.1.1469: 0.39; 5.0.1.1479: 0.35
4.0.5.3136: 0.42; 4.0.6.3142: 0.39
2. After fix ratio reduced to ~0.25:
6.0.0.438: 0.21; 6.0.0.442: 0.21; 6.0.0.438: 0.21; 6.0.0.442: 0.21; 6.0.0.450: 0.24
5.0.2.1481: 0.25; 5.0.2.1482: 0.21; 5.0.2.1493: 0.22
4.0.6.3144: 0.25; 4.0.6.3149: 0.29
Medians ratio on Linux:
1. Before fix:
6.0.0.397-c734c96: 0.48; 6.0.0.438-088b529: 0.49
2. After fix ratio reduced to ~0.25:
6.0.0.441-75042b5: 0.23
5.0.2.1481-fc71044: 0.24
4.0.6.3144-5a3b718: 0.27
2. Test DB must NOT have charset = utf8, otherwise 'implementation limit exceeded' will raise; win1251 was selected for work.
3. Test can be used only for ServerMode = Super or SuperClassic
(because in CS a new process is made and we have no value of cpu_times() *before* DB creation).
"""
import os
import psutil
import pytest
from firebird.qa import *
from firebird.driver import driver_config, create_database, NetProtocol
from pathlib import Path
#--------------------------------------------------------------------
def median(lst):
    """Return the median of *lst*, or None for an empty sequence.

    For an odd number of items the middle element of the sorted data is
    returned unchanged; for an even number, the mean of the two central
    elements (always a float). Matches the original behavior exactly.
    """
    # Idiom fix: the original selected a branch via tuple indexing,
    # (even_expr, odd_expr)[n % 2], which evaluates BOTH expressions and
    # obscures intent; explicit branches are clearer and do less work.
    if not lst:
        return None
    s = sorted(lst)
    n = len(s)
    mid = n // 2
    if n % 2:
        return s[mid]
    return (s[mid - 1] + s[mid]) / 2.0
#--------------------------------------------------------------------
###########################
### S E T T I N G S ###
###########################
# How many times we create databases
N_MEASURES = 31
# How many iterations must be done for hash evaluation:
N_HASH_EVALUATE_COUNT = 3000
# Maximal value for ratio between maximal and minimal medians
#
#############################################
MAX_RATIO = 0.30 if os.name == 'nt' else 0.33
#############################################
init_script = \
f'''
set term ^;
create or alter procedure sp_gen_hash (n_cnt int) as
declare v_hash varbinary(64);
declare s varchar(32765);
begin
s = lpad('', 32765, uuid_to_char(gen_uuid()));
while (n_cnt > 0) do
begin
v_hash = crypt_hash(s using SHA512);
n_cnt = n_cnt - 1;
end
end
^
commit
^
'''
# DB charset must NOT be utf8 ('implementation limit exceeded' would raise); win1251 is used.
db = db_factory(init = init_script, charset = 'win1251')
act = python_act('db')
# Temporary database file that is created anew on every measurement iteration.
tmp_fdb = temp_file('tmp_gh_8219.tmp')
# Expected output when the medians ratio does not exceed MAX_RATIO.
expected_stdout = """
Medians ratio: acceptable
"""
@pytest.mark.version('>=4.0.5')
def test_1(act: Action, tmp_fdb: Path, capsys):
    """Compare CPU cost of DB creation vs a fixed crypt_hash workload; fail if ratio > MAX_RATIO."""
    # In Classic mode each attachment runs in its own process, so cpu_times() of a
    # single server PID cannot bracket the database-creation work (see NOTES).
    if act.vars['server-arch'].lower() == 'classic':
        pytest.skip('Can be used only for SS / SC.')
    srv_cfg = driver_config.register_server(name = 'test_srv_gh_8219', config = '')
    db_cfg_name = 'tmp_8219'
    db_cfg_object = driver_config.register_database(name = db_cfg_name)
    db_cfg_object.server.value = srv_cfg.name
    db_cfg_object.protocol.value = NetProtocol.INET
    db_cfg_object.database.value = str(tmp_fdb)
    with act.db.connect() as con:
        cur=con.cursor()
        # PID of the server process serving this attachment (single process under SS/SC).
        cur.execute('select mon$server_pid as p from mon$attachments where mon$attachment_id = current_connection')
        fb_pid = int(cur.fetchone()[0])
        # Keyed by (<measure-kind>, <iteration>) -> user-mode CPU seconds consumed.
        times_map = {}
        for i in range(0, N_MEASURES):
            # --- cost of the hash-evaluation workload ---
            fb_info_init = psutil.Process(fb_pid).cpu_times()
            cur.callproc( 'sp_gen_hash', (N_HASH_EVALUATE_COUNT,) )
            fb_info_curr = psutil.Process(fb_pid).cpu_times()
            # Clamp to a tiny positive value so later median/ratio math cannot divide by zero.
            times_map[ 'hash_eval', i ] = max(fb_info_curr.user - fb_info_init.user, 0.000001)
            # --- cost of creating (and overwriting) a database ---
            fb_info_init = psutil.Process(fb_pid).cpu_times()
            with create_database(db_cfg_name, user = act.db.user, password = act.db.password, overwrite = True) as dbc:
                pass
            fb_info_curr = psutil.Process(fb_pid).cpu_times()
            times_map[ 'db_create', i ] = max(fb_info_curr.user - fb_info_init.user, 0.000001)
    sp_gen_hash_median = median([v for k,v in times_map.items() if k[0] == 'hash_eval'])
    sp_db_create_median = median([v for k,v in times_map.items() if k[0] == 'db_create'])
    # Medians (not means) are used so a few noisy iterations do not skew the result.
    median_ratio = sp_db_create_median / sp_gen_hash_median
    print( 'Medians ratio: ' + ('acceptable' if median_ratio <= MAX_RATIO else '/* perf_issue_tag */ POOR: %s, more than threshold: %s' % ( '{:9g}'.format(median_ratio), '{:9g}'.format(MAX_RATIO) ) ) )
    if median_ratio > MAX_RATIO:
        # Dump raw per-iteration timings to help diagnose a failing run.
        print(f'CPU times for each of {N_MEASURES} measures:')
        for sp_name in ('hash_eval', 'db_create', ):
            print(f'{sp_name=}:')
            for p in [v for k,v in times_map.items() if k[0] == sp_name]:
                print(p)
    act.expected_stdout = expected_stdout
    act.stdout = capsys.readouterr().out
    assert act.clean_stdout == act.clean_expected_stdout

View File

@ -0,0 +1,38 @@
#coding:utf-8
"""
ID: issue-8221
ISSUE: https://github.com/FirebirdSQL/firebird/issues/8221
TITLE: Crash when MAKE_DBKEY() is called with 0 or 1 arguments
DESCRIPTION:
NOTES:
[20.08.2024] pzotov
Confirmed crash on 6.0.0.438-d40d01b (dob: 20.08.2024 04:44).
Checked on 6.0.0.438-d9f9b28, 5.0.2.1479-adfe97a, 4.0.6.3142-984ccb9
"""
import pytest
from firebird.qa import *
db = db_factory()

# 'set bail OFF' lets isql continue past the first expected error so both
# faulty MAKE_DBKEY calls are exercised in one run.
test_script = """
set bail OFF;
select 1 from rdb$database where rdb$db_key = make_dbkey();
select 1 from rdb$database where rdb$db_key = make_dbkey('RDB$DATABASE');
"""

# Collapse runs of whitespace so layout differences do not affect comparison.
act = isql_act('db', test_script, substitutions=[ ('[ \\t]+', ' ') ])
@pytest.mark.version('>=4.0.6')
def test_1(act: Action):
    """MAKE_DBKEY with 0 or 1 arguments must raise 'could not be matched', not crash the server."""
    expected_stdout = f"""
Statement failed, SQLSTATE = 39000
function MAKE_DBKEY could not be matched
Statement failed, SQLSTATE = 39000
function MAKE_DBKEY could not be matched
"""
    act.expected_stdout = expected_stdout
    # combine_output = True: error messages arrive on stderr and must be part of the comparison.
    act.execute(combine_output = True)
    assert act.clean_stdout == act.clean_expected_stdout

Some files were not shown because too many files have changed in this diff Show More