
Fixes for FB 4

Pavel Císař 2021-11-09 11:01:26 +01:00
parent e754587c22
commit b458cecb1e
32 changed files with 2826 additions and 2617 deletions


@ -74,6 +74,9 @@ def pytest_addoption(parser, pluginmanager):
def pytest_report_header(config):
return ["Firebird:",
f" root: {_vars_['root']}",
f" databases: {_vars_['databases']}",
f" backups: {_vars_['backups']}",
f" driver configuration: {_vars_['firebird-config']}",
f" server: {_vars_['server']}",
f" protocol: {_vars_['protocol']}",
@ -120,7 +123,6 @@ def pytest_configure(config):
path = config.rootpath / 'backups'
_vars_['backups'] = path if path.is_dir() else config.rootpath
_vars_['server'] = config.getoption('server')
_vars_['bin-dir'] = config.getoption('bin_dir')
_vars_['protocol'] = config.getoption('protocol')
_vars_['save-output'] = config.getoption('save_output')
srv_conf = driver_config.get_server(_vars_['server'])
@ -131,17 +133,25 @@ def pytest_configure(config):
password=_vars_['password']) as srv:
_vars_['version'] = parse(srv.info.version.replace('-dev', ''))
_vars_['home-dir'] = Path(srv.info.home_directory)
if bindir := config.getoption('bin_dir'):
_vars_['bin-dir'] = Path(bindir)
else:
bindir = _vars_['home-dir'] / 'bin'
if not bindir.exists():
bindir = _vars_['home-dir']
_vars_['bin-dir'] = bindir
_vars_['lock-dir'] = Path(srv.info.lock_directory)
_vars_['bin-dir'] = Path(bindir) if bindir else _vars_['home-dir']
_vars_['security-db'] = Path(srv.info.security_database)
_vars_['arch'] = srv.info.architecture
if _vars_['bin-dir'] is None:
path = _vars_['home-dir'] / 'bin'
if path.is_dir():
_vars_['bin-dir'] = path
else:
pytest.exit("Path to binary tools not determined")
else:
_vars_['bin-dir'] = Path(_vars_['bin-dir'])
#if _vars_['bin-dir'] is None:
#path = _vars_['home-dir'] / 'bin'
#if path.is_dir():
#_vars_['bin-dir'] = path
#else:
#pytest.exit("Path to binary tools not determined")
#else:
#_vars_['bin-dir'] = Path(_vars_['bin-dir'])
# tools
for tool in ['isql', 'gbak', 'nbackup', 'gstat', 'gfix', 'gsec']:
set_tool(tool)
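For reference, a minimal standalone sketch of the binary-directory fallback introduced above; the helper name resolve_bin_dir is illustrative and not part of the plugin:

from pathlib import Path
from typing import Optional

def resolve_bin_dir(cli_bin_dir: Optional[str], home_dir: Path) -> Path:
    # An explicitly supplied --bin-dir option always wins.
    if cli_bin_dir:
        return Path(cli_bin_dir)
    # Otherwise try <home>/bin, falling back to the server home directory itself.
    candidate = home_dir / 'bin'
    return candidate if candidate.exists() else home_dir

Called as resolve_bin_dir(config.getoption('bin_dir'), _vars_['home-dir']), this mirrors how _vars_['bin-dir'] is assigned above.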
@ -154,17 +164,17 @@ def pytest_collection_modifyitems(session, config, items):
for item in items:
if 'slow' in item.keywords and not _vars_['runslow']:
item.add_marker(skip_slow)
for platforms in [mark.args for mark in item.iter_markers(name="platform")]:
if _platform not in platforms:
item.add_marker(skip_platform)
# Deselect tests not applicable to tested engine version
# Deselect tests not applicable to tested engine version and platform
selected = []
deselected = []
for item in items:
platform_ok = True
for platforms in [mark.args for mark in item.iter_markers(name="platform")]:
platform_ok = _platform in platforms
versions = [mark.args for mark in item.iter_markers(name="version")]
if versions:
spec = SpecifierSet(','.join(list(versions[0])))
if _vars_['version'] in spec:
if platform_ok and _vars_['version'] in spec:
selected.append(item)
else:
deselected.append(item)
@ -189,12 +199,15 @@ class Database:
user: str=None, password: str=None):
self.db_path: Path = path / filename
self.dsn: str = None
self.io_enc = 'utf8'
if _vars_['host']:
self.dsn = f"{_vars_['host']}:{str(self.db_path)}"
else:
self.dsn = str(self.db_path)
self.subs = {'temp_directory': str(path / 'x')[:-1], 'database_location': str(path / 'x')[:-1],
'DATABASE_PATH': str(path / 'x')[:-1], 'DSN': self.dsn,
self.subs = {'temp_directory': str(path / 'x')[:-1],
'database_location': str(path / 'x')[:-1],
'DATABASE_PATH': str(path / 'x')[:-1],
'DSN': self.dsn,
'files_location': str(_vars_['root'] / 'files'),
'backup_location': str(_vars_['root'] / 'backups'),
'suite_database_location': str(_vars_['root'] / 'databases'),
@ -271,10 +284,11 @@ class Database:
charset = charset.upper()
else:
charset = 'NONE'
self.io_enc = CHARSET_MAP[charset]
result = run([_vars_['isql'], '-ch', charset, '-user', self.user,
'-password', self.password, str(self.dsn)],
input=substitute_macros(script, self.subs),
encoding=CHARSET_MAP[charset], capture_output=True)
encoding=self.io_enc, capture_output=True)
if result.returncode and raise_on_fail:
print(f"-- ISQL script stdout {'-' * 20}")
print(result.stdout)
@ -401,9 +415,9 @@ class Action:
# Store output
if _vars_['save-output']:
if self.stdout:
out_file.write_text(self.stdout)
out_file.write_text(self.stdout, encoding=self.db.io_enc)
if self.stderr:
err_file.write_text(self.stderr)
err_file.write_text(self.stderr, encoding=self.db.io_enc)
@property
def clean_stdout(self) -> str:
if self._clean_stdout is None:

File diff suppressed because it is too large


@ -24,10 +24,8 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
test_script_1 = """
-- [pcisar] 20.10.2021
-- For 3.0.7 on Linux (uses system ICU) or Windows (includes ICU 52) this
-- collation *SHOULD* be created w/o errors like in 4.0 version.
-- However this has to be verified (passes on Linux opensuse tumbleweed)
-- In case it would be confirmed, both test cases could be merged into one.
-- For 3.0.7 on Linux this PASSES (uses system ICU) but on Windows (which bundles ICU 52)
-- it FAILS unless a newer ICU (63) is installed.
set list on;
set count on;


@ -2,11 +2,11 @@
#
# id: bugs.core_2078
# title: Suboptimal join plan if there are selective non-indexed predicates involved
# decription:
# decription:
# This test operates with three tables: "small", "medium" and "big" - which are INNER-join'ed.
# It was found that there is some threshold ratio between number of rows in "small" vs "medium"
# tables which affects on generated PLAN after reaching this ratio.
# In particular, when tables have following values of rows: 26, 300 and 3000 - than optimizer
# tables which affects on generated PLAN after reaching this ratio.
# In particular, when tables have following values of rows: 26, 300 and 3000 - than optimizer
# still DOES take in account "WHERE" condition with non-indexed field in SMALL table ("where s.sf = 0"),
# and this lead to GOOD (fast) performance because SMALL table will be FIRST in the join order.
# However, if number of rows in SMALL table will change from 26 to 27 (yes, just one row) than
@ -32,7 +32,7 @@
# FETCHES_1_2 9094
# FETCHES_2_1 19548
# FETCHES_2_2 19548
#
#
# 18.08.2020.
# Test uses pre-created database which has several procedures for analyzing performance by with the help of MON$ tables.
# Performance results are gathered in the table STAT_LOG, each odd run will save mon$ counters with "-" sign and next
@ -41,18 +41,18 @@
# difference between them means performance expenses which we want to evaluate).
# NOTE. Before each new measure we have to set generator G_GATHER_STAT to zero in order to make it produce proper values
# starting with 1 (odd --> NEGATIVE sign for counters). This is done in SP_TRUNCATE_STAT.
#
#
# :::::::::::::::::::::::::::::::::::::::: NB ::::::::::::::::::::::::::::::::::::
# 18.08.2020. FB 4.x has incompatible behaviour with all previous versions since build 4.0.0.2131 (06-aug-2020):
# statement 'alter sequence <seq_name> restart with 0' changes rdb$generators.rdb$initial_value to -1 thus next call
# gen_id(<seq_name>,1) will return 0 (ZERO!) rather than 1.
# gen_id(<seq_name>,1) will return 0 (ZERO!) rather than 1.
# See also CORE-6084 and its fix: https://github.com/FirebirdSQL/firebird/commit/23dc0c6297825b2e9006f4d5a2c488702091033d
# ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# This is considered as *expected* and is noted in doc/README.incompatibilities.3to4.txt
#
#
# Because of this, it was decided to change code of SP_TRUNCATE_STAT: instead of 'alter sequence restart...' we do
# reset like this: c = gen_id(g_gather_stat, -gen_id(g_gather_stat, 0));
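#                  For illustration only (a hedged sketch, not part of the test script): the same reset
#                  idiom can be executed standalone in isql, e.g.:
#                      set term ^;
#                      execute block as
#                          declare c bigint;
#                      begin
#                          -- rewinds G_GATHER_STAT back to 0 without ALTER SEQUENCE ... RESTART
#                          c = gen_id(g_gather_stat, -gen_id(g_gather_stat, 0));
#                      end ^
#                      set term ;^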
#
#
# Checked on:
# 4.0.0.2164 SS: 2.183s.
# 4.0.0.2119 SS: 2.280s.
@ -60,7 +60,7 @@
# 3.0.7.33356 SS: 1.500s.
# 3.0.7.33356 CS: 2.445s.
# 2.5.9.27150 SC: 0.653s.
#
#
# tracker_id: CORE-2078
# min_versions: ['2.5.0']
# versions: 3.0
@ -86,43 +86,43 @@ test_script_1 = """
commit;
recreate table tmed(id int not null);
commit;
set term ^;
create or alter procedure sp_fill_data(a_sml_rows int, a_med_rows int, a_big_rows int)
as
declare i int;
declare i_mod2 smallint;
begin
-- gather old record versions if they are from previous run:
select count(*) from tsml into i;
select count(*) from tsml into i;
select count(*) from tmed into i;
select count(*) from tbig into i;
i=0;
while (i < a_sml_rows) do
begin
insert into tsml(id, sf) values( :i, :i - (:i/2)*2 );
i = i+1;
end
i=0;
while (i < a_med_rows) do
begin
insert into tmed(id) values( :i );
i = i+1;
end
i=0;
while (i < a_big_rows) do
begin
insert into tbig(id, sid, mid) values( :i, :i - (:i/2)*2, :i - (:i/:a_med_rows)*:a_med_rows );
i = i+1;
end
end
^
create or alter procedure srv_recalc_idx_stat
returns (
tab_name varchar(31),
@ -132,7 +132,7 @@ test_script_1 = """
as
begin
-- Refresh index statistics all user (non-system) tables.
-- Needs to be run in regular basis (`cron` on linux, `at` on windows)
-- Needs to be run in regular basis (`cron` on linux, `at` on windows)
-- otherwise ineffective plans can be generated when doing inner joins!
-- Example to run: select * from srv_recalc_idx_stat;
for
@ -149,52 +149,52 @@ test_script_1 = """
execute statement( 'set statistics index '||idx_name )
with autonomous transaction
;
select ri.rdb$statistics
from rdb$indices ri
where ri.rdb$relation_name = :tab_name and ri.rdb$index_name = :idx_name
into idx_stat_afte;
suspend;
end
end
^
set term ;^
commit;
alter table tsml add constraint tsml_pk primary key(id) using index tsml_pk;
alter table tmed add constraint tmed_pk primary key(id) using index tmed_pk;
alter table tbig add constraint tbig_fk_sml foreign key(sid) references tsml using index tbig_idx1_fk_sml;
alter table tbig add constraint tbig_fk_med foreign key(mid) references tmed using index tbig_idx2_fk_med;
commit;
set width tab_name 31;
set width idx_name 31;
set list on;
-------------------- prepare-1 ------------------
--execute procedure sp_fill_data(26, 300, 3000);
execute procedure sp_fill_data(15, 300, 3000);
commit;
set transaction read committed;
select
tab_name as run1_tab_name
select
tab_name as run1_tab_name
,idx_name as run1_idx_name
,idx_stat_afte run1_idx_stat
from srv_recalc_idx_stat where tab_name in ( upper('tsml'), upper('tmed'), upper('tbig') );
commit;
--------------------- run-1.1 -------------------
-----alter sequence g_gather_stat restart with 0;
execute procedure sp_truncate_stat;
commit;
execute procedure sp_gather_stat; ------- catch statistics BEFORE measured statement(s)
commit;
set plan on;
select count(*) cnt_1_1
from tsml s
@ -202,14 +202,14 @@ test_script_1 = """
join tmed m on b.mid = m.id
;
set plan off;
execute procedure sp_gather_stat; ------- catch statistics AFTER measured statement(s)
commit;
--------------------- run-1.2 -------------------
execute procedure sp_gather_stat; ------- catch statistics BEFORE measured statement(s)
commit;
set plan on;
select count(*) cnt_1_2
from tsml s
@ -218,32 +218,32 @@ test_script_1 = """
where s.sf = 0 -- selective non-indexed boolean
;
set plan off;
execute procedure sp_gather_stat; ------- catch statistics AFTER measured statement(s)
commit;
-------------------- prepare-2 ------------------
delete from tbig;
delete from tmed;
delete from tsml;
commit;
--execute procedure sp_fill_data(27, 300, 3000);
execute procedure sp_fill_data(45, 300, 3000);
commit;
set transaction read committed;
select
tab_name as run2_tab_name
select
tab_name as run2_tab_name
,idx_name as run2_idx_name
,idx_stat_afte run2_idx_stat
from srv_recalc_idx_stat where tab_name in ( upper('tsml'), upper('tmed'), upper('tbig') );
commit;
--------------------- run-2.1 -------------------
execute procedure sp_gather_stat; ------- catch statistics BEFORE measured statement(s)
commit;
set plan on;
select count(*) cnt_2_1
from tsml s
@ -251,11 +251,11 @@ test_script_1 = """
join tmed m on b.mid = m.id
;
set plan off;
execute procedure sp_gather_stat; ------- catch statistics AFTER measured statement(s)
commit;
--------------------- run-2.2 -------------------
execute procedure sp_gather_stat; ------- catch statistics BEFORE measured statement(s)
commit;
set plan on;
@ -268,8 +268,8 @@ test_script_1 = """
set plan off;
execute procedure sp_gather_stat; ------- catch statistics AFTER measured statement(s)
commit;
-- Here we define constants that serve as *upper* limit for fetches:
set term ^;
execute block as
@ -279,7 +279,7 @@ test_script_1 = """
end
^
set term ;^
-- Typical values for page_size 4K on 2.5 and 3.0:
-- FETCHES_1_1 19636
-- FETCHES_1_2 9094
@ -309,20 +309,20 @@ test_script_1 = """
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
RUN1_TAB_NAME TBIG
RUN1_IDX_NAME TBIG_IDX1_FK_SML
RUN1_TAB_NAME TBIG
RUN1_IDX_NAME TBIG_IDX1_FK_SML
RUN1_IDX_STAT 0.5000000000
RUN1_TAB_NAME TBIG
RUN1_IDX_NAME TBIG_IDX2_FK_MED
RUN1_TAB_NAME TBIG
RUN1_IDX_NAME TBIG_IDX2_FK_MED
RUN1_IDX_STAT 0.0033333334
RUN1_TAB_NAME TMED
RUN1_IDX_NAME TMED_PK
RUN1_TAB_NAME TMED
RUN1_IDX_NAME TMED_PK
RUN1_IDX_STAT 0.0033333334
RUN1_TAB_NAME TSML
RUN1_IDX_NAME TSML_PK
RUN1_TAB_NAME TSML
RUN1_IDX_NAME TSML_PK
RUN1_IDX_STAT 0.0666666701
PLAN JOIN (M NATURAL, B INDEX (TBIG_IDX2_FK_MED), S INDEX (TSML_PK))
@ -331,20 +331,20 @@ expected_stdout_1 = """
PLAN JOIN (S NATURAL, B INDEX (TBIG_IDX1_FK_SML), M INDEX (TMED_PK))
CNT_1_2 1500
RUN2_TAB_NAME TBIG
RUN2_IDX_NAME TBIG_IDX1_FK_SML
RUN2_TAB_NAME TBIG
RUN2_IDX_NAME TBIG_IDX1_FK_SML
RUN2_IDX_STAT 0.5000000000
RUN2_TAB_NAME TBIG
RUN2_IDX_NAME TBIG_IDX2_FK_MED
RUN2_TAB_NAME TBIG
RUN2_IDX_NAME TBIG_IDX2_FK_MED
RUN2_IDX_STAT 0.0033333334
RUN2_TAB_NAME TMED
RUN2_IDX_NAME TMED_PK
RUN2_TAB_NAME TMED
RUN2_IDX_NAME TMED_PK
RUN2_IDX_STAT 0.0033333334
RUN2_TAB_NAME TSML
RUN2_IDX_NAME TSML_PK
RUN2_TAB_NAME TSML
RUN2_IDX_NAME TSML_PK
RUN2_IDX_STAT 0.0222222228
PLAN JOIN (M NATURAL, B INDEX (TBIG_IDX2_FK_MED), S INDEX (TSML_PK))


@ -2,20 +2,20 @@
#
# id: bugs.core_2650
# title: Improve sorting performance when long VARCHARs are involved
# decription:
# decription:
# Test verifies trivial queries with persistent and computed columns, predicates, views,
# expressions without reference to any column and datatypes which have no symmetrical
# transformation from value to a key (decfloat, time-with-timezone and varchar with non-default collation).
#
#
# It is supposed that default value of InlineSortThreshold parameter is 1000.
# No changes in the firebird.conf required.
#
#
# This test most probably will be added by some new examples later.
#
#
# Thanks to dimitr for lot of explanations (e-mail discussion was 28.12.2020).
#
#
# Checked on 4.0.0.2303 SS/CS.
#
#
# tracker_id: CORE-2650
# min_versions: ['4.0']
# versions: 4.0
@ -48,8 +48,8 @@ test_script_1 = """
,computed_ts_dup computed by ( txt_short || txt_short )
,computed_tb_dup computed by ( txt_broad || txt_broad )
,computed_guid computed by ( lpad('', 2000, uuid_to_char(gen_uuid()) ) )
,computed_ts_left computed by( left(txt_short,10) )
,computed_tb_left computed by( left(txt_broad,10) )
,computed_ts_left computed by( left(txt_short,10) )
,computed_tb_left computed by( left(txt_broad,10) )
);
commit;
@ -64,7 +64,7 @@ test_script_1 = """
-- Must USE refetch because length of non-key column is greater than default threshold:
select txt_broad from test a02 order by id;
-- MUST use refetch regardless of column length because 'ROWS <N>' is present (!):
select txt_short from test a03 order by id rows 1;
@ -99,7 +99,7 @@ test_script_1 = """
select id from test a11 where '' <> any (select id from test x11 where txt_broad>'' order by id) ;
-- ######################################## e x i s t s ###########################################
-- Predicate "EXISTS" must turn on refetching regardless of record length, but only when "WHERE" has column which not present in "ORDER BY"
select id,txt_short from test a12 where exists(select 1 from test x12 where txt_short>'' order by id) ; -- MUST use refetch
@ -148,13 +148,13 @@ test_script_1 = """
recreate view v_unioned as
select id, txt_broad from test
union all
select -1, 'qwerty'
select -1, 'qwerty'
from rdb$database rows 0;
-- does NOT use refetch because view is based on UNION:
select txt_broad from v_unioned v01 order by id;
commit;
-- ################################# e x p r e s s i o n s #####################################
-- must use refetch because expression is based on column which has length >= threshold
@ -166,7 +166,7 @@ test_script_1 = """
select left( txt_short || txt_short, 2000) as txt from test a22 order by id;
commit;
-- ########### n o n - s y m m e t r i c a l k e y - v a l u e d a t a t y p e s #########
-- Following data types in common case have no ability to get column value from a key:
@ -462,6 +462,7 @@ expected_stdout_1 = """
@pytest.mark.version('>=4.0')
def test_1(act_1: Action):
act_1.charset = 'NONE'
act_1.expected_stdout = expected_stdout_1
act_1.execute()
assert act_1.clean_expected_stdout == act_1.clean_stdout


@ -2,7 +2,7 @@
#
# id: bugs.core_2966
# title: Wrong results or unexpected errors while sorting a large data set
# decription:
# decription:
# tracker_id: CORE-2966
# min_versions: ['2.1.6', '2.5.0']
# versions: 2.5.0
@ -14,7 +14,7 @@ from firebird.qa import db_factory, isql_act, Action
# version: 2.5.0
# resources: None
substitutions_1 = []
substitutions_1 = [('=.*', '=')]
init_script_1 = """create table t (col varchar(32000));
commit;


@ -17,7 +17,7 @@ from firebird.qa import db_factory, isql_act, Action
# version: 2.5
# resources: None
substitutions_1 = []
substitutions_1 = [('01-JAN-', ' 1-JAN-')]
init_script_1 = """"""
@ -41,13 +41,6 @@ test_script_1 = """
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
# [pcisar] 20.10.2011
# This test FAIL on my system because the isql output is:
# ID 1
# OPDATE 31-DEC-2000
# ID 2
# OPDATE 1-JAN-2001
expected_stdout_1 = """
ID 1
OPDATE 31-DEC-2000


@ -28,9 +28,10 @@ test_script_1 = """
-- -Data type unknown
-- -COLLATION WIN_PTBR for CHARACTER SET UTF8 is not defined
-- (See ticket issue: "WIN_PTBR is tried to be resolved agains database charset instead of client charset: incorrect")
-- [pcisar] 20.10.2021
-- It fails as well in 3.0.7 on Linux (opensuse tumbleweed)
-- In 3.0.0.31827 (WI- and LI-) works fine:
-- [pcisar] 20.10.2021
-- It fails as well in 3.0.7 and 4.0 on Linux (opensuse tumbleweed) and Windows (8.1).
-- It appears that this test has been bogus from the beginning.
set term ^;
execute block returns (c varchar(10) collate win_ptbr) as
begin


@ -2,9 +2,9 @@
#
# id: bugs.core_3362_complex
# title: Cursors should ignore changes made by the same statement
# decription:
# decription:
# This test verifies PSQL issues that were accumulated in miscellaneous tickets.
#
#
# tracker_id: CORE-3362
# min_versions: ['3.0.1']
# versions: 3.0.1
@ -37,13 +37,13 @@ test_script_1 = """
create or alter procedure sp_set_ctx(a_point varchar(20), a_data1 int, a_data2 int, a_data3 int, a_data4 int) as
begin
-- Store values of cursor fields in the context variable which name is passed here as 'a_point'.
rdb$set_context(
'USER_SESSION',
rdb$set_context(
'USER_SESSION',
a_point,
coalesce(cast( a_data1 as char(6)),'#null#')
coalesce(cast( a_data1 as char(6)),'#null#')
|| ' ' || coalesce(cast( a_data2 as char(6)),'#null#')
|| ' ' || coalesce(cast( a_data3 as char(6)),'#null#')
|| ' ' || coalesce(cast( a_data4 as char(6)),'#null#')
|| ' ' || coalesce(cast( a_data4 as char(6)),'#null#')
);
end
^
@ -56,10 +56,10 @@ test_script_1 = """
-- Do _NOT_ try to check following statements using explicit cursor
-- (i.e. OPEN <C>; FETCH ...; CLOSE <C>)
for
select t.id, t.data1, t.data2, t.data3, t.data4 from test t where t.id = 1
select t.id, t.data1, t.data2, t.data3, t.data4 from test t where t.id = 1
as cursor c
do begin
execute procedure sp_set_ctx('point_0', c.data1, c.data2, c.data3, c.data4 );
update test t set t.data1 = 100001 where current of c;
@ -123,13 +123,14 @@ expected_stdout_1 = """
CTX_NAME point_3
CTX_VALUE #null# #null# 100003 #null#
"""
expected_stderr_1 = """
Statement failed, SQLSTATE = 22000
no current record for fetch operation
-At procedure 'SP_TEST1A'
"""
@pytest.mark.version('>=3.0.1')
@pytest.mark.version('>=3.0.1,<4')
def test_1(act_1: Action):
act_1.expected_stdout = expected_stdout_1
act_1.expected_stderr = expected_stderr_1
@ -137,3 +138,119 @@ def test_1(act_1: Action):
assert act_1.clean_expected_stderr == act_1.clean_stderr
assert act_1.clean_expected_stdout == act_1.clean_stdout
# version: 4.0
# resources: None
substitutions_2 = [('[ \\t]+', ' '), ('line: [\\d]+[,]{0,1} col: [\\d]+', '') ]
test_script_2 = """
-- see also:
-- https://www.sql.ru/forum/1319017/obnovlenie-zapisi-po-kursoru
-- Discussed 13.11.2019 with hvlad and dimitr (related to CORE-5794)
recreate table test (
id int not null
,data1 int
,data2 int
,data3 int
,data4 int
);
set term ^;
create or alter procedure sp_set_ctx(a_point varchar(20), a_data1 int, a_data2 int, a_data3 int, a_data4 int) as
begin
-- Store values of cursor fields in the context variable which name is passed here as 'a_point'.
rdb$set_context(
'USER_SESSION',
a_point,
coalesce(cast( a_data1 as char(6)),'#null#')
|| ' ' || coalesce(cast( a_data2 as char(6)),'#null#')
|| ' ' || coalesce(cast( a_data3 as char(6)),'#null#')
|| ' ' || coalesce(cast( a_data4 as char(6)),'#null#')
);
end
^
create or alter procedure sp_test1a as
begin
-- ::: NOTE :::
-- Only IMPLICIT cursors are stable in 3.0+.
-- #############
-- Do _NOT_ try to check following statements using explicit cursor
-- (i.e. OPEN <C>; FETCH ...; CLOSE <C>)
for
select t.id, t.data1, t.data2, t.data3, t.data4 from test t where t.id = 1
as cursor c
do begin
execute procedure sp_set_ctx('point_0', c.data1, c.data2, c.data3, c.data4 );
update test t set t.data1 = 100001 where current of c;
-- make "photo" of all cursor fields:
execute procedure sp_set_ctx('point_1', c.data1, c.data2, c.data3, c.data4 );
-- at this point the value of c.data1 remains NULL from the cursor's POV because
-- "UPDATE WHERE CURRENT OF C" sees the record as if no changes were made at all:
update test t set t.data2 = 100002 where current of c;
-- make "photo" of all cursor fields:
execute procedure sp_set_ctx('point_2', c.data1, c.data2, c.data3, c.data4 );
-- at this point the values of c.data1 and c.data2 remain NULL from the cursor's POV because
-- "UPDATE WHERE CURRENT OF C" sees the record as if no changes were made at all:
update test t set t.data3 = 100003 where current of c;
-- make "photo" of all cursor fields:
execute procedure sp_set_ctx('point_3', c.data1, c.data2, c.data3, c.data4 );
delete from test t where current of c;
execute procedure sp_set_ctx('point_4', c.data1, c.data2, c.data3, c.data4 );
-- 02.05.2021: on FB 3.x the following UPDATE statement raises the exception
-- "SQLSTATE = 22000 / no current record for fetch operation"
-- But on FB 4.x this was fixed since build 4.0.0.2448, so no exception is raised here.
-- See also: https://github.com/FirebirdSQL/firebird/issues/6778
-- This means that the deletion of the record in table TEST will not be undone
-- and we must NOT see its data in the expected_stdout section!
update test t set t.data4 = 100004 where current of c;
execute procedure sp_set_ctx('point_5', c.data1, c.data2, c.data3, c.data4 );
end
end
^
set term ;^
commit;
insert into test (id) values (1);
commit;
set bail off;
set list on;
execute procedure sp_test1a;
select * from test;
select mon$variable_name as ctx_name, mon$variable_value ctx_value from mon$context_variables where mon$attachment_id = current_connection;
"""
act_2 = isql_act('db_1', test_script_2, substitutions=substitutions_2)
expected_stdout_2 = """
CTX_NAME point_0
CTX_VALUE #null# #null# #null# #null#
CTX_NAME point_1
CTX_VALUE 100001 #null# #null# #null#
CTX_NAME point_2
CTX_VALUE #null# 100002 #null# #null#
CTX_NAME point_3
CTX_VALUE #null# #null# 100003 #null#
CTX_NAME point_4
CTX_VALUE #null# #null# #null# #null#
CTX_NAME point_5
CTX_VALUE #null# #null# #null# #null#
"""
@pytest.mark.version('>=4')
def test_2(act_2: Action):
act_2.expected_stdout = expected_stdout_2
act_2.execute()
assert act_2.clean_expected_stdout == act_2.clean_stdout


@ -2,7 +2,7 @@
#
# id: bugs.core_3547
# title: Floating-point negative zero doesn't match positive zero in the index
# decription:
# decription:
# tracker_id: CORE-3547
# min_versions: ['2.5.1']
# versions: 2.5.1, 2.5.1
@ -22,7 +22,7 @@ init_script_1 = """
insert into t_float_no_pk (col) values (0e0);
insert into t_float_no_pk (col) values (-0e0);
commit;
recreate table t1_double_as_pk (col double precision, constraint t1_double_pk primary key(col) using index t1_double_pk);
commit;
"""
@ -37,7 +37,7 @@ test_script_1 = """
select count(*) "where id = -0e0" from rdb$relations where rdb$relation_id = -0e0;
select count(*) "where id = -(1e0 - 1e0)" from rdb$relations where rdb$relation_id = -(1e0 - 1e0);
select count(*) "where 0e0 = -0e0" from rdb$database where 0e0 = -0e0;
insert into t1_double_as_pk (col) values (0e0);
commit;
insert into t1_double_as_pk (col) values (-0e0);
@ -68,7 +68,7 @@ expected_stderr_1 = """
-Problematic key value is ("COL" = 0.0000000000000000)
"""
@pytest.mark.version('>=2.5.1,<2.5.1')
@pytest.mark.version('>=2.5.1')
@pytest.mark.platform('Windows')
def test_1(act_1: Action):
act_1.expected_stdout = expected_stdout_1


@ -2,14 +2,14 @@
#
# id: bugs.core_3919
# title: Improve SIMILAR TO performance
# decription:
# decription:
# Confirmed normal work on WI-T4.0.0.1598. Moreover, SIMILAR TO is about 5x faster than LIKE comparison in this test.
#
#
# CAUTION.
# This test must be run only on 4.0+, despite that its 'Fix version' = 3.0 Alpha 1.
# Performance of SIMILAR TO statement is extremely poor in comparison with LIKE operator:
# COUNT through the table of 102 records requires 27 seconds vs 16 ms (checked on WI-V6.3.6.33246).
#
#
# tracker_id: CORE-3919
# min_versions: ['4.0']
# versions: 4.0
@ -33,14 +33,14 @@ test_script_1 = """
set term ^;
execute block returns(
ratio_of_time varchar(255)
) as
) as
declare i int;
declare j int;
declare t0 timestamp;
declare t0 timestamp;
declare t1 timestamp;
declare elap_ms_using_like int;
declare elap_ms_using_similar_to int;
declare s varchar(32761);
declare s varchar(32761);
declare ratio_similar_vs_like numeric(15,4);
declare MAX_RATIO numeric(15,4) = 2;
-- ^
@ -48,52 +48,52 @@ test_script_1 = """
-- MAX THRESHOLD
-- #############
declare n_count int = 100; -- do not set it to values less than 10: duration should not be zero!
begin
begin
t0 = cast('now' as timestamp);
select count(*) as like_count, sum(char_length(b)) as like_sum_len
select count(*) as like_count, sum(char_length(b)) as like_sum_len
from test t, (select 1 i from rdb$types rows (:n_count) ) n
where
t.b like '%a%' or
t.b like '%b%' or
t.b like '%c%' or
t.b like '%d%' or
t.b like '%e%' or
t.b like '%f%' or
t.b like '%g%' or
t.b like '%h%' or
t.b like '%i%' or
t.b like '%j%' or
t.b like '%k%' or
t.b like '%l%' or
t.b like '%m%' or
t.b like '%n%' or
t.b like '%o%' or
t.b like '%p%' or
t.b like '%q%' or
t.b like '%r%' or
t.b like '%s%' or
t.b like '%t%' or
t.b like '%u%' or
t.b like '%v%' or
t.b like '%w%' or
t.b like '%x%' or
t.b like '%y%' or
t.b like '%z%'
where
t.b like '%a%' or
t.b like '%b%' or
t.b like '%c%' or
t.b like '%d%' or
t.b like '%e%' or
t.b like '%f%' or
t.b like '%g%' or
t.b like '%h%' or
t.b like '%i%' or
t.b like '%j%' or
t.b like '%k%' or
t.b like '%l%' or
t.b like '%m%' or
t.b like '%n%' or
t.b like '%o%' or
t.b like '%p%' or
t.b like '%q%' or
t.b like '%r%' or
t.b like '%s%' or
t.b like '%t%' or
t.b like '%u%' or
t.b like '%v%' or
t.b like '%w%' or
t.b like '%x%' or
t.b like '%y%' or
t.b like '%z%'
into i,j
;
t1 = cast('now' as timestamp);
t1 = cast('now' as timestamp);
elap_ms_using_like = datediff(millisecond from t0 to t1);
t0 = cast('now' as timestamp);
select count(*) as similar_to_count, sum(char_length(b)) as similar_to_sum_len
t0 = cast('now' as timestamp);
select count(*) as similar_to_count, sum(char_length(b)) as similar_to_sum_len
from test t, (select 1 i from rdb$types rows (:n_count) ) n
where t.b similar to '%[a-z]%'
into i,j
;
t1 = cast('now' as timestamp);
;
t1 = cast('now' as timestamp);
elap_ms_using_similar_to = datediff(millisecond from t0 to t1);
ratio_similar_vs_like = 1.0000 * elap_ms_using_similar_to / elap_ms_using_like;
ratio_of_time = iif( ratio_similar_vs_like < MAX_RATIO
@ -101,7 +101,7 @@ test_script_1 = """
,'TOO LONG: '|| ratio_similar_vs_like ||' times. This is more than max threshold = ' || MAX_RATIO || ' times'
)
;
suspend;
suspend;
end
^
"""
@ -114,6 +114,7 @@ expected_stdout_1 = """
@pytest.mark.version('>=4.0')
def test_1(act_1: Action):
act_1.charset = 'NONE'
act_1.expected_stdout = expected_stdout_1
act_1.execute()
assert act_1.clean_expected_stdout == act_1.clean_stdout


@ -43,22 +43,30 @@ Statement failed, SQLSTATE = 42000
unsuccessful metadata update
-CREATE USER U01 failed
-Password must be specified when creating user
Statement failed, SQLSTATE = 42000
unsuccessful metadata update
-CREATE USER U01 failed
-Password must be specified when creating user
Statement failed, SQLSTATE = 42000
unsuccessful metadata update
-CREATE USER U01 failed
-Password must be specified when creating user
Statement failed, SQLSTATE = 42000
unsuccessful metadata update
-CREATE USER U01 failed
-Password must be specified when creating user
Statement failed, SQLSTATE = 42000
unsuccessful metadata update
-CREATE USER U01 failed
-Password must be specified when creating user
Statement failed, SQLSTATE = HY000
Password must be specified when creating user
Statement failed, SQLSTATE = 42000
unsuccessful metadata update
-CREATE USER PASSWORD failed


@ -11,6 +11,7 @@
# Statement failed, SQLSTATE = 42000
# Execute statement error at attach :
# 335544472 : Your user name and password are not defined <...>
# [pcisar] 03.11.2021 The same error is raised by 4.0 on Windows and Linux
#
# Works fine on:
# fb30Cs, build 3.0.4.32947: OK, 2.907s.


@ -2,10 +2,10 @@
#
# id: bugs.core_5494
# title: Creating a column of type BLOB SUB_TYPE BINARY fails with a Token unknown
# decription:
# decription:
# Confirmed compilation problem on WI-T4.0.0.546.
# Checked on WI-T4.0.0.549 -- all OK.
#
#
# tracker_id: CORE-5494
# min_versions: ['4.0']
# versions: 4.0
@ -60,6 +60,7 @@ expected_stdout_1 = """
@pytest.mark.version('>=4.0')
def test_1(act_1: Action):
act_1.charset = 'NONE'
act_1.expected_stdout = expected_stdout_1
act_1.execute()
assert act_1.clean_expected_stdout == act_1.clean_stdout


@ -2,7 +2,7 @@
#
# id: bugs.core_5495
# title: New users or changed passwords in legacy authentication do not work in Firebird 4
# decription:
# decription:
# Confirmed bug on WI-T4.0.0.546, got:
# Statement failed, SQLSTATE = 28000
# Your user name and password are not defined. Ask your database administrator <...>
@ -12,7 +12,10 @@
# WireCrypt = Disabled
# UserManager = Srp, Legacy_UserManager
# Checked on WI-T4.0.0.549 - works fine.
#
#
# [pcisar] 3.11.2021 This test fails with 4.0, even with the specified config.
# Although the user is created, connecting as user tmp$c5495 fails (unknown user).
#
# tracker_id: CORE-5495
# min_versions: ['4.0']
# versions: 4.0
@ -33,11 +36,12 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
test_script_1 = """
set list on;
set bail on;
set echo on;
create user tmp$c5495 password '123' using plugin Legacy_UserManager;
commit;
connect '$(DSN)' user tmp$c5495 password '123';
--select mon$user,mon$remote_address,mon$remote_protocol,mon$client_version,mon$remote_version,mon$auth_method from mon$attachments
select mon$user,mon$remote_protocol,mon$auth_method from mon$attachments
--select mon$user,mon$remote_address,mon$remote_protocol,mon$client_version,mon$remote_version,mon$auth_method from mon$attachments
select mon$user,mon$remote_protocol,mon$auth_method from mon$attachments
where mon$attachment_id=current_connection;
commit;
connect '$(DSN)' user SYSDBA password 'masterkey';


@ -2,9 +2,9 @@
#
# id: bugs.core_5664
# title: SIMILAR TO is substantially (500-700x) slower than LIKE on trivial pattern matches with VARCHAR data.
# decription:
# decription:
# Confirmed normal work (ratio is about 1) on WI-T4.0.0.1598
#
#
# tracker_id: CORE-5664
# min_versions: ['4.0']
# versions: 4.0
@ -23,9 +23,9 @@ init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
test_script_1 = """
set list on;
set bail on;
set term ^;
set list on;
set bail on;
set term ^;
-- test#1: <long_string> SIMILAR TO '%QWERTY'
@ -33,13 +33,13 @@ test_script_1 = """
execute block returns(
ratio_in_test_1 varchar(255)
) as
declare i int = 0;
declare t0 timestamp;
declare t1 timestamp;
) as
declare i int = 0;
declare t0 timestamp;
declare t1 timestamp;
declare elap_ms_using_like int;
declare elap_ms_using_similar_to int;
declare s varchar(32761);
declare s varchar(32761);
declare ratio_similar_vs_like numeric(15,4);
declare MAX_RATIO numeric(15,4) = 2;
-- ^
@ -47,23 +47,23 @@ test_script_1 = """
-- MAX THRESHOLD
-- #############
declare n_count int = 5000; -- do not set it to values less than 500: duration should not be zero!
begin
s = lpad('', 32755, uuid_to_char(gen_uuid())) || 'QWERTY';
begin
s = lpad('', 32755, uuid_to_char(gen_uuid())) || 'QWERTY';
t0 = cast('now' as timestamp);
while (i < n_count) do
begin
i = i + iif( s like '%QWERTY', 1, 1);
end
t1 = cast('now' as timestamp);
elap_ms_using_like = datediff(millisecond from t0 to t1);
t0 = cast('now' as timestamp);
while (i < n_count) do
begin
i = i + iif( s like '%QWERTY', 1, 1);
end
t1 = cast('now' as timestamp);
elap_ms_using_like = datediff(millisecond from t0 to t1);
i = 0;
while (i < n_count) do
begin
i = i + iif( s similar to '%QWERTY', 1, 1);
end
elap_ms_using_similar_to = datediff(millisecond from t1 to cast('now' as timestamp));
i = 0;
while (i < n_count) do
begin
i = i + iif( s similar to '%QWERTY', 1, 1);
end
elap_ms_using_similar_to = datediff(millisecond from t1 to cast('now' as timestamp));
ratio_similar_vs_like = 1.0000 * elap_ms_using_similar_to / elap_ms_using_like;
@ -72,9 +72,9 @@ test_script_1 = """
,'TOO LOG: '|| ratio_similar_vs_like ||' times. This is more than max threshold = ' || MAX_RATIO || ' times'
)
;
suspend;
suspend;
end
^
^
-- test#2: <long_string> SIMILAR TO 'QWERTY%'
@ -82,13 +82,13 @@ test_script_1 = """
execute block returns(
ratio_in_test_2 varchar(255)
) as
declare i int = 0;
declare t0 timestamp;
declare t1 timestamp;
) as
declare i int = 0;
declare t0 timestamp;
declare t1 timestamp;
declare elap_ms_using_like int;
declare elap_ms_using_similar_to int;
declare s varchar(32761);
declare s varchar(32761);
declare ratio_similar_vs_like numeric(15,4);
declare MAX_RATIO numeric(15,4) = 2;
-- ^
@ -96,24 +96,24 @@ test_script_1 = """
-- MAX THRESHOLD
-- #############
declare n_count int = 5000; -- do not set it to values less than 500: duration should not be zero!
begin
begin
s = 'QWERTY' || lpad('', 32755, uuid_to_char(gen_uuid())) ;
s = 'QWERTY' || lpad('', 32755, uuid_to_char(gen_uuid())) ;
t0 = cast('now' as timestamp);
while (i < n_count) do
begin
i = i + iif( s similar to 'QWERTY%', 1, 1);
end
t1 = cast('now' as timestamp);
elap_ms_using_like = datediff(millisecond from t0 to t1);
t0 = cast('now' as timestamp);
while (i < n_count) do
begin
i = i + iif( s similar to 'QWERTY%', 1, 1);
end
t1 = cast('now' as timestamp);
elap_ms_using_like = datediff(millisecond from t0 to t1);
i = 0;
while (i < n_count) do
begin
i = i + iif( s similar to 'QWERTY%', 1, 1);
end
elap_ms_using_similar_to = datediff(millisecond from t1 to cast('now' as timestamp));
i = 0;
while (i < n_count) do
begin
i = i + iif( s similar to 'QWERTY%', 1, 1);
end
elap_ms_using_similar_to = datediff(millisecond from t1 to cast('now' as timestamp));
ratio_similar_vs_like = 1.0000 * elap_ms_using_similar_to / elap_ms_using_like;
@ -122,10 +122,10 @@ test_script_1 = """
,'TOO LONG: '|| ratio_similar_vs_like ||' times. This is more than max threshold = ' || MAX_RATIO || ' times'
)
;
suspend;
suspend;
end
^
set term ;^
^
set term ;^
"""
@ -138,6 +138,7 @@ expected_stdout_1 = """
@pytest.mark.version('>=4.0')
def test_1(act_1: Action):
act_1.charset = 'NONE'
act_1.expected_stdout = expected_stdout_1
act_1.execute()
assert act_1.clean_expected_stdout == act_1.clean_stdout


@ -2,11 +2,13 @@
#
# id: bugs.core_5884
# title: Initial global mapping from srp plugin does not work
# decription:
# decription:
# Confirmed bug on: 3.0.4.33020, 4.0.0.1143 ('TEST2' was shown instead of 'GTOST').
# Checked on:
# FB30SS, build 3.0.4.33021: OK, 2.312s.
#
#
# [pcisar] 3.11.2021 This test fails for 4.0 (returns tmp$ user names instead of the mapped ones)
#
# tracker_id: CORE-5884
# min_versions: ['3.0.4']
# versions: 3.0.4
@ -31,7 +33,7 @@ test_script_1 = """
commit;
create or alter mapping lmap using plugin srp from user tmp$c5884_1 to user ltost;
create or alter global mapping gmap using plugin srp from user tmp$c5884_2 to user gtost;
create or alter global mapping gmap using plugin srp from user tmp$c5884_2 to user gtost;
commit;
connect '$(DSN)' user tmp$c5884_1 password '123';


@ -2,20 +2,20 @@
#
# id: bugs.core_5953
# title: Statement level read consistency in read-committed transactions
# decription:
# decription:
# We create table with single column and inspect it TWICE by this query: 'select max(x) from test'.
# Both queries are in the single procedure, but they are separated by autonomous transaction.
# Initially (before 1st query) this table has one record with value=1, so the first query will return 1.
# Then autonomous transaction inserts into this table 10 rows with incremental values.
# After this, query runs again.
#
# After this, query runs again.
#
# Procedure is executed within READ COMMITTED transactions.
#
#
# If current transaction was started as READ CONSISTENCY then 2nd query must return the same value as 1st.
# Otherwise 2nd query return DIFFERENT (last of newly added) value and output column MAX_X will differ.
#
#
# Checked on 4.0.0.1573.
#
#
# ::: NB :::
# It is stated (in doc\\README.read_consistency.md ) that "In the future versions of Firebird old kinds of read-committed transactions could be removed".
# But for now we can suppose that at least in FB 4.x family these modes will be preserved and we can use them beside new (READ CONSISTENCY) mode.
@ -24,22 +24,24 @@
# 2. Start Tx in READ RECORD_VERSION, get result_2 - and it must differ from result_1.
# For this test could start Tx in READ RECORD_VERSION mode, parameter ReadConsistency in firebird.conf must be set to 0 (ZERO).
# THIS VALUE DIFFERS FROM DEFAULT, but it is not a problem for other major FB-versions: config is prepared separately for each of them.
# So, if in the future some major FB version will exclude RECORD_VERSION at all we can prepare new section of this test
# So, if in the future some major FB version will exclude RECORD_VERSION at all we can prepare new section of this test
# which will assume that there is no such parameter (ReadConsistency) in firebird.conf and check only one isolation mode.
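#    For illustration (a hedged sketch of the two transaction-start statements compared by this test; not part of the script):
#        set transaction read committed read consistency; -- FB 4 mode: both queries inside the SP see the same data
#        set transaction read committed record_version;   -- legacy mode: requires ReadConsistency = 0 in firebird.conf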
#
#
# :::::::::::::::::::::::::::::::::::::::: NB ::::::::::::::::::::::::::::::::::::
# 18.08.2020. FB 4.x has incompatible behaviour with all previous versions since build 4.0.0.2131 (06-aug-2020):
# statement 'alter sequence <seq_name> restart with 0' changes rdb$generators.rdb$initial_value to -1 thus next call
# gen_id(<seq_name>,1) will return 0 (ZERO!) rather than 1.
# gen_id(<seq_name>,1) will return 0 (ZERO!) rather than 1.
# See also CORE-6084 and its fix: https://github.com/FirebirdSQL/firebird/commit/23dc0c6297825b2e9006f4d5a2c488702091033d
# ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# This is considered as *expected* and is noted in doc/README.incompatibilities.3to4.txt
#
#
# Because of this, it was decided to replace 'alter sequence restart...' with subtraction of two gen values:
# c = gen_id(<g>, -gen_id(<g>, 0)) -- see procedure sp_restart_sequences.
#
#
# Checked on 4.0.0.2164
#
#
# [pcisar] 3.11.2021 This test fails for v4.0.0.2496 (4.0 final)
#
# tracker_id: CORE-5953
# min_versions: ['4.0']
# versions: 4.0
@ -69,9 +71,9 @@ test_script_1 = """
commit;
set term ^;
alter procedure sp_run_heavy_query returns( dts timestamp, max_x int ) as
alter procedure sp_run_heavy_query returns( dts timestamp, max_x int ) as
begin
execute statement 'select max(x) from test' into max_x;
dts='now';
suspend;
@ -96,8 +98,8 @@ test_script_1 = """
set term ;^
commit;
recreate view v_check as
select
recreate view v_check as
select
t.mon$isolation_mode as mon_isol_mode
,rdb$get_context('SYSTEM', 'ISOLATION_LEVEL') as ctx_isol_level
,d.max_x
@ -116,7 +118,7 @@ test_script_1 = """
select * from v_check;
delete from test;
execute procedure sp_restart_sequences;
commit;


@ -2,16 +2,16 @@
#
# id: bugs.core_5970
# title: Built-in cryptographic functions
# decription:
# decription:
# Issues found during implementing this test - see CORE-6185, CORE-6186.
# This test checks only ability to call ENCRYPT()/DECRYPT() functions with different parameters.
# Also, it checks that <source> -> encrypt(<source>) -> decrypt(encrypted_source) gives the same <source>.
#
#
# Checked on:
# 4.0.0.1646 SS: 3.657s.
# 4.0.0.1637 SC: 3.271s.
# 4.0.0.1633 CS: 4.191s.
#
#
# tracker_id: CORE-5970
# min_versions: ['4.0.0']
# versions: 4.0
@ -102,7 +102,7 @@ test_script_1 = """
set term ^;
create or alter procedure sp_block_test(a_alg varchar(30))
returns(
returns(
encryption_algorithm varchar(30)
,encryption_mode varchar(10)
,enc_key_octet_length int
@ -134,7 +134,7 @@ test_script_1 = """
-- block_cipher ::= { AES | ANUBIS | BLOWFISH | KHAZAD | RC5 | RC6 | SAFER+ | TWOFISH | XTEA }
-- mode ::= { CBC | CFB | CTR | ECB | OFB }
for
select 'CBC' as mode from rdb$database union all
select 'CFB' from rdb$database union all -- AES
@ -143,7 +143,7 @@ test_script_1 = """
select 'OFB' from rdb$database -- AES
as cursor cm
do begin
encryption_mode = cm.mode;
encrypted_equals_to_decrypted = null;
encryption_finish_gdscode = null;
@ -167,7 +167,7 @@ test_script_1 = """
v_decrypt_sttm = 'select decrypt( t.encrypted_text using ' || c.crypto_alg || ' mode ' || cm.mode || ' key q''{' || c.crypto_key || '}'' iv q''{' || c.crypto_iv || '}'' ) from gtt_tmp t';
execute statement v_decrypt_sttm into s_decrypted_text;
encrypted_equals_to_decrypted = false;
if ( hash(s_source_text) = hash(s_decrypted_text) ) then
if (s_source_text = s_decrypted_text) then
@ -230,10 +230,10 @@ test_script_1 = """
-- Mode should be specified for block ciphers.
-- Initialization vector (IV) should be specified for block ciphers in all modes except ECB and all stream ciphers except RC4.
insert into gtt_tmp(source_text) values(c.source_text);
s_source_text = c.source_text;
enc_init_vector_octet_length = 0;
if ( upper( :a_alg ) = upper('RC4') ) then
iv_suffix= '';
@ -251,7 +251,7 @@ test_script_1 = """
v_decrypt_sttm = 'select decrypt( t.encrypted_text using ' || c.crypto_alg || ' key q''{' || c.crypto_key || '}'' ' || iv_suffix || ') from gtt_tmp t';
execute statement v_decrypt_sttm into s_decrypted_text;
encrypted_equals_to_decrypted = false;
if ( hash(s_source_text) = hash(s_decrypted_text) ) then
if (s_source_text = s_decrypted_text) then
@ -1366,13 +1366,13 @@ expected_stdout_1 = """
OUTPUT message field count: 3
01: sqltype: 520 BLOB scale: 0 subtype: 0 len: 8
: name: ENCRYPT alias: E_BLOB
: table: owner:
: table: owner:
02: sqltype: 448 VARYING scale: 0 subtype: 0 len: 255 charset: 1 OCTETS
: name: ENCRYPT alias: E_CHAR
: table: owner:
03: sqltype: 448 VARYING scale: 0 subtype: 0 len: 6 charset: 0 NONE
: table: owner:
03: sqltype: 448 VARYING scale: 0 subtype: 0 len: 6 charset: 1 OCTETS
: name: DECRYPT alias: D_BIN
: table: owner:
: table: owner:
"""
expected_stderr_1 = """
@ -1391,6 +1391,7 @@ expected_stderr_1 = """
@pytest.mark.version('>=4.0')
def test_1(act_1: Action):
act_1.charset = 'NONE'
act_1.expected_stdout = expected_stdout_1
act_1.expected_stderr = expected_stderr_1
act_1.execute()


@ -2,21 +2,21 @@
#
# id: bugs.core_5990
# title: Pool of external connections
# decription:
# decription:
# Test assumes that firebird.conf contains:
# ExtConnPoolSize = 100 (or at any other value >= 6)
# ExtConnPoolLifeTime = 10
# We run six execute blocks with COMMIT after each of them.
# When EDS pool is enabled then every new execute block will use the same attachment as it was established in the 1st EB.
# We check this by running query that show number of duplicates for each of N attachments: this number must be equal to N-1.
# ::: NB :::
# Final statement must be 'ALTER EXTERNAL CONNECTIONS POOL CLEAR ALL' otherwise DB file will be kept by engine at least
# ::: NB :::
# Final statement must be 'ALTER EXTERNAL CONNECTIONS POOL CLEAR ALL' otherwise DB file will be kept by engine at least
# for 10 seconds after this test finish (see parameter 'ExtConnPoolLifeTime').
#
#
# Thank hvlad for additional explanations, discuss in e-mail was 26.04.19 09:38.
#
#
# Checked on 4.0.0.1501 (both SS and CS): OK, 1.343s.
#
#
# tracker_id: CORE-5990
# min_versions: ['4.0']
# versions: 4.0
@ -36,7 +36,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
test_script_1 = """
recreate view v_conn as
select
select
cast(rdb$get_context('SYSTEM', 'EXT_CONN_POOL_SIZE') as int) as pool_size,
cast(rdb$get_context('SYSTEM', 'EXT_CONN_POOL_IDLE_COUNT') as int) as pool_idle,
cast(rdb$get_context('SYSTEM', 'EXT_CONN_POOL_ACTIVE_COUNT') as int) as pool_active,
@ -145,11 +145,11 @@ test_script_1 = """
--set echo on;
--select * from v_conn;
--select a.id, a.established_attach_id, count(*)over(partition by established_attach_id)-1 dup_cnt
--select a.id, a.established_attach_id, count(*)over(partition by established_attach_id)-1 dup_cnt
set list on;
select a.id, count(*)over(partition by established_attach_id)-1 dup_cnt
from att_info a
select a.id, count(*)over(partition by established_attach_id)-1 dup_cnt
from att_info a
order by id;
ALTER EXTERNAL CONNECTIONS POOL CLEAR ALL; -- !! mandatory otherwise database file will be kept by engine and fbtest will not able to drop it !!


@ -2,18 +2,18 @@
#
# id: bugs.core_6044
# title: ISQL issues with increased identifier length
# decription:
# Confirmed problem on WI-T4.0.0.1421: FB crashed when we create sequence
# decription:
# Confirmed problem on WI-T4.0.0.1421: FB crashed when we create sequence
# with name = 63 on-ascii characters and then ask it using 'show sequ' command.
# Also, FB crashe when we created a table with column which name contains 63
# Also, FB crashe when we created a table with column which name contains 63
# non-ascii characters and then this table metadata is queried by 'show table <T>' command.
# Checked on 4.0.0.1485: OK, 1.576s.
#
# Checked on 4.0.0.1485: OK, 1.576s.
#
# 18.08.2020: added filter for 'current value: ...' of sequence. FB 4.x became incompatible
# with previous versions since 06-aug-2020.
# See also CORE-6084 and its fix: https://github.com/FirebirdSQL/firebird/commit/23dc0c6297825b2e9006f4d5a2c488702091033d
#
#
#
#
# tracker_id: CORE-6044
# min_versions: ['4.0']
# versions: 4.0
@ -25,7 +25,7 @@ from firebird.qa import db_factory, isql_act, Action
# version: 4.0
# resources: None
substitutions_1 = [('current value.*', 'current value')]
substitutions_1 = [('current value.*', 'current value'), ('COLL-VERSION=153\\.14', 'COLL-VERSION=153.88')]
init_script_1 = """"""
@ -40,7 +40,7 @@ test_script_1 = """
create table "ТаблицаКотораяВсегдаДолжнаСодержатьТолькоСамуюСвежуюИнформациюю"(
"СтолбецКоторыйВсегдаДолжнаСодержатьТолькоСамуюСвежуюИнформациюю"
"ДоменДляХраненияСтроковыхДанныхКоторыеПредставимыДляСортировкии"
,constraint
,constraint
"ПервичныйКлючНаТаблицуКотораяВсегдаДолжнаСодержатьСвежайшуюИнфу"
primary key
("СтолбецКоторыйВсегдаДолжнаСодержатьТолькоСамуюСвежуюИнформациюю")


@ -2,15 +2,15 @@
#
# id: bugs.core_6049
# title: Builtin functions converting binary string to hexadecimal representation and vice versa
# decription:
# decription:
# Test may need to be more complex. Currently only basic operations are checked:
# * ability to insert into binary field result of hex_decode()
# * result of double conversion: bin_data -> base64_encode -> base64_decode
# * result of double conversion: bin_data -> base64_encode -> base64_decode
# - must be equal to initial bin_data (and similar for bin_data -> hex_encode -> hex_decode)
# We get columns type details using sqlda_display in order to fix them in expected_stdout.
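#    For illustration only (a hedged sketch; the literal x'DEADBEEF' is arbitrary):
#        select hex_encode(x'DEADBEEF') from rdb$database;                   -- DEADBEEF
#        select hex_decode(hex_encode(x'DEADBEEF')) from rdb$database;       -- original bytes back
#        select base64_decode(base64_encode(x'DEADBEEF')) from rdb$database; -- original bytes back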
#
#
# Checked on 4.0.0.1496: OK, 1.679s.
#
#
# tracker_id: CORE-6049
# min_versions: ['4.0']
# versions: 4.0
@ -37,12 +37,12 @@ test_script_1 = """
set list on;
set sqlda_display on;
select
select
t.*
,uid = "b64_decode(b64_encode(uid))" as "b64_dec(b64_enc(uid)) result"
,uid = "hex_decode(hex_encode(uid))" as "hex_dec(hex_enc(uid)) result"
from (
select
select
uid
,base64_encode(uid) as "b64_encode(uid)"
,base64_decode(base64_encode(uid)) as "b64_decode(b64_encode(uid))"
@ -91,6 +91,7 @@ expected_stdout_1 = """
@pytest.mark.version('>=4.0')
def test_1(act_1: Action):
act_1.charset = 'NONE'
act_1.expected_stdout = expected_stdout_1
act_1.execute()
assert act_1.clean_expected_stdout == act_1.clean_stdout


@ -2,7 +2,7 @@
#
# id: bugs.core_6143
# title: Error 'Multiple maps found for ...' is raised in not appropriate case
# decription:
# decription:
# Confirmed bug on: 4.0.0.1535, 3.0.5.33152.
# Checked on:
# 4.0.0.1614: OK, 2.740s.
@ -13,7 +13,9 @@
# as for trusted role and also for "usual" way (i.e. when used specifies 'ROLE ...' clause).
# Discussion about this with Alex was in 23-sep-2019, and his solution not yet known.
# For this reason it was decided to comment code that relates tgo ROLE mapping in this test.
#
#
# [pcisar] 3.11.2021 This test fails for 4.0, WHO_AM_I = TMP$C6143_FOO
#
# tracker_id: CORE-6143
# min_versions: ['3.0.5']
# versions: 3.0.5
@ -40,7 +42,7 @@ test_script_1 = """
execute block as
begin
execute statement 'drop role tmp$r6143_boss';
when any do
when any do
begin
end
end
@ -49,7 +51,7 @@ test_script_1 = """
commit;
create or alter view v_show_mapping as
select
select
a.rdb$map_name
,a.rdb$map_using
,a.rdb$map_plugin
@ -57,8 +59,8 @@ test_script_1 = """
,a.rdb$map_from_type
,a.rdb$map_from
,a.rdb$map_to_type
,a.rdb$map_to
from rdb$database d
,a.rdb$map_to
from rdb$database d
left join rdb$auth_mapping a on 1=1
where rdb$map_from containing 'tmp$c6143' or rdb$map_from containing 'tmp$r6143'
;
@ -88,13 +90,13 @@ test_script_1 = """
connect '$(DSN)' user tmp$c6143_foo password '123' role tmp$r6143_boss;
select
select
'Connected OK when local mapping is duplicated.' as msg
,current_user as who_am_i -- <<< TMP$C6143_BAR must be shown here, *NOT* tmp$c6143_foo
-- temply diabled, wait for solution by Alex, see letters to him 23.09.2019 12:02:
-- ,current_role as what_my_role -- <<< WHAT ROLE MUST BE SHOWN HERE, *BOSS or *ACNT ???
from rdb$database;
set count on;
select * from v_show_mapping;
set count on;
@ -112,8 +114,8 @@ test_script_1 = """
-- ++++++++++++++++++++++++ T E S T G L O B A L M A P P I N G +++++++++++++++++++++++
create or alter global mapping gmap_foo2rio_a using plugin srp from user tmp$c6143_foo to user tmp$c6143_rio;
create or alter global mapping gmap_foo2rio_b using plugin srp from user tmp$c6143_foo to user tmp$c6143_rio;
create or alter global mapping gmap_foo2rio_a using plugin srp from user tmp$c6143_foo to user tmp$c6143_rio;
create or alter global mapping gmap_foo2rio_b using plugin srp from user tmp$c6143_foo to user tmp$c6143_rio;
create or alter global mapping gmap_boss2mngr_a using plugin srp from role tmp$r6143_boss to role tmp$r6143_mngr;
create or alter global mapping gmap_boss2mngr_b using plugin srp from role tmp$c6143_boss to role tmp$r6143_mngr;
@ -184,12 +186,12 @@ expected_stdout_1 = """
RDB$MAP_FROM TMP$C6143_BOSS
RDB$MAP_TO_TYPE 1
RDB$MAP_TO TMP$R6143_ACNT
Records affected: 4
MSG Connected OK when global mapping is duplicated.
WHO_AM_I TMP$C6143_RIO
Records affected: 1
"""


@ -2,13 +2,13 @@
#
# id: bugs.core_6160
# title: SUBSTRING of non-text/-blob is described to return NONE character set in DSQL
# decription:
# decription:
# Confirmed output of: ' ... charset: 0 NONE' on 4.0.0.1627.
# Works as described in the ticket since 4.0.0.1632 ('... charset: 2 ASCII').
# NOTE. In the 'substitution' section we remove all rows except line with phrase 'charset' in it.
# Further, we have to remove the numeric ID of this charset because it can change in the future:
# 'charset: 2 ASCII' --> 'charset: ASCII'
#
#
# tracker_id: CORE-6160
# min_versions: ['4.0']
# versions: 4.0
@ -28,9 +28,9 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
test_script_1 = """
-- 01: sqltype: 448 VARYING scale: 0 subtype: 0 len: 1 charset: 2 ASCII
set sqlda_display on;
set sqlda_display on;
set planonly;
select substring(1 from 1 for 1) from rdb$database;
select substring(1 from 1 for 1) from rdb$database;
select substring(current_date from 1 for 1) from rdb$database;
"""
@ -43,6 +43,7 @@ expected_stdout_1 = """
@pytest.mark.version('>=4.0')
def test_1(act_1: Action):
act_1.charset = 'NONE'
act_1.expected_stdout = expected_stdout_1
act_1.execute()
assert act_1.clean_expected_stdout == act_1.clean_stdout


@ -2,10 +2,10 @@
#
# id: bugs.core_6185
# title: Some (wrong ?) parameters of ENCRYPT() leads FB to crash
# decription:
# decription:
# Confirmed crash on 4.0.0.1637.
# Checked on 4.0.0.1691 SS: OK, 1.658s.
#
#
# tracker_id: CORE-6185
# min_versions: ['4.0']
# versions: 4.0
@ -63,7 +63,7 @@ test_script_1 = """
set term ;^
commit;
select result_msg from sp_block_test('aes');
select result_msg from sp_block_test('aes');
select encrypt( 'fooriobar' using CHACHA20 key q'{1110FB89-AD32-4E}' iv q'{114E811E}' counter cast(null as bigint) ) as encrypt_str from rdb$database;
"""
@ -76,6 +76,7 @@ expected_stdout_1 = """
@pytest.mark.version('>=4.0')
def test_1(act_1: Action):
act_1.charset = 'NONE'
act_1.expected_stdout = expected_stdout_1
act_1.execute()
assert act_1.clean_expected_stdout == act_1.clean_stdout


@ -2,14 +2,14 @@
#
# id: bugs.core_6206
# title: VARCHAR of insufficient length used for set bind of decfloat to varchar
# decription:
# decription:
# Confirmed bug on 4.0.0.1685
# Checked on 4.0.0.1691: OK, 1.165s.
#
#
# 26.06.2020: changed SET BIND argument from numeric(38) to INT128, adjusted output
# (letter from Alex, 25.06.2020 17:56; needed after discuss CORE-6342).
# Checked on 4.0.0.2078.
#
#
# tracker_id: CORE-6206
# min_versions: ['4.0']
# versions: 4.0
@ -58,6 +58,7 @@ expected_stdout_1 = """
@pytest.mark.version('>=4.0')
def test_1(act_1: Action):
act_1.charset = 'NONE'
act_1.expected_stdout = expected_stdout_1
act_1.execute()
assert act_1.clean_expected_stdout == act_1.clean_stdout


@ -2,12 +2,12 @@
#
# id: bugs.core_6300
# title: Next attachment id, next statement id - get this info via MON$ query and rdb$get_context()
# decription:
# decription:
# Check SQLDA output by query mon$database columns and context variabled that are described in doc/sql.extensions/README.context_variables2
# See also: https://github.com/FirebirdSQL/firebird/commit/22ad236f625716f5f2885f8d9e783cca9516f7b3
# Checked on 4.0.0.2170.
#
# tracker_id:
#
# tracker_id:
# min_versions: ['4.0']
# versions: 4.0
# qmid: bugs.core_6300
@ -50,6 +50,7 @@ expected_stdout_1 = """
@pytest.mark.version('>=4.0')
def test_1(act_1: Action):
act_1.charset = 'NONE'
act_1.expected_stdout = expected_stdout_1
act_1.execute()
assert act_1.clean_expected_stdout == act_1.clean_stdout


@ -2,13 +2,13 @@
#
# id: functional.arno.optimizer.opt_inner_join_03
# title: INNER JOIN join order
# decription:
# With a INNER JOIN the relation with the smallest expected result should be the first one in process order.
# The next relation should be the next relation with expected smallest result based on previous relation
# decription:
# With a INNER JOIN the relation with the smallest expected result should be the first one in process order.
# The next relation should be the next relation with expected smallest result based on previous relation
# and do on till last relation.
# Before 2.0, Firebird did stop checking order possibilties above 7 relations.
#
# tracker_id:
#
# tracker_id:
# min_versions: []
# versions: 2.0
# qmid: functional.arno.optimizer.opt_inner_join_03
@ -120,14 +120,28 @@ act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """PLAN JOIN (T1 NATURAL, T1K INDEX (PK_TABLE_1K), T2K INDEX (PK_TABLE_2K), T3K INDEX (PK_TABLE_3K), T5K INDEX (PK_TABLE_5K), T4K INDEX (PK_TABLE_4K), T6K INDEX (PK_TABLE_6K), T8K INDEX (PK_TABLE_8K), T10K INDEX (PK_TABLE_10K))
COUNT
=====================
COUNT
=====================
1
"""
@pytest.mark.version('>=2.0')
@pytest.mark.version('>=2.0,<4')
def test_1(act_1: Action):
act_1.expected_stdout = expected_stdout_1
act_1.execute()
assert act_1.clean_expected_stdout == act_1.clean_stdout
expected_stdout_2 = """PLAN JOIN (T1 NATURAL, T1K INDEX (PK_TABLE_1K), T2K INDEX (PK_TABLE_2K), T3K INDEX (PK_TABLE_3K), T4K INDEX (PK_TABLE_4K), T5K INDEX (PK_TABLE_5K), T6K INDEX (PK_TABLE_6K), T8K INDEX (PK_TABLE_8K), T10K INDEX (PK_TABLE_10K))
COUNT
=====================
1
"""
@pytest.mark.version('>=4')
def test_2(act_1: Action):
act_1.charset = 'NONE'
act_1.expected_stdout = expected_stdout_2
act_1.execute()
assert act_1.clean_expected_stdout == act_1.clean_stdout
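A minimal sketch of how such PLAN output can be obtained without executing the query; the table names here are hypothetical, standing in for TABLE_1K..TABLE_10K:

    -- isql: print only the optimizer's chosen plan
    set planonly;
    select count(*)
    from table_10 t10
    join table_100  t100  on t100.id  = t10.id
    join table_1000 t1000 on t1000.id = t10.id;
    -- the smallest relation is expected to be read NATURAL first,
    -- the larger ones joined via their primary-key indices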

File diff suppressed because it is too large Load Diff

View File

@ -2,8 +2,8 @@
#
# id: functional.datatypes.decfloat_binding_to_legacy
# title: Test ability for DECFLOAT values to be represented as other data types using LEGACY keyword.
# decription:
# We check here that DECFLOAT values will actually be converted to legacy datatypes
# decription:
# We check here that DECFLOAT values will actually be converted to legacy datatypes
# according to the following table from sql.extensions\\README.set_bind.md:
# ----------------------------------------------------------
# | Native datatype | Legacy datatype |
@ -16,20 +16,20 @@
# ----------------------------------------------------------
# SQLDA must contain the same datatypes when we use either explicit rule or LEGACY keyword.
# Checked on 4.0.0.1691 SS: 1.113s.
#
#
# WARNING, 11.03.2020.
# Test verifies binding of TIME WITH TIME ZONE data and uses the America/Los_Angeles timezone.
# But there is daylight saving time in the USA: clocks change at the beginning of March.
#
#
# For this reason a query like "select time '10:00 America/Los_Angeles' from ..." will return
# different values depending on the current date. For example, in the Moscow timezone the
# returned value will be either 20:00 in February or 21:00 in March.
# returned value will be either 20:00 in February or 21:00 in March.
# Results for other timezones (e.g. Tokyo) will differ, etc.
# For this reason, a special replacement is done in the 'substitutions' section: we replace
# the hours value with '??' because the exact time does not matter; we only have to ensure
# the ability to work with such time values using the SET BIND clause.
#
# tracker_id:
#
# tracker_id:
# min_versions: ['4.0.0']
# versions: 4.0
# qmid: None
@ -126,6 +126,7 @@ expected_stdout_1 = """
@pytest.mark.version('>=4.0')
def test_1(act_1: Action):
act_1.charset = 'NONE'
act_1.expected_stdout = expected_stdout_1
act_1.execute()
assert act_1.clean_expected_stdout == act_1.clean_stdout
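A hedged sketch of the LEGACY binding discussed above; the time literal matches the one used by the test, and (as noted) the hour in the result depends on the current date and session timezone:

    set bind of time with time zone to legacy;   -- delivered to the client as a plain TIME
    select time '10:00 America/Los_Angeles' as t_legacy from rdb$database;

    set bind of decfloat to legacy;              -- delivered per the mapping table in the header above
    select cast(pi() as decfloat(34)) as df_legacy from rdb$database;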

View File

@ -1,15 +1,15 @@
#coding:utf-8
#
# id: functional.intfunc.encryption.block_cipher_basic
# title:
# title:
# Verify block crypto algorithms that are implemented in ENCRYPT/DECRYPT built-in functions.
# See doc\\sql.extensions\\README.builtin_functions.txt for details.
#
#
# Checked on 4.0.0.1691: OK, 1.561s.
#
# decription:
#
# tracker_id:
#
# decription:
#
# tracker_id:
# min_versions: ['4.0']
# versions: 4.0
# qmid: None
@ -56,7 +56,7 @@ test_script_1 = """
'cfb',
lpad('', cast(rdb$get_context('USER_SESSION', 'DATA_LEN') as int),uuid_to_char(gen_uuid()) ),
'0101010101010101',
lpad('',16, uuid_to_char( gen_uuid() ))
lpad('',16, uuid_to_char( gen_uuid() ))
);
@ -71,7 +71,7 @@ test_script_1 = """
'ctr',
lpad('', cast(rdb$get_context('USER_SESSION', 'DATA_LEN') as int),uuid_to_char(gen_uuid()) ),
lpad('',16, replace(uuid_to_char(gen_uuid()),'-','') ),
lpad('',16, uuid_to_char( gen_uuid() ))
lpad('',16, uuid_to_char( gen_uuid() ))
);
@ -102,7 +102,7 @@ test_script_1 = """
'ofb',
lpad('', cast(rdb$get_context('USER_SESSION', 'DATA_LEN') as int),uuid_to_char(gen_uuid()) ),
lpad('',16, replace(uuid_to_char(gen_uuid()),'-','') ),
lpad('',16, uuid_to_char( gen_uuid() ))
lpad('',16, uuid_to_char( gen_uuid() ))
);
------------------------------------------------------
@ -117,7 +117,7 @@ test_script_1 = """
from test_char
where crypto_alg = 'AES'
;
insert into test_char
select
@ -141,7 +141,7 @@ test_script_1 = """
from test_char
where crypto_alg = 'AES'
;
insert into test_char
select
@ -153,7 +153,7 @@ test_script_1 = """
from test_char
where crypto_alg = 'AES'
;
insert into test_char
select
@ -165,7 +165,7 @@ test_script_1 = """
from test_char
where crypto_alg = 'AES'
;
insert into test_char
select
@ -229,7 +229,7 @@ test_script_1 = """
do begin
v_encrypt_sttm = 'select encrypt( q''{' || c.source_text || '}'' using ' || c.crypto_alg || coalesce( ' mode ' || c.mode , '' ) || ' key q''{' || c.crypto_key || '}''' || coalesce(' iv q''{' || c.crypto_iv || '}'' ', '') || ') from rdb$database';
execute statement v_encrypt_sttm into v_encrypted;
--v_decrypt_sttm = 'select decrypt( q''{' || v_encrypted || '}'' using ' || c.crypto_alg || coalesce( ' mode ' || c.mode , '' ) || ' key q''{' || c.crypto_key || '}''' || coalesce(' iv q''{' || c.crypto_iv || '}'' ', '') || ') from rdb$database';
--v_decrypt_sttm = 'select decrypt( x''' || v_encrypted || ''' using ' || c.crypto_alg || coalesce( ' mode ' || c.mode , '' ) || ' key q''{' || c.crypto_key || '}''' || coalesce(' iv q''{' || c.crypto_iv || '}'' ', '') || ') from rdb$database';
v_decrypt_sttm = 'select decrypt( cast(? as varbinary(32700)) using ' || c.crypto_alg || coalesce( ' mode ' || c.mode , '' ) || ' key q''{' || c.crypto_key || '}''' || coalesce(' iv q''{' || c.crypto_iv || '}'' ', '') || ') from rdb$database';
@ -250,7 +250,7 @@ test_script_1 = """
set term ;^
commit;
select * from sp_char_block_test;
select * from sp_char_block_test;
commit;
"""
@ -259,223 +259,224 @@ act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
CRYPTO_ALG AES
MODE CFB
RESULT_MSG Source and decrypted strings are identical.
RESULT_MSG Source and decrypted strings are identical.
SRC_TEXT <null>
DECRYPTED_TEXT <null>
CRYPTO_ALG AES
MODE CTR
RESULT_MSG Source and decrypted strings are identical.
RESULT_MSG Source and decrypted strings are identical.
SRC_TEXT <null>
DECRYPTED_TEXT <null>
CRYPTO_ALG AES
MODE ECB
RESULT_MSG Source and decrypted strings are identical.
RESULT_MSG Source and decrypted strings are identical.
SRC_TEXT <null>
DECRYPTED_TEXT <null>
CRYPTO_ALG AES
MODE OFB
RESULT_MSG Source and decrypted strings are identical.
RESULT_MSG Source and decrypted strings are identical.
SRC_TEXT <null>
DECRYPTED_TEXT <null>
CRYPTO_ALG ANUBIS
MODE CFB
RESULT_MSG Source and decrypted strings are identical.
RESULT_MSG Source and decrypted strings are identical.
SRC_TEXT <null>
DECRYPTED_TEXT <null>
CRYPTO_ALG ANUBIS
MODE CTR
RESULT_MSG Source and decrypted strings are identical.
RESULT_MSG Source and decrypted strings are identical.
SRC_TEXT <null>
DECRYPTED_TEXT <null>
CRYPTO_ALG ANUBIS
MODE ECB
RESULT_MSG Source and decrypted strings are identical.
RESULT_MSG Source and decrypted strings are identical.
SRC_TEXT <null>
DECRYPTED_TEXT <null>
CRYPTO_ALG ANUBIS
MODE OFB
RESULT_MSG Source and decrypted strings are identical.
RESULT_MSG Source and decrypted strings are identical.
SRC_TEXT <null>
DECRYPTED_TEXT <null>
CRYPTO_ALG BLOWFISH
MODE CFB
RESULT_MSG Source and decrypted strings are identical.
RESULT_MSG Source and decrypted strings are identical.
SRC_TEXT <null>
DECRYPTED_TEXT <null>
CRYPTO_ALG BLOWFISH
MODE CTR
RESULT_MSG Source and decrypted strings are identical.
RESULT_MSG Source and decrypted strings are identical.
SRC_TEXT <null>
DECRYPTED_TEXT <null>
CRYPTO_ALG BLOWFISH
MODE ECB
RESULT_MSG Source and decrypted strings are identical.
RESULT_MSG Source and decrypted strings are identical.
SRC_TEXT <null>
DECRYPTED_TEXT <null>
CRYPTO_ALG BLOWFISH
MODE OFB
RESULT_MSG Source and decrypted strings are identical.
RESULT_MSG Source and decrypted strings are identical.
SRC_TEXT <null>
DECRYPTED_TEXT <null>
CRYPTO_ALG KHAZAD
MODE CFB
RESULT_MSG Source and decrypted strings are identical.
RESULT_MSG Source and decrypted strings are identical.
SRC_TEXT <null>
DECRYPTED_TEXT <null>
CRYPTO_ALG KHAZAD
MODE CTR
RESULT_MSG Source and decrypted strings are identical.
RESULT_MSG Source and decrypted strings are identical.
SRC_TEXT <null>
DECRYPTED_TEXT <null>
CRYPTO_ALG KHAZAD
MODE ECB
RESULT_MSG Source and decrypted strings are identical.
RESULT_MSG Source and decrypted strings are identical.
SRC_TEXT <null>
DECRYPTED_TEXT <null>
CRYPTO_ALG KHAZAD
MODE OFB
RESULT_MSG Source and decrypted strings are identical.
RESULT_MSG Source and decrypted strings are identical.
SRC_TEXT <null>
DECRYPTED_TEXT <null>
CRYPTO_ALG RC5
MODE CFB
RESULT_MSG Source and decrypted strings are identical.
RESULT_MSG Source and decrypted strings are identical.
SRC_TEXT <null>
DECRYPTED_TEXT <null>
CRYPTO_ALG RC5
MODE CTR
RESULT_MSG Source and decrypted strings are identical.
RESULT_MSG Source and decrypted strings are identical.
SRC_TEXT <null>
DECRYPTED_TEXT <null>
CRYPTO_ALG RC5
MODE ECB
RESULT_MSG Source and decrypted strings are identical.
RESULT_MSG Source and decrypted strings are identical.
SRC_TEXT <null>
DECRYPTED_TEXT <null>
CRYPTO_ALG RC5
MODE OFB
RESULT_MSG Source and decrypted strings are identical.
RESULT_MSG Source and decrypted strings are identical.
SRC_TEXT <null>
DECRYPTED_TEXT <null>
CRYPTO_ALG RC6
MODE CFB
RESULT_MSG Source and decrypted strings are identical.
RESULT_MSG Source and decrypted strings are identical.
SRC_TEXT <null>
DECRYPTED_TEXT <null>
CRYPTO_ALG RC6
MODE CTR
RESULT_MSG Source and decrypted strings are identical.
RESULT_MSG Source and decrypted strings are identical.
SRC_TEXT <null>
DECRYPTED_TEXT <null>
CRYPTO_ALG RC6
MODE ECB
RESULT_MSG Source and decrypted strings are identical.
RESULT_MSG Source and decrypted strings are identical.
SRC_TEXT <null>
DECRYPTED_TEXT <null>
CRYPTO_ALG RC6
MODE OFB
RESULT_MSG Source and decrypted strings are identical.
RESULT_MSG Source and decrypted strings are identical.
SRC_TEXT <null>
DECRYPTED_TEXT <null>
CRYPTO_ALG "SAFER+"
MODE CFB
RESULT_MSG Source and decrypted strings are identical.
RESULT_MSG Source and decrypted strings are identical.
SRC_TEXT <null>
DECRYPTED_TEXT <null>
CRYPTO_ALG "SAFER+"
MODE CTR
RESULT_MSG Source and decrypted strings are identical.
RESULT_MSG Source and decrypted strings are identical.
SRC_TEXT <null>
DECRYPTED_TEXT <null>
CRYPTO_ALG "SAFER+"
MODE ECB
RESULT_MSG Source and decrypted strings are identical.
RESULT_MSG Source and decrypted strings are identical.
SRC_TEXT <null>
DECRYPTED_TEXT <null>
CRYPTO_ALG "SAFER+"
MODE OFB
RESULT_MSG Source and decrypted strings are identical.
RESULT_MSG Source and decrypted strings are identical.
SRC_TEXT <null>
DECRYPTED_TEXT <null>
CRYPTO_ALG TWOFISH
MODE CFB
RESULT_MSG Source and decrypted strings are identical.
RESULT_MSG Source and decrypted strings are identical.
SRC_TEXT <null>
DECRYPTED_TEXT <null>
CRYPTO_ALG TWOFISH
MODE CTR
RESULT_MSG Source and decrypted strings are identical.
RESULT_MSG Source and decrypted strings are identical.
SRC_TEXT <null>
DECRYPTED_TEXT <null>
CRYPTO_ALG TWOFISH
MODE ECB
RESULT_MSG Source and decrypted strings are identical.
RESULT_MSG Source and decrypted strings are identical.
SRC_TEXT <null>
DECRYPTED_TEXT <null>
CRYPTO_ALG TWOFISH
MODE OFB
RESULT_MSG Source and decrypted strings are identical.
RESULT_MSG Source and decrypted strings are identical.
SRC_TEXT <null>
DECRYPTED_TEXT <null>
CRYPTO_ALG XTEA
MODE CFB
RESULT_MSG Source and decrypted strings are identical.
RESULT_MSG Source and decrypted strings are identical.
SRC_TEXT <null>
DECRYPTED_TEXT <null>
CRYPTO_ALG XTEA
MODE CTR
RESULT_MSG Source and decrypted strings are identical.
RESULT_MSG Source and decrypted strings are identical.
SRC_TEXT <null>
DECRYPTED_TEXT <null>
CRYPTO_ALG XTEA
MODE ECB
RESULT_MSG Source and decrypted strings are identical.
RESULT_MSG Source and decrypted strings are identical.
SRC_TEXT <null>
DECRYPTED_TEXT <null>
CRYPTO_ALG XTEA
MODE OFB
RESULT_MSG Source and decrypted strings are identical.
RESULT_MSG Source and decrypted strings are identical.
SRC_TEXT <null>
DECRYPTED_TEXT <null>
"""
@pytest.mark.version('>=4.0')
def test_1(act_1: Action):
act_1.charset = 'NONE'
act_1.expected_stdout = expected_stdout_1
act_1.execute()
assert act_1.clean_expected_stdout == act_1.clean_stdout
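The procedure above builds the ENCRYPT/DECRYPT statements dynamically; the same round trip can be sketched as one standalone statement (key and IV are arbitrary 16-byte placeholders, and the source is exactly one AES block since CBC expects data in whole blocks):

    select cast(
             decrypt(
               encrypt('0123456789ABCDEF' using aes mode cbc
                       key '0123456789ABCDEF'
                       iv  'FEDCBA9876543210')
               using aes mode cbc
               key '0123456789ABCDEF'
               iv  'FEDCBA9876543210')
           as varchar(16)) as roundtrip
    from rdb$database;
    -- expected: ROUNDTRIP equals the source string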

View File

@ -1,16 +1,16 @@
#coding:utf-8
#
# id: functional.intfunc.encryption.block_cipher_special
# title:
# title:
# Verify block crypto algorithms that are implemented in ENCRYPT/DECRYPT built-in functions.
# Additional tests for key length = 192 and 256 bits.
# See doc\\sql.extensions\\README.builtin_functions.txt for details.
#
#
# Checked on 4.0.0.1691: OK, 1.343s.
#
# decription:
#
# tracker_id:
#
# decription:
#
# tracker_id:
# min_versions: ['4.0']
# versions: 4.0
# qmid: None
@ -43,7 +43,7 @@ test_script_1 = """
^
set term ;^
--############################ AES mode OFB ##########################
insert into test_char(
crypto_alg,
@ -56,7 +56,7 @@ test_script_1 = """
'ofb',
lpad('', cast(rdb$get_context('USER_SESSION', 'DATA_LEN') as int),uuid_to_char(gen_uuid()) ),
lpad('', 24, '01'), -- 192 bits
lpad('',16, uuid_to_char( gen_uuid() ))
lpad('',16, uuid_to_char( gen_uuid() ))
);
insert into test_char(
@ -70,7 +70,7 @@ test_script_1 = """
'ofb',
lpad('', cast(rdb$get_context('USER_SESSION', 'DATA_LEN') as int),uuid_to_char(gen_uuid()) ),
lpad('', 32, '01'), -- 256 bits
lpad('',16, uuid_to_char( gen_uuid() ))
lpad('',16, uuid_to_char( gen_uuid() ))
);
@ -87,7 +87,7 @@ test_script_1 = """
'cfb',
lpad('', cast(rdb$get_context('USER_SESSION', 'DATA_LEN') as int),uuid_to_char(gen_uuid()) ),
lpad('', 24, '01'), -- 192 bits
lpad('',16, uuid_to_char( gen_uuid() ))
lpad('',16, uuid_to_char( gen_uuid() ))
);
insert into test_char(
@ -101,7 +101,7 @@ test_script_1 = """
'cfb',
lpad('', cast(rdb$get_context('USER_SESSION', 'DATA_LEN') as int),uuid_to_char(gen_uuid()) ),
lpad('', 32, '01'), -- 256 bits
lpad('',16, uuid_to_char( gen_uuid() ))
lpad('',16, uuid_to_char( gen_uuid() ))
);
@ -117,7 +117,7 @@ test_script_1 = """
'ctr',
lpad('', cast(rdb$get_context('USER_SESSION', 'DATA_LEN') as int),uuid_to_char(gen_uuid()) ),
lpad('', 24, '01'), -- 192 bits
lpad('',16, uuid_to_char( gen_uuid() ))
lpad('',16, uuid_to_char( gen_uuid() ))
);
insert into test_char(
@ -131,7 +131,7 @@ test_script_1 = """
'ctr',
lpad('', cast(rdb$get_context('USER_SESSION', 'DATA_LEN') as int),uuid_to_char(gen_uuid()) ),
lpad('', 32, '01'), -- 256 bits
lpad('',16, uuid_to_char( gen_uuid() ))
lpad('',16, uuid_to_char( gen_uuid() ))
);
@ -173,7 +173,7 @@ test_script_1 = """
'cbc',
lpad('', cast(rdb$get_context('USER_SESSION', 'DATA_LEN') as int),uuid_to_char(gen_uuid()) ),
lpad('', 24, '01'), -- 192 bits
lpad('', 16, uuid_to_char( gen_uuid() ))
lpad('', 16, uuid_to_char( gen_uuid() ))
);
insert into test_char(
@ -230,7 +230,7 @@ select encrypt( lpad('', 16, 'A') using aes mode cbc key '1234567890123456789012
do begin
v_encrypt_sttm = 'select encrypt( q''{' || c.source_text || '}'' using ' || c.crypto_alg || coalesce( ' mode ' || c.mode , '' ) || ' key q''{' || c.crypto_key || '}''' || coalesce(' iv q''{' || c.crypto_iv || '}'' ', '') || ') from rdb$database';
execute statement v_encrypt_sttm into v_encrypted;
v_decrypt_sttm = 'select decrypt( cast(? as varbinary(32700)) using ' || c.crypto_alg || coalesce( ' mode ' || c.mode , '' ) || ' key q''{' || c.crypto_key || '}''' || coalesce(' iv q''{' || c.crypto_iv || '}'' ', '') || ') from rdb$database';
execute statement ( v_decrypt_sttm ) ( v_encrypted ) into v_decrypted;
@ -249,7 +249,7 @@ select encrypt( lpad('', 16, 'A') using aes mode cbc key '1234567890123456789012
set term ;^
commit;
select * from sp_char_block_test;
select * from sp_char_block_test;
commit;
"""
@ -258,67 +258,68 @@ act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
CRYPTO_ALG AES
MODE OFB
RESULT_MSG Source and decrypted strings are identical.
RESULT_MSG Source and decrypted strings are identical.
SRC_TEXT <null>
DECRYPTED_TEXT <null>
CRYPTO_ALG AES
MODE OFB
RESULT_MSG Source and decrypted strings are identical.
RESULT_MSG Source and decrypted strings are identical.
SRC_TEXT <null>
DECRYPTED_TEXT <null>
CRYPTO_ALG AES
MODE CFB
RESULT_MSG Source and decrypted strings are identical.
RESULT_MSG Source and decrypted strings are identical.
SRC_TEXT <null>
DECRYPTED_TEXT <null>
CRYPTO_ALG AES
MODE CFB
RESULT_MSG Source and decrypted strings are identical.
RESULT_MSG Source and decrypted strings are identical.
SRC_TEXT <null>
DECRYPTED_TEXT <null>
CRYPTO_ALG AES
MODE CTR
RESULT_MSG Source and decrypted strings are identical.
RESULT_MSG Source and decrypted strings are identical.
SRC_TEXT <null>
DECRYPTED_TEXT <null>
CRYPTO_ALG AES
MODE CTR
RESULT_MSG Source and decrypted strings are identical.
RESULT_MSG Source and decrypted strings are identical.
SRC_TEXT <null>
DECRYPTED_TEXT <null>
CRYPTO_ALG AES
MODE ECB
RESULT_MSG Source and decrypted strings are identical.
RESULT_MSG Source and decrypted strings are identical.
SRC_TEXT <null>
DECRYPTED_TEXT <null>
CRYPTO_ALG AES
MODE ECB
RESULT_MSG Source and decrypted strings are identical.
RESULT_MSG Source and decrypted strings are identical.
SRC_TEXT <null>
DECRYPTED_TEXT <null>
CRYPTO_ALG AES
MODE CBC
RESULT_MSG Source and decrypted strings are identical.
RESULT_MSG Source and decrypted strings are identical.
SRC_TEXT <null>
DECRYPTED_TEXT <null>
CRYPTO_ALG AES
MODE CBC
RESULT_MSG Source and decrypted strings are identical.
RESULT_MSG Source and decrypted strings are identical.
SRC_TEXT <null>
DECRYPTED_TEXT <null>
"""
@pytest.mark.version('>=4.0')
def test_1(act_1: Action):
act_1.charset = 'NONE'
act_1.expected_stdout = expected_stdout_1
act_1.execute()
assert act_1.clean_expected_stdout == act_1.clean_stdout
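A hedged sketch of the extra key lengths this test covers (24 bytes = 192 bits, 32 bytes = 256 bits); all key, IV and source values are placeholders:

    select encrypt('0123456789ABCDEF' using aes mode cbc
                   key '012345678901234567890123'            -- 24 bytes
                   iv  '0123456789ABCDEF') as enc_192,
           encrypt('0123456789ABCDEF' using aes mode cbc
                   key '01234567890123456789012345678901'    -- 32 bytes
                   iv  '0123456789ABCDEF') as enc_256
    from rdb$database;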