diff --git a/files/gtcs-window-func.sql b/files/gtcs-window-func.sql
new file mode 100644
index 00000000..150c8348
--- /dev/null
+++ b/files/gtcs-window-func.sql
@@ -0,0 +1,47 @@
+recreate table persons (
+    id integer generated by default as identity primary key,
+    name varchar(15)
+);
+
+recreate table entries (
+    id integer generated by default as identity primary key,
+    person integer references persons,
+    dat date,
+    val numeric(10,2)
+);
+
+insert into persons (name) values ('Person 1');
+insert into persons (name) values ('Person 2');
+insert into persons (name) values ('Person 3');
+insert into persons (name) values ('Person 4');
+insert into persons (name) values ('Person 5');
+
+insert into entries (person, dat, val) select id, date '2010-01-02' + id, id * 2 + 0.3 from persons;
+insert into entries (person, dat, val) select id, date '2010-02-01' + id, id * 3 + 0.4 from persons;
+insert into entries (person, dat, val) select id, date '2010-03-01' + id, id * 3 + 0.4 from persons;
+insert into entries (person, dat, val) values (1, null, null);
+commit;
+
+-- select * from entries;
+
+recreate view v1 (x1, x2, x3, x4, x5, x6, x7, x8) as
+    select
+        count(*) over (partition by p.id), count(e.val) over (partition by p.id),
+        min(e.val) over (partition by p.id), max(e.val) over (partition by p.id),
+        count(distinct e.val) over (partition by p.id), min(distinct e.val) over (partition by p.id),
+        max(distinct e.val) over (partition by p.id),
+        p.name
+    from entries e
+    join persons p
+        on p.id = e.person;
+
+recreate view v2 as
+    select *
+    from entries;
+
+recreate view v3 as
+    select v2.person, v2.val, p.name
+    from v2
+    join persons p
+        on p.id = v2.person;
+commit;
diff --git a/tests/bugs/core_0000_test.py b/tests/bugs/core_0000_test.py
index de7c1078..d4874a09 100644
--- a/tests/bugs/core_0000_test.py
+++ b/tests/bugs/core_0000_test.py
@@ -2,8 +2,10 @@

 """
 ID: dummy
+ISSUE:
 TITLE: Dummy test
 DESCRIPTION:
+JIRA:
 FBTEST: bugs.core_0000
 """

diff --git a/tests/bugs/core_4841_test.py b/tests/bugs/core_4841_test.py
index 0a9feddf..a3e6c4c4 100644
--- a/tests/bugs/core_4841_test.py
+++ b/tests/bugs/core_4841_test.py
@@ -6,6 +6,11 @@
 ISSUE: 5137
 TITLE: Make message about missing password being always displayed as reply on attempt to issue CREATE new login without PASSWORD clause
 DESCRIPTION:
+NOTES:
+[04.02.2022] pcisar
+    Test fails with 3.0.8, because the command
+    create or alter user u01 tags (password = 'foo');
+    does not produce any error.
 JIRA: CORE-4841
 FBTEST: bugs.core_4841
 """
@@ -29,8 +34,6 @@ test_script = """

 act = isql_act('db', test_script, substitutions=[('[-]?Password', 'Password')])

-# version: 3.0.8
-
 expected_stderr = """
 Statement failed, SQLSTATE = 42000
 unsuccessful metadata update
@@ -66,6 +69,7 @@ unsuccessful metadata update
 -Password must be specified when creating user
 """

+@pytest.mark.skip("FIXME: see notes")
 @pytest.mark.version('>=3.0.8')
 def test_1(act: Action):
     act.expected_stderr = expected_stderr
diff --git a/tests/bugs/core_5229_test.py b/tests/bugs/core_5229_test.py
index 52fc2aa6..030a8522 100644
--- a/tests/bugs/core_5229_test.py
+++ b/tests/bugs/core_5229_test.py
@@ -5,6 +5,11 @@ ID: issue-5508
 ISSUE: 5508
 TITLE: Allow to enforce IPv4 or IPv6 in URL-like connection strings
 DESCRIPTION:
+NOTES:
+[04.02.2022] pcisar
+    Test may fail with IPv6.
+    For example, it fails on my Linux OpenSuSE Tumbleweed with a regular setup (IPv6 should not be disabled).
+    Test should IMHO check IPv4/IPv6 availability on the test host before running the inet6:// check.
 JIRA: CORE-5229
 FBTEST: bugs.core_5229
 """
@@ -24,6 +29,7 @@ expected_stdout = """
 PROTOCOL_WHEN_CONNECT_BY_ES_EDS TCPv6
 """

+@pytest.mark.skip("FIXME: see notes")
 @pytest.mark.version('>=3.0.1')
 def test_1(act: Action):
     sql_chk = f"""
diff --git a/tests/functional/arno/derived_tables/test_01.py b/tests/functional/arno/derived_tables/test_01.py
index 9e7d636e..810f95c3 100644
--- a/tests/functional/arno/derived_tables/test_01.py
+++ b/tests/functional/arno/derived_tables/test_01.py
@@ -4,6 +4,7 @@
 ID: derived-table-01
 TITLE: Simple derived table
 DESCRIPTION:
+FBTEST: functional.arno.derived_tables.01
 """

 import pytest
diff --git a/tests/functional/arno/derived_tables/test_02.py b/tests/functional/arno/derived_tables/test_02.py
index c8ff7673..55479eee 100644
--- a/tests/functional/arno/derived_tables/test_02.py
+++ b/tests/functional/arno/derived_tables/test_02.py
@@ -4,6 +4,7 @@
 ID: derived-table-02
 TITLE: Unnamed (no relation alias) derived table
 DESCRIPTION:
+FBTEST: functional.arno.derived_tables.02
 """

 import pytest
diff --git a/tests/functional/arno/derived_tables/test_03.py b/tests/functional/arno/derived_tables/test_03.py
index c6483e21..e7c73b90 100644
--- a/tests/functional/arno/derived_tables/test_03.py
+++ b/tests/functional/arno/derived_tables/test_03.py
@@ -4,6 +4,7 @@
 ID: derived-table-03
 TITLE: Explicit column names for derived table
 DESCRIPTION:
+FBTEST: functional.arno.derived_tables.03
 """

 import pytest
diff --git a/tests/functional/arno/derived_tables/test_04.py b/tests/functional/arno/derived_tables/test_04.py
index c46ca9c7..025e3dc8 100644
--- a/tests/functional/arno/derived_tables/test_04.py
+++ b/tests/functional/arno/derived_tables/test_04.py
@@ -4,6 +4,7 @@
 ID: derived-table-04
 TITLE: Derived table column names must be unique
 DESCRIPTION:
+FBTEST: functional.arno.derived_tables.04
 """

 import pytest
diff --git a/tests/functional/arno/derived_tables/test_05.py b/tests/functional/arno/derived_tables/test_05.py
index 74100116..49da7b2d 100644
--- a/tests/functional/arno/derived_tables/test_05.py
+++ b/tests/functional/arno/derived_tables/test_05.py
@@ -4,6 +4,7 @@
 ID: derived-table-05
 TITLE: Derived table column names must be unique
 DESCRIPTION:
+FBTEST: functional.arno.derived_tables.05
 """

 import pytest
diff --git a/tests/functional/arno/derived_tables/test_06.py b/tests/functional/arno/derived_tables/test_06.py
index 15ac8221..799951b4 100644
--- a/tests/functional/arno/derived_tables/test_06.py
+++ b/tests/functional/arno/derived_tables/test_06.py
@@ -4,6 +4,7 @@
 ID: derived-table-06
 TITLE: Outer reference inside derived table to other relations in from clause is not allowed
 DESCRIPTION:
+FBTEST: functional.arno.derived_tables.06
 """

 import pytest
diff --git a/tests/functional/arno/derived_tables/test_07.py b/tests/functional/arno/derived_tables/test_07.py
index 99887b7c..2357efc7 100644
--- a/tests/functional/arno/derived_tables/test_07.py
+++ b/tests/functional/arno/derived_tables/test_07.py
@@ -4,6 +4,7 @@
 ID: derived-table-07
 TITLE: Outer reference inside derived table to other relations in from clause is not allowed
 DESCRIPTION:
+FBTEST: functional.arno.derived_tables.07
 """

 import pytest
diff --git a/tests/functional/arno/derived_tables/test_08.py b/tests/functional/arno/derived_tables/test_08.py
index 8a9e1d53..ce5878c2 100644
--- a/tests/functional/arno/derived_tables/test_08.py
+++ b/tests/functional/arno/derived_tables/test_08.py
@@ -4,6 +4,7 @@
 ID: derived-table-08
 TITLE: Outer reference inside derived table to other relations in from clause is not
allowed DESCRIPTION: +FBTEST: functional.arno.derived_tables.08 """ import pytest diff --git a/tests/functional/arno/derived_tables/test_09.py b/tests/functional/arno/derived_tables/test_09.py index 716061ec..b4a0ea8f 100644 --- a/tests/functional/arno/derived_tables/test_09.py +++ b/tests/functional/arno/derived_tables/test_09.py @@ -4,6 +4,7 @@ ID: derived-table-09 TITLE: Outer reference inside derived table to other relations in from clause is not allowed DESCRIPTION: +FBTEST: functional.arno.derived_tables.09 """ import pytest diff --git a/tests/functional/arno/derived_tables/test_10.py b/tests/functional/arno/derived_tables/test_10.py index a1cfa76d..5a28fcbc 100644 --- a/tests/functional/arno/derived_tables/test_10.py +++ b/tests/functional/arno/derived_tables/test_10.py @@ -4,6 +4,7 @@ ID: derived-table-10 TITLE: Outer reference to upper scope-level is allowed DESCRIPTION: Such as fields inside derived table part of sub-query. +FBTEST: functional.arno.derived_tables.10 """ import pytest diff --git a/tests/functional/arno/derived_tables/test_11.py b/tests/functional/arno/derived_tables/test_11.py index 0a09a0bd..c3e43042 100644 --- a/tests/functional/arno/derived_tables/test_11.py +++ b/tests/functional/arno/derived_tables/test_11.py @@ -4,6 +4,7 @@ ID: derived-table-11 TITLE: Outer reference to upper scope-level is allowed DESCRIPTION: Such as fields inside derived table part of sub-query (IN-predicate). +FBTEST: functional.arno.derived_tables.11 """ import pytest diff --git a/tests/functional/arno/derived_tables/test_12.py b/tests/functional/arno/derived_tables/test_12.py index 608abdbf..37d958ce 100644 --- a/tests/functional/arno/derived_tables/test_12.py +++ b/tests/functional/arno/derived_tables/test_12.py @@ -4,6 +4,7 @@ ID: derived-table-12 TITLE: Outer reference to upper scope-level is allowed DESCRIPTION: Such as fields inside derived table part of sub-query (EXISTS). 
+FBTEST: functional.arno.derived_tables.12 """ import pytest diff --git a/tests/functional/arno/derived_tables/test_13.py b/tests/functional/arno/derived_tables/test_13.py index 95b12e8c..fe8aa1f7 100644 --- a/tests/functional/arno/derived_tables/test_13.py +++ b/tests/functional/arno/derived_tables/test_13.py @@ -4,6 +4,7 @@ ID: derived-table-13 TITLE: DISTINCT inside derived table DESCRIPTION: +FBTEST: functional.arno.derived_tables.13 """ import pytest diff --git a/tests/functional/arno/derived_tables/test_14.py b/tests/functional/arno/derived_tables/test_14.py index 5305b7c2..ae418cad 100644 --- a/tests/functional/arno/derived_tables/test_14.py +++ b/tests/functional/arno/derived_tables/test_14.py @@ -4,6 +4,7 @@ ID: derived-table-14 TITLE: FIRST / SKIP inside derived table DESCRIPTION: +FBTEST: functional.arno.derived_tables.14 """ import pytest diff --git a/tests/functional/arno/derived_tables/test_15.py b/tests/functional/arno/derived_tables/test_15.py index 792efbc0..d16d8057 100644 --- a/tests/functional/arno/derived_tables/test_15.py +++ b/tests/functional/arno/derived_tables/test_15.py @@ -4,6 +4,7 @@ ID: derived-table-15 TITLE: UNION inside derived table DESCRIPTION: +FBTEST: functional.arno.derived_tables.15 """ import pytest diff --git a/tests/functional/arno/derived_tables/test_16.py b/tests/functional/arno/derived_tables/test_16.py index 3fd40e79..f86008ec 100644 --- a/tests/functional/arno/derived_tables/test_16.py +++ b/tests/functional/arno/derived_tables/test_16.py @@ -4,6 +4,7 @@ ID: derived-table-16 TITLE: Simple derived table with aggregate inside DESCRIPTION: +FBTEST: functional.arno.derived_tables.16 """ import pytest diff --git a/tests/functional/arno/derived_tables/test_17.py b/tests/functional/arno/derived_tables/test_17.py index d5d5b9be..c7a745f9 100644 --- a/tests/functional/arno/derived_tables/test_17.py +++ b/tests/functional/arno/derived_tables/test_17.py @@ -4,6 +4,7 @@ ID: derived-table-17 TITLE: Aggregate inside derived table DESCRIPTION: +FBTEST: functional.arno.derived_tables.17 """ import pytest diff --git a/tests/functional/arno/derived_tables/test_18.py b/tests/functional/arno/derived_tables/test_18.py index 44339900..fe83b597 100644 --- a/tests/functional/arno/derived_tables/test_18.py +++ b/tests/functional/arno/derived_tables/test_18.py @@ -4,6 +4,7 @@ ID: derived-table-18 TITLE: Aggregate inside derived table DESCRIPTION: +FBTEST: functional.arno.derived_tables.18 """ import pytest diff --git a/tests/functional/arno/derived_tables/test_19.py b/tests/functional/arno/derived_tables/test_19.py index 9195e916..234b207e 100644 --- a/tests/functional/arno/derived_tables/test_19.py +++ b/tests/functional/arno/derived_tables/test_19.py @@ -4,6 +4,7 @@ ID: derived-table-19 TITLE: Sub-select inside derived table DESCRIPTION: +FBTEST: functional.arno.derived_tables.19 """ import pytest diff --git a/tests/functional/arno/derived_tables/test_20.py b/tests/functional/arno/derived_tables/test_20.py index e9b196bc..8efd81cb 100644 --- a/tests/functional/arno/derived_tables/test_20.py +++ b/tests/functional/arno/derived_tables/test_20.py @@ -4,6 +4,7 @@ ID: derived-table-20 TITLE: Sub-select inside derived table DESCRIPTION: +FBTEST: functional.arno.derived_tables.20 """ import pytest diff --git a/tests/functional/arno/derived_tables/test_21.py b/tests/functional/arno/derived_tables/test_21.py index 21e12df9..b251b490 100644 --- a/tests/functional/arno/derived_tables/test_21.py +++ b/tests/functional/arno/derived_tables/test_21.py @@ -6,6 +6,7 @@ TITLE: 
Implicit derived table by IN predicate DESCRIPTION: IN predicate uses derived table internally and should ignore column-name checks (Aggregate functions are unnamed by default). +FBTEST: functional.arno.derived_tables.21 """ import pytest diff --git a/tests/functional/arno/derived_tables/test_22.py b/tests/functional/arno/derived_tables/test_22.py index 2b0c548f..ff69b67f 100644 --- a/tests/functional/arno/derived_tables/test_22.py +++ b/tests/functional/arno/derived_tables/test_22.py @@ -4,6 +4,7 @@ ID: derived-table-22 TITLE: Derived table outer reference (triggers) DESCRIPTION: NEW/OLD context variables should be available inside the derived table. +FBTEST: functional.arno.derived_tables.22 """ import pytest diff --git a/tests/functional/arno/indices/test_lower_bound_asc_02_segments_01.py b/tests/functional/arno/indices/test_lower_bound_asc_02_segments_01.py index b06e4f95..5798e5ac 100644 --- a/tests/functional/arno/indices/test_lower_bound_asc_02_segments_01.py +++ b/tests/functional/arno/indices/test_lower_bound_asc_02_segments_01.py @@ -7,6 +7,7 @@ DESCRIPTION: Check if all 5 values are fetched with "equals" operator over first segment and "greater than or equal" operator on second segment. 2 values are bound to the upper segments and 1 value is bound to the lower segment. +FBTEST: functional.arno.indices.lower_bound_asc_02_segments_01 """ import pytest diff --git a/tests/functional/arno/indices/test_lower_bound_desc_02_segments_01.py b/tests/functional/arno/indices/test_lower_bound_desc_02_segments_01.py index 138b497b..690211e0 100644 --- a/tests/functional/arno/indices/test_lower_bound_desc_02_segments_01.py +++ b/tests/functional/arno/indices/test_lower_bound_desc_02_segments_01.py @@ -7,6 +7,7 @@ DESCRIPTION: Check if all 5 values are fetched with "equals" operator over first segment and "lower than or equal" operator on second segment. 2 values are bound to the lower segments and 1 value is bound to the upper segment. +FBTEST: functional.arno.indices.lower_bound_desc_02_segments_01 """ import pytest diff --git a/tests/functional/arno/indices/test_starting_with_01.py b/tests/functional/arno/indices/test_starting_with_01.py index 4b11b1c0..ae978ef3 100644 --- a/tests/functional/arno/indices/test_starting_with_01.py +++ b/tests/functional/arno/indices/test_starting_with_01.py @@ -4,6 +4,7 @@ ID: index.starting-with-01 TITLE: STARTING WITH charset NONE DESCRIPTION: STARTING WITH - Select from table with 2 entries +FBTEST: functional.arno.indices.starting_with_01 """ import pytest diff --git a/tests/functional/arno/indices/test_starting_with_02.py b/tests/functional/arno/indices/test_starting_with_02.py index 7e90837a..f18ca2ad 100644 --- a/tests/functional/arno/indices/test_starting_with_02.py +++ b/tests/functional/arno/indices/test_starting_with_02.py @@ -4,6 +4,7 @@ ID: index.starting-with-02 TITLE: STARTING WITH charset ISO8859_1 DESCRIPTION: STARTING WITH - Select from table with 2 entries +FBTEST: functional.arno.indices.starting_with_02 """ import pytest diff --git a/tests/functional/arno/indices/test_timestamps_01.py b/tests/functional/arno/indices/test_timestamps_01.py index 276819b8..7e5f6f57 100644 --- a/tests/functional/arno/indices/test_timestamps_01.py +++ b/tests/functional/arno/indices/test_timestamps_01.py @@ -6,6 +6,7 @@ TITLE: TIMESTAMP in index with values below julian date DESCRIPTION: Datetime values below the julian date (firebird base date '1858-11-17') should be stored in correct order. 
+FBTEST: functional.arno.indices.timestamps_01 """ import pytest diff --git a/tests/functional/arno/indices/test_upper_bound_asc_01_segments_01.py b/tests/functional/arno/indices/test_upper_bound_asc_01_segments_01.py index 9096419a..534d344f 100644 --- a/tests/functional/arno/indices/test_upper_bound_asc_01_segments_01.py +++ b/tests/functional/arno/indices/test_upper_bound_asc_01_segments_01.py @@ -1,17 +1,10 @@ #coding:utf-8 -# -# id: functional.arno.indices.upper_bound_asc_01_segments_01 -# title: ASC single index upper bound -# decription: Check if all 15 values are fetched with "lower than or equal" operator. -# tracker_id: -# min_versions: [] -# versions: 1.5 -# qmid: functional.arno.indexes.upper_bound_asc_01_segments_01 """ ID: index.upper-bound-asc-1-segment-01 TITLE: ASC single segment index upper bound DESCRIPTION: Check if all 15 values are fetched with "lower than or equal" operator. +FBTEST: functional.arno.indices.upper_bound_asc_01_segments_01 """ import pytest diff --git a/tests/functional/arno/indices/test_upper_bound_asc_01_segments_02.py b/tests/functional/arno/indices/test_upper_bound_asc_01_segments_02.py index b4e577ac..158bfd40 100644 --- a/tests/functional/arno/indices/test_upper_bound_asc_01_segments_02.py +++ b/tests/functional/arno/indices/test_upper_bound_asc_01_segments_02.py @@ -4,6 +4,7 @@ ID: index.upper-bound-asc-1-segment-02 TITLE: ASC single segment index upper bound DESCRIPTION: Check if all 32 values are fetched with "lower than" operator. +FBTEST: functional.arno.indices.upper_bound_asc_01_segments_02 """ import pytest diff --git a/tests/functional/arno/indices/test_upper_bound_asc_01_segments_03.py b/tests/functional/arno/indices/test_upper_bound_asc_01_segments_03.py index f275ac1c..fca2060f 100644 --- a/tests/functional/arno/indices/test_upper_bound_asc_01_segments_03.py +++ b/tests/functional/arno/indices/test_upper_bound_asc_01_segments_03.py @@ -4,6 +4,7 @@ ID: index.upper-bound-asc-1-segment-03 TITLE: ASC single segment index upper bound DESCRIPTION: Check if all 5 values are fetched with "lower than or equal" operator. +FBTEST: functional.arno.indices.upper_bound_asc_01_segments_03 """ import pytest diff --git a/tests/functional/arno/indices/test_upper_bound_asc_01_segments_04.py b/tests/functional/arno/indices/test_upper_bound_asc_01_segments_04.py index 757d6224..493d471c 100644 --- a/tests/functional/arno/indices/test_upper_bound_asc_01_segments_04.py +++ b/tests/functional/arno/indices/test_upper_bound_asc_01_segments_04.py @@ -4,6 +4,7 @@ ID: index.upper-bound-asc-1-segment-04 TITLE: ASC single segment index upper bound DESCRIPTION: Check if all 5 values are fetched with "lower than or equal" operator. +FBTEST: functional.arno.indices.upper_bound_asc_01_segments_04 """ import pytest diff --git a/tests/functional/arno/indices/test_upper_bound_asc_02_segments_01.py b/tests/functional/arno/indices/test_upper_bound_asc_02_segments_01.py index 1d1114c9..6f207483 100644 --- a/tests/functional/arno/indices/test_upper_bound_asc_02_segments_01.py +++ b/tests/functional/arno/indices/test_upper_bound_asc_02_segments_01.py @@ -7,6 +7,7 @@ DESCRIPTION: Check if all 5 values are fetched with "equals" operator over first segment and "lower than or equal" operator on second segment. 2 values are bound to the upper segments and 1 value is bound to the lower segments. 
+FBTEST: functional.arno.indices.upper_bound_asc_02_segments_01 """ import pytest diff --git a/tests/functional/arno/indices/test_upper_bound_desc_01_segments_01.py b/tests/functional/arno/indices/test_upper_bound_desc_01_segments_01.py index 0c497f01..b4784a20 100644 --- a/tests/functional/arno/indices/test_upper_bound_desc_01_segments_01.py +++ b/tests/functional/arno/indices/test_upper_bound_desc_01_segments_01.py @@ -4,6 +4,7 @@ ID: index.upper-bound-desc-1-segment-01 TITLE: DESC single segment index upper bound DESCRIPTION: Check if all 15 values are fetched with "greater than or equal" operator. +FBTEST: functional.arno.indices.upper_bound_desc_01_segments_01 """ import pytest diff --git a/tests/functional/arno/indices/test_upper_bound_desc_01_segments_02.py b/tests/functional/arno/indices/test_upper_bound_desc_01_segments_02.py index 946de6e9..dd8939ee 100644 --- a/tests/functional/arno/indices/test_upper_bound_desc_01_segments_02.py +++ b/tests/functional/arno/indices/test_upper_bound_desc_01_segments_02.py @@ -4,6 +4,7 @@ ID: index.upper-bound-desc-1-segment-02 TITLE: DESC single segment index upper bound DESCRIPTION: Check if all 15 values are fetched with "greater than" operator. +FBTEST: functional.arno.indices.upper_bound_desc_01_segments_02 """ import pytest diff --git a/tests/functional/arno/indices/test_upper_bound_desc_02_segments_01.py b/tests/functional/arno/indices/test_upper_bound_desc_02_segments_01.py index 35792086..f6bd8f8e 100644 --- a/tests/functional/arno/indices/test_upper_bound_desc_02_segments_01.py +++ b/tests/functional/arno/indices/test_upper_bound_desc_02_segments_01.py @@ -7,6 +7,7 @@ DESCRIPTION: Check if all 5 values are fetched with "equals" operator over first segment and "greater than or equal" operator on second segment. 2 values are bound to the upper segments and 1 value is bound to the lower segment. +FBTEST: functional.arno.indices.upper_bound_desc_02_segments_01 """ import pytest diff --git a/tests/functional/arno/indices/test_upper_lower_bounds_01.py b/tests/functional/arno/indices/test_upper_lower_bounds_01.py index c2ea36c7..183358d9 100644 --- a/tests/functional/arno/indices/test_upper_lower_bounds_01.py +++ b/tests/functional/arno/indices/test_upper_lower_bounds_01.py @@ -5,6 +5,7 @@ ID: index.upper-lower-bounds-01 TITLE: Upper and lower bounds DESCRIPTION: Equal comparison should be prefered. Lower and Upper bounds are bound by the same value. +FBTEST: functional.arno.indices.upper_lower_bounds_01 """ import pytest diff --git a/tests/functional/arno/indices/test_upper_lower_bounds_02.py b/tests/functional/arno/indices/test_upper_lower_bounds_02.py index 361d517a..abc91487 100644 --- a/tests/functional/arno/indices/test_upper_lower_bounds_02.py +++ b/tests/functional/arno/indices/test_upper_lower_bounds_02.py @@ -1,12 +1,4 @@ #coding:utf-8 -# -# id: functional.arno.indices.upper_lower_bounds_02 -# title: upper and lower bounds -# decription: "Less or equal than" should be prefered above "less than" and "greater or equal than" above "greater than". -# tracker_id: -# min_versions: [] -# versions: 3.0 -# qmid: functional.arno.indexes.upper_lower_bounds_02 """ ID: index.upper-lower-bounds-02 @@ -14,6 +6,7 @@ TITLE: Upper and lower bounds DESCRIPTION: "Less or equal than" should be prefered above "less than" and "greater or equal than" above "greater than". 
+FBTEST: functional.arno.indices.upper_lower_bounds_02 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_aggregate_distribution_01.py b/tests/functional/arno/optimizer/test_opt_aggregate_distribution_01.py index e3e75315..7c40e11c 100644 --- a/tests/functional/arno/optimizer/test_opt_aggregate_distribution_01.py +++ b/tests/functional/arno/optimizer/test_opt_aggregate_distribution_01.py @@ -1,12 +1,4 @@ #coding:utf-8 -# -# id: functional.arno.optimizer.opt_aggregate_distribution_01 -# title: Try to deliver HAVING CLAUSE conjunctions to the WHERE clause -# decription: Comparisons which doesn't contain (anywhere hiding in the expression) aggregate-functions should be delivered to the where clause. The underlying aggregate stream could possible use it for a index and speed it up. -# tracker_id: -# min_versions: [] -# versions: 3.0 -# qmid: functional.arno.optimizer.opt_aggregate_distribution_01 """ ID: optimizer.aggregate-distribution-01 @@ -15,6 +7,7 @@ DESCRIPTION: Comparisons which doesn't contain (anywhere hiding in the expression) aggregate-functions should be delivered to the where clause. The underlying aggregate stream could possible use it for a index and speed it up. +FBTEST: functional.arno.optimizer.opt_aggregate_distribution_01 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_aggregate_distribution_02.py b/tests/functional/arno/optimizer/test_opt_aggregate_distribution_02.py index d0ba3f8e..b3d18830 100644 --- a/tests/functional/arno/optimizer/test_opt_aggregate_distribution_02.py +++ b/tests/functional/arno/optimizer/test_opt_aggregate_distribution_02.py @@ -7,6 +7,7 @@ DESCRIPTION: Comparisons which doesn't contain (anywhere hiding in the expression) aggregate-functions should be delivered to the where clause. The underlying aggregate stream could possible use it for a index and speed it up. +FBTEST: functional.arno.optimizer.opt_aggregate_distribution_02 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_aggregate_distribution_03.py b/tests/functional/arno/optimizer/test_opt_aggregate_distribution_03.py index 7ff3c31f..7f33a839 100644 --- a/tests/functional/arno/optimizer/test_opt_aggregate_distribution_03.py +++ b/tests/functional/arno/optimizer/test_opt_aggregate_distribution_03.py @@ -7,6 +7,7 @@ DESCRIPTION: Comparisons which doesn't contain (anywhere hiding in the expression) aggregate-functions should be delivered to the where clause. The underlying aggregate stream could possible use it for a index and speed it up. +FBTEST: functional.arno.optimizer.opt_aggregate_distribution_03 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_aggregate_distribution_04.py b/tests/functional/arno/optimizer/test_opt_aggregate_distribution_04.py index ebdac9ee..d873357e 100644 --- a/tests/functional/arno/optimizer/test_opt_aggregate_distribution_04.py +++ b/tests/functional/arno/optimizer/test_opt_aggregate_distribution_04.py @@ -7,6 +7,7 @@ DESCRIPTION: Comparisons which doesn't contain (anywhere hiding in the expression) aggregate-functions should be delivered to the where clause. The underlying aggregate stream could possible use it for a index and speed it up. 
+FBTEST: functional.arno.optimizer.opt_aggregate_distribution_04 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_aggregate_distribution_05.py b/tests/functional/arno/optimizer/test_opt_aggregate_distribution_05.py index 06bb5664..9d0b02ef 100644 --- a/tests/functional/arno/optimizer/test_opt_aggregate_distribution_05.py +++ b/tests/functional/arno/optimizer/test_opt_aggregate_distribution_05.py @@ -8,6 +8,7 @@ DESCRIPTION: should be delivered to the where clause. The underlying aggregate stream could possible use it for a index and speed it up. VIEWs that contain aggregate queries always (as expected) add WHERE clause (on that VIEW) inside the HAVING clause from the aggregate. +FBTEST: functional.arno.optimizer.opt_aggregate_distribution_05 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_aggregate_distribution_06.py b/tests/functional/arno/optimizer/test_opt_aggregate_distribution_06.py index 6775a946..961c1692 100644 --- a/tests/functional/arno/optimizer/test_opt_aggregate_distribution_06.py +++ b/tests/functional/arno/optimizer/test_opt_aggregate_distribution_06.py @@ -8,6 +8,7 @@ DESCRIPTION: should be delivered to the where clause. The underlying aggregate stream could possible use it for a index and speed it up. VIEWs that contain aggregate queries always (as expected) add WHERE clause (on that VIEW) inside the HAVING clause from the aggregate. +FBTEST: functional.arno.optimizer.opt_aggregate_distribution_06 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_aggregate_distribution_07.py b/tests/functional/arno/optimizer/test_opt_aggregate_distribution_07.py index a8576ddc..d89c0250 100644 --- a/tests/functional/arno/optimizer/test_opt_aggregate_distribution_07.py +++ b/tests/functional/arno/optimizer/test_opt_aggregate_distribution_07.py @@ -7,6 +7,7 @@ DESCRIPTION: Comparisons which doesn't contain (anywhere hiding in the expression) aggregate-functions should be delivered to the where clause. The underlying aggregate stream could possible use it for a index and speed it up. +FBTEST: functional.arno.optimizer.opt_aggregate_distribution_07 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_aggregate_distribution_08.py b/tests/functional/arno/optimizer/test_opt_aggregate_distribution_08.py index c62cb23f..cadc7c1a 100644 --- a/tests/functional/arno/optimizer/test_opt_aggregate_distribution_08.py +++ b/tests/functional/arno/optimizer/test_opt_aggregate_distribution_08.py @@ -7,6 +7,7 @@ DESCRIPTION: Comparisons which doesn't contain (anywhere hiding in the expression) aggregate-functions should be delivered to the where clause. The underlying aggregate stream could possible use it for a index and speed it up. +FBTEST: functional.arno.optimizer.opt_aggregate_distribution_08 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_aggregate_distribution_09.py b/tests/functional/arno/optimizer/test_opt_aggregate_distribution_09.py index 318234f4..691875d2 100644 --- a/tests/functional/arno/optimizer/test_opt_aggregate_distribution_09.py +++ b/tests/functional/arno/optimizer/test_opt_aggregate_distribution_09.py @@ -7,6 +7,7 @@ DESCRIPTION: Comparisons which doesn't contain (anywhere hiding in the expression) aggregate-functions should be delivered to the where clause. The underlying aggregate stream could possible use it for a index and speed it up. 
+FBTEST: functional.arno.optimizer.opt_aggregate_distribution_09 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_aggregate_distribution_10.py b/tests/functional/arno/optimizer/test_opt_aggregate_distribution_10.py index 47d74788..2ad75e06 100644 --- a/tests/functional/arno/optimizer/test_opt_aggregate_distribution_10.py +++ b/tests/functional/arno/optimizer/test_opt_aggregate_distribution_10.py @@ -7,6 +7,7 @@ DESCRIPTION: Comparisons which doesn't contain (anywhere hiding in the expression) aggregate-functions should be delivered to the where clause. The underlying aggregate stream could possible use it for a index and speed it up. +FBTEST: functional.arno.optimizer.opt_aggregate_distribution_10 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_aggregate_distribution_11.py b/tests/functional/arno/optimizer/test_opt_aggregate_distribution_11.py index a0cb0681..263340d8 100644 --- a/tests/functional/arno/optimizer/test_opt_aggregate_distribution_11.py +++ b/tests/functional/arno/optimizer/test_opt_aggregate_distribution_11.py @@ -7,6 +7,7 @@ DESCRIPTION: Comparisons which doesn't contain (anywhere hiding in the expression) aggregate-functions should be delivered to the where clause. The underlying aggregate stream could possible use it for a index and speed it up. +FBTEST: functional.arno.optimizer.opt_aggregate_distribution_11 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_aggregate_distribution_12.py b/tests/functional/arno/optimizer/test_opt_aggregate_distribution_12.py index 5273d76f..a94d6157 100644 --- a/tests/functional/arno/optimizer/test_opt_aggregate_distribution_12.py +++ b/tests/functional/arno/optimizer/test_opt_aggregate_distribution_12.py @@ -7,6 +7,7 @@ DESCRIPTION: Comparisons which doesn't contain (anywhere hiding in the expression) aggregate-functions should be delivered to the where clause. The underlying aggregate stream could possible use it for a index and speed it up. +FBTEST: functional.arno.optimizer.opt_aggregate_distribution_12 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_aggregate_distribution_13.py b/tests/functional/arno/optimizer/test_opt_aggregate_distribution_13.py index b8af379b..146101e1 100644 --- a/tests/functional/arno/optimizer/test_opt_aggregate_distribution_13.py +++ b/tests/functional/arno/optimizer/test_opt_aggregate_distribution_13.py @@ -1,12 +1,4 @@ #coding:utf-8 -# -# id: functional.arno.optimizer.opt_aggregate_distribution_13 -# title: Try to deliver HAVING CLAUSE conjunctions to the WHERE clause -# decription: Comparisons which doesn't contain (anywhere hiding in the expression) aggregate-functions should be delivered to the where clause. The underlying aggregate stream could possible use it for a index and speed it up. -# tracker_id: -# min_versions: [] -# versions: 3.0 -# qmid: functional.arno.optimizer.opt_aggregate_distribution_13 """ ID: optimizer.aggregate-distribution-13 @@ -15,6 +7,7 @@ DESCRIPTION: Comparisons which doesn't contain (anywhere hiding in the expression) aggregate-functions should be delivered to the where clause. The underlying aggregate stream could possible use it for a index and speed it up. 
+FBTEST: functional.arno.optimizer.opt_aggregate_distribution_13 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_aggregate_distribution_14.py b/tests/functional/arno/optimizer/test_opt_aggregate_distribution_14.py index 13228d39..02f82732 100644 --- a/tests/functional/arno/optimizer/test_opt_aggregate_distribution_14.py +++ b/tests/functional/arno/optimizer/test_opt_aggregate_distribution_14.py @@ -9,6 +9,7 @@ DESCRIPTION: should be delivered to the where clause. The underlying aggregate stream could possible use it for a index and speed it up. JIRA: CORE-2417 +FBTEST: functional.arno.optimizer.opt_aggregate_distribution_14 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_avoid_index_usage.py b/tests/functional/arno/optimizer/test_opt_avoid_index_usage.py index 9c4b339e..8f85e56a 100644 --- a/tests/functional/arno/optimizer/test_opt_avoid_index_usage.py +++ b/tests/functional/arno/optimizer/test_opt_avoid_index_usage.py @@ -8,6 +8,7 @@ DESCRIPTION: Samples here are from #3431. Confirmed usage 'PLAN INDEX ...' in FB 2.0.0.12724 JIRA: CORE-3051 +FBTEST: functional.arno.optimizer.opt_avoid_index_usage """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_full_join_01.py b/tests/functional/arno/optimizer/test_opt_full_join_01.py index d9051e43..36a8a7be 100644 --- a/tests/functional/arno/optimizer/test_opt_full_join_01.py +++ b/tests/functional/arno/optimizer/test_opt_full_join_01.py @@ -7,6 +7,7 @@ DESCRIPTION: TableX FULL OUTER JOIN TableY with relation in the ON clause. Three tables are used, where 1 table (RC) holds references to the two other tables (R and C). The two tables R and C contain both 1 value that isn't inside RC. +FBTEST: functional.arno.optimizer.opt_full_join_01 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_full_join_02.py b/tests/functional/arno/optimizer/test_opt_full_join_02.py index 41b66c04..dadad144 100644 --- a/tests/functional/arno/optimizer/test_opt_full_join_02.py +++ b/tests/functional/arno/optimizer/test_opt_full_join_02.py @@ -12,6 +12,7 @@ NOTES: added 'rc.categoryid' to 'order by' list in order to have always stable sort result. Mismatch with expected result due to different position of records with the same 'rc.relationid' occured on 4.0.0.2298. CHecked on 4.0.0.2303. +FBTEST: functional.arno.optimizer.opt_full_join_02 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_full_join_03.py b/tests/functional/arno/optimizer/test_opt_full_join_03.py index 87ea9ca0..2f30946b 100644 --- a/tests/functional/arno/optimizer/test_opt_full_join_03.py +++ b/tests/functional/arno/optimizer/test_opt_full_join_03.py @@ -7,6 +7,7 @@ DESCRIPTION: TableX FULL OUTER JOIN TableY with relation in the ON clause. Three tables are used, where 1 table (RC) holds references to the two other tables (R and C). The two tables R and C contain both 1 value that isn't inside RC. +FBTEST: functional.arno.optimizer.opt_full_join_03 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_full_join_04.py b/tests/functional/arno/optimizer/test_opt_full_join_04.py index 7b8aa147..f152f656 100644 --- a/tests/functional/arno/optimizer/test_opt_full_join_04.py +++ b/tests/functional/arno/optimizer/test_opt_full_join_04.py @@ -9,6 +9,7 @@ DESCRIPTION: The two tables R and C contain both 1 value that isn't inside RC. ===== NB: 'UNION ALL' is used here, so PLAN for 2.5 will be of TWO separate rows. 
+FBTEST: functional.arno.optimizer.opt_full_join_04 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_inner_join_01.py b/tests/functional/arno/optimizer/test_opt_inner_join_01.py index af8dcd9c..22bf95ec 100644 --- a/tests/functional/arno/optimizer/test_opt_inner_join_01.py +++ b/tests/functional/arno/optimizer/test_opt_inner_join_01.py @@ -6,6 +6,7 @@ TITLE: INNER JOIN join order DESCRIPTION: With a INNER JOIN the table with the smallest expected result should be the first one in process order. +FBTEST: functional.arno.optimizer.opt_inner_join_01 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_inner_join_02.py b/tests/functional/arno/optimizer/test_opt_inner_join_02.py index 30cefdbd..ba04d337 100644 --- a/tests/functional/arno/optimizer/test_opt_inner_join_02.py +++ b/tests/functional/arno/optimizer/test_opt_inner_join_02.py @@ -9,6 +9,7 @@ DESCRIPTION: should be the second smallest. Note that calculation is based on page-size. Thus for tables which use the same nr. of data-pages, but have in reality different nr. of records the table N could be bigger as table N+1 in the order. +FBTEST: functional.arno.optimizer.opt_inner_join_02 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_inner_join_03.py b/tests/functional/arno/optimizer/test_opt_inner_join_03.py index d769af96..95725bfe 100644 --- a/tests/functional/arno/optimizer/test_opt_inner_join_03.py +++ b/tests/functional/arno/optimizer/test_opt_inner_join_03.py @@ -9,6 +9,7 @@ DESCRIPTION: result based on previous relation and do on till last relation. Before 2.0, Firebird did stop checking order possibilties above 7 relations. +FBTEST: functional.arno.optimizer.opt_inner_join_03 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_inner_join_04.py b/tests/functional/arno/optimizer/test_opt_inner_join_04.py index af251b0a..8e0329fb 100644 --- a/tests/functional/arno/optimizer/test_opt_inner_join_04.py +++ b/tests/functional/arno/optimizer/test_opt_inner_join_04.py @@ -4,6 +4,7 @@ ID: optimizer.inner-join-04 TITLE: INNER JOIN join order LIKE and IS NULL DESCRIPTION: IS NULL should also be used for determing join order. +FBTEST: functional.arno.optimizer.opt_inner_join_04 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_inner_join_05.py b/tests/functional/arno/optimizer/test_opt_inner_join_05.py index dc3c0781..fcff599f 100644 --- a/tests/functional/arno/optimizer/test_opt_inner_join_05.py +++ b/tests/functional/arno/optimizer/test_opt_inner_join_05.py @@ -4,6 +4,7 @@ ID: optimizer.inner-join-05 TITLE: INNER JOIN join order LIKE and STARTING WITH DESCRIPTION: LIKE and STARTING WITH should also be used for determing join order. +FBTEST: functional.arno.optimizer.opt_inner_join_05 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_inner_join_06.py b/tests/functional/arno/optimizer/test_opt_inner_join_06.py index 0c416f3d..de6e9053 100644 --- a/tests/functional/arno/optimizer/test_opt_inner_join_06.py +++ b/tests/functional/arno/optimizer/test_opt_inner_join_06.py @@ -8,6 +8,7 @@ DESCRIPTION: process order. All inner joins are combined to 1 inner join, because then a order can be decided between them. Relations from a VIEW can also be "merged" to the 1 inner join (of course not with outer joins/unions/etc..) 
+FBTEST: functional.arno.optimizer.opt_inner_join_06 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_inner_join_07.py b/tests/functional/arno/optimizer/test_opt_inner_join_07.py index d26d376a..9edf846b 100644 --- a/tests/functional/arno/optimizer/test_opt_inner_join_07.py +++ b/tests/functional/arno/optimizer/test_opt_inner_join_07.py @@ -8,6 +8,7 @@ DESCRIPTION: in process order. The next relation should be the next relation with expected smallest result based on previous relation and do on till last relation. Old/Current limitation in Firebird does stop checking order possibilties above 7 relations. +FBTEST: functional.arno.optimizer.opt_inner_join_07 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_inner_join_08.py b/tests/functional/arno/optimizer/test_opt_inner_join_08.py index 56935225..02fa9fb2 100644 --- a/tests/functional/arno/optimizer/test_opt_inner_join_08.py +++ b/tests/functional/arno/optimizer/test_opt_inner_join_08.py @@ -4,6 +4,7 @@ ID: optimizer.inner-join-08 TITLE: INNER JOIN join order and VIEW DESCRIPTION: Try to merge the top INNER JOINs of VIEWS/TABLES together to 1 inner join. +FBTEST: functional.arno.optimizer.opt_inner_join_08 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_inner_join_09.py b/tests/functional/arno/optimizer/test_opt_inner_join_09.py index 89f4ec4f..d733b9c3 100644 --- a/tests/functional/arno/optimizer/test_opt_inner_join_09.py +++ b/tests/functional/arno/optimizer/test_opt_inner_join_09.py @@ -9,6 +9,7 @@ DESCRIPTION: result based on previous relation and do on till last relation. Distribution is tested if it's conjunctions are distributed from WHERE clause. +FBTEST: functional.arno.optimizer.opt_inner_join_09 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_inner_join_10.py b/tests/functional/arno/optimizer/test_opt_inner_join_10.py index 2826b0c9..9636cc56 100644 --- a/tests/functional/arno/optimizer/test_opt_inner_join_10.py +++ b/tests/functional/arno/optimizer/test_opt_inner_join_10.py @@ -10,6 +10,7 @@ DESCRIPTION: It is expected that a unique index gives fewer results then non-unique index. Thus non-unique indexes will be at the end by determing join order. +FBTEST: functional.arno.optimizer.opt_inner_join_10 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_inner_join_merge_01.py b/tests/functional/arno/optimizer/test_opt_inner_join_merge_01.py index 7a271713..bed72aa5 100644 --- a/tests/functional/arno/optimizer/test_opt_inner_join_merge_01.py +++ b/tests/functional/arno/optimizer/test_opt_inner_join_merge_01.py @@ -7,6 +7,7 @@ DESCRIPTION: X JOIN Y ON (X.Field = Y.Field) When no index can be used on a INNER JOIN and there's a relation setup between X and Y then a MERGE should be performed. +FBTEST: functional.arno.optimizer.opt_inner_join_merge_01 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_inner_join_merge_02.py b/tests/functional/arno/optimizer/test_opt_inner_join_merge_02.py index 507633c8..5083af17 100644 --- a/tests/functional/arno/optimizer/test_opt_inner_join_merge_02.py +++ b/tests/functional/arno/optimizer/test_opt_inner_join_merge_02.py @@ -7,6 +7,7 @@ DESCRIPTION: X JOIN Y ON (X.Field = Y.Field) When no index can be used on a INNER JOIN and there's a relation setup between X and Y then a MERGE should be performed. 
+FBTEST: functional.arno.optimizer.opt_inner_join_merge_02 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_inner_join_merge_03.py b/tests/functional/arno/optimizer/test_opt_inner_join_merge_03.py index b14842ca..946b1bba 100644 --- a/tests/functional/arno/optimizer/test_opt_inner_join_merge_03.py +++ b/tests/functional/arno/optimizer/test_opt_inner_join_merge_03.py @@ -7,6 +7,7 @@ DESCRIPTION: X JOIN Y ON (X.Field + (10 * 2) = Y.Field + 20) When no index can be used on a INNER JOIN and there's a relation setup between X and Y then a MERGE should be performed. Also when expressions are used. +FBTEST: functional.arno.optimizer.opt_inner_join_merge_03 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_inner_join_merge_04.py b/tests/functional/arno/optimizer/test_opt_inner_join_merge_04.py index 4b32f31e..45c5e09a 100644 --- a/tests/functional/arno/optimizer/test_opt_inner_join_merge_04.py +++ b/tests/functional/arno/optimizer/test_opt_inner_join_merge_04.py @@ -7,6 +7,7 @@ DESCRIPTION: X JOIN Y ON (X.Field = Y.Field) When no index can be used on a INNER JOIN and there's a relation setup between X and Y then a MERGE should be performed. An equality between NULLs should not be seen as true. +FBTEST: functional.arno.optimizer.opt_inner_join_merge_04 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_inner_join_merge_05.py b/tests/functional/arno/optimizer/test_opt_inner_join_merge_05.py index 66ebe9e4..2171c0b6 100644 --- a/tests/functional/arno/optimizer/test_opt_inner_join_merge_05.py +++ b/tests/functional/arno/optimizer/test_opt_inner_join_merge_05.py @@ -7,6 +7,7 @@ DESCRIPTION: X JOIN Y ON (X.Field = Y.Field) When no index can be used on a INNER JOIN and there's a relation setup between X and Y then a MERGE should be performed. Of course also when a VIEW is used. +FBTEST: functional.arno.optimizer.opt_inner_join_merge_05 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_inner_join_merge_06.py b/tests/functional/arno/optimizer/test_opt_inner_join_merge_06.py index f82bca7a..d3e531df 100644 --- a/tests/functional/arno/optimizer/test_opt_inner_join_merge_06.py +++ b/tests/functional/arno/optimizer/test_opt_inner_join_merge_06.py @@ -7,6 +7,7 @@ DESCRIPTION: X JOIN Y ON (X.Field = Y.Field) When no index can be used on a INNER JOIN and there's a relation setup between X and Y then a MERGE should be performed. Test with selectable Stored Procedure. +FBTEST: functional.arno.optimizer.opt_inner_join_merge_06 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_left_join_01.py b/tests/functional/arno/optimizer/test_opt_left_join_01.py index 3e812ce8..c39869bb 100644 --- a/tests/functional/arno/optimizer/test_opt_left_join_01.py +++ b/tests/functional/arno/optimizer/test_opt_left_join_01.py @@ -6,6 +6,7 @@ TITLE: LEFT OUTER JOIN with no match at all DESCRIPTION: TableX LEFT OUTER JOIN TableY with no match, thus result should contain all NULLs for TableY references. +FBTEST: functional.arno.optimizer.opt_left_join_01 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_left_join_02.py b/tests/functional/arno/optimizer/test_opt_left_join_02.py index f8d20e85..56e4530a 100644 --- a/tests/functional/arno/optimizer/test_opt_left_join_02.py +++ b/tests/functional/arno/optimizer/test_opt_left_join_02.py @@ -7,6 +7,7 @@ DESCRIPTION: TableX LEFT OUTER JOIN TableY with no match, thus result should contain all NULLs for TableY references. 
WHERE clause contains IS NULL on a field which is also in a single segment index. The WHERE clause shouldn't be distributed to the joined table.. +FBTEST: functional.arno.optimizer.opt_left_join_02 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_left_join_03.py b/tests/functional/arno/optimizer/test_opt_left_join_03.py index 5edc0cb7..f81570b6 100644 --- a/tests/functional/arno/optimizer/test_opt_left_join_03.py +++ b/tests/functional/arno/optimizer/test_opt_left_join_03.py @@ -7,6 +7,7 @@ DESCRIPTION: TableX LEFT OUTER JOIN TableY with full match. ON clause contains (1 = 1) and WHERE clause contains relation between TableX and TableY. The WHERE comparison should be distributed to TableY. Thus TableY should use the index. +FBTEST: functional.arno.optimizer.opt_left_join_03 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_left_join_04.py b/tests/functional/arno/optimizer/test_opt_left_join_04.py index 262f4eaf..fcdbcbbd 100644 --- a/tests/functional/arno/optimizer/test_opt_left_join_04.py +++ b/tests/functional/arno/optimizer/test_opt_left_join_04.py @@ -7,6 +7,7 @@ DESCRIPTION: TableX LEFT OUTER JOIN TableY with full match. Every reference from TableY should have a value. This test also tests if not the ON clause is distributed to the outer context TableX. +FBTEST: functional.arno.optimizer.opt_left_join_04 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_left_join_05.py b/tests/functional/arno/optimizer/test_opt_left_join_05.py index 09f71f4d..99ece632 100644 --- a/tests/functional/arno/optimizer/test_opt_left_join_05.py +++ b/tests/functional/arno/optimizer/test_opt_left_join_05.py @@ -10,6 +10,7 @@ DESCRIPTION: This test also tests if not the ON clause is distributed to the outer context TableX. Also if not the extra created nodes (comparisons) from a equality node and a A # B node (# =, <, <=, >=, >) are distributed to the outer context. +FBTEST: functional.arno.optimizer.opt_left_join_05 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_left_join_06.py b/tests/functional/arno/optimizer/test_opt_left_join_06.py index 27fdc968..ff3d1457 100644 --- a/tests/functional/arno/optimizer/test_opt_left_join_06.py +++ b/tests/functional/arno/optimizer/test_opt_left_join_06.py @@ -10,6 +10,7 @@ DESCRIPTION: This test also tests if not the ON clause is distributed to the outer context TableX. Also if not the extra created nodes (comparisons) from a equality node and a A # B node (# =, <, <=, >=, >) are distributed to the outer context. +FBTEST: functional.arno.optimizer.opt_left_join_06 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_left_join_07.py b/tests/functional/arno/optimizer/test_opt_left_join_07.py index bef22120..efb86d5e 100644 --- a/tests/functional/arno/optimizer/test_opt_left_join_07.py +++ b/tests/functional/arno/optimizer/test_opt_left_join_07.py @@ -6,6 +6,7 @@ TITLE: 4 JOINed tables with 1 LEFT OUTER JOIN DESCRIPTION: A INNER JOINed TableD to a LEFT JOINed TableC should be able to access the outer TableB of TableC. Also TableB is INNER JOINed to TableA. Three indexes can and should be used here. 
+FBTEST: functional.arno.optimizer.opt_left_join_07 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_left_join_08.py b/tests/functional/arno/optimizer/test_opt_left_join_08.py index 3c6ecd92..b8f3ac0b 100644 --- a/tests/functional/arno/optimizer/test_opt_left_join_08.py +++ b/tests/functional/arno/optimizer/test_opt_left_join_08.py @@ -7,6 +7,7 @@ DESCRIPTION: TableX LEFT OUTER JOIN TableY with full match, but TableY results limited in ON clause. Which should result in partial NULL results for TableY. Due the WHERE clause a index for TableX should be used. +FBTEST: functional.arno.optimizer.opt_left_join_08 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_left_join_09.py b/tests/functional/arno/optimizer/test_opt_left_join_09.py index aec98b67..4ddfab4d 100644 --- a/tests/functional/arno/optimizer/test_opt_left_join_09.py +++ b/tests/functional/arno/optimizer/test_opt_left_join_09.py @@ -7,6 +7,7 @@ DESCRIPTION: TableX LEFT OUTER JOIN TableY with full match, but TableY results limited in ON clause. Which should result in partial NULL results for TableY, but these are not visible because they are filtered in the WHERE clause by "greater or equal than" operator. +FBTEST: functional.arno.optimizer.opt_left_join_09 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_left_join_10.py b/tests/functional/arno/optimizer/test_opt_left_join_10.py index 12b17bee..b0636afa 100644 --- a/tests/functional/arno/optimizer/test_opt_left_join_10.py +++ b/tests/functional/arno/optimizer/test_opt_left_join_10.py @@ -7,6 +7,7 @@ DESCRIPTION: TableX LEFT OUTER JOIN TableY with no match, thus result should contain all NULLs for TableY references. WHERE clause contains IS NULL on a field which is also in a single segment index. The WHERE clause shouldn't be distributed to the joined table. +FBTEST: functional.arno.optimizer.opt_left_join_10 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_left_join_11.py b/tests/functional/arno/optimizer/test_opt_left_join_11.py index c45b55e3..36af4cd9 100644 --- a/tests/functional/arno/optimizer/test_opt_left_join_11.py +++ b/tests/functional/arno/optimizer/test_opt_left_join_11.py @@ -7,6 +7,7 @@ DESCRIPTION: TableX LEFT OUTER JOIN TableY with no match, thus result should contain all NULLs for TableY references. WHERE clause contains STARTING WITH on a field which is also in a single segment index. The WHERE clause should be distributed to the joined table. +FBTEST: functional.arno.optimizer.opt_left_join_11 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_left_join_12.py b/tests/functional/arno/optimizer/test_opt_left_join_12.py index 06a150f2..e53fc1f0 100644 --- a/tests/functional/arno/optimizer/test_opt_left_join_12.py +++ b/tests/functional/arno/optimizer/test_opt_left_join_12.py @@ -6,6 +6,7 @@ TITLE: LEFT OUTER JOIN with distribution CASE DESCRIPTION: TableX LEFT OUTER JOIN TableY with partial match. WHERE clause contains CASE expression based on TableY. The WHERE clause should not be distributed to the joined table. 
+FBTEST: functional.arno.optimizer.opt_left_join_12 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_left_join_13.py b/tests/functional/arno/optimizer/test_opt_left_join_13.py index 16e5da42..7d0e4225 100644 --- a/tests/functional/arno/optimizer/test_opt_left_join_13.py +++ b/tests/functional/arno/optimizer/test_opt_left_join_13.py @@ -5,6 +5,7 @@ ID: optimizer.left-join-13 TITLE: LEFT OUTER JOIN VIEW with full match DESCRIPTION: TableX LEFT OUTER JOIN ViewY with full match. Every reference from ViewY should have a value. +FBTEST: functional.arno.optimizer.opt_left_join_13 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_mixed_joins_01.py b/tests/functional/arno/optimizer/test_opt_mixed_joins_01.py index 9269bd7d..1c9f9748 100644 --- a/tests/functional/arno/optimizer/test_opt_mixed_joins_01.py +++ b/tests/functional/arno/optimizer/test_opt_mixed_joins_01.py @@ -5,6 +5,7 @@ ID: optimizer.mixed-joins-01 TITLE: Mixed JOINS DESCRIPTION: Tables without indexes should be merged (when inner join) and those who can use a index, should use it. +FBTEST: functional.arno.optimizer.opt_mixed_joins_01 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_mixed_joins_02.py b/tests/functional/arno/optimizer/test_opt_mixed_joins_02.py index 11cd2184..d5bf389c 100644 --- a/tests/functional/arno/optimizer/test_opt_mixed_joins_02.py +++ b/tests/functional/arno/optimizer/test_opt_mixed_joins_02.py @@ -5,6 +5,7 @@ ID: optimizer.mixed-joins-02 TITLE: Mixed JOINS DESCRIPTION: Tables without indexes should be merged (when inner join) and those who can use a index, should use it. +FBTEST: functional.arno.optimizer.opt_mixed_joins_02 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_mixed_joins_03.py b/tests/functional/arno/optimizer/test_opt_mixed_joins_03.py index 83b52a5d..4085fe39 100644 --- a/tests/functional/arno/optimizer/test_opt_mixed_joins_03.py +++ b/tests/functional/arno/optimizer/test_opt_mixed_joins_03.py @@ -5,6 +5,7 @@ ID: optimizer.mixed-joins-03 TITLE: Mixed JOINS DESCRIPTION: Tables without indexes should be merged (when inner join) and those who can use a index, should use it. +FBTEST: functional.arno.optimizer.opt_mixed_joins_03 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_mixed_joins_04.py b/tests/functional/arno/optimizer/test_opt_mixed_joins_04.py index ae704e23..3d26bb2e 100644 --- a/tests/functional/arno/optimizer/test_opt_mixed_joins_04.py +++ b/tests/functional/arno/optimizer/test_opt_mixed_joins_04.py @@ -5,6 +5,7 @@ ID: optimizer.mixed-joins-04 TITLE: Mixed JOINS DESCRIPTION: Tables without indexes should be merged (when inner join) and those who can use a index, should use it. +FBTEST: functional.arno.optimizer.opt_mixed_joins_04 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_mixed_joins_05.py b/tests/functional/arno/optimizer/test_opt_mixed_joins_05.py index 0e69ad99..6d1ef447 100644 --- a/tests/functional/arno/optimizer/test_opt_mixed_joins_05.py +++ b/tests/functional/arno/optimizer/test_opt_mixed_joins_05.py @@ -5,6 +5,7 @@ ID: optimizer.mixed-joins-05 TITLE: Mixed JOINS DESCRIPTION: Tables without indexes should be merged (when inner join) and those who can use a index, should use it. 
+FBTEST: functional.arno.optimizer.opt_mixed_joins_05 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_mixed_joins_06.py b/tests/functional/arno/optimizer/test_opt_mixed_joins_06.py index 05ed2479..e00d0b4e 100644 --- a/tests/functional/arno/optimizer/test_opt_mixed_joins_06.py +++ b/tests/functional/arno/optimizer/test_opt_mixed_joins_06.py @@ -5,6 +5,7 @@ ID: optimizer.mixed-joins-06 TITLE: Mixed JOINS DESCRIPTION: Tables without indexes should be merged (when inner join) and those who can use a index, should use it. +FBTEST: functional.arno.optimizer.opt_mixed_joins_06 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_multi_index_selection_01.py b/tests/functional/arno/optimizer/test_opt_multi_index_selection_01.py index d9f94189..7a75c583 100644 --- a/tests/functional/arno/optimizer/test_opt_multi_index_selection_01.py +++ b/tests/functional/arno/optimizer/test_opt_multi_index_selection_01.py @@ -7,6 +7,7 @@ DESCRIPTION: Check if it will select only the index with the unique index when equal operator is performed on all segments in index. Also prefer ASC index above DESC unique index. Unique index together with equals operator will always be the best index to choose. +FBTEST: functional.arno.optimizer.opt_multi_index_selection_01 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_multi_index_selection_02.py b/tests/functional/arno/optimizer/test_opt_multi_index_selection_02.py index 3c6e5eb7..4e9d04e7 100644 --- a/tests/functional/arno/optimizer/test_opt_multi_index_selection_02.py +++ b/tests/functional/arno/optimizer/test_opt_multi_index_selection_02.py @@ -7,6 +7,7 @@ DESCRIPTION: Check if it will select the indexes which can be used. (Indexes with selectivity more than 10x the best are ignored) See SELECTIVITY_THRESHOLD_FACTOR in opt.cpp +FBTEST: functional.arno.optimizer.opt_multi_index_selection_02 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_multi_index_selection_03.py b/tests/functional/arno/optimizer/test_opt_multi_index_selection_03.py index 418e447a..7dfb76a3 100644 --- a/tests/functional/arno/optimizer/test_opt_multi_index_selection_03.py +++ b/tests/functional/arno/optimizer/test_opt_multi_index_selection_03.py @@ -8,6 +8,7 @@ DESCRIPTION: Full-segment-matched indexes have higher priority as partial matched indexes. (Indexes with selectivity more than 10x the best are ignored) See SELECTIVITY_THRESHOLD_FACTOR in opt.cpp +FBTEST: functional.arno.optimizer.opt_multi_index_selection_03 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_multi_index_selection_04.py b/tests/functional/arno/optimizer/test_opt_multi_index_selection_04.py index 5cd533d1..4c578456 100644 --- a/tests/functional/arno/optimizer/test_opt_multi_index_selection_04.py +++ b/tests/functional/arno/optimizer/test_opt_multi_index_selection_04.py @@ -9,6 +9,7 @@ DESCRIPTION: (up to two segments and only ASC) is made. The best here is using 2 indexes, except if the index for the "greater or equal" operator is much worser as the index used for the other two operators. 
+FBTEST: functional.arno.optimizer.opt_multi_index_selection_04 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_multi_index_selection_05.py b/tests/functional/arno/optimizer/test_opt_multi_index_selection_05.py index 93af9343..b9abbb42 100644 --- a/tests/functional/arno/optimizer/test_opt_multi_index_selection_05.py +++ b/tests/functional/arno/optimizer/test_opt_multi_index_selection_05.py @@ -7,6 +7,7 @@ DESCRIPTION: Check if it will select the index with the best selectivity and with the biggest segment match. 2 equals operators and 1 greater or equal operator and every index combination is made (only ASC). The best here is using 1 index (F2_F3_F1 or F3_F2_F1). +FBTEST: functional.arno.optimizer.opt_multi_index_selection_05 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_multi_index_selection_07.py b/tests/functional/arno/optimizer/test_opt_multi_index_selection_07.py index c8a39fc9..79fe5cf8 100644 --- a/tests/functional/arno/optimizer/test_opt_multi_index_selection_07.py +++ b/tests/functional/arno/optimizer/test_opt_multi_index_selection_07.py @@ -6,6 +6,7 @@ TITLE: Best match index selection (multi segment) DESCRIPTION: Check if it will select the index with the best selectivity and match. IS NULL should also be used in compound indexes. +FBTEST: functional.arno.optimizer.opt_multi_index_selection_07 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_multi_index_selection_08.py b/tests/functional/arno/optimizer/test_opt_multi_index_selection_08.py index 0507302d..cca9f707 100644 --- a/tests/functional/arno/optimizer/test_opt_multi_index_selection_08.py +++ b/tests/functional/arno/optimizer/test_opt_multi_index_selection_08.py @@ -7,6 +7,7 @@ DESCRIPTION: STARTING WITH can also use a index and it should in fact be possible to use a compound index. Of course the STARTING WITH conjunction can only be bound the end (of all possible matches, same as >, >=, <, <=). +FBTEST: functional.arno.optimizer.opt_multi_index_selection_08 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_selectivity_01.py b/tests/functional/arno/optimizer/test_opt_selectivity_01.py index 75eb72bc..78823b62 100644 --- a/tests/functional/arno/optimizer/test_opt_selectivity_01.py +++ b/tests/functional/arno/optimizer/test_opt_selectivity_01.py @@ -5,6 +5,7 @@ ID: optimizer.selectivity-01 TITLE: SELECTIVITY - SET STATISTICS DESCRIPTION: Check if selectivity is calculated correctly. +FBTEST: functional.arno.optimizer.opt_selectivity_01 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_selectivity_02.py b/tests/functional/arno/optimizer/test_opt_selectivity_02.py index 6ea4668e..6ed81fcc 100644 --- a/tests/functional/arno/optimizer/test_opt_selectivity_02.py +++ b/tests/functional/arno/optimizer/test_opt_selectivity_02.py @@ -5,6 +5,7 @@ ID: optimizer.selectivity-02 TITLE: SELECTIVITY - CREATE INDEX DESCRIPTION: Check if selectivity is calculated correctly. +FBTEST: functional.arno.optimizer.opt_selectivity_02 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_selectivity_03.py b/tests/functional/arno/optimizer/test_opt_selectivity_03.py index 7208df2f..53ac2690 100644 --- a/tests/functional/arno/optimizer/test_opt_selectivity_03.py +++ b/tests/functional/arno/optimizer/test_opt_selectivity_03.py @@ -5,6 +5,7 @@ ID: optimizer.selectivity-03 TITLE: SELECTIVITY - INDEX INACTIVE / ACTIVE DESCRIPTION: Check if selectivity is calculated correctly. 
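-- A minimal sketch of how the selectivity checks above can be reproduced by hand
-- (hypothetical names; RDB$STATISTICS is roughly 1 / count-of-distinct-keys):
recreate table t_sel (f1 integer);
insert into t_sel select mod(rdb$type, 10) from rdb$types;  -- a few hundred rows, ~10 distinct values
commit;
create index idx_sel_f1 on t_sel (f1);   -- selectivity is computed when the index is created
commit;
set statistics index idx_sel_f1;         -- ... and can be recalculated on demand
commit;
select rdb$index_name, rdb$statistics
from rdb$indices
where rdb$index_name = 'IDX_SEL_F1';
-- with about 10 distinct key values the stored selectivity should be close to 0.1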
+FBTEST: functional.arno.optimizer.opt_selectivity_03 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_single_index_selection_01.py b/tests/functional/arno/optimizer/test_opt_single_index_selection_01.py index 99485766..71949bcd 100644 --- a/tests/functional/arno/optimizer/test_opt_single_index_selection_01.py +++ b/tests/functional/arno/optimizer/test_opt_single_index_selection_01.py @@ -7,6 +7,7 @@ DESCRIPTION: Check if it will select only the index with the unique index when equal operator is performed on field in index. Also prefer ASC index above DESC unique index. Unique index together with equals operator will always be the best index to choose. +FBTEST: functional.arno.optimizer.opt_single_index_selection_01 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_single_index_selection_02.py b/tests/functional/arno/optimizer/test_opt_single_index_selection_02.py index 16efa9e5..f0c7aae1 100644 --- a/tests/functional/arno/optimizer/test_opt_single_index_selection_02.py +++ b/tests/functional/arno/optimizer/test_opt_single_index_selection_02.py @@ -7,6 +7,7 @@ DESCRIPTION: Check if it will select the index with the best selectivity. UNIQUE index is the best and prefer ASC index. Only the equals conjunctions should be bound to the index, because it's the most selective. +FBTEST: functional.arno.optimizer.opt_single_index_selection_02 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_single_index_selection_03.py b/tests/functional/arno/optimizer/test_opt_single_index_selection_03.py index cdd3d33b..0e79bbe5 100644 --- a/tests/functional/arno/optimizer/test_opt_single_index_selection_03.py +++ b/tests/functional/arno/optimizer/test_opt_single_index_selection_03.py @@ -8,6 +8,7 @@ DESCRIPTION: Also prefer ASC index above DESC unique index. Unique index isn't the only best to use here, because there's not a equals operator on it. +FBTEST: functional.arno.optimizer.opt_single_index_selection_03 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_single_index_selection_05.py b/tests/functional/arno/optimizer/test_opt_single_index_selection_05.py index 2103aa4f..45a1baf1 100644 --- a/tests/functional/arno/optimizer/test_opt_single_index_selection_05.py +++ b/tests/functional/arno/optimizer/test_opt_single_index_selection_05.py @@ -17,6 +17,7 @@ DESCRIPTION: The cost for index F1 and F2 together is the best total cost. Cost = (data-pages * totalSelectivity) + total index cost. +FBTEST: functional.arno.optimizer.opt_single_index_selection_05 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_single_index_selection_06.py b/tests/functional/arno/optimizer/test_opt_single_index_selection_06.py index 1869992d..ae40156b 100644 --- a/tests/functional/arno/optimizer/test_opt_single_index_selection_06.py +++ b/tests/functional/arno/optimizer/test_opt_single_index_selection_06.py @@ -7,6 +7,7 @@ DESCRIPTION: Check if it will select the index with the best selectivity. Also prefer ASC index above DESC unique index. Unique index isn't the best to use here (as the only one), because there's not a equals operator on it. 
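-- A worked example of the cost formula quoted above (numbers are illustrative only):
--   index A: selectivity 0.01, index cost 3  ->  cost = (1000 pages * 0.01) + 3 =  13
--   index B: selectivity 0.20, index cost 2  ->  cost = (1000 pages * 0.20) + 2 = 202
-- so for a table occupying 1000 data pages the optimizer is expected to prefer index A.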
+FBTEST: functional.arno.optimizer.opt_single_index_selection_06 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_single_index_selection_07.py b/tests/functional/arno/optimizer/test_opt_single_index_selection_07.py index 8e715d47..6a810e39 100644 --- a/tests/functional/arno/optimizer/test_opt_single_index_selection_07.py +++ b/tests/functional/arno/optimizer/test_opt_single_index_selection_07.py @@ -6,6 +6,7 @@ TITLE: Best match index selection (single segment) DESCRIPTION: Check if it will select the index with the best selectivity. IS NULL can also use a index, but 1 index is enough and prefer ASC index. +FBTEST: functional.arno.optimizer.opt_single_index_selection_07 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_single_index_selection_08.py b/tests/functional/arno/optimizer/test_opt_single_index_selection_08.py index 886d950b..68225f5d 100644 --- a/tests/functional/arno/optimizer/test_opt_single_index_selection_08.py +++ b/tests/functional/arno/optimizer/test_opt_single_index_selection_08.py @@ -6,6 +6,7 @@ TITLE: Best match index selection (single segment) DESCRIPTION: Check if it will select the index with the best selectivity. STARTING WITH can also use a index, but 1 index is enough and prefer ASC index. +FBTEST: functional.arno.optimizer.opt_single_index_selection_08 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_single_index_selection_09.py b/tests/functional/arno/optimizer/test_opt_single_index_selection_09.py index e6f12633..ccb7184d 100644 --- a/tests/functional/arno/optimizer/test_opt_single_index_selection_09.py +++ b/tests/functional/arno/optimizer/test_opt_single_index_selection_09.py @@ -6,6 +6,7 @@ TITLE: Best match index selection (single segment) OR DESCRIPTION: Check if it will select the index with the best selectivity. UNIQUE index is the best and prefer ASC index. Only 1 index per conjunction is enough. +FBTEST: functional.arno.optimizer.opt_single_index_selection_09 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_single_index_selection_10.py b/tests/functional/arno/optimizer/test_opt_single_index_selection_10.py index 81d10917..41e86c0a 100644 --- a/tests/functional/arno/optimizer/test_opt_single_index_selection_10.py +++ b/tests/functional/arno/optimizer/test_opt_single_index_selection_10.py @@ -7,6 +7,7 @@ DESCRIPTION: Check if it will select the index with the best selectivity. UNIQUE index is the best and prefer ASC index. 1 index per OR conjunction is enough and the equals conjunctions should be bound to the index, because it's the most selective. +FBTEST: functional.arno.optimizer.opt_single_index_selection_10 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_single_index_selection_11.py b/tests/functional/arno/optimizer/test_opt_single_index_selection_11.py index a8ddecb7..44e669fd 100644 --- a/tests/functional/arno/optimizer/test_opt_single_index_selection_11.py +++ b/tests/functional/arno/optimizer/test_opt_single_index_selection_11.py @@ -6,6 +6,7 @@ TITLE: Best match index selection (single segment) DESCRIPTION: Check if it will select the best index. IS NULL can return more records thus prefer equal. 
+FBTEST: functional.arno.optimizer.opt_single_index_selection_11 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_sort_by_index_01.py b/tests/functional/arno/optimizer/test_opt_sort_by_index_01.py index cbef44d9..d9825d86 100644 --- a/tests/functional/arno/optimizer/test_opt_sort_by_index_01.py +++ b/tests/functional/arno/optimizer/test_opt_sort_by_index_01.py @@ -6,6 +6,7 @@ TITLE: ORDER BY ASC using index (unique) DESCRIPTION: ORDER BY X When a index can be used for sorting, use it. +FBTEST: functional.arno.optimizer.opt_sort_by_index_01 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_sort_by_index_02.py b/tests/functional/arno/optimizer/test_opt_sort_by_index_02.py index 393bc807..b8c99920 100644 --- a/tests/functional/arno/optimizer/test_opt_sort_by_index_02.py +++ b/tests/functional/arno/optimizer/test_opt_sort_by_index_02.py @@ -6,6 +6,7 @@ TITLE: ORDER BY DESC using index (unique) DESCRIPTION: ORDER BY X When a index can be used for sorting, use it. +FBTEST: functional.arno.optimizer.opt_sort_by_index_02 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_sort_by_index_03.py b/tests/functional/arno/optimizer/test_opt_sort_by_index_03.py index eaf68dae..dbf8dfac 100644 --- a/tests/functional/arno/optimizer/test_opt_sort_by_index_03.py +++ b/tests/functional/arno/optimizer/test_opt_sort_by_index_03.py @@ -6,6 +6,7 @@ TITLE: ORDER BY ASC using index (non-unique) DESCRIPTION: ORDER BY X When a index can be used for sorting, use it. +FBTEST: functional.arno.optimizer.opt_sort_by_index_03 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_sort_by_index_04.py b/tests/functional/arno/optimizer/test_opt_sort_by_index_04.py index 181eaee9..32f305d2 100644 --- a/tests/functional/arno/optimizer/test_opt_sort_by_index_04.py +++ b/tests/functional/arno/optimizer/test_opt_sort_by_index_04.py @@ -6,6 +6,7 @@ TITLE: ORDER BY DESC using index (non-unique) DESCRIPTION: ORDER BY X When a index can be used for sorting, use it. +FBTEST: functional.arno.optimizer.opt_sort_by_index_04 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_sort_by_index_05.py b/tests/functional/arno/optimizer/test_opt_sort_by_index_05.py index c2491375..34badfa5 100644 --- a/tests/functional/arno/optimizer/test_opt_sort_by_index_05.py +++ b/tests/functional/arno/optimizer/test_opt_sort_by_index_05.py @@ -6,6 +6,7 @@ TITLE: MAX() and DESC index (non-unique) DESCRIPTION: SELECT MAX(FieldX) FROM X When a index can be used for sorting, use it. +FBTEST: functional.arno.optimizer.opt_sort_by_index_05 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_sort_by_index_06.py b/tests/functional/arno/optimizer/test_opt_sort_by_index_06.py index 73ef1e5f..51dd4152 100644 --- a/tests/functional/arno/optimizer/test_opt_sort_by_index_06.py +++ b/tests/functional/arno/optimizer/test_opt_sort_by_index_06.py @@ -6,6 +6,7 @@ TITLE: MAX() and ASC index (non-unique) DESCRIPTION: SELECT MAX(FieldX) FROM X ASC index cannot be used for MAX() aggregate function. 
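-- A minimal sketch of the MAX()/index interplay described above (hypothetical names;
-- exact PLAN text may vary between Firebird versions):
recreate table t_agg (f1 integer);
create descending index idx_agg_f1_desc on t_agg (f1);
commit;
set plan on;
select max(f1) from t_agg;
-- a DESC index can feed MAX() directly, e.g. PLAN (T_AGG ORDER IDX_AGG_F1_DESC);
-- with only an ASC index the same query falls back to PLAN (T_AGG NATURAL)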
+FBTEST: functional.arno.optimizer.opt_sort_by_index_06 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_sort_by_index_07.py b/tests/functional/arno/optimizer/test_opt_sort_by_index_07.py index 5ffb20af..8d3a4b2b 100644 --- a/tests/functional/arno/optimizer/test_opt_sort_by_index_07.py +++ b/tests/functional/arno/optimizer/test_opt_sort_by_index_07.py @@ -6,6 +6,7 @@ TITLE: MIN() and ASC index (non-unique) DESCRIPTION: SELECT MIN(FieldX) FROM X When a index can be used for sorting, use it. +FBTEST: functional.arno.optimizer.opt_sort_by_index_07 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_sort_by_index_08.py b/tests/functional/arno/optimizer/test_opt_sort_by_index_08.py index 1af03d08..de9935f3 100644 --- a/tests/functional/arno/optimizer/test_opt_sort_by_index_08.py +++ b/tests/functional/arno/optimizer/test_opt_sort_by_index_08.py @@ -6,6 +6,7 @@ TITLE: MIN() and DESC index (non-unique) DESCRIPTION: SELECT MIN(FieldX) FROM X DESC index cannot be used for MIN() aggregate function. +FBTEST: functional.arno.optimizer.opt_sort_by_index_08 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_sort_by_index_09.py b/tests/functional/arno/optimizer/test_opt_sort_by_index_09.py index a6623e37..a43dc550 100644 --- a/tests/functional/arno/optimizer/test_opt_sort_by_index_09.py +++ b/tests/functional/arno/optimizer/test_opt_sort_by_index_09.py @@ -6,6 +6,7 @@ TITLE: ORDER BY ASC using index (non-unique) DESCRIPTION: ORDER BY X If WHERE clause is present it should also use index if possible. +FBTEST: functional.arno.optimizer.opt_sort_by_index_09 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_sort_by_index_10.py b/tests/functional/arno/optimizer/test_opt_sort_by_index_10.py index d09d8982..98f8e18c 100644 --- a/tests/functional/arno/optimizer/test_opt_sort_by_index_10.py +++ b/tests/functional/arno/optimizer/test_opt_sort_by_index_10.py @@ -6,6 +6,7 @@ TITLE: ORDER BY ASC using index (multi) DESCRIPTION: ORDER BY X, Y When more fields are given in ORDER BY clause try to use a compound index. +FBTEST: functional.arno.optimizer.opt_sort_by_index_10 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_sort_by_index_11.py b/tests/functional/arno/optimizer/test_opt_sort_by_index_11.py index 9b191bf9..b85e6566 100644 --- a/tests/functional/arno/optimizer/test_opt_sort_by_index_11.py +++ b/tests/functional/arno/optimizer/test_opt_sort_by_index_11.py @@ -6,6 +6,7 @@ TITLE: ORDER BY ASC using index (multi) DESCRIPTION: ORDER BY X, Y When more fields are given in ORDER BY clause try to use a compound index. +FBTEST: functional.arno.optimizer.opt_sort_by_index_11 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_sort_by_index_12.py b/tests/functional/arno/optimizer/test_opt_sort_by_index_12.py index 481fd1db..e0bc27a8 100644 --- a/tests/functional/arno/optimizer/test_opt_sort_by_index_12.py +++ b/tests/functional/arno/optimizer/test_opt_sort_by_index_12.py @@ -7,6 +7,7 @@ DESCRIPTION: ORDER BY X ASC, Y DESC When more fields are given in ORDER BY clause try to use a compound index, but look out for mixed directions. 
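-- A minimal sketch of the compound-index ORDER BY cases above (hypothetical names):
recreate table t_ord (f1 integer, f2 integer);
create index idx_ord_f1_f2 on t_ord (f1, f2);
commit;
set plan on;
select f1, f2 from t_ord order by f1, f2;           -- can be navigated via idx_ord_f1_f2
select f1, f2 from t_ord order by f1 asc, f2 desc;  -- mixed directions no longer match the
                                                    -- index order, an external sort is expected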
+FBTEST: functional.arno.optimizer.opt_sort_by_index_12 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_sort_by_index_13.py b/tests/functional/arno/optimizer/test_opt_sort_by_index_13.py index f76f5637..8000d364 100644 --- a/tests/functional/arno/optimizer/test_opt_sort_by_index_13.py +++ b/tests/functional/arno/optimizer/test_opt_sort_by_index_13.py @@ -7,6 +7,7 @@ DESCRIPTION: WHERE X = 1 ORDER BY Y ASC WHERE clause and ORDER BY nodes can sometimes be merged to get optimal result from compound index. +FBTEST: functional.arno.optimizer.opt_sort_by_index_13 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_sort_by_index_14.py b/tests/functional/arno/optimizer/test_opt_sort_by_index_14.py index e3df4483..8dcac193 100644 --- a/tests/functional/arno/optimizer/test_opt_sort_by_index_14.py +++ b/tests/functional/arno/optimizer/test_opt_sort_by_index_14.py @@ -6,6 +6,7 @@ TITLE: ORDER BY ASC NULLS FIRST using index DESCRIPTION: ORDER BY X ASC NULLS FIRST When a index can be used for sorting, use it. +FBTEST: functional.arno.optimizer.opt_sort_by_index_14 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_sort_by_index_15.py b/tests/functional/arno/optimizer/test_opt_sort_by_index_15.py index 9d04d27e..a19ae4a7 100644 --- a/tests/functional/arno/optimizer/test_opt_sort_by_index_15.py +++ b/tests/functional/arno/optimizer/test_opt_sort_by_index_15.py @@ -6,6 +6,7 @@ TITLE: ORDER BY ASC NULLS LAST using index DESCRIPTION: ORDER BY X ASC NULLS LAST When a index can be used for sorting, use it. +FBTEST: functional.arno.optimizer.opt_sort_by_index_15 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_sort_by_index_16.py b/tests/functional/arno/optimizer/test_opt_sort_by_index_16.py index 468a50dc..5b4363b6 100644 --- a/tests/functional/arno/optimizer/test_opt_sort_by_index_16.py +++ b/tests/functional/arno/optimizer/test_opt_sort_by_index_16.py @@ -6,6 +6,7 @@ TITLE: ORDER BY DESC NULLS FIRST using index DESCRIPTION: ORDER BY X DESC NULLS FIRST When a index can be used for sorting, use it. +FBTEST: functional.arno.optimizer.opt_sort_by_index_16 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_sort_by_index_17.py b/tests/functional/arno/optimizer/test_opt_sort_by_index_17.py index 1ed101a2..d435172d 100644 --- a/tests/functional/arno/optimizer/test_opt_sort_by_index_17.py +++ b/tests/functional/arno/optimizer/test_opt_sort_by_index_17.py @@ -6,6 +6,7 @@ TITLE: ORDER BY DESC NULLS LAST using index DESCRIPTION: ORDER BY X DESC NULLS LAST When a index can be used for sorting, use it. +FBTEST: functional.arno.optimizer.opt_sort_by_index_17 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_sort_by_index_18.py b/tests/functional/arno/optimizer/test_opt_sort_by_index_18.py index 4db1ac23..bd1e8d8c 100644 --- a/tests/functional/arno/optimizer/test_opt_sort_by_index_18.py +++ b/tests/functional/arno/optimizer/test_opt_sort_by_index_18.py @@ -6,6 +6,7 @@ TITLE: ORDER BY ASC using index (single) and WHERE clause DESCRIPTION: WHERE X = 1 ORDER BY Y Index for both X and Y should be used when available. 
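-- A minimal sketch of serving WHERE plus ORDER BY from one compound index
-- (hypothetical names; exact PLAN text may vary):
recreate table t_wo (x integer, y integer);
create index idx_wo_x_y on t_wo (x, y);
commit;
set plan on;
select y from t_wo where x = 1 order by y;
-- the equality on the leading segment and the sort on the trailing segment can both
-- be bound to idx_wo_x_y, so no separate sort operation should be needed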
+FBTEST: functional.arno.optimizer.opt_sort_by_index_18 """ import pytest diff --git a/tests/functional/arno/optimizer/test_opt_sort_by_index_19.py b/tests/functional/arno/optimizer/test_opt_sort_by_index_19.py index 6a10e7d8..2df37d14 100644 --- a/tests/functional/arno/optimizer/test_opt_sort_by_index_19.py +++ b/tests/functional/arno/optimizer/test_opt_sort_by_index_19.py @@ -6,6 +6,7 @@ TITLE: ORDER BY ASC using index (multi) and WHERE clause DESCRIPTION: WHERE X = 1 ORDER BY Y When multi-segment index is present with X as first and Y as second this index can be used. +FBTEST: functional.arno.optimizer.opt_sort_by_index_19 """ import pytest diff --git a/tests/functional/basic/db/test_01.py b/tests/functional/basic/db/test_01.py index 2703cf34..fee6b73b 100644 --- a/tests/functional/basic/db/test_01.py +++ b/tests/functional/basic/db/test_01.py @@ -4,6 +4,7 @@ ID: new-database-01 TITLE: New DB - RDB$DATABASE content DESCRIPTION: Check the correct content of RDB$DATABASE in new database. +FBTEST: functional.basic.db.01 """ import pytest diff --git a/tests/functional/basic/db/test_02.py b/tests/functional/basic/db/test_02.py index e91e0175..bffb4eb3 100644 --- a/tests/functional/basic/db/test_02.py +++ b/tests/functional/basic/db/test_02.py @@ -4,6 +4,7 @@ ID: new-database-02 TITLE: New DB - RDB$CHARACTER_SETS DESCRIPTION: Check the correct content of RDB$CHARACTER_SETS for new database +FBTEST: functional.basic.db.02 """ import pytest diff --git a/tests/functional/basic/db/test_03.py b/tests/functional/basic/db/test_03.py index 97c6164a..dee7190d 100644 --- a/tests/functional/basic/db/test_03.py +++ b/tests/functional/basic/db/test_03.py @@ -4,6 +4,7 @@ ID: new-database-03 TITLE: New DB - RDB$COLLATIONS DESCRIPTION: Check the correct content of RDB$COLLATIONS on new DB. +FBTEST: functional.basic.db.03 """ import pytest diff --git a/tests/functional/basic/db/test_04.py b/tests/functional/basic/db/test_04.py index 4c3c47b5..377ea2ca 100644 --- a/tests/functional/basic/db/test_04.py +++ b/tests/functional/basic/db/test_04.py @@ -4,6 +4,7 @@ ID: new-database-04 TITLE: New DB - RDB$EXCEPTIONS DESCRIPTION: Check for correct content of RDB$EXCEPTIONS in new database. +FBTEST: functional.basic.db.04 """ import pytest diff --git a/tests/functional/basic/db/test_05.py b/tests/functional/basic/db/test_05.py index 3e7b1bc2..f01bf2eb 100644 --- a/tests/functional/basic/db/test_05.py +++ b/tests/functional/basic/db/test_05.py @@ -4,6 +4,7 @@ ID: new-database-05 TITLE: New DB - RDB$DEPENDENCIES DESCRIPTION: Check for correct content of RDB$DEPENDENCIES in new database. +FBTEST: functional.basic.db.05 """ import pytest diff --git a/tests/functional/basic/db/test_06.py b/tests/functional/basic/db/test_06.py index 7b17a0c2..0dbab98d 100644 --- a/tests/functional/basic/db/test_06.py +++ b/tests/functional/basic/db/test_06.py @@ -4,6 +4,7 @@ ID: new-database-06 TITLE: New DB - RDB$FIELD_DIMENSIONS DESCRIPTION: Check for correct content of RDB$FIELD_DIMENSIONS in new database. +FBTEST: functional.basic.db.06 """ import pytest diff --git a/tests/functional/basic/db/test_07.py b/tests/functional/basic/db/test_07.py index 663a4a15..c440a382 100644 --- a/tests/functional/basic/db/test_07.py +++ b/tests/functional/basic/db/test_07.py @@ -4,6 +4,7 @@ ID: new-database-07 TITLE: New DB - RDB$FIELDS DESCRIPTION: Check for correct content of RDB$FIELDS in new database. 
+FBTEST: functional.basic.db.07 """ import pytest diff --git a/tests/functional/basic/db/test_08.py b/tests/functional/basic/db/test_08.py index 02813a74..f8971651 100644 --- a/tests/functional/basic/db/test_08.py +++ b/tests/functional/basic/db/test_08.py @@ -4,6 +4,7 @@ ID: new-database-08 TITLE: New DB - RDB$FILES DESCRIPTION: Check for correct content of RDB$FILES in new database. +FBTEST: functional.basic.db.08 """ import pytest diff --git a/tests/functional/basic/db/test_09.py b/tests/functional/basic/db/test_09.py index d340cf21..be0b87fc 100644 --- a/tests/functional/basic/db/test_09.py +++ b/tests/functional/basic/db/test_09.py @@ -4,6 +4,7 @@ ID: new-database-09 TITLE: New DB - RDB$FILTERS DESCRIPTION: Check for correct content of RDB$FILTERS in new database. +FBTEST: functional.basic.db.09 """ import pytest diff --git a/tests/functional/basic/db/test_10.py b/tests/functional/basic/db/test_10.py index a49140cd..ab38cae5 100644 --- a/tests/functional/basic/db/test_10.py +++ b/tests/functional/basic/db/test_10.py @@ -4,6 +4,7 @@ ID: new-database-10 TITLE: New DB - RDB$FORMATS DESCRIPTION: Check for correct content of RDB$FORMATS in new database. +FBTEST: functional.basic.db.10 """ import pytest diff --git a/tests/functional/basic/db/test_11.py b/tests/functional/basic/db/test_11.py index 19917285..57b0d81a 100644 --- a/tests/functional/basic/db/test_11.py +++ b/tests/functional/basic/db/test_11.py @@ -4,6 +4,7 @@ ID: new-database-11 TITLE: New DB - RDB$FUNCTION_ARGUMENTS DESCRIPTION: Check for correct content of RDB$FUNCTION_ARGUMENTS in a new database. +FBTEST: functional.basic.db.11 """ import pytest diff --git a/tests/functional/basic/db/test_12.py b/tests/functional/basic/db/test_12.py index 8625639c..aff442dd 100644 --- a/tests/functional/basic/db/test_12.py +++ b/tests/functional/basic/db/test_12.py @@ -4,6 +4,7 @@ ID: new-database-12 TITLE: New DB - RDB$FUNCTIONS content DESCRIPTION: Check for correct content of RDB$FUNCTIONS in a new database. +FBTEST: functional.basic.db.12 """ import pytest diff --git a/tests/functional/basic/db/test_13.py b/tests/functional/basic/db/test_13.py index b89043e7..9b60c82a 100644 --- a/tests/functional/basic/db/test_13.py +++ b/tests/functional/basic/db/test_13.py @@ -10,6 +10,7 @@ NOTES: 2. Field rdb$description has been moved at the end of output (select) list. 3. Added query to select FIELDS list of table because main check does not use asterisk and we have to know if DDL of table will have any changes in future. +FBTEST: functional.basic.db.13 """ import pytest diff --git a/tests/functional/basic/db/test_14.py b/tests/functional/basic/db/test_14.py index f6430ad5..eadfa6e2 100644 --- a/tests/functional/basic/db/test_14.py +++ b/tests/functional/basic/db/test_14.py @@ -4,6 +4,7 @@ ID: new-database-14 TITLE: New DB - RDB$CHECK_CONSTRAINTS DESCRIPTION: Check for correct content of RDB$CHECK_CONSTRAINTS in new database. +FBTEST: functional.basic.db.14 """ import pytest diff --git a/tests/functional/basic/db/test_15.py b/tests/functional/basic/db/test_15.py index 2221fd7c..4d5b3ee6 100644 --- a/tests/functional/basic/db/test_15.py +++ b/tests/functional/basic/db/test_15.py @@ -4,6 +4,7 @@ ID: new-database-15 TITLE: New DB - RDB$INDEX_SEGMENTS content DESCRIPTION: Check the correct content of RDB$INDEX_SEGMENTS in new database. 
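-- The RDB$ content checks above amount to fixed queries against the system tables,
-- for example (for RDB$INDEX_SEGMENTS):
select rdb$index_name, rdb$field_name, rdb$field_position
from rdb$index_segments
order by rdb$index_name, rdb$field_position;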
+FBTEST: functional.basic.db.15 """ import pytest diff --git a/tests/functional/basic/db/test_16.py b/tests/functional/basic/db/test_16.py index 5157ca7e..f2215e8d 100644 --- a/tests/functional/basic/db/test_16.py +++ b/tests/functional/basic/db/test_16.py @@ -10,6 +10,7 @@ NOTES: Moved all BLOB fields at the end of output, suppress comparison of their IDs. Added query to select FIELDS list of table because main check does not use asterisk and we have to know if DDL of table will have any changes in future. +FBTEST: functional.basic.db.16 """ import pytest diff --git a/tests/functional/basic/db/test_17.py b/tests/functional/basic/db/test_17.py index d1dffc83..9ef299d4 100644 --- a/tests/functional/basic/db/test_17.py +++ b/tests/functional/basic/db/test_17.py @@ -4,6 +4,7 @@ ID: new-database-17 TITLE: New DB - RDB$LOG_FILES content DESCRIPTION: Check the correct content of RDB$LOG_FILES in new database. +FBTEST: functional.basic.db.17 """ import pytest diff --git a/tests/functional/basic/db/test_18.py b/tests/functional/basic/db/test_18.py index 2826e5aa..8cb7113b 100644 --- a/tests/functional/basic/db/test_18.py +++ b/tests/functional/basic/db/test_18.py @@ -4,6 +4,7 @@ ID: new-database-18 TITLE: New DB - RDB$DATABASE content DESCRIPTION: Check the correct content of RDB$DATABASE in new database. +FBTEST: functional.basic.db.18 """ import pytest diff --git a/tests/functional/basic/db/test_19.py b/tests/functional/basic/db/test_19.py index c5d87d2f..3476f536 100644 --- a/tests/functional/basic/db/test_19.py +++ b/tests/functional/basic/db/test_19.py @@ -4,6 +4,7 @@ ID: new-database-19 TITLE: New DB - RDB$PROCEDURE_PARAMETERS content DESCRIPTION: Check the correct content of RDB$PROCEDURE_PARAMETERS in new database. +FBTEST: functional.basic.db.19 """ import pytest diff --git a/tests/functional/basic/db/test_20.py b/tests/functional/basic/db/test_20.py index b6641987..45cae3dc 100644 --- a/tests/functional/basic/db/test_20.py +++ b/tests/functional/basic/db/test_20.py @@ -4,6 +4,7 @@ ID: new-database-20 TITLE: New DB - RDB$PROCEDURES content DESCRIPTION: Check the correct content of RDB$PROCEDURES in new database. +FBTEST: functional.basic.db.20 """ import pytest diff --git a/tests/functional/basic/db/test_21.py b/tests/functional/basic/db/test_21.py index 509c872f..fa060014 100644 --- a/tests/functional/basic/db/test_21.py +++ b/tests/functional/basic/db/test_21.py @@ -4,6 +4,7 @@ ID: new-database-21 TITLE: New DB - RDB$REF_CONSTRAINTS content DESCRIPTION: Check the correct content of RDB$REF_CONSTRAINTS in new database. +FBTEST: functional.basic.db.21 """ import pytest diff --git a/tests/functional/basic/db/test_22.py b/tests/functional/basic/db/test_22.py index 969a9c1f..7fc7d83e 100644 --- a/tests/functional/basic/db/test_22.py +++ b/tests/functional/basic/db/test_22.py @@ -4,6 +4,7 @@ ID: new-database-22 TITLE: New DB - RDB$DATABASE content DESCRIPTION: Check the correct content of RDB$DATABASE in new database. +FBTEST: functional.basic.db.22 """ import pytest diff --git a/tests/functional/basic/db/test_23.py b/tests/functional/basic/db/test_23.py index 88fba673..ff2a2005 100644 --- a/tests/functional/basic/db/test_23.py +++ b/tests/functional/basic/db/test_23.py @@ -12,6 +12,7 @@ NOTES: 2.2. Ignore values of IDs in lines like "trigger_name: RDB$TRIGGER_**". 3. Added query to select FIELDS list of table because main check does not use asterisk and we have to know if DDL of table will have any changes in future. 
+FBTEST: functional.basic.db.23 """ import pytest diff --git a/tests/functional/basic/db/test_24.py b/tests/functional/basic/db/test_24.py index 78aee1c0..58c354f1 100644 --- a/tests/functional/basic/db/test_24.py +++ b/tests/functional/basic/db/test_24.py @@ -4,6 +4,7 @@ ID: new-database-24 TITLE: New DB - RDB$RELATION_FIELDS content DESCRIPTION: Check the correct content of RDB$RELATION_FIELDS in new database. +FBTEST: functional.basic.db.24 """ import pytest diff --git a/tests/functional/basic/db/test_25.py b/tests/functional/basic/db/test_25.py index 1b2fa3ec..78400d7a 100644 --- a/tests/functional/basic/db/test_25.py +++ b/tests/functional/basic/db/test_25.py @@ -4,6 +4,7 @@ ID: new-database-25 TITLE: New DB - RDB$ROLES content DESCRIPTION: Check the correct content of RDB$ROLES in new database. +FBTEST: functional.basic.db.25 """ import pytest diff --git a/tests/functional/basic/db/test_26.py b/tests/functional/basic/db/test_26.py index bffa5d8c..8bb0ead4 100644 --- a/tests/functional/basic/db/test_26.py +++ b/tests/functional/basic/db/test_26.py @@ -8,6 +8,7 @@ NOTES: [28.10.2015] Updated expected_stdout, added block to subst-section in order to ignore differences in values like "SQL$****" of field RDB$SECURITY_CLASS. +FBTEST: functional.basic.db.26 """ import pytest diff --git a/tests/functional/basic/db/test_27.py b/tests/functional/basic/db/test_27.py index 9c5cec17..a7a4d226 100644 --- a/tests/functional/basic/db/test_27.py +++ b/tests/functional/basic/db/test_27.py @@ -4,6 +4,7 @@ ID: new-database-27 TITLE: New DB - RDB$TRANSACTIONS content DESCRIPTION: Check the correct content of RDB$TRANSACTIONS in new database. +FBTEST: functional.basic.db.27 """ import pytest diff --git a/tests/functional/basic/db/test_28.py b/tests/functional/basic/db/test_28.py index 2232823e..b23b5508 100644 --- a/tests/functional/basic/db/test_28.py +++ b/tests/functional/basic/db/test_28.py @@ -4,6 +4,7 @@ ID: new-database-28 TITLE: New DB - RDB$TRIGGER_MESSAGES content DESCRIPTION: Check the correct content of RDB$TRIGGER_MESSAGES in new database. +FBTEST: functional.basic.db.28 """ import pytest diff --git a/tests/functional/basic/db/test_29.py b/tests/functional/basic/db/test_29.py index 6ac4a939..90ab5bb7 100644 --- a/tests/functional/basic/db/test_29.py +++ b/tests/functional/basic/db/test_29.py @@ -4,6 +4,7 @@ ID: new-database-29 TITLE: New DB - RDB$TRIGGERS content DESCRIPTION: Check the correct content of RDB$TRIGGERS in new database. +FBTEST: functional.basic.db.29 """ import pytest diff --git a/tests/functional/basic/db/test_30.py b/tests/functional/basic/db/test_30.py index 9e696d6b..fdeec0f0 100644 --- a/tests/functional/basic/db/test_30.py +++ b/tests/functional/basic/db/test_30.py @@ -4,6 +4,7 @@ ID: new-database-30 TITLE: New DB - RDB$TYPES content DESCRIPTION: Check the correct content of RDB$TYPES in new database. +FBTEST: functional.basic.db.30 """ import pytest diff --git a/tests/functional/basic/db/test_31.py b/tests/functional/basic/db/test_31.py index 7818b390..c576fa25 100644 --- a/tests/functional/basic/db/test_31.py +++ b/tests/functional/basic/db/test_31.py @@ -8,6 +8,7 @@ NOTES: [30.10.2015] field rdb$grantor now contain NULLs in all records for new empty database (since build ~32134). Confirmed by Alex that this is OK 30.10.2015 15:28. 
+FBTEST: functional.basic.db.31 """ import pytest diff --git a/tests/functional/basic/db/test_32.py b/tests/functional/basic/db/test_32.py index be766d59..d9d6c19a 100644 --- a/tests/functional/basic/db/test_32.py +++ b/tests/functional/basic/db/test_32.py @@ -4,6 +4,7 @@ ID: new-database-32 TITLE: New DB - RDB$VIEW_RELATIONS content DESCRIPTION: Check the correct content of RDB$VIEW_RELATIONS in new database. +FBTEST: functional.basic.db.32 """ import pytest diff --git a/tests/functional/basic/isql/test_00.py b/tests/functional/basic/isql/test_00.py index a5098570..e9b2dac0 100644 --- a/tests/functional/basic/isql/test_00.py +++ b/tests/functional/basic/isql/test_00.py @@ -4,6 +4,7 @@ ID: isql-01 TITLE: Check output of "HELP" and "HELP SET" commands DESCRIPTION: NB: this test can also cover issue of CORE-2432 ("Missing SHOW COLLATIONs in HELP") +FBTEST: functional.basic.isql.00 """ import pytest diff --git a/tests/functional/basic/isql/test_01.py b/tests/functional/basic/isql/test_01.py index 229f0829..c1a13d87 100644 --- a/tests/functional/basic/isql/test_01.py +++ b/tests/functional/basic/isql/test_01.py @@ -4,6 +4,7 @@ ID: isql-02 TITLE: ISQL - SHOW DATABASE DESCRIPTION: Check for correct output of SHOW DATABASE on empty database. +FBTEST: functional.basic.isql.01 """ import pytest diff --git a/tests/functional/basic/isql/test_02.py b/tests/functional/basic/isql/test_02.py index 3cd58289..715379c2 100644 --- a/tests/functional/basic/isql/test_02.py +++ b/tests/functional/basic/isql/test_02.py @@ -4,6 +4,7 @@ ID: isql-03 TITLE: ISQL - SHOW SYSTEM TABLES DESCRIPTION: Check for correct output of "SHOW SYSTEM;" command on empty database. +FBTEST: functional.basic.isql.02 """ import pytest diff --git a/tests/functional/basic/isql/test_03.py b/tests/functional/basic/isql/test_03.py index 67bd2af4..32ac0ed9 100644 --- a/tests/functional/basic/isql/test_03.py +++ b/tests/functional/basic/isql/test_03.py @@ -6,6 +6,7 @@ ISSUE: 1383 TITLE: ISQL - SHOW SYSTEM parameters DESCRIPTION: Extend ISQL SHOW SYSTEM command to accept parameters TABLES, COLLATIONS and FUNCTIONS JIRA: CORE-978 +FBTEST: functional.basic.isql.03 """ import pytest diff --git a/tests/functional/basic/isql/test_05.py b/tests/functional/basic/isql/test_05.py index e12341b1..21f5dfde 100644 --- a/tests/functional/basic/isql/test_05.py +++ b/tests/functional/basic/isql/test_05.py @@ -13,6 +13,7 @@ DESCRIPTION: PS. Best place of this test in functional folder rather than in 'issues' one. JIRA: CORE-5382 +FBTEST: functional.basic.isql.05 """ import pytest diff --git a/tests/functional/basic/test_snapshot_files_check_list.py b/tests/functional/basic/test_snapshot_files_check_list.py index 2ad8c3b5..cf4d6299 100644 --- a/tests/functional/basic/test_snapshot_files_check_list.py +++ b/tests/functional/basic/test_snapshot_files_check_list.py @@ -11,6 +11,7 @@ DESCRIPTION: Idea about this test originates to CORE-6424 (missed employee.fdb in some intermediate build), but it seems that there were several other tickets about the same (missing some of necessary files). 
+FBTEST: functional.basic.build.snapshot_files_check_list """ from __future__ import annotations diff --git a/tests/functional/database/alter/test_01.py b/tests/functional/database/alter/test_01.py index 2ba68aac..1fdb86d3 100644 --- a/tests/functional/database/alter/test_01.py +++ b/tests/functional/database/alter/test_01.py @@ -4,6 +4,7 @@ ID: alter-database-01 TITLE: Alter database: adding a secondary file DESCRIPTION: Adding a secondary file to the database +FBTEST: functional.database.alter.01 """ import pytest diff --git a/tests/functional/database/alter/test_02.py b/tests/functional/database/alter/test_02.py index fb40c915..642a6ee7 100644 --- a/tests/functional/database/alter/test_02.py +++ b/tests/functional/database/alter/test_02.py @@ -4,6 +4,7 @@ ID: alter-database-02 TITLE: Alter database: adding secondary file with alternate keyword DESCRIPTION: Adding secondary file with alternate keyword for database. +FBTEST: functional.database.alter.02 """ import pytest diff --git a/tests/functional/database/alter/test_03.py b/tests/functional/database/alter/test_03.py index 44583e27..0d46c634 100644 --- a/tests/functional/database/alter/test_03.py +++ b/tests/functional/database/alter/test_03.py @@ -4,6 +4,7 @@ ID: alter-database-03 TITLE: Alter database: add file with name of this database or previously added files must fail DESCRIPTION: Add same file twice must fail +FBTEST: functional.database.alter.03 """ import pytest diff --git a/tests/functional/database/create/test_01.py b/tests/functional/database/create/test_01.py index 6299560e..92d2c6f0 100644 --- a/tests/functional/database/create/test_01.py +++ b/tests/functional/database/create/test_01.py @@ -6,6 +6,7 @@ TITLE: Create database: set names and default character set DESCRIPTION: Check ability to specify SET NAMES and DEFAULT CHARACTER SET within one statement. NOTES: name of client charset must be enclosed in apostrophes, i.e.: create database ... set names 'win1251' ... +FBTEST: functional.database.create.01 """ import pytest diff --git a/tests/functional/database/create/test_02.py b/tests/functional/database/create/test_02.py index ee6d36d0..7170abf1 100644 --- a/tests/functional/database/create/test_02.py +++ b/tests/functional/database/create/test_02.py @@ -4,6 +4,7 @@ ID: create-database-02 TITLE: Create database: non sysdba user DESCRIPTION: +FBTEST: functional.database.create.02 """ import pytest diff --git a/tests/functional/database/create/test_03.py b/tests/functional/database/create/test_03.py index 805c8f68..b218b9b7 100644 --- a/tests/functional/database/create/test_03.py +++ b/tests/functional/database/create/test_03.py @@ -4,6 +4,7 @@ ID: create-database-03 TITLE: Create database: with PAGE_SIZE=1024: check actual size of page in the created database. DESCRIPTION: +FBTEST: functional.database.create.03 """ import pytest diff --git a/tests/functional/database/create/test_04.py b/tests/functional/database/create/test_04.py index daddda57..2e8f89f2 100644 --- a/tests/functional/database/create/test_04.py +++ b/tests/functional/database/create/test_04.py @@ -4,6 +4,7 @@ ID: create-database-04 TITLE: Create database: with PAGE_SIZE=2048: check actual size of page in the created database. 
DESCRIPTION: +FBTEST: functional.database.create.04 """ import pytest diff --git a/tests/functional/database/create/test_05.py b/tests/functional/database/create/test_05.py index 42307a4a..25c987a8 100644 --- a/tests/functional/database/create/test_05.py +++ b/tests/functional/database/create/test_05.py @@ -4,6 +4,7 @@ ID: create-database-05 TITLE: Create database: with PAGE_SIZE=4096: check actual size of page in the created database. DESCRIPTION: +FBTEST: functional.database.create.05 """ import pytest diff --git a/tests/functional/database/create/test_06.py b/tests/functional/database/create/test_06.py index 32bb6a3c..59220275 100644 --- a/tests/functional/database/create/test_06.py +++ b/tests/functional/database/create/test_06.py @@ -4,6 +4,7 @@ ID: create-database-06 TITLE: Create database: with PAGE_SIZE=8192: check actual size of page in the created database. DESCRIPTION: +FBTEST: functional.database.create.06 """ import pytest diff --git a/tests/functional/database/create/test_07.py b/tests/functional/database/create/test_07.py index 113d6f4c..7781a6c3 100644 --- a/tests/functional/database/create/test_07.py +++ b/tests/functional/database/create/test_07.py @@ -4,6 +4,7 @@ ID: create-database-07 TITLE: Create database: with PAGE_SIZE=16384: check actual size of page in the created database. DESCRIPTION: +FBTEST: functional.database.create.07 """ import pytest diff --git a/tests/functional/database/create/test_08.py b/tests/functional/database/create/test_08.py index 2177d043..69bec8ac 100644 --- a/tests/functional/database/create/test_08.py +++ b/tests/functional/database/create/test_08.py @@ -4,6 +4,7 @@ ID: create-database-08 TITLE: Create database: Multi file DB DESCRIPTION: Create database with two files. +FBTEST: functional.database.create.08 """ import pytest diff --git a/tests/functional/database/create/test_09.py b/tests/functional/database/create/test_09.py index 320ac1d0..8d388b45 100644 --- a/tests/functional/database/create/test_09.py +++ b/tests/functional/database/create/test_09.py @@ -4,6 +4,7 @@ ID: create-database-09 TITLE: Create database: Multi file DB DESCRIPTION: Create database with four files. +FBTEST: functional.database.create.09 """ import pytest diff --git a/tests/functional/database/create/test_10.py b/tests/functional/database/create/test_10.py index d567f906..1d11e7c7 100644 --- a/tests/functional/database/create/test_10.py +++ b/tests/functional/database/create/test_10.py @@ -4,6 +4,7 @@ ID: create-database-10 TITLE: Create database: Multi file DB - starting DESCRIPTION: Database with four files. Additional files specified by STARTING AT. +FBTEST: functional.database.create.10 """ import pytest diff --git a/tests/functional/database/create/test_11.py b/tests/functional/database/create/test_11.py index a2aa3ab0..ccfa23e7 100644 --- a/tests/functional/database/create/test_11.py +++ b/tests/functional/database/create/test_11.py @@ -4,6 +4,7 @@ ID: create-database-11 TITLE: Create database: Default char set NONE DESCRIPTION: This test should be implemented for all char sets. 
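-- The CREATE DATABASE checks above can be verified with ordinary queries against
-- the monitoring and system tables, e.g.:
select mon$page_size from mon$database;             -- actual page size of the new database
select rdb$character_set_name from rdb$database;    -- its default character set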
+FBTEST: functional.database.create.11 """ import pytest diff --git a/tests/functional/datatypes/test_decfloat_binding_to_legacy.py b/tests/functional/datatypes/test_decfloat_binding_to_legacy.py index 8ffd3f1a..e4beab5c 100644 --- a/tests/functional/datatypes/test_decfloat_binding_to_legacy.py +++ b/tests/functional/datatypes/test_decfloat_binding_to_legacy.py @@ -29,6 +29,7 @@ DESCRIPTION: For this reason, special replacement will be done in 'substitution' section: we replace value of hours with '??' because it is no matter what's the time there, we have to ensure only the ability to work with such time using SET BIND clause. +FBTEST: functional.datatypes.decfloat_binding_to_legacy """ import pytest diff --git a/tests/functional/datatypes/test_decfloat_binding_to_other_types.py b/tests/functional/datatypes/test_decfloat_binding_to_other_types.py index fb1c4ef6..5bb841b6 100644 --- a/tests/functional/datatypes/test_decfloat_binding_to_other_types.py +++ b/tests/functional/datatypes/test_decfloat_binding_to_other_types.py @@ -29,6 +29,7 @@ NOTES: [01.07.2020] adjusted expected output ('subtype' values). Added SET BIND from decfloat to INT128. Removed unnecessary lines from output and added substitution section for result to be properly filtered. +FBTEST: functional.datatypes.decfloat_binding_to_other_types """ import pytest diff --git a/tests/functional/datatypes/test_decfloat_boundary_values.py b/tests/functional/datatypes/test_decfloat_boundary_values.py index 11f8c4ea..054f4c51 100644 --- a/tests/functional/datatypes/test_decfloat_boundary_values.py +++ b/tests/functional/datatypes/test_decfloat_boundary_values.py @@ -7,6 +7,7 @@ JIRA: CORE-5535 TITLE: Check BOUNDARY values that are defined for DECFLOAT datatype DESCRIPTION: See doc/sql.extensions/README.data_types +FBTEST: functional.datatypes.decfloat_boundary_values """ import pytest diff --git a/tests/functional/datatypes/test_decfloat_columns_handlng.py b/tests/functional/datatypes/test_decfloat_columns_handlng.py index 78257e8d..b15bc43a 100644 --- a/tests/functional/datatypes/test_decfloat_columns_handlng.py +++ b/tests/functional/datatypes/test_decfloat_columns_handlng.py @@ -7,6 +7,7 @@ JIRA: CORE-5535 TITLE: Check ability of misc. actions against table column for DECFLOAT datatype DESCRIPTION: See doc/sql.extensions/README.data_types +FBTEST: functional.datatypes.decfloat_columns_handlng """ import pytest diff --git a/tests/functional/datatypes/test_decfloat_ddl_indices.py b/tests/functional/datatypes/test_decfloat_ddl_indices.py index aac5148c..ee05e851 100644 --- a/tests/functional/datatypes/test_decfloat_ddl_indices.py +++ b/tests/functional/datatypes/test_decfloat_ddl_indices.py @@ -7,6 +7,7 @@ ISSUE: 5803 JIRA: CORE-5535 DESCRIPTION: See doc/sql.extensions/README.data_types +FBTEST: functional.datatypes.decfloat_ddl_indices """ import pytest diff --git a/tests/functional/datatypes/test_decfloat_df16_alignment.py b/tests/functional/datatypes/test_decfloat_df16_alignment.py index bbce5d95..7beebad2 100644 --- a/tests/functional/datatypes/test_decfloat_df16_alignment.py +++ b/tests/functional/datatypes/test_decfloat_df16_alignment.py @@ -11,6 +11,7 @@ DESCRIPTION: Test is based on letter to Alex, 02.05.2017, 9:38: For ISQL 'SET LIST ON' there was auxiliary ("wrong") space character between column name for decfloat(16) and its value comparing with decfloat(34). 
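-- A minimal sketch of the SET BIND clause exercised by the decfloat binding tests above
-- (assumes a Firebird 4.0+ client and server; the resulting output format is version dependent):
set bind of decfloat to double precision;
select cast(1.5 as decfloat(16)) as df16_as_double from rdb$database;
set bind of decfloat to legacy;    -- map DECFLOAT to the closest pre-4.0 data type
set bind of decfloat to native;    -- restore the default (no conversion)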
+FBTEST: functional.datatypes.decfloat_df16_alignment """ import pytest diff --git a/tests/functional/datatypes/test_decfloat_exceptions_trapping.py b/tests/functional/datatypes/test_decfloat_exceptions_trapping.py index ff92a686..ef81bb30 100644 --- a/tests/functional/datatypes/test_decfloat_exceptions_trapping.py +++ b/tests/functional/datatypes/test_decfloat_exceptions_trapping.py @@ -12,6 +12,7 @@ DESCRIPTION: exceptional conditions cause a trap. Valid traps are: Division_by_zero, Inexact, Invalid_operation, Overflow and Underflow. By default traps are set to: Division_by_zero, Invalid_operation, Overflow, Underflow. +FBTEST: functional.datatypes.decfloat_exceptions_trapping """ import pytest diff --git a/tests/functional/datatypes/test_decfloat_literal_interpr.py b/tests/functional/datatypes/test_decfloat_literal_interpr.py index 8d1e5b1e..ad21d3f0 100644 --- a/tests/functional/datatypes/test_decfloat_literal_interpr.py +++ b/tests/functional/datatypes/test_decfloat_literal_interpr.py @@ -12,6 +12,7 @@ DESCRIPTION: Currently only double precision form of literals is checked. Literals with value out bigint scope are not checked - waiting for reply from Alex, letter 24.05.2017 21:16 +FBTEST: functional.datatypes.decfloat_literal_interpr """ import pytest diff --git a/tests/functional/datatypes/test_decfloat_literal_length.py b/tests/functional/datatypes/test_decfloat_literal_length.py index 36b43c31..79acd1e8 100644 --- a/tests/functional/datatypes/test_decfloat_literal_length.py +++ b/tests/functional/datatypes/test_decfloat_literal_length.py @@ -9,6 +9,7 @@ DESCRIPTION: See doc/sql.extensions/README.data_types Although length of DECFLOAT(34) literal can exceed 6000 bytes (0.000<6000 zeros>00123) implementation limit exists - length of such literal should not exceed 1024 bytes. +FBTEST: functional.datatypes.decfloat_literal_length """ import pytest diff --git a/tests/functional/datatypes/test_decfloat_loose_accuracy.py b/tests/functional/datatypes/test_decfloat_loose_accuracy.py index 46adcba3..ba92956c 100644 --- a/tests/functional/datatypes/test_decfloat_loose_accuracy.py +++ b/tests/functional/datatypes/test_decfloat_loose_accuracy.py @@ -7,6 +7,7 @@ DESCRIPTION: Wide range of terms can lead to wrong result of sum. 
https://en.wikipedia.org/wiki/Decimal_floating_point https://en.wikipedia.org/wiki/Kahan_summation_algorithm +FBTEST: functional.datatypes.decfloat_loose_accuracy """ import pytest diff --git a/tests/functional/datatypes/test_decfloat_min_distinguish.py b/tests/functional/datatypes/test_decfloat_min_distinguish.py index cd7a0d35..ad72e55f 100644 --- a/tests/functional/datatypes/test_decfloat_min_distinguish.py +++ b/tests/functional/datatypes/test_decfloat_min_distinguish.py @@ -4,6 +4,7 @@ ID: decfloat.min-distinguish TITLE: List of all values starting from 1.0 divided by 2, until previous and current become equal DESCRIPTION: +FBTEST: functional.datatypes.decfloat_min_distinguish """ import pytest diff --git a/tests/functional/datatypes/test_decfloat_nan_and_infinity_comparison.py b/tests/functional/datatypes/test_decfloat_nan_and_infinity_comparison.py index 245725e9..408ed031 100644 --- a/tests/functional/datatypes/test_decfloat_nan_and_infinity_comparison.py +++ b/tests/functional/datatypes/test_decfloat_nan_and_infinity_comparison.py @@ -4,6 +4,7 @@ ID: decfloat.nan-and-infinity-comparison TITLE: DECFLOAT should not throw exceptions when +/-NaN, +/-sNaN and +/-Infinity is used in comparisons DESCRIPTION: +FBTEST: functional.datatypes.decfloat_nan_and_infinity_comparison """ import pytest diff --git a/tests/functional/datatypes/test_decfloat_parsing_scaled_integers_and_bigint_max_min.py b/tests/functional/datatypes/test_decfloat_parsing_scaled_integers_and_bigint_max_min.py index 9f611fd7..3b456fa6 100644 --- a/tests/functional/datatypes/test_decfloat_parsing_scaled_integers_and_bigint_max_min.py +++ b/tests/functional/datatypes/test_decfloat_parsing_scaled_integers_and_bigint_max_min.py @@ -20,6 +20,7 @@ NOTES: Found a problem with interpreting values 170141183460469231731687303715884105727 and -170141183460469231731687303715884105728 Sent letter to Alex (01.07.2020 13:55), waiting for fix. Check of bind DECFLOAT to INT128 was deferred. +FBTEST: functional.datatypes.decfloat_parsing_scaled_integers_and_bigint_max_min """ import pytest diff --git a/tests/functional/datatypes/test_decfloat_round_modes.py b/tests/functional/datatypes/test_decfloat_round_modes.py index 39c93a5b..d9230d4b 100644 --- a/tests/functional/datatypes/test_decfloat_round_modes.py +++ b/tests/functional/datatypes/test_decfloat_round_modes.py @@ -10,6 +10,7 @@ DESCRIPTION: Sample with results of diff. rounding modes: ibm.com/developerworks/ru/library/dm-0801chainani/ Sample for round(1608.90*5/100, 2): sql.ru/forum/actualutils.aspx?action=gotomsg&tid=729836&msg=8243077 +FBTEST: functional.datatypes.decfloat_round_modes """ import pytest diff --git a/tests/functional/datatypes/test_decfloat_scalar_functions.py b/tests/functional/datatypes/test_decfloat_scalar_functions.py index ce172646..5362167e 100644 --- a/tests/functional/datatypes/test_decfloat_scalar_functions.py +++ b/tests/functional/datatypes/test_decfloat_scalar_functions.py @@ -18,6 +18,7 @@ NOTES: [21.08.2020] put literal numeric values into a table with DECFLOAT table; replaced UNIONED-code with separate statements. 
Checked on 4.0.0.2173 +FBTEST: functional.datatypes.decfloat_scalar_functions """ import pytest diff --git a/tests/functional/datatypes/test_decfloat_single_bit_in_representation.py b/tests/functional/datatypes/test_decfloat_single_bit_in_representation.py index bd36018e..aa9e0a49 100644 --- a/tests/functional/datatypes/test_decfloat_single_bit_in_representation.py +++ b/tests/functional/datatypes/test_decfloat_single_bit_in_representation.py @@ -7,6 +7,7 @@ DESCRIPTION: Get minimal distinguish from zero value for DEFCFLOAT datatype using EXP() function. Check some trivial arithmetic results for this value and pair of other values which are closest to it. See also: https://en.wikipedia.org/wiki/Decimal_floating_point +FBTEST: functional.datatypes.decfloat_single_bit_in_representation """ import pytest diff --git a/tests/functional/datatypes/test_decfloat_special_functions.py b/tests/functional/datatypes/test_decfloat_special_functions.py index 01402f68..0419cf9e 100644 --- a/tests/functional/datatypes/test_decfloat_special_functions.py +++ b/tests/functional/datatypes/test_decfloat_special_functions.py @@ -8,6 +8,7 @@ TITLE: Test functions that are designed specially for DECFLOAT handling DESCRIPTION: See doc/sql.extensions/README.data_types COMPARE_DECFLOAT; NORMALIZE_DECFLOAT; QUANTIZE; TOTALORDER +FBTEST: functional.datatypes.decfloat_special_functions """ import pytest diff --git a/tests/functional/datatypes/test_decimal_declared_scale.py b/tests/functional/datatypes/test_decimal_declared_scale.py index b65e8103..8a23c767 100644 --- a/tests/functional/datatypes/test_decimal_declared_scale.py +++ b/tests/functional/datatypes/test_decimal_declared_scale.py @@ -4,6 +4,7 @@ ID: decimal-declared-scale TITLE: Dummy test DESCRIPTION: Samples are from #3912 and #5989. +FBTEST: functional.datatypes.decimal_declared_scale """ import pytest diff --git a/tests/functional/datatypes/test_dp_single_bit_in_representation.py b/tests/functional/datatypes/test_dp_single_bit_in_representation.py index 54a6d766..20c4c382 100644 --- a/tests/functional/datatypes/test_dp_single_bit_in_representation.py +++ b/tests/functional/datatypes/test_dp_single_bit_in_representation.py @@ -4,6 +4,7 @@ ID: dp-single-bit-in-representation TITLE: Check result of EXP() which can be represented only by one ("last") significant bit DESCRIPTION: +FBTEST: functional.datatypes.dp_single_bit_in_representation """ import pytest diff --git a/tests/functional/datatypes/test_int128_binary_operations.py b/tests/functional/datatypes/test_int128_binary_operations.py index 48a31ad4..376cabcc 100644 --- a/tests/functional/datatypes/test_int128_binary_operations.py +++ b/tests/functional/datatypes/test_int128_binary_operations.py @@ -8,6 +8,7 @@ TITLE: Basic test for binary functions against INT128 datatype DESCRIPTION: Test verifies https://github.com/FirebirdSQL/firebird/commit/137c3a96e51b8bc34cb74732687067e96c971226 (Postfix for #6583: enable support of int128 in bin_* family of functions). +FBTEST: functional.datatypes.int128_binary_operations """ import pytest diff --git a/tests/functional/datatypes/test_int128_math_functions.py b/tests/functional/datatypes/test_int128_math_functions.py index 209701ce..70e4b3d5 100644 --- a/tests/functional/datatypes/test_int128_math_functions.py +++ b/tests/functional/datatypes/test_int128_math_functions.py @@ -14,6 +14,7 @@ DESCRIPTION: Some expression still can not be evaluated and produce errors - they are commented (see "deferred" here). 
See notes in https://github.com/FirebirdSQL/firebird/issues/6585 +FBTEST: functional.datatypes.int128_math_functions """ import pytest diff --git a/tests/functional/dml/cte/test_01.py b/tests/functional/dml/cte/test_01.py index 6d14aee4..ec0b9565 100644 --- a/tests/functional/dml/cte/test_01.py +++ b/tests/functional/dml/cte/test_01.py @@ -1,32 +1,26 @@ #coding:utf-8 -# -# id: functional.dml.cte.01 -# title: test for Non-Recursive CTEs -# decription: -# --Rules for Non-Recursive CTEs : -# --Multiple table expressions can be defined in one query -# --Any clause legal in a SELECT specification is legal in table expressions -# --Table expressions can reference one another -# --References between expressions should not have loops -# --Table expressions can be used within any part of the main query or another table expression -# --The same table expression can be used more than once in the main query -# --Table expressions (as subqueries) can be used in INSERT, UPDATE and DELETE statements -# --Table expressions are legal in PSQL code -# --WITH statements can not be nested -# tracker_id: -# min_versions: [] -# versions: 3.0 -# qmid: functional.dml.cte.cte_01 + +""" +ID: dml.cte-01 +TITLE: Non-Recursive CTEs +FBTEST: functional.dml.cte.01 +DESCRIPTION: + Rules for Non-Recursive CTEs : + - Multiple table expressions can be defined in one query + - Any clause legal in a SELECT specification is legal in table expressions + - Table expressions can reference one another + - References between expressions should not have loops + - Table expressions can be used within any part of the main query or another table expression + - The same table expression can be used more than once in the main query + - Table expressions (as subqueries) can be used in INSERT, UPDATE and DELETE statements + - Table expressions are legal in PSQL code + - WITH statements can not be nested +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """ +init_script = """ CREATE TABLE employee( id_employee INTEGER , prenom VARCHAR(20) ,id_department INTEGER,age INTEGER , PRIMARY KEY(id_employee)); CREATE TABLE department(id_department INTEGER, name VARCHAR(20)); @@ -45,9 +39,9 @@ INSERT INTO employee(id_employee, prenom,id_department,age) VALUES (9,'noemie',2 """ -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """WITH +test_script = """WITH repartition_by_age AS ( SELECT age/10 as trancheage , id_department, COUNT(1) AS nombre @@ -68,9 +62,9 @@ and quarentenaire.trancheage = 4 ; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ NAME JEUNE TRENTENAIRE QUANTENAIRE ==================== ===================== ===================== ===================== service compta 1 2 1 @@ -78,8 +72,7 @@ production 1 3 """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/dml/cte/test_02.py b/tests/functional/dml/cte/test_02.py index 99994877..f7893fa7 100644 --- a/tests/functional/dml/cte/test_02.py +++ b/tests/functional/dml/cte/test_02.py @@ -1,59 +1,54 @@ #coding:utf-8 
-# -# id: functional.dml.cte.02 -# title: test for Recursive CTEs -# decription: Rules for Recursive CTEs -# A recursive CTE is self-referencing (has a reference to itself) -# A recursive CTE is a UNION of recursive and non-recursive members: -# At least one non-recursive member (anchor) must be present -# Non-recursive members are placed first in the UNION -# Recursive members are separated from anchor members and from one another with UNION ALL clauses, i.e., -# non-recursive member (anchor) -# UNION [ALL | DISTINCT] -# non-recursive member (anchor) -# UNION [ALL | DISTINCT] -# non-recursive member (anchor) -# UNION ALL -# recursive member -# UNION ALL -# recursive member -# -# References between CTEs should not have loops -# Aggregates (DISTINCT, GROUP BY, HAVING) and aggregate functions (SUM, COUNT, MAX etc) are not allowed in recursive members -# A recursive member can have only one reference to itself and only in a FROM clause -# A recursive reference cannot participate in an outer join -# tracker_id: -# min_versions: [] -# versions: 3.0 -# qmid: functional.dml.cte.cte_02 - -import pytest -from firebird.qa import db_factory, isql_act, Action - -# version: 3.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """ CREATE TABLE product( id_product INTEGER , name VARCHAR(20) ,id_type_product INTEGER, PRIMARY KEY(id_product)); - CREATE TABLE type_product(id_type_product INTEGER, name VARCHAR(20),id_sub_type INTEGER); - INSERT INTO type_product(id_type_product,name,id_sub_type) values(1,'DVD',NULL); - INSERT INTO type_product(id_type_product,name,id_sub_type) values(2,'BOOK',NULL); - INSERT INTO type_product(id_type_product,name,id_sub_type) values(3,'FILM SF',1); - INSERT INTO type_product(id_type_product,name,id_sub_type) values(4,'FILM ACTION',1); - INSERT INTO type_product(id_type_product,name,id_sub_type) values(5,'FILM ROMANCE',1); - INSERT INTO product(id_product, name,id_type_product) VALUES (1,'Harry Potter 8',3 ); - INSERT INTO product(id_product, name,id_type_product) VALUES (2,'Total Recall',3 ); - INSERT INTO product(id_product, name,id_type_product) VALUES (3,'Kingdom of Heaven',3 ); - INSERT INTO product(id_product, name,id_type_product) VALUES (4,'Desperate Housewives',5 ); - INSERT INTO product(id_product, name,id_type_product) VALUES (5,'Reign over me',5 ); - INSERT INTO product(id_product, name,id_type_product) VALUES (6,'Prison Break',4 ); """ +ID: dml.cte-02 +TITLE: Recursive CTEs +FBTEST: functional.dml.cte.02 +DESCRIPTION: + Rules for Recursive CTEs + A recursive CTE is self-referencing (has a reference to itself) + A recursive CTE is a UNION of recursive and non-recursive members: + At least one non-recursive member (anchor) must be present + Non-recursive members are placed first in the UNION + Recursive members are separated from anchor members and from one another with UNION ALL clauses, i.e., + non-recursive member (anchor) + UNION [ALL | DISTINCT] + non-recursive member (anchor) + UNION [ALL | DISTINCT] + non-recursive member (anchor) + UNION ALL + recursive member + UNION ALL + recursive member -db_1 = db_factory(sql_dialect=3, init=init_script_1) + References between CTEs should not have loops + Aggregates (DISTINCT, GROUP BY, HAVING) and aggregate functions (SUM, COUNT, MAX etc) are not allowed in recursive members + A recursive member can have only one reference to itself and only in a FROM clause + A recursive reference cannot participate in an outer join +""" -test_script_1 = """WITH RECURSIVE +import pytest +from firebird.qa import * + 
+init_script = """ + CREATE TABLE product( id_product INTEGER , name VARCHAR(20) ,id_type_product INTEGER, PRIMARY KEY(id_product)); + CREATE TABLE type_product(id_type_product INTEGER, name VARCHAR(20),id_sub_type INTEGER); + INSERT INTO type_product(id_type_product,name,id_sub_type) values(1,'DVD',NULL); + INSERT INTO type_product(id_type_product,name,id_sub_type) values(2,'BOOK',NULL); + INSERT INTO type_product(id_type_product,name,id_sub_type) values(3,'FILM SF',1); + INSERT INTO type_product(id_type_product,name,id_sub_type) values(4,'FILM ACTION',1); + INSERT INTO type_product(id_type_product,name,id_sub_type) values(5,'FILM ROMANCE',1); + INSERT INTO product(id_product, name,id_type_product) VALUES (1,'Harry Potter 8',3 ); + INSERT INTO product(id_product, name,id_type_product) VALUES (2,'Total Recall',3 ); + INSERT INTO product(id_product, name,id_type_product) VALUES (3,'Kingdom of Heaven',3 ); + INSERT INTO product(id_product, name,id_type_product) VALUES (4,'Desperate Housewives',5 ); + INSERT INTO product(id_product, name,id_type_product) VALUES (5,'Reign over me',5 ); + INSERT INTO product(id_product, name,id_type_product) VALUES (6,'Prison Break',4 ); +""" + +db = db_factory(init=init_script) + +test_script = """WITH RECURSIVE TYPE_PRODUCT_RECUR (id_type_product,name,father) AS ( SELECT id_type_product ,'+ ' || name as name , id_type_product as father @@ -81,9 +76,9 @@ on C.ID_TYPE_PRODUCT = T.id_type_product; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ ID_TYPE_PRODUCT NAME COUNT_P =============== ====================== ===================== 1 + DVD 6 @@ -94,8 +89,7 @@ ID_TYPE_PRODUCT NAME COUNT_P """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/dml/delete/test_01.py b/tests/functional/dml/delete/test_01.py index 28e26be1..991ca594 100644 --- a/tests/functional/dml/delete/test_01.py +++ b/tests/functional/dml/delete/test_01.py @@ -1,34 +1,26 @@ #coding:utf-8 -# -# id: functional.dml.delete.01 -# title: DELETE -# decription: -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.dml.delete.delete_01 + +""" +ID: dml.delete-01 +FBTEST: functional.dml.delete.01 +TITLE: DELETE +DESCRIPTION: +""" import pytest from firebird.qa import db_factory, isql_act, Action -# version: 1.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE tb(id INT); +init_script = """CREATE TABLE tb(id INT); INSERT INTO tb VALUES(10); COMMIT;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """DELETE FROM tb; +test_script = """DELETE FROM tb; SELECT * FROM tb;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.execute() +act = isql_act('db', test_script) +@pytest.mark.version('>=3') +def test_1(act: Action): + act.execute() diff --git a/tests/functional/dml/delete/test_02.py b/tests/functional/dml/delete/test_02.py index 9b4cff69..28cbd8bf 100644 --- a/tests/functional/dml/delete/test_02.py +++ b/tests/functional/dml/delete/test_02.py @@ -1,43 +1,38 @@ #coding:utf-8 -# -# id: functional.dml.delete.02 
-# title: DELETE with WHERE -# decription: -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.dml.delete.delete_02 + +""" +ID: dml.delete-02 +FBTEST: functional.dml.delete.02 +TITLE: DELETE with WHERE +DESCRIPTION: +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE tb(id INT); +init_script = """CREATE TABLE tb(id INT); INSERT INTO tb VALUES(10); INSERT INTO tb VALUES(10); INSERT INTO tb VALUES(20); COMMIT;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """DELETE FROM tb WHERE id>10; +test_script = """DELETE FROM tb WHERE id>10; SELECT * FROM tb;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ ID +expected_stdout = """ +ID ============ 10 -10""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +10 +""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/dml/delete/test_03.py b/tests/functional/dml/delete/test_03.py index 72f91731..b718765a 100644 --- a/tests/functional/dml/delete/test_03.py +++ b/tests/functional/dml/delete/test_03.py @@ -1,43 +1,38 @@ #coding:utf-8 -# -# id: functional.dml.delete.03 -# title: DELETE from VIEW -# decription: -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.dml.delete.delete_03 + +""" +ID: dml.delete-03 +FBTEST: functional.dml.delete.03 +TITLE: DELETE from VIEW +DESCRIPTION: +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE tb(id INT); +init_script = """CREATE TABLE tb(id INT); CREATE VIEW test (id) AS SELECT id FROM tb; INSERT INTO tb VALUES(10); INSERT INTO tb VALUES(10); INSERT INTO tb VALUES(null); COMMIT;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """DELETE FROM test WHERE id=10; +test_script = """DELETE FROM test WHERE id=10; SELECT * FROM tb;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ ID +expected_stdout = """ +ID ============ -""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout + +""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/dml/insert/test_01.py b/tests/functional/dml/insert/test_01.py index 1bb6d157..329648ab 100644 --- a/tests/functional/dml/insert/test_01.py +++ b/tests/functional/dml/insert/test_01.py @@ -1,37 +1,31 @@ #coding:utf-8 -# -# id: functional.dml.insert.01 -# title: INSERT with Defaults -# decription: INSERT INTO -# DEFAULT VALUES -# [RETURNING ] -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.dml.insert.insert_01 + +""" +ID: dml.insert-01 +FBTEST: functional.dml.insert.01 +TITLE: INSERT with Defaults +DESCRIPTION: + INSERT INTO
+ DEFAULT VALUES + [RETURNING ] +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None -substitutions_1 = [] +db = db_factory(init="CREATE TABLE employee( prenom VARCHAR(20) default 'anonymous' , sex CHAR(1) default 'M' );") -init_script_1 = """CREATE TABLE employee( prenom VARCHAR(20) default 'anonymous' , sex CHAR(1) default 'M' );""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """insert into employee DEFAULT VALUES; +test_script = """insert into employee DEFAULT VALUES; commit; SELECT * FROM EMPLOYEE; insert into employee DEFAULT VALUES RETURNING prenom,sex; SELECT * FROM EMPLOYEE; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ PRENOM SEX ==================== ====== anonymous M @@ -48,9 +42,8 @@ anonymous M anonymous M """ -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/dml/join/test_01.py b/tests/functional/dml/join/test_01.py index e6cce9bf..5704ef19 100644 --- a/tests/functional/dml/join/test_01.py +++ b/tests/functional/dml/join/test_01.py @@ -1,46 +1,39 @@ #coding:utf-8 -# -# id: functional.dml.join.01 -# title: NAMED COLUMNS join -# decription: ::= -#
JOIN
-# USING ( ) -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.dml.join.join_01 + +""" +ID: dml.join-01 +FBTEST: functional.dml.join.01 +TITLE: NAMED COLUMNS join +DESCRIPTION: + ::= +
JOIN
+ USING ( ) +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE employee( id_employee INTEGER , prenom VARCHAR(20) ,id_department INTEGER, PRIMARY KEY(id_employee)); +init_script = """ +CREATE TABLE employee( id_employee INTEGER , prenom VARCHAR(20) ,id_department INTEGER, PRIMARY KEY(id_employee)); CREATE TABLE department(id_department INTEGER, name VARCHAR(20)); INSERT INTO department(id_department, name) values(1,'somme'); INSERT INTO department(id_department, name) values(2,'pas de calais'); INSERT INTO employee(id_employee, prenom,id_department) VALUES (1,'benoit',1 ); INSERT INTO employee(id_employee, prenom,id_department) VALUES (2,'tom',2 );""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """select employee.prenom , department.name from employee join department using (id_department);""" +act = isql_act('db', "select employee.prenom, department.name from employee join department using (id_department);") -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ +expected_stdout = """ PRENOM NAME ==================== ==================== benoit somme tom pas de calais """ -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/dml/join/test_02.py b/tests/functional/dml/join/test_02.py index 8214c8b3..8b83e7c6 100644 --- a/tests/functional/dml/join/test_02.py +++ b/tests/functional/dml/join/test_02.py @@ -1,45 +1,38 @@ #coding:utf-8 -# -# id: functional.dml.join.02 -# title: NATURAL join -# decription: ::= -#
NATURAL JOIN
-# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.dml.join.join_02 + +""" +ID: dml.join-02 +FBTEST: functional.dml.join.02 +TITLE: NATURAL join +DESCRIPTION: + ::= +
NATURAL JOIN
+""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE employee( id_employee INTEGER , prenom VARCHAR(20) ,id_department INTEGER, PRIMARY KEY(id_employee)); +init_script = """ +CREATE TABLE employee( id_employee INTEGER , prenom VARCHAR(20) ,id_department INTEGER, PRIMARY KEY(id_employee)); CREATE TABLE department(id_department INTEGER, name VARCHAR(20)); INSERT INTO department(id_department, name) values(1,'somme'); INSERT INTO department(id_department, name) values(2,'pas de calais'); INSERT INTO employee(id_employee, prenom,id_department) VALUES (1,'benoit',1 ); INSERT INTO employee(id_employee, prenom,id_department) VALUES (2,'tom',2 );""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """select employee.prenom , department.name from employee natural join department;""" +act = isql_act('db', "select employee.prenom , department.name from employee natural join department;") -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ +expected_stdout = """ PRENOM NAME ==================== ==================== benoit somme tom pas de calais """ -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/dml/merge/test_01.py b/tests/functional/dml/merge/test_01.py index 689fe57a..b2a25f08 100644 --- a/tests/functional/dml/merge/test_01.py +++ b/tests/functional/dml/merge/test_01.py @@ -1,22 +1,18 @@ #coding:utf-8 -# -# id: functional.dml.merge.01 -# title: Merge -# decription: -# tracker_id: CORE-815 -# min_versions: [] -# versions: 2.1 -# qmid: functional.dml.merge.merge_01 + +""" +ID: dml.merge-01 +FBTEST: functional.dml.merge.01 +ISSUE: 1201 +JIRA: CORE-815 +TITLE: MERGE statement +DESCRIPTION: +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE T1 (ID integer, NAME char(10), PRIMARY KEY(id)); +init_script = """CREATE TABLE T1 (ID integer, NAME char(10), PRIMARY KEY(id)); CREATE TABLE T2 ( ID integer, NAME char(10), PRIMARY KEY(id)); COMMIT; INSERT INTO T1 (ID,NAME) VALUES (1,'1NOMT1'); @@ -28,9 +24,9 @@ COMMIT; """ -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """MERGE +test_script = """MERGE INTO T1 USING (SELECT * FROM T2 WHERE id > 1) cd ON (T1.id = cd.id) @@ -41,11 +37,12 @@ test_script_1 = """MERGE INSERT (id, name) VALUES (cd.id, cd.name); COMMIT; -SELECT ID, NAME FROM T1;""" +SELECT ID, NAME FROM T1; +""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ ID NAME ============ ========== 1 1NOMT1 @@ -53,9 +50,8 @@ expected_stdout_1 = """ 3 3NOMT2 """ -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + 
act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/dml/merge/test_02.py b/tests/functional/dml/merge/test_02.py index 7fb5020a..74cdbf1f 100644 --- a/tests/functional/dml/merge/test_02.py +++ b/tests/functional/dml/merge/test_02.py @@ -1,32 +1,24 @@ #coding:utf-8 -# -# id: functional.dml.merge.02 -# title: merge STATEMENT can have only one RETURNING which must be after all WHEN sub-statements. -# decription: -# tracker_id: -# min_versions: [] -# versions: 3.0 -# qmid: + +""" +ID: dml.merge-02 +FBTEST: functional.dml.merge.02 +TITLE: MERGE statement can have only one RETURNING which must be after all WHEN sub-statements +DESCRIPTION: +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory() -substitutions_1 = [('-Token unknown .*', ''), ('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set list on; recreate table ta(id int primary key, x int, y int); recreate table tb(id int primary key, x int, y int); commit; - + insert into ta(id, x, y) values(1, 10, 100); insert into tb(id, x, y) values(1, 10, 100); commit; @@ -51,14 +43,15 @@ test_script_1 = """ ; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('-Token unknown .*', ''), ('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ DELETED_ID 1 DELETED_X 10 DELETED_Y 100 """ -expected_stderr_1 = """ + +expected_stderr = """ Statement failed, SQLSTATE = 42000 Dynamic SQL Error -SQL error code = -104 @@ -67,11 +60,9 @@ expected_stderr_1 = """ """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.expected_stderr = expected_stderr + act.execute() + assert (act.clean_stderr == act.clean_expected_stderr and + act.clean_stdout == act.clean_expected_stdout) diff --git a/tests/functional/dml/merge/test_03.py b/tests/functional/dml/merge/test_03.py index 93a36fb9..1e1875b4 100644 --- a/tests/functional/dml/merge/test_03.py +++ b/tests/functional/dml/merge/test_03.py @@ -1,28 +1,18 @@ #coding:utf-8 -# -# id: functional.dml.merge.03 -# title: MERGE ... RETURNING must refer either ALIAS of the table (if it is defined) or context variables OLD and NEW -# decription: -# Checked on 4.0.0.2240 -# -# tracker_id: -# min_versions: [] -# versions: 4.0 -# qmid: + +""" +ID: dml.merge-03 +FBTEST: functional.dml.merge.03 +TITLE: MERGE ... 
RETURNING must refer either ALIAS of the table (if it is defined) or context variables OLD and NEW +DESCRIPTION: +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() -substitutions_1 = [('-At line .*', ''), ('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set list on; recreate table test_a(id int primary key, x int); @@ -41,7 +31,7 @@ test_script_1 = """ delete returning test_b.id, test_b.x ; - rollback; + rollback; -- [ 2 ] must PASS: merge into test_b t @@ -64,9 +54,9 @@ test_script_1 = """ rollback; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('-At line .*', ''), ('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ OLD_ID 1 OLD_T_X 100 @@ -75,7 +65,8 @@ expected_stdout_1 = """ NEW_ID -2 NEW_X -101 """ -expected_stderr_1 = """ + +expected_stderr = """ Statement failed, SQLSTATE = 42S22 Dynamic SQL Error -SQL error code = -206 @@ -84,11 +75,9 @@ expected_stderr_1 = """ """ @pytest.mark.version('>=4.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.expected_stderr = expected_stderr + act.execute() + assert (act.clean_stderr == act.clean_expected_stderr and + act.clean_stdout == act.clean_expected_stdout) diff --git a/tests/functional/dml/update_or_insert/test_01.py b/tests/functional/dml/update_or_insert/test_01.py index 49599d8b..156e5610 100644 --- a/tests/functional/dml/update_or_insert/test_01.py +++ b/tests/functional/dml/update_or_insert/test_01.py @@ -1,33 +1,25 @@ #coding:utf-8 -# -# id: functional.dml.update_or_insert.01 -# title: UPDATE OR INSERT -# decription: Simple UPDATE OR INSERT -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.dml.update_or_insert.update_or_insert_01 + +""" +ID: dml.update-or-insert-01 +FBTEST: functional.dml.update_or_insert.01 +TITLE: UPDATE OR INSERT +DESCRIPTION: Simple UPDATE OR INSERT +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +db = db_factory(init="CREATE TABLE TMPTEST( id INTEGER , name VARCHAR(20) , PRIMARY KEY(id));") -substitutions_1 = [] - -init_script_1 = """CREATE TABLE TMPTEST( id INTEGER , name VARCHAR(20) , PRIMARY KEY(id));""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """UPDATE OR INSERT INTO TMPTEST(id, name) VALUES (1,'bob' ); +test_script = """UPDATE OR INSERT INTO TMPTEST(id, name) VALUES (1,'bob' ); select name from TMPTEST where id =1; UPDATE OR INSERT INTO TMPTEST(id, name) VALUES (1,'ivan' ); select name from TMPTEST where id =1;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ NAME ==================== bob @@ -38,9 +30,8 @@ NAME ivan """ -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + 
act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/dml/update_or_insert/test_02.py b/tests/functional/dml/update_or_insert/test_02.py index 5338476b..c443b768 100644 --- a/tests/functional/dml/update_or_insert/test_02.py +++ b/tests/functional/dml/update_or_insert/test_02.py @@ -1,52 +1,41 @@ #coding:utf-8 -# -# id: functional.dml.update_or_insert.02 -# title: UPDATE OR INSERT -# decription: WITH RETURNING Clause -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.dml.update_or_insert.update_or_insert_02 + +""" +ID: dml.update-or-insert-02 +FBTEST: functional.dml.update_or_insert.02 +TITLE: UPDATE OR INSERT +DESCRIPTION: With RETURNING clause +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE TMPTEST( id INTEGER , name VARCHAR(20) , PRIMARY KEY(id));""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """SET TERM ^ ; +db = db_factory(init="CREATE TABLE TMPTEST( id INTEGER , name VARCHAR(20) , PRIMARY KEY(id));") +test_script = """ +SET TERM ^ ; EXECUTE BLOCK RETURNS (V integer) AS - BEGIN UPDATE OR INSERT INTO TMPTEST(id, name) VALUES (1,'ivan' ) RETURNING id INTO :V; SUSPEND; END^ +SET TERM ; ^ +""" +act = isql_act('db', test_script) -SET TERM ; ^""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ +expected_stdout = """ V ============ 1 """ -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/dml/update_or_insert/test_03.py b/tests/functional/dml/update_or_insert/test_03.py index cf8a65c8..9925b38b 100644 --- a/tests/functional/dml/update_or_insert/test_03.py +++ b/tests/functional/dml/update_or_insert/test_03.py @@ -1,26 +1,18 @@ #coding:utf-8 -# -# id: functional.dml.update_or_insert.03 -# title: UPDATE OR INSERT -# decription: MATCHING Clause -# tracker_id: -# min_versions: [] -# versions: 2.5.0 -# qmid: functional.dml.update_or_insert.update_or_insert_03 + +""" +ID: dml.update-or-insert-03 +FBTEST: functional.dml.update_or_insert.03 +TITLE: UPDATE OR INSERT +DESCRIPTION: MATCHING clause +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5.0 -# resources: None +db = db_factory(init="CREATE TABLE TMPTEST_NOKEY ( id INTEGER , name VARCHAR(20));") -substitutions_1 = [] - -init_script_1 = """CREATE TABLE TMPTEST_NOKEY ( id INTEGER , name VARCHAR(20));""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """UPDATE OR INSERT INTO TMPTEST_NOKEY(id, name) VALUES (1,'ivan' ) +test_script = """UPDATE OR INSERT INTO TMPTEST_NOKEY(id, name) VALUES (1,'ivan' ) MATCHING (id); select name from TMPTEST_NOKEY where id =1; @@ -32,9 +24,9 @@ select name from TMPTEST_NOKEY where id =1; UPDATE OR INSERT INTO TMPTEST_NOKEY(id, name) VALUES (1,'ivan' );""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ NAME ==================== ivan @@ -44,16 +36,15 @@ NAME ==================== bob """ -expected_stderr_1 = 
"""Statement failed, SQLSTATE = 22000 + +expected_stderr = """Statement failed, SQLSTATE = 22000 Dynamic SQL Error -Primary key required on table TMPTEST_NOKEY""" -@pytest.mark.version('>=2.5.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.expected_stderr = expected_stderr + act.execute() + assert (act.clean_stderr == act.clean_expected_stderr and + act.clean_stdout == act.clean_expected_stdout) diff --git a/tests/functional/domain/alter/test_01.py b/tests/functional/domain/alter/test_01.py index 72784f1d..2b1d0351 100644 --- a/tests/functional/domain/alter/test_01.py +++ b/tests/functional/domain/alter/test_01.py @@ -1,28 +1,22 @@ #coding:utf-8 -# -# id: functional.domain.alter.01 -# title: ALTER DOMAIN - SET DEFAULT -# decription: -# tracker_id: -# min_versions: [] -# versions: 2.0 -# qmid: functional.domain.alter.alter_domain_01 + +""" +ID: domain.alter-01 +FBTEST: functional.domain.alter.01 +TITLE: ALTER DOMAIN - SET DEFAULT +DESCRIPTION: +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.0 -# resources: None - -substitutions_1 = [('RDB\\$DEFAULT_SOURCE.*', '')] - -init_script_1 = """ +init_script = """ create domain test varchar(63); """ -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """ +test_script = """ alter domain test set default 'test string'; commit; set list on; @@ -31,17 +25,16 @@ test_script_1 = """ from rdb$fields where rdb$field_name=upper('test'); """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('RDB\\$DEFAULT_SOURCE.*', '')]) -expected_stdout_1 = """ +expected_stdout = """ RDB$FIELD_NAME TEST RDB$DEFAULT_SOURCE 2:1e1 default 'test string' """ -@pytest.mark.version('>=2.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/domain/alter/test_02.py b/tests/functional/domain/alter/test_02.py index f4833e96..d2240c5e 100644 --- a/tests/functional/domain/alter/test_02.py +++ b/tests/functional/domain/alter/test_02.py @@ -1,29 +1,23 @@ #coding:utf-8 -# -# id: functional.domain.alter.02 -# title: ALTER DOMAIN - DROP DEFAULT -# decription: -# tracker_id: -# min_versions: [] -# versions: 2.0 -# qmid: functional.domain.alter.alter_domain_02 + +""" +ID: domain.alter-02 +FBTEST: functional.domain.alter.02 +TITLE: ALTER DOMAIN - DROP DEFAULT +DESCRIPTION: +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """ +init_script = """ create domain test varchar(63) default 'test string'; commit; """ -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """ +test_script = """ alter domain test drop default; commit; set list on; @@ -32,16 +26,15 @@ test_script_1 = """ where rdb$field_name = upper('test'); """ -act_1 = 
isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ RDB$FIELD_NAME TEST RDB$DEFAULT_SOURCE """ -@pytest.mark.version('>=2.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/domain/alter/test_03.py b/tests/functional/domain/alter/test_03.py index da599c94..65fbd4bb 100644 --- a/tests/functional/domain/alter/test_03.py +++ b/tests/functional/domain/alter/test_03.py @@ -1,46 +1,39 @@ #coding:utf-8 -# -# id: functional.domain.alter.03 -# title: ALTER DOMAIN - ADD CONSTRAINT -# decription: -# tracker_id: -# min_versions: [] -# versions: 2.0 -# qmid: functional.domain.alter.alter_domain_03 + +""" +ID: domain.alter-03 +FBTEST: functional.domain.alter.03 +TITLE: ALTER DOMAIN - ADD CONSTRAINT +DESCRIPTION: +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.0 -# resources: None - -substitutions_1 = [('RDB\\$VALIDATION_SOURCE.*', '')] - -init_script_1 = """ +init_script = """ create domain test varchar(63); commit; """ -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """ +test_script = """ alter domain test add constraint check (value like 'te%'); commit; set list on; set blob all; - select rdb$field_name, rdb$validation_source from rdb$fields where rdb$field_name=upper('test'); + select rdb$field_name, rdb$validation_source from rdb$fields where rdb$field_name=upper('test'); """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('RDB\\$VALIDATION_SOURCE.*', '')]) -expected_stdout_1 = """ +expected_stdout = """ RDB$FIELD_NAME TEST check (value like 'te%') """ -@pytest.mark.version('>=2.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/domain/alter/test_04.py b/tests/functional/domain/alter/test_04.py index 47a4c5ba..c0d54e3b 100644 --- a/tests/functional/domain/alter/test_04.py +++ b/tests/functional/domain/alter/test_04.py @@ -1,29 +1,23 @@ #coding:utf-8 -# -# id: functional.domain.alter.04 -# title: ALTER DOMAIN - DROP CONSTRAINT -# decription: -# tracker_id: -# min_versions: [] -# versions: 2.0 -# qmid: functional.domain.alter.alter_domain_04 + +""" +ID: domain.alter-04 +FBTEST: functional.domain.alter.04 +TITLE: ALTER DOMAIN - DROP CONSTRAINT +DESCRIPTION: +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """ +init_script = """ create domain test varchar(63) check(value like 'te%'); commit; """ -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """ +test_script = """ set list on; alter domain test drop constraint; commit; @@ -32,16 +26,15 @@ test_script_1 = """ where rdb$field_name = upper('test'); """ -act_1 = isql_act('db_1', test_script_1, 
substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ RDB$FIELD_NAME TEST RDB$VALIDATION_SOURCE """ -@pytest.mark.version('>=2.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/domain/alter/test_05.py b/tests/functional/domain/alter/test_05.py index 0ea71cc0..6f4b4899 100644 --- a/tests/functional/domain/alter/test_05.py +++ b/tests/functional/domain/alter/test_05.py @@ -1,38 +1,27 @@ #coding:utf-8 -# -# id: functional.domain.alter.05 -# title: ALTER DOMAIN - Alter domain that doesn't exists -# decription: -# tracker_id: -# min_versions: [] -# versions: 3.0 -# qmid: functional.domain.alter.alter_domain_05 + +""" +ID: domain.alter-05 +FBTEST: functional.domain.alter.05 +TITLE: ALTER DOMAIN - Alter domain that doesn't exists +DESCRIPTION: +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory(init="CREATE DOMAIN test VARCHAR(63);") -substitutions_1 = [] +act = isql_act('db', "ALTER DOMAIN notexists DROP CONSTRAINT;") -init_script_1 = """CREATE DOMAIN test VARCHAR(63);""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ALTER DOMAIN notexists DROP CONSTRAINT;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stderr_1 = """Statement failed, SQLSTATE = 42000 +expected_stderr = """Statement failed, SQLSTATE = 42000 unsuccessful metadata update -ALTER DOMAIN NOTEXISTS failed -Domain not found """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - +def test_1(act: Action): + act.expected_stderr = expected_stderr + act.execute() + assert act.clean_stderr == act.clean_expected_stderr diff --git a/tests/functional/domain/create/test_01.py b/tests/functional/domain/create/test_01.py index 130595a8..b7789020 100644 --- a/tests/functional/domain/create/test_01.py +++ b/tests/functional/domain/create/test_01.py @@ -1,35 +1,26 @@ #coding:utf-8 -# -# id: functional.domain.create.01 -# title: CREATE DOMAIN - SMALLINT -# decription: Simple domain creation based SMALLINT datatype. 
-# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.domain.create.create_domain_01 + +""" +ID: domain.create-01 +FBTEST: functional.domain.create.01 +TITLE: CREATE DOMAIN - SMALLINT +DESCRIPTION: Simple domain creation based SMALLINT datatype +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """CREATE DOMAIN test SMALLINT; +test_script = """CREATE DOMAIN test SMALLINT; SHOW DOMAIN test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """TEST SMALLINT Nullable""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """TEST SMALLINT Nullable""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/domain/create/test_02.py b/tests/functional/domain/create/test_02.py index 4ae19cad..08b92f0e 100644 --- a/tests/functional/domain/create/test_02.py +++ b/tests/functional/domain/create/test_02.py @@ -1,35 +1,26 @@ #coding:utf-8 -# -# id: functional.domain.create.02 -# title: Simple domain creation based INTEGER datatype. -# decription: Simple domain creation based INTEGER datatype. -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.domain.create.create_domain_02 + +""" +ID: domain.create-02 +FBTEST: functional.domain.create.02 +TITLE: CREATE DOMAIN - INTEGER +DESCRIPTION: Simple domain creation based INTEGER datatype +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """CREATE DOMAIN test INTEGER; +test_script = """CREATE DOMAIN test INTEGER; SHOW DOMAIN test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """TEST INTEGER Nullable""" +expected_stdout = """TEST INTEGER Nullable""" @pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/domain/create/test_03.py b/tests/functional/domain/create/test_03.py index ded4e7ce..46a83924 100644 --- a/tests/functional/domain/create/test_03.py +++ b/tests/functional/domain/create/test_03.py @@ -1,35 +1,26 @@ #coding:utf-8 -# -# id: functional.domain.create.03 -# title: CREATE DOMAIN - INT -# decription: Simple domain creation based INT datatype. 
-# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.domain.create.create_domain_03 + +""" +ID: domain.create-03 +FBTEST: functional.domain.create.03 +TITLE: CREATE DOMAIN - INT +DESCRIPTION: Simple domain creation based INT datatype +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """CREATE DOMAIN test INT; +test_script = """CREATE DOMAIN test INT; SHOW DOMAIN test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """TEST INTEGER Nullable""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """TEST INTEGER Nullable""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/domain/create/test_04.py b/tests/functional/domain/create/test_04.py index 492d5436..5566c759 100644 --- a/tests/functional/domain/create/test_04.py +++ b/tests/functional/domain/create/test_04.py @@ -1,35 +1,26 @@ #coding:utf-8 -# -# id: functional.domain.create.04 -# title: CREATE DOMAIN - FLOAT -# decription: Simple domain creation based FLOAT datatype. -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.domain.create.create_domain_04 + +""" +ID: domain.create-04 +FBTEST: functional.domain.create.04 +TITLE: CREATE DOMAIN - FLOAT +DESCRIPTION: Simple domain creation based FLOAT datatype +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """CREATE DOMAIN test FLOAT; +test_script = """CREATE DOMAIN test FLOAT; SHOW DOMAIN test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """TEST FLOAT Nullable""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """TEST FLOAT Nullable""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/domain/create/test_05.py b/tests/functional/domain/create/test_05.py index 69f6210b..cfe128a2 100644 --- a/tests/functional/domain/create/test_05.py +++ b/tests/functional/domain/create/test_05.py @@ -1,35 +1,26 @@ #coding:utf-8 -# -# id: functional.domain.create.05 -# title: CREATE DOMAIN - DOUBLE PRECISION -# decription: Simple domain creation based DOUBLE PRECISION datatype. 
-# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.domain.create.create_domain_05 + +""" +ID: domain.create-05 +FBTEST: functional.domain.create.05 +TITLE: CREATE DOMAIN - DOUBLE PRECISION +DESCRIPTION: Simple domain creation based DOUBLE PRECISION datatype +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """CREATE DOMAIN test DOUBLE PRECISION; +test_script = """CREATE DOMAIN test DOUBLE PRECISION; SHOW DOMAIN test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """TEST DOUBLE PRECISION Nullable""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """TEST DOUBLE PRECISION Nullable""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/domain/create/test_06.py b/tests/functional/domain/create/test_06.py index 15223789..a4fa02b6 100644 --- a/tests/functional/domain/create/test_06.py +++ b/tests/functional/domain/create/test_06.py @@ -1,36 +1,27 @@ #coding:utf-8 -# -# id: functional.domain.create.06 -# title: CREATE DOMAIN - DOUBLE PRECISION - ARRAY -# decription: Array domain creation based DOUBLE PRECISION datatype. -# tracker_id: -# min_versions: [] -# versions: 2.0 -# qmid: functional.domain.create.create_domain_06 + +""" +ID: domain.create-06 +FBTEST: functional.domain.create.06 +TITLE: CREATE DOMAIN - DOUBLE PRECISION - ARRAY +DESCRIPTION: Array domain creation based DOUBLE PRECISION datatype +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """CREATE DOMAIN test DOUBLE PRECISION[7]; +test_script = """CREATE DOMAIN test DOUBLE PRECISION[7]; SHOW DOMAIN test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """TEST ARRAY OF [7] +expected_stdout = """TEST ARRAY OF [7] DOUBLE PRECISION Nullable""" -@pytest.mark.version('>=2.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/domain/create/test_07.py b/tests/functional/domain/create/test_07.py index 5301a831..7894b3f7 100644 --- a/tests/functional/domain/create/test_07.py +++ b/tests/functional/domain/create/test_07.py @@ -1,35 +1,26 @@ #coding:utf-8 -# -# id: functional.domain.create.07 -# title: CREATE DOMAIN - DATE -# decription: Simple domain creation based DATE datatype. 
-# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.domain.create.create_domain_07 + +""" +ID: domain.create-07 +FBTEST: functional.domain.create.07 +TITLE: CREATE DOMAIN - DATE +DESCRIPTION: Simple domain creation based DATE datatype +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """CREATE DOMAIN test DATE; +test_script = """CREATE DOMAIN test DATE; SHOW DOMAIN test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """TEST DATE Nullable""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """TEST DATE Nullable""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/domain/create/test_08.py b/tests/functional/domain/create/test_08.py index 36712de4..706a753f 100644 --- a/tests/functional/domain/create/test_08.py +++ b/tests/functional/domain/create/test_08.py @@ -1,35 +1,26 @@ #coding:utf-8 -# -# id: functional.domain.create.08 -# title: CREATE DOMAIN - TIME -# decription: Simple domain creation based TIME datatype. -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.domain.create.create_domain_08 + +""" +ID: domain.create-08 +FBTEST: functional.domain.create.08 +TITLE: CREATE DOMAIN - TIME +DESCRIPTION: Simple domain creation based TIME datatype +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """CREATE DOMAIN test TIME; +test_script = """CREATE DOMAIN test TIME; SHOW DOMAIN test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """TEST TIME Nullable""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """TEST TIME Nullable""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/domain/create/test_09.py b/tests/functional/domain/create/test_09.py index 24616d27..eeac7ccc 100644 --- a/tests/functional/domain/create/test_09.py +++ b/tests/functional/domain/create/test_09.py @@ -1,35 +1,26 @@ #coding:utf-8 -# -# id: functional.domain.create.09 -# title: CREATE DOMAIN - TIMESTAMP -# decription: Simple domain creation based TIMESTAMP datatype. 
-# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.domain.create.create_domain_09 + +""" +ID: domain.create-09 +FBTEST: functional.domain.create.09 +TITLE: CREATE DOMAIN - TIMESTAMP +DESCRIPTION: Simple domain creation based TIMESTAMP datatype +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """CREATE DOMAIN test TIMESTAMP; +test_script = """CREATE DOMAIN test TIMESTAMP; SHOW DOMAIN test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """TEST TIMESTAMP Nullable""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """TEST TIMESTAMP Nullable""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/domain/create/test_10.py b/tests/functional/domain/create/test_10.py index 1c5bd74b..bdbdcd10 100644 --- a/tests/functional/domain/create/test_10.py +++ b/tests/functional/domain/create/test_10.py @@ -1,36 +1,27 @@ #coding:utf-8 -# -# id: functional.domain.create.10 -# title: CREATE DOMAIN - TIMESTAMP ARRAY -# decription: Array domain creation based TIMESTAMP datatype. -# tracker_id: -# min_versions: [] -# versions: 2.0 -# qmid: functional.domain.create.create_domain_10 + +""" +ID: domain.create-10 +FBTEST: functional.domain.create.10 +TITLE: CREATE DOMAIN - TIMESTAMP ARRAY +DESCRIPTION: Array domain creation based TIMESTAMP datatype +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """CREATE DOMAIN test TIMESTAMP [1024]; +test_script = """CREATE DOMAIN test TIMESTAMP [1024]; SHOW DOMAIN test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """TEST ARRAY OF [1024] +expected_stdout = """TEST ARRAY OF [1024] TIMESTAMP Nullable""" -@pytest.mark.version('>=2.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/domain/create/test_11.py b/tests/functional/domain/create/test_11.py index 70ae43bb..01e0b4e4 100644 --- a/tests/functional/domain/create/test_11.py +++ b/tests/functional/domain/create/test_11.py @@ -1,35 +1,26 @@ #coding:utf-8 -# -# id: functional.domain.create.11 -# title: CREATE DOMAIN - DECIMAL -# decription: Simple domain creation based DECIMAL datatype. 
-# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.domain.create.create_domain_11 + +""" +ID: domain.create-11 +FBTEST: functional.domain.create.11 +TITLE: CREATE DOMAIN - DECIMAL +DESCRIPTION: Simple domain creation based DECIMAL datatype +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """CREATE DOMAIN test DECIMAL(18,4); +test_script = """CREATE DOMAIN test DECIMAL(18,4); SHOW DOMAIN test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """TEST DECIMAL(18, 4) Nullable""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """TEST DECIMAL(18, 4) Nullable""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/domain/create/test_12.py b/tests/functional/domain/create/test_12.py index 6b67ec1a..b45d6980 100644 --- a/tests/functional/domain/create/test_12.py +++ b/tests/functional/domain/create/test_12.py @@ -1,36 +1,27 @@ #coding:utf-8 -# -# id: functional.domain.create.12 -# title: CREATE DOMAIN - DECIMAL ARRAY -# decription: Array domain creation based DECIMAL datatype. -# tracker_id: -# min_versions: [] -# versions: 2.0 -# qmid: functional.domain.create.create_domain_12 + +""" +ID: domain.create-12 +FBTEST: functional.domain.create.12 +TITLE: CREATE DOMAIN - DECIMAL ARRAY +DESCRIPTION: Array domain creation based DECIMAL datatype +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """CREATE DOMAIN test DECIMAL(18,18)[32768]; +test_script = """CREATE DOMAIN test DECIMAL(18,18)[32768]; SHOW DOMAIN test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """TEST ARRAY OF [32768] +expected_stdout = """TEST ARRAY OF [32768] DECIMAL(18, 18) Nullable""" -@pytest.mark.version('>=2.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/domain/create/test_13.py b/tests/functional/domain/create/test_13.py index 127f1f06..1fc46f1f 100644 --- a/tests/functional/domain/create/test_13.py +++ b/tests/functional/domain/create/test_13.py @@ -1,35 +1,26 @@ #coding:utf-8 -# -# id: functional.domain.create.13 -# title: CREATE DOMAIN - NUMERIC -# decription: Simple domain creation based on NUMERIC datatype. 
-# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.domain.create.create_domain_13 + +""" +ID: domain.create-13 +FBTEST: functional.domain.create.13 +TITLE: CREATE DOMAIN - NUMERIC +DESCRIPTION: Simple domain creation based NUMERIC datatype +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """CREATE DOMAIN test NUMERIC(18,18); +test_script = """CREATE DOMAIN test NUMERIC(18,18); SHOW DOMAIN test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """TEST NUMERIC(18, 18) Nullable""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """TEST NUMERIC(18, 18) Nullable""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/domain/create/test_14.py b/tests/functional/domain/create/test_14.py index cead26b5..647410b1 100644 --- a/tests/functional/domain/create/test_14.py +++ b/tests/functional/domain/create/test_14.py @@ -1,36 +1,27 @@ #coding:utf-8 -# -# id: functional.domain.create.14 -# title: CREATE DOMAIN - NUMERIC ARRAY -# decription: Array domain creation based on NUMERIC datatype. -# tracker_id: -# min_versions: [] -# versions: 2.0 -# qmid: functional.domain.create.create_domain_14 + +""" +ID: domain.create-14 +FBTEST: functional.domain.create.14 +TITLE: CREATE DOMAIN - NUMERIC ARRAY +DESCRIPTION: Array domain creation based on NUMERIC datatype +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """CREATE DOMAIN test NUMERIC(18,18)[32768]; +test_script = """CREATE DOMAIN test NUMERIC(18,18)[32768]; SHOW DOMAIN test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """TEST ARRAY OF [32768] +expected_stdout = """TEST ARRAY OF [32768] NUMERIC(18, 18) Nullable""" -@pytest.mark.version('>=2.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/domain/create/test_15.py b/tests/functional/domain/create/test_15.py index 06cf62fb..e99a44c1 100644 --- a/tests/functional/domain/create/test_15.py +++ b/tests/functional/domain/create/test_15.py @@ -1,35 +1,26 @@ #coding:utf-8 -# -# id: functional.domain.create.15 -# title: CREATE DOMAIN - CHAR -# decription: Simple domain creation based CHAR datatype. 
-# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.domain.create.create_domain_15 + +""" +ID: domain.create-15 +FBTEST: functional.domain.create.15 +TITLE: CREATE DOMAIN - CHAR +DESCRIPTION: Simple domain creation based CHAR datatype +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """CREATE DOMAIN test CHAR(300); +test_script = """CREATE DOMAIN test CHAR(300); SHOW DOMAIN test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """TEST CHAR(300) Nullable""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """TEST CHAR(300) Nullable""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/domain/create/test_16.py b/tests/functional/domain/create/test_16.py index 9dc6ff89..9121e1c4 100644 --- a/tests/functional/domain/create/test_16.py +++ b/tests/functional/domain/create/test_16.py @@ -1,35 +1,26 @@ #coding:utf-8 -# -# id: functional.domain.create.16 -# title: CREATE DOMAIN - CHARACTER -# decription: Simple domain creation based CHARACTER datatype. -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.domain.create.create_domain_16 + +""" +ID: domain.create-16 +FBTEST: functional.domain.create.16 +TITLE: CREATE DOMAIN - CHARACTER +DESCRIPTION: Simple domain creation based CHARACTER datatype +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """CREATE DOMAIN test CHARACTER(32767); +test_script = """CREATE DOMAIN test CHARACTER(32767); SHOW DOMAIN test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """TEST CHAR(32767) Nullable""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """TEST CHAR(32767) Nullable""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/domain/create/test_17.py b/tests/functional/domain/create/test_17.py index 7f0b9d00..e9e31c28 100644 --- a/tests/functional/domain/create/test_17.py +++ b/tests/functional/domain/create/test_17.py @@ -1,35 +1,26 @@ #coding:utf-8 -# -# id: functional.domain.create.17 -# title: CREATE DOMAIN - CHARACTER VARYING -# decription: Simple domain creation based CHARACTER VARYING datatype. 
-# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.domain.create.create_domain_17 + +""" +ID: domain.create-17 +FBTEST: functional.domain.create.17 +TITLE: CREATE DOMAIN - CHARACTER VARYING +DESCRIPTION: Simple domain creation based CHARACTER VARYING datatype +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """CREATE DOMAIN test CHARACTER VARYING(1); +test_script = """CREATE DOMAIN test CHARACTER VARYING(1); SHOW DOMAIN test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """TEST VARCHAR(1) Nullable""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """TEST VARCHAR(1) Nullable""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/domain/create/test_18.py b/tests/functional/domain/create/test_18.py index 3dd1dc8e..4dc90603 100644 --- a/tests/functional/domain/create/test_18.py +++ b/tests/functional/domain/create/test_18.py @@ -1,35 +1,26 @@ #coding:utf-8 -# -# id: functional.domain.create.18 -# title: CREATE DOMAIN - VARCHAR -# decription: Simple domain creation based VARCHAR datatype. -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.domain.create.create_domain_18 + +""" +ID: domain.create-18 +FBTEST: functional.domain.create.18 +TITLE: CREATE DOMAIN - VARCHAR +DESCRIPTION: Simple domain creation based VARCHAR datatype +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """CREATE DOMAIN test VARCHAR(32765); +test_script = """CREATE DOMAIN test VARCHAR(32765); SHOW DOMAIN test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """TEST VARCHAR(32765) Nullable""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """TEST VARCHAR(32765) Nullable""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/domain/create/test_19.py b/tests/functional/domain/create/test_19.py index bc13718e..cf78cd84 100644 --- a/tests/functional/domain/create/test_19.py +++ b/tests/functional/domain/create/test_19.py @@ -1,36 +1,27 @@ #coding:utf-8 -# -# id: functional.domain.create.19 -# title: CREATE DOMAIN - VARCHAR - ARRAY -# decription: Array domain creation based on VARCHAR datatype. 
-# tracker_id: -# min_versions: [] -# versions: 2.0 -# qmid: functional.domain.create.create_domain_19 + +""" +ID: domain.create-19 +FBTEST: functional.domain.create.19 +TITLE: CREATE DOMAIN - VARCHAR - ARRAY +DESCRIPTION: Array domain creation based on VARCHAR datatype +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """CREATE DOMAIN test VARCHAR(32765)[40000]; +test_script = """CREATE DOMAIN test VARCHAR(32765)[40000]; SHOW DOMAIN test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """TEST ARRAY OF [40000] +expected_stdout = """TEST ARRAY OF [40000] VARCHAR(32765) Nullable""" -@pytest.mark.version('>=2.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/domain/create/test_20.py b/tests/functional/domain/create/test_20.py index d113dae6..f3eca48e 100644 --- a/tests/functional/domain/create/test_20.py +++ b/tests/functional/domain/create/test_20.py @@ -1,35 +1,26 @@ #coding:utf-8 -# -# id: functional.domain.create.20 -# title: CREATE DOMAIN - VARCHAR CHARACTER SET -# decription: Domain creation based on VARCHAR datatype with CHARACTER SET specification. -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.domain.create.create_domain_20 + +""" +ID: domain.create-20 +FBTEST: functional.domain.create.20 +TITLE: CREATE DOMAIN - VARCHAR CHARACTER SET +DESCRIPTION: Domain creation based on VARCHAR datatype with CHARACTER SET specification +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """CREATE DOMAIN test VARCHAR(32765) CHARACTER SET ASCII; +test_script = """CREATE DOMAIN test VARCHAR(32765) CHARACTER SET ASCII; SHOW DOMAIN test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """TEST VARCHAR(32765) CHARACTER SET ASCII Nullable""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """TEST VARCHAR(32765) CHARACTER SET ASCII Nullable""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/domain/create/test_21.py b/tests/functional/domain/create/test_21.py index d3aa7ac5..a96afc56 100644 --- a/tests/functional/domain/create/test_21.py +++ b/tests/functional/domain/create/test_21.py @@ -1,35 +1,26 @@ #coding:utf-8 -# -# id: functional.domain.create.21 -# title: CREATE DOMAIN - NCHAR -# decription: Simple domain creation based on NCHAR datatype. 
-# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.domain.create.create_domain_21 + +""" +ID: domain.create-21 +FBTEST: functional.domain.create.21 +TITLE: CREATE DOMAIN - NCHAR +DESCRIPTION: Simple domain creation based NCHAR datatype +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """CREATE DOMAIN test NCHAR(32767); +test_script = """CREATE DOMAIN test NCHAR(32767); SHOW DOMAIN test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """TEST CHAR(32767) CHARACTER SET ISO8859_1 Nullable""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """TEST CHAR(32767) CHARACTER SET ISO8859_1 Nullable""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/domain/create/test_22.py b/tests/functional/domain/create/test_22.py index ac1b4364..d057d25b 100644 --- a/tests/functional/domain/create/test_22.py +++ b/tests/functional/domain/create/test_22.py @@ -1,35 +1,26 @@ #coding:utf-8 -# -# id: functional.domain.create.22 -# title: CREATE DOMAIN - NATIONAL CHARACTER -# decription: Simple domain creation based on NATIONAL CHARACTER datatype. -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.domain.create.create_domain_22 + +""" +ID: domain.create-22 +FBTEST: functional.domain.create.22 +TITLE: CREATE DOMAIN - NATIONAL CHARACTER +DESCRIPTION: Simple domain creation based NATIONAL CHARACTER datatype +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """CREATE DOMAIN test NATIONAL CHARACTER(32767); +test_script = """CREATE DOMAIN test NATIONAL CHARACTER(32767); SHOW DOMAIN test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """TEST CHAR(32767) CHARACTER SET ISO8859_1 Nullable""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """TEST CHAR(32767) CHARACTER SET ISO8859_1 Nullable""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/domain/create/test_23.py b/tests/functional/domain/create/test_23.py index 5aa769f1..95b7a823 100644 --- a/tests/functional/domain/create/test_23.py +++ b/tests/functional/domain/create/test_23.py @@ -1,35 +1,26 @@ #coding:utf-8 -# -# id: functional.domain.create.23 -# title: CREATE DOMAIN - NATIONAL CHAR -# decription: Simple domain creation based on NATIONAL CHAR datatype. 
-# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.domain.create.create_domain_23 + +""" +ID: domain.create-23 +FBTEST: functional.domain.create.23 +TITLE: CREATE DOMAIN - NATIONAL CHAR +DESCRIPTION: Simple domain creation based NATIONAL CHAR datatype +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """CREATE DOMAIN test NATIONAL CHAR(32767); +test_script = """CREATE DOMAIN test NATIONAL CHAR(32767); SHOW DOMAIN test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """TEST CHAR(32767) CHARACTER SET ISO8859_1 Nullable""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """TEST CHAR(32767) CHARACTER SET ISO8859_1 Nullable""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/domain/create/test_24.py b/tests/functional/domain/create/test_24.py index acab9206..3684ba6a 100644 --- a/tests/functional/domain/create/test_24.py +++ b/tests/functional/domain/create/test_24.py @@ -1,35 +1,26 @@ #coding:utf-8 -# -# id: functional.domain.create.24 -# title: CREATE DOMAIN - NATIONAL CHAR VARYING -# decription: Simple domain creation based on NATIONAL CHAR VARYING datatype. -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.domain.create.create_domain_24 + +""" +ID: domain.create-24 +FBTEST: functional.domain.create.24 +TITLE: CREATE DOMAIN - NATIONAL CHAR VARYING +DESCRIPTION: Simple domain creation based NATIONAL CHAR VARYING datatype +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """CREATE DOMAIN test NATIONAL CHAR VARYING(32765); +test_script = """CREATE DOMAIN test NATIONAL CHAR VARYING(32765); SHOW DOMAIN test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """TEST VARCHAR(32765) CHARACTER SET ISO8859_1 Nullable""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """TEST VARCHAR(32765) CHARACTER SET ISO8859_1 Nullable""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/domain/create/test_25.py b/tests/functional/domain/create/test_25.py index d3be76f9..a7c2e41a 100644 --- a/tests/functional/domain/create/test_25.py +++ b/tests/functional/domain/create/test_25.py @@ -1,36 +1,27 @@ #coding:utf-8 -# -# id: functional.domain.create.25 -# title: CREATE DOMAIN - NATIONAL CHAR VARYING ARRAY -# decription: Array domain creation based on NATIONAL CHAR VARYING datatype. 
-# tracker_id: -# min_versions: [] -# versions: 2.0 -# qmid: functional.domain.create.create_domain_25 + +""" +ID: domain.create-25 +FBTEST: functional.domain.create.25 +TITLE: CREATE DOMAIN - NATIONAL CHAR VARYING ARRAY +DESCRIPTION: Array domain creation based on NATIONAL CHAR VARYING datatype +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """CREATE DOMAIN test NATIONAL CHAR VARYING(32765) [30,30,30]; +test_script = """CREATE DOMAIN test NATIONAL CHAR VARYING(32765) [30,30,30]; SHOW DOMAIN test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """TEST ARRAY OF [30, 30, 30] +expected_stdout = """TEST ARRAY OF [30, 30, 30] VARCHAR(32765) CHARACTER SET ISO8859_1 Nullable""" -@pytest.mark.version('>=2.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/domain/create/test_26.py b/tests/functional/domain/create/test_26.py index 8fa9acfa..b3b7ff74 100644 --- a/tests/functional/domain/create/test_26.py +++ b/tests/functional/domain/create/test_26.py @@ -1,35 +1,26 @@ #coding:utf-8 -# -# id: functional.domain.create.26 -# title: CREATE DOMAIN - BLOB -# decription: Simple domain creation based on BLOB datatype. -# tracker_id: -# min_versions: [] -# versions: 2.0 -# qmid: functional.domain.create.create_domain_26 + +""" +ID: domain.create-26 +FBTEST: functional.domain.create.26 +TITLE: CREATE DOMAIN - BLOB +DESCRIPTION: Simple domain creation based BLOB datatype +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """CREATE DOMAIN test BLOB; +test_script = """CREATE DOMAIN test BLOB; SHOW DOMAIN test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """TEST BLOB segment 80, subtype BINARY Nullable""" - -@pytest.mark.version('>=2.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """TEST BLOB segment 80, subtype BINARY Nullable""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/domain/create/test_27.py b/tests/functional/domain/create/test_27.py index dab4b9c3..f12fec9f 100644 --- a/tests/functional/domain/create/test_27.py +++ b/tests/functional/domain/create/test_27.py @@ -1,35 +1,26 @@ #coding:utf-8 -# -# id: functional.domain.create.27 -# title: CREATE DOMAIN - BLOB SUB TYPE -# decription: Domain creation based on BLOB datatype with SUBTYPE specification. 
-# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.domain.create.create_domain_27 + +""" +ID: domain.create-27 +FBTEST: functional.domain.create.27 +TITLE: CREATE DOMAIN - BLOB SUB TYPE +DESCRIPTION: Domain creation based on BLOB datatype with SUBTYPE specification +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """CREATE DOMAIN test BLOB SUB_TYPE 1; +test_script = """CREATE DOMAIN test BLOB SUB_TYPE 1; SHOW DOMAIN test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """TEST BLOB segment 80, subtype TEXT Nullable""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """TEST BLOB segment 80, subtype TEXT Nullable""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/domain/create/test_28.py b/tests/functional/domain/create/test_28.py index c780e4c0..c512cd1c 100644 --- a/tests/functional/domain/create/test_28.py +++ b/tests/functional/domain/create/test_28.py @@ -1,35 +1,26 @@ #coding:utf-8 -# -# id: functional.domain.create.28 -# title: CREATE DOMAIN - BLOB SUB TYPE TEXT -# decription: Domain creation based on BLOB datatype with SUBTYPE TEXT specification. -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.domain.create.create_domain_28 + +""" +ID: domain.create-28 +FBTEST: functional.domain.create.28 +TITLE: CREATE DOMAIN - BLOB SUB TYPE TEXT +DESCRIPTION: Domain creation based on BLOB datatype with SUBTYPE TEXT specification +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """CREATE DOMAIN test BLOB SUB_TYPE TEXT; +test_script = """CREATE DOMAIN test BLOB SUB_TYPE TEXT; SHOW DOMAIN test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """TEST BLOB segment 80, subtype TEXT Nullable""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """TEST BLOB segment 80, subtype TEXT Nullable""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/domain/create/test_29.py b/tests/functional/domain/create/test_29.py index 5fe08446..7e8efe9c 100644 --- a/tests/functional/domain/create/test_29.py +++ b/tests/functional/domain/create/test_29.py @@ -1,35 +1,26 @@ #coding:utf-8 -# -# id: functional.domain.create.29 -# title: CREATE DOMAIN - BLOB SEGMENT SIZE -# decription: Domain creation based on BLOB datatype with SEGMENT SIZE specification. 
-# tracker_id: -# min_versions: [] -# versions: 2.0 -# qmid: functional.domain.create.create_domain_29 + +""" +ID: domain.create-29 +FBTEST: functional.domain.create.29 +TITLE: CREATE DOMAIN - BLOB SEGMENT SIZE +DESCRIPTION: Domain creation based on BLOB datatype with SEGMENT SIZE specification +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """CREATE DOMAIN test BLOB SEGMENT SIZE 244; +test_script = """CREATE DOMAIN test BLOB SEGMENT SIZE 244; SHOW DOMAIN test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """TEST BLOB segment 244, subtype BINARY Nullable""" - -@pytest.mark.version('>=2.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """TEST BLOB segment 244, subtype BINARY Nullable""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/domain/create/test_30.py b/tests/functional/domain/create/test_30.py index 074a09c9..50d3a986 100644 --- a/tests/functional/domain/create/test_30.py +++ b/tests/functional/domain/create/test_30.py @@ -1,35 +1,26 @@ #coding:utf-8 -# -# id: functional.domain.create.30 -# title: CREATE DOMAIN - BLOB SUB_TYPE CHARACTER SET -# decription: Domain creation based on BLOB datatype with SUBTYPE TEXT and CHARACTER SET specification. -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.domain.create.create_domain_30 + +""" +ID: domain.create-30 +FBTEST: functional.domain.create.30 +TITLE: CREATE DOMAIN - BLOB SUB_TYPE CHARACTER SET +DESCRIPTION: Domain creation based on BLOB datatype with SUBTYPE TEXT and CHARACTER SET specification +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """CREATE DOMAIN test BLOB SUB_TYPE 1 CHARACTER SET BIG_5; +test_script = """CREATE DOMAIN test BLOB SUB_TYPE 1 CHARACTER SET BIG_5; SHOW DOMAIN test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """TEST BLOB segment 80, subtype TEXT CHARACTER SET BIG_5 Nullable""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """TEST BLOB segment 80, subtype TEXT CHARACTER SET BIG_5 Nullable""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/domain/create/test_31.py b/tests/functional/domain/create/test_31.py index 9dd2ebcc..e9d1c36c 100644 --- a/tests/functional/domain/create/test_31.py +++ b/tests/functional/domain/create/test_31.py @@ -1,35 +1,26 @@ #coding:utf-8 -# -# id: functional.domain.create.31 -# title: CREATE DOMAIN - BLOB (seglen,subtype) -# decription: Domain creation based on BLOB datatype with 
seglen-subtype specification. -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.domain.create.create_domain_31 + +""" +ID: domain.create-31 +FBTEST: functional.domain.create.31 +TITLE: CREATE DOMAIN - BLOB (seglen, subtype) +DESCRIPTION: Domain creation based on BLOB datatype with seglen-subtype specification +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """CREATE DOMAIN test BLOB(349,1); +test_script = """CREATE DOMAIN test BLOB(349,1); SHOW DOMAIN test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """TEST BLOB segment 349, subtype TEXT Nullable""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """TEST BLOB segment 349, subtype TEXT Nullable""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/domain/create/test_32.py b/tests/functional/domain/create/test_32.py index fcd06d2f..c1b76077 100644 --- a/tests/functional/domain/create/test_32.py +++ b/tests/functional/domain/create/test_32.py @@ -1,36 +1,27 @@ #coding:utf-8 -# -# id: functional.domain.create.32 -# title: CREATE DOMAIN - DEFAULT literal -# decription: Domain creation based on VARCHAR datatype with literal DEFAULT specification. -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.domain.create.create_domain_32 + +""" +ID: domain.create-32 +FBTEST: functional.domain.create.32 +TITLE: CREATE DOMAIN - DEFAULT literal +DESCRIPTION: Domain creation based on VARCHAR datatype with literal DEFAULT specification +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """CREATE DOMAIN test VARCHAR(32) DEFAULT 'def_value'; +test_script = """CREATE DOMAIN test VARCHAR(32) DEFAULT 'def_value'; SHOW DOMAIN test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """TEST VARCHAR(32) Nullable +expected_stdout = """TEST VARCHAR(32) Nullable DEFAULT 'def_value'""" -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/domain/create/test_33.py b/tests/functional/domain/create/test_33.py index b9d9d734..520cb40c 100644 --- a/tests/functional/domain/create/test_33.py +++ b/tests/functional/domain/create/test_33.py @@ -1,36 +1,27 @@ #coding:utf-8 -# -# id: functional.domain.create.33 -# title: CREATE DOMAIN - DEFAULT NULL -# decription: Domain creation based on VARCHAR datatype with NULL DEFAULT specification. 
-# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.domain.create.create_domain_33 + +""" +ID: domain.create-33 +FBTEST: functional.domain.create.33 +TITLE: CREATE DOMAIN - DEFAULT NULL +DESCRIPTION: Domain creation based on VARCHAR datatype with NULL DEFAULT specification +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """CREATE DOMAIN test VARCHAR(32) DEFAULT NULL; +test_script = """CREATE DOMAIN test VARCHAR(32) DEFAULT NULL; SHOW DOMAIN test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """TEST VARCHAR(32) Nullable +expected_stdout = """TEST VARCHAR(32) Nullable DEFAULT NULL""" -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/domain/create/test_34.py b/tests/functional/domain/create/test_34.py index a1c79205..036218b4 100644 --- a/tests/functional/domain/create/test_34.py +++ b/tests/functional/domain/create/test_34.py @@ -1,36 +1,27 @@ #coding:utf-8 -# -# id: functional.domain.create.34 -# title: CREATE DOMAIN - DEFAULT USER -# decription: Domain creation based on VARCHAR datatype with USER DEFAULT specification. -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.domain.create.create_domain_34 + +""" +ID: domain.create-34 +FBTEST: functional.domain.create.34 +TITLE: CREATE DOMAIN - DEFAULT USER +DESCRIPTION: Domain creation based on VARCHAR datatype with USER DEFAULT specification +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """CREATE DOMAIN test VARCHAR(32) DEFAULT USER; +test_script = """CREATE DOMAIN test VARCHAR(32) DEFAULT USER; SHOW DOMAIN test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """TEST VARCHAR(32) Nullable +expected_stdout = """TEST VARCHAR(32) Nullable DEFAULT USER""" -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/domain/create/test_35.py b/tests/functional/domain/create/test_35.py index c68e9fe2..99747e1d 100644 --- a/tests/functional/domain/create/test_35.py +++ b/tests/functional/domain/create/test_35.py @@ -1,36 +1,27 @@ #coding:utf-8 -# -# id: functional.domain.create.35 -# title: CREATE DOMAIN - DEFAULT CURRENT_USER -# decription: Domain creation based on VARCHAR datatype with CURRENT_USER DEFAULT specification. 
-# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.domain.create.create_domain_35 + +""" +ID: domain.create-35 +FBTEST: functional.domain.create.35 +TITLE: CREATE DOMAIN - DEFAULT CURRENT_USER +DESCRIPTION: Domain creation based on VARCHAR datatype with CURRENT_USER DEFAULT specification +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """CREATE DOMAIN test VARCHAR(32) DEFAULT CURRENT_USER; +test_script = """CREATE DOMAIN test VARCHAR(32) DEFAULT CURRENT_USER; SHOW DOMAIN test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """TEST VARCHAR(32) Nullable +expected_stdout = """TEST VARCHAR(32) Nullable DEFAULT CURRENT_USER""" -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/domain/create/test_36.py b/tests/functional/domain/create/test_36.py index 8e66674e..185cc254 100644 --- a/tests/functional/domain/create/test_36.py +++ b/tests/functional/domain/create/test_36.py @@ -1,36 +1,27 @@ #coding:utf-8 -# -# id: functional.domain.create.36 -# title: CREATE DOMAIN - DEFAULT CURRENT_ROLE -# decription: Domain creation based on VARCHAR datatype with CURRENT_ROLE DEFAULT specification. -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.domain.create.create_domain_36 + +""" +ID: domain.create-36 +FBTEST: functional.domain.create.36 +TITLE: CREATE DOMAIN - DEFAULT CURRENT_ROLE +DESCRIPTION: Domain creation based on VARCHAR datatype with CURRENT_ROLE DEFAULT specification +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """CREATE DOMAIN test VARCHAR(32) DEFAULT CURRENT_ROLE; +test_script = """CREATE DOMAIN test VARCHAR(32) DEFAULT CURRENT_ROLE; SHOW DOMAIN test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """TEST VARCHAR(32) Nullable +expected_stdout = """TEST VARCHAR(32) Nullable DEFAULT CURRENT_ROLE""" -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/domain/create/test_37.py b/tests/functional/domain/create/test_37.py index 352daacf..3f8e5eae 100644 --- a/tests/functional/domain/create/test_37.py +++ b/tests/functional/domain/create/test_37.py @@ -1,35 +1,26 @@ #coding:utf-8 -# -# id: functional.domain.create.37 -# title: CREATE DOMAIN - NOT NULL -# decription: Domain creation based on VARCHAR datatype with NOT NULL specification. 
-# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.domain.create.create_domain_37 + +""" +ID: domain.create-37 +FBTEST: functional.domain.create.37 +TITLE: CREATE DOMAIN - NOT NULL +DESCRIPTION: Domain creation based on VARCHAR datatype with NOT NULL specification +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """CREATE DOMAIN test VARCHAR(32) NOT NULL; +test_script = """CREATE DOMAIN test VARCHAR(32) NOT NULL; SHOW DOMAIN test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """TEST VARCHAR(32) Not Null""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """TEST VARCHAR(32) Not Null""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/domain/create/test_38.py b/tests/functional/domain/create/test_38.py index 62f3c3b9..be933b80 100644 --- a/tests/functional/domain/create/test_38.py +++ b/tests/functional/domain/create/test_38.py @@ -1,36 +1,27 @@ #coding:utf-8 -# -# id: functional.domain.create.38 -# title: CREATE DOMAIN - CHECK -# decription: Domain creation based on VARCHAR datatype with CHECK specification. -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.domain.create.create_domain_38 + +""" +ID: domain.create-38 +FBTEST: functional.domain.create.38 +TITLE: CREATE DOMAIN - CHECK +DESCRIPTION: Domain creation based on VARCHAR datatype with CHECK specification +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """CREATE DOMAIN test VARCHAR(32) CHECK(VALUE LIKE 'ER%'); +test_script = """CREATE DOMAIN test VARCHAR(32) CHECK(VALUE LIKE 'ER%'); SHOW DOMAIN test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """TEST VARCHAR(32) Nullable +expected_stdout = """TEST VARCHAR(32) Nullable CHECK(VALUE LIKE 'ER%')""" -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/domain/create/test_39.py b/tests/functional/domain/create/test_39.py index 9fe05ab3..45a1add9 100644 --- a/tests/functional/domain/create/test_39.py +++ b/tests/functional/domain/create/test_39.py @@ -1,36 +1,27 @@ #coding:utf-8 -# -# id: functional.domain.create.39 -# title: CREATE DOMAIN - COLLATE -# decription: Domain creation based on VARCHAR datatype with COLLATE specification. 
-# tracker_id: -# min_versions: [] -# versions: 2.0 -# qmid: functional.domain.create.create_domain_39 + +""" +ID: domain.create-39 +FBTEST: functional.domain.create.39 +TITLE: CREATE DOMAIN - COLLATE +DESCRIPTION: Domain creation based on VARCHAR datatype with COLLATE specification +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """CREATE DOMAIN test VARCHAR(32) CHARACTER SET DOS437 COLLATE DB_ITA437; +test_script = """CREATE DOMAIN test VARCHAR(32) CHARACTER SET DOS437 COLLATE DB_ITA437; SHOW DOMAIN test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """TEST VARCHAR(32) CHARACTER SET DOS437 Nullable +expected_stdout = """TEST VARCHAR(32) CHARACTER SET DOS437 Nullable COLLATE DB_ITA437""" -@pytest.mark.version('>=2.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/domain/create/test_40.py b/tests/functional/domain/create/test_40.py index 975110d0..209283fc 100644 --- a/tests/functional/domain/create/test_40.py +++ b/tests/functional/domain/create/test_40.py @@ -1,38 +1,29 @@ #coding:utf-8 -# -# id: functional.domain.create.40 -# title: CREATE DOMAIN - all options -# decription: Domain creation based on VARCHAR datatype with all possible options. 
-# tracker_id: -# min_versions: [] -# versions: 2.0 -# qmid: functional.domain.create.create_domain_40 + +""" +ID: domain.create-40 +FBTEST: functional.domain.create.40 +TITLE: CREATE DOMAIN - all options +DESCRIPTION: Domain creation based on VARCHAR datatype with all possible options +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """CREATE DOMAIN test AS VARCHAR(32) CHARACTER SET DOS437 DEFAULT USER NOT NULL CHECK(VALUE LIKE 'ER%') COLLATE DB_ITA437; +test_script = """CREATE DOMAIN test AS VARCHAR(32) CHARACTER SET DOS437 DEFAULT USER NOT NULL CHECK(VALUE LIKE 'ER%') COLLATE DB_ITA437; SHOW DOMAIN test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """TEST VARCHAR(32) CHARACTER SET DOS437 Not Null +expected_stdout = """TEST VARCHAR(32) CHARACTER SET DOS437 Not Null DEFAULT USER CHECK(VALUE LIKE 'ER%') COLLATE DB_ITA437""" -@pytest.mark.version('>=2.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/domain/create/test_41.py b/tests/functional/domain/create/test_41.py index 507b5089..63e095ae 100644 --- a/tests/functional/domain/create/test_41.py +++ b/tests/functional/domain/create/test_41.py @@ -1,38 +1,27 @@ #coding:utf-8 -# -# id: functional.domain.create.41 -# title: CREATE DOMAIN - create two domain with same name -# decription: The creation of already existing domain must fail (SQLCODE -607). 
-# tracker_id: -# min_versions: [] -# versions: 3.0 -# qmid: functional.domain.create.create_domain_41 + +""" +ID: domain.create-41 +FBTEST: functional.domain.create.41 +TITLE: CREATE DOMAIN - create two domain with same name +DESCRIPTION: The creation of already existing domain must fail (SQLCODE -607) +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory(init="CREATE DOMAIN test AS INTEGER;") -substitutions_1 = [] +act = isql_act('db', "CREATE DOMAIN test AS VARCHAR(32);") -init_script_1 = """CREATE DOMAIN test AS INTEGER;""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """CREATE DOMAIN test AS VARCHAR(32);""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stderr_1 = """Statement failed, SQLSTATE = 23000 +expected_stderr = """Statement failed, SQLSTATE = 23000 unsuccessful metadata update -CREATE DOMAIN TEST failed -violation of PRIMARY or UNIQUE KEY constraint "RDB$INDEX_2" on table "RDB$FIELDS" -Problematic key value is ("RDB$FIELD_NAME" = 'TEST')""" @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - +def test_1(act: Action): + act.expected_stderr = expected_stderr + act.execute() + assert act.clean_stderr == act.clean_expected_stderr diff --git a/tests/functional/domain/create/test_42.py b/tests/functional/domain/create/test_42.py index 6fd71ba8..7f37bb45 100644 --- a/tests/functional/domain/create/test_42.py +++ b/tests/functional/domain/create/test_42.py @@ -1,39 +1,28 @@ #coding:utf-8 -# -# id: functional.domain.create.42 -# title: CREATE DOMAIN - domain name equal to existing datatype -# decription: Domain creation must fail (SQLCODE -104) if domain name is equal to datatype name. 
-# tracker_id: -# min_versions: [] -# versions: 2.5.0 -# qmid: functional.domain.create.create_domain_42 + +""" +ID: domain.create-42 +FBTEST: functional.domain.create.42 +TITLE: CREATE DOMAIN - domain name equal to existing datatype +DESCRIPTION: Domain creation must fail (SQLCODE -104) if domain name is equal to datatype name +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5.0 -# resources: None +db = db_factory() -substitutions_1 = [] +act = isql_act('db', "CREATE DOMAIN INT AS VARCHAR(32);") -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """CREATE DOMAIN INT AS VARCHAR(32);""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stderr_1 = """Statement failed, SQLSTATE = 42000 +expected_stderr = """Statement failed, SQLSTATE = 42000 Dynamic SQL Error -SQL error code = -104 -Token unknown - line 1, column 15 -INT""" -@pytest.mark.version('>=2.5.0') -def test_1(act_1: Action): - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stderr = expected_stderr + act.execute() + assert act.clean_stderr == act.clean_expected_stderr diff --git a/tests/functional/domain/create/test_54.py b/tests/functional/domain/create/test_54.py index 05dfaa49..a3052dd9 100644 --- a/tests/functional/domain/create/test_54.py +++ b/tests/functional/domain/create/test_54.py @@ -1,28 +1,21 @@ #coding:utf-8 -# -# id: functional.domain.create.54 -# title: Use of domains for Trigger/SP variable definition -# decription: Allow domains to be applied to variables and in/out -# parameters within a trigger or SP -# tracker_id: CORE-660 -# min_versions: [] -# versions: 2.1 -# qmid: functional.domain.create.create_domain_54 -import pytest -from firebird.qa import db_factory, isql_act, Action - -# version: 2.1 -# resources: None - -substitutions_1 = [] - -init_script_1 = """create domain d as integer; +""" +ID: domain.create-43 +FBTEST: functional.domain.create.54 +ISSUE: 1026 +JIRA: CORE-660 +TITLE: Use of domains for Trigger/SP variable definition +DESCRIPTION: + Allow domains to be applied to variables and in/out parameters within a trigger or SP """ -db_1 = db_factory(sql_dialect=3, init=init_script_1) +import pytest +from firebird.qa import * -test_script_1 = """set term !!; +db = db_factory(init="create domain d as integer;") + +test_script = """set term !!; create procedure sp (i type of d) returns (o type of d) as declare variable v type of d; @@ -33,9 +26,9 @@ commit!! set term ;!! 
show procedure sp;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """Procedure text: +expected_stdout = """Procedure text: ============================================================================= declare variable v type of d; begin @@ -46,9 +39,8 @@ Parameters: I INPUT (TYPE OF D) INTEGER O OUTPUT (TYPE OF D) INTEGER""" -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/domain/drop/test_01.py b/tests/functional/domain/drop/test_01.py index 0ec97f0a..6d6a91c8 100644 --- a/tests/functional/domain/drop/test_01.py +++ b/tests/functional/domain/drop/test_01.py @@ -1,39 +1,26 @@ #coding:utf-8 -# -# id: functional.domain.drop.01 -# title: DROP DOMAIN -# decription: DROP DOMAIN -# -# Dependencies: -# CREATE DATABASE -# CREATE DOMAIN -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.domain.drop.drop_domain_01 + +""" +ID: domain.drop-01 +FBTEST: functional.domain.drop.01 +TITLE: DROP DOMAIN +DESCRIPTION: +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +db = db_factory(init="CREATE DOMAIN test SMALLINT;") -substitutions_1 = [] - -init_script_1 = """CREATE DOMAIN test SMALLINT;""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """DROP DOMAIN test; +test_script = """DROP DOMAIN test; SHOW DOMAIN test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stderr_1 = """There is no domain TEST in this database""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr +expected_stderr = """There is no domain TEST in this database""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stderr = expected_stderr + act.execute() + assert act.clean_stderr == act.clean_expected_stderr diff --git a/tests/functional/domain/drop/test_02.py b/tests/functional/domain/drop/test_02.py index 02027dfa..3d7e00fc 100644 --- a/tests/functional/domain/drop/test_02.py +++ b/tests/functional/domain/drop/test_02.py @@ -1,44 +1,30 @@ #coding:utf-8 -# -# id: functional.domain.drop.02 -# title: DROP DOMAIN - in use -# decription: DROP DOMAIN - that was use -# -# Dependencies: -# CREATE DATABASE -# CREATE DOMAIN -# CREATE TABLE -# tracker_id: -# min_versions: [] -# versions: 3.0 -# qmid: functional.domain.drop.drop_domain_02 + +""" +ID: domain.drop-02 +FBTEST: functional.domain.drop.02 +TITLE: DROP DOMAIN - in use +DESCRIPTION: +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE DOMAIN test SMALLINT; +init_script = """CREATE DOMAIN test SMALLINT; CREATE TABLE tb( id test);""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """DROP DOMAIN test;""" +act = isql_act('db', "DROP DOMAIN test;") -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stderr_1 = 
"""Statement failed, SQLSTATE = 42000 +expected_stderr = """Statement failed, SQLSTATE = 42000 unsuccessful metadata update -DROP DOMAIN TEST failed -Domain TEST is used in table TB (local name ID) and cannot be dropped """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - +def test_1(act: Action): + act.expected_stderr = expected_stderr + act.execute() + assert act.clean_stderr == act.clean_expected_stderr diff --git a/tests/functional/domain/drop/test_03.py b/tests/functional/domain/drop/test_03.py index 84b195e6..ff72e95f 100644 --- a/tests/functional/domain/drop/test_03.py +++ b/tests/functional/domain/drop/test_03.py @@ -1,42 +1,27 @@ #coding:utf-8 -# -# id: functional.domain.drop.03 -# title: DROP DOMAIN - that doesn't exists -# decription: DROP DOMAIN - that doesn't exists -# Note:Bad error message (should be like "Domain TEST not exists") -# -# Dependencies: -# CREATE DATABASE -# tracker_id: -# min_versions: [] -# versions: 3.0 -# qmid: functional.domain.drop.drop_domain_03 + +""" +ID: domain.drop-03 +FBTEST: functional.domain.drop.03 +TITLE: DROP DOMAIN - that doesn't exists +DESCRIPTION: +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory() -substitutions_1 = [] +act = isql_act('db', "DROP DOMAIN test;") -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """DROP DOMAIN test;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stderr_1 = """Statement failed, SQLSTATE = 42000 +expected_stderr = """Statement failed, SQLSTATE = 42000 unsuccessful metadata update -DROP DOMAIN TEST failed -Domain not found """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - +def test_1(act: Action): + act.expected_stderr = expected_stderr + act.execute() + assert act.clean_stderr == act.clean_expected_stderr diff --git a/tests/functional/exception/alter/test_01.py b/tests/functional/exception/alter/test_01.py index 603c2106..98bb6166 100644 --- a/tests/functional/exception/alter/test_01.py +++ b/tests/functional/exception/alter/test_01.py @@ -1,34 +1,23 @@ #coding:utf-8 -# -# id: functional.exception.alter.01 -# title: ALTER EXCEPTION -# decription: ALTER EXCEPTION -# -# Dependencies: -# CREATE DATABASE -# CREATE EXCEPTION -# Basic SELECT -# tracker_id: -# min_versions: [] -# versions: 2.0 -# qmid: functional.exception.alter.alter_exception_01 + +""" +ID: exception.alter +FBTEST: functional.exception.alter.01 +TITLE: ALTER EXCEPTION +DESCRIPTION: +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """ +init_script = """ create exception test 'message to show'; commit; - """ +""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """ +test_script = """ alter exception test 'new message'; commit; @@ -42,17 +31,16 @@ test_script_1 = """ from rdb$exceptions e; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ EXC_NAME TEST EXC_NUMBER 1 EXC_MSG new message """ 
-@pytest.mark.version('>=2.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/exception/create/test_01.py b/tests/functional/exception/create/test_01.py index 52e5a3b3..84ccc49a 100644 --- a/tests/functional/exception/create/test_01.py +++ b/tests/functional/exception/create/test_01.py @@ -1,33 +1,21 @@ #coding:utf-8 -# -# id: functional.exception.create.01 -# title: CREATE EXCEPTION -# decription: CREATE EXCEPTION -# -# Dependencies: -# CREATE DATABASE -# Basic SELECT -# tracker_id: -# min_versions: [] -# versions: 2.0 -# qmid: functional.exception.create.create_exception_01 + +""" +ID: exception.create-01 +FBTEST: functional.exception.create.01 +TITLE: CREATE EXCEPTION +DESCRIPTION: +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ create exception test 'message to show'; commit; - + set list on; set width exc_name 31; set width exc_msg 80; @@ -38,17 +26,16 @@ test_script_1 = """ from rdb$exceptions e; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ - EXC_NAME TEST +expected_stdout = """ + EXC_NAME TEST EXC_NUMBER 1 EXC_MSG message to show """ -@pytest.mark.version('>=2.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/exception/create/test_02.py b/tests/functional/exception/create/test_02.py index c1059ae7..4b1c7a54 100644 --- a/tests/functional/exception/create/test_02.py +++ b/tests/functional/exception/create/test_02.py @@ -1,44 +1,30 @@ #coding:utf-8 -# -# id: functional.exception.create.02 -# title: CREATE EXCEPTION - try create Exception with the same name -# decription: CREATE EXCEPTION - try create Exception with the same name -# -# Dependencies: -# CREATE DATABASE -# CREATE EXCEPTION -# Basic SELECT -# tracker_id: -# min_versions: [] -# versions: 3.0 -# qmid: functional.exception.create.create_exception_02 + +""" +ID: exception.create-02 +FBTEST: functional.exception.create.02 +TITLE: CREATE EXCEPTION - try create Exception with the same name +DESCRIPTION: +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE EXCEPTION test 'A1'; +init_script = """CREATE EXCEPTION test 'A1'; commit;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(sql_dialect=3, init=init_script) -test_script_1 = """CREATE EXCEPTION test 'message to show';""" +act = isql_act('db', "CREATE EXCEPTION test 'message to show';") -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stderr_1 = """Statement failed, SQLSTATE = 42000 +expected_stderr = """Statement failed, SQLSTATE = 42000 unsuccessful metadata 
update -CREATE EXCEPTION TEST failed -Exception TEST already exists """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - +def test_1(act: Action): + act.expected_stderr = expected_stderr + act.execute() + assert act.clean_stderr == act.clean_expected_stderr diff --git a/tests/functional/exception/create/test_03.py b/tests/functional/exception/create/test_03.py index 0dabf098..f4c94433 100644 --- a/tests/functional/exception/create/test_03.py +++ b/tests/functional/exception/create/test_03.py @@ -1,39 +1,29 @@ #coding:utf-8 -# -# id: functional.exception.create.03 -# title: CREATE EXCEPTION - too long message -# decription: CREATE EXCEPTION - too long message -# -# Dependencies: -# CREATE DATABASE -# Basic SELECT -# tracker_id: -# min_versions: [] -# versions: 3.0 -# qmid: functional.exception.create.create_exception_03 + +""" +ID: exception.create-03 +FBTEST: functional.exception.create.03 +TITLE: CREATE EXCEPTION - too long message +DESCRIPTION: +NOTES: +[23.10.2015] + try to create in the SAME transaction exceptions with too long message and correct message (reduce its length with 1) + after statement fails. Do that using both ascii and non-ascii characters in these exceptions messages. + Expected result: no errors should occur on commit, exceptions should work fine. Taken from eqc ticket #12062. +[13.06.2016] + replaced 'show exception' with regular select from rdb$exception: output of SHOW commands + is volatile in unstable FB versions. +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory(charset='UTF8') -substitutions_1 = [('-At block line: [\\d]+, col: [\\d]+', '-At block line')] - -init_script_1 = """""" - -db_1 = db_factory(charset='UTF8', sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set autoddl off; commit; - -- Updated 23-oct-2015: try to create in the SAME transaction exceptions with too long message and correct message (reduce its length with 1) - -- after statement fails. Do that using both ascii and non-ascii characters in these exceptions messages. - -- Expected result: no errors should occur on commit, exceptions should work fine. Taken from eqc ticket #12062. - -- 13.06.2016: replaced 'show exception' with regular select from rdb$exception: output of SHOW commands - -- is volatile in unstable FB versions. 
- create exception boo_ascii 'FOO!BAR!abcdefghijklmnoprstu012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345' ; @@ -68,19 +58,20 @@ test_script_1 = """ exception boo_utf8; end ^ - set term ;^ + set term ;^ """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('-At block line: [\\d]+, col: [\\d]+', '-At block line')]) -expected_stdout_1 = """ - RDB$EXCEPTION_NAME BOO_ASCII +expected_stdout = """ + RDB$EXCEPTION_NAME BOO_ASCII RDB$MESSAGE FOOBAR!abcdefghijklmnoprstu012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345 - RDB$EXCEPTION_NAME BOO_UTF8 + RDB$EXCEPTION_NAME BOO_UTF8 RDB$MESSAGE 3ηΣημείωσηΣημείωσηΣημεσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωσηΣημείωση """ -expected_stderr_1 = """ + +expected_stderr = """ Statement failed, SQLSTATE = 42000 unsuccessful metadata update -CREATE EXCEPTION BOO_ASCII failed @@ -105,11 +96,9 @@ expected_stderr_1 = """ """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - - assert act_1.clean_stdout == act_1.clean_expected_stdout 
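The 'too long message' checks in this test probe a byte limit, so with a UTF8 connection a candidate message has to be measured after encoding rather than by character count. A minimal sketch of that measurement (illustrative only; the 1023-byte RDB$MESSAGE bound is the one quoted later in this patch, and the names are hypothetical, not part of the suite):

    # minimal sketch, assuming the 1023-byte limit on RDB$MESSAGE
    MAX_EXC_MESSAGE_BYTES = 1023
    msg = 'Σημείωση' * 130                      # hypothetical multi-byte candidate text
    while len(msg.encode('utf-8')) > MAX_EXC_MESSAGE_BYTES:
        msg = msg[:-1]                          # trim until the encoded length fits
    create_exc_sql = f"create exception exc_utf8_demo '{msg}';"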
- +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.expected_stderr = expected_stderr + act.execute() + assert (act.clean_stderr == act.clean_expected_stderr and + act.clean_stdout == act.clean_expected_stdout) diff --git a/tests/functional/exception/drop/test_01.py b/tests/functional/exception/drop/test_01.py index 8c6da321..259e512a 100644 --- a/tests/functional/exception/drop/test_01.py +++ b/tests/functional/exception/drop/test_01.py @@ -1,40 +1,29 @@ #coding:utf-8 -# -# id: functional.exception.drop.01 -# title: DROP EXCEPTION -# decription: DROP EXCEPTION -# -# Dependencies: -# CREATE DATABASE -# CREATE EXCEPTION -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.exception.drop.drop_exception_01 + +""" +ID: exception.drop-01 +FBTEST: functional.exception.drop.01 +TITLE: DROP EXCEPTION +DESCRIPTION: +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE EXCEPTION test 'message to show'; +init_script = """CREATE EXCEPTION test 'message to show'; commit;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """DROP EXCEPTION test; +test_script = """DROP EXCEPTION test; SHOW EXCEPTION test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stderr_1 = """There is no exception TEST in this database""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr +expected_stderr = """There is no exception TEST in this database""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stderr = expected_stderr + act.execute() + assert act.clean_stderr == act.clean_expected_stderr diff --git a/tests/functional/exception/drop/test_02.py b/tests/functional/exception/drop/test_02.py index 72251837..cb099f53 100644 --- a/tests/functional/exception/drop/test_02.py +++ b/tests/functional/exception/drop/test_02.py @@ -1,22 +1,17 @@ #coding:utf-8 -# -# id: functional.exception.drop.02 -# title: DROP EXCEPTION -# decription: Create exception and SP that uses it. Then try to drop exception - this attempt must FAIL. -# tracker_id: -# min_versions: [] -# versions: 2.5.0 -# qmid: functional.exception.drop.drop_exception_02 + +""" +ID: exception.drop-02 +FBTEST: functional.exception.drop.02 +TITLE: DROP EXCEPTION +DESCRIPTION: + Create exception and SP that uses it. Then try to drop exception - this attempt must FAIL. 
+""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """ +init_script = """ create exception exc_test 'message to show'; commit; set term ^; @@ -28,26 +23,27 @@ init_script_1 = """ commit; """ -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """ +test_script = """ drop exception exc_test; commit; set list on; set count on; - select e.rdb$exception_name, d.rdb$dependent_name + select e.rdb$exception_name, d.rdb$dependent_name from rdb$exceptions e join rdb$dependencies d on e.rdb$exception_name = d.rdb$depended_on_name where e.rdb$exception_name = upper('exc_test'); """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ RDB$EXCEPTION_NAME EXC_TEST RDB$DEPENDENT_NAME SP_TEST Records affected: 1 """ -expected_stderr_1 = """ + +expected_stderr = """ Statement failed, SQLSTATE = 42000 unsuccessful metadata update -cannot delete @@ -55,12 +51,10 @@ expected_stderr_1 = """ -there are 1 dependencies """ -@pytest.mark.version('>=2.5.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.expected_stderr = expected_stderr + act.execute() + assert (act.clean_stderr == act.clean_expected_stderr and + act.clean_stdout == act.clean_expected_stdout) diff --git a/tests/functional/exception/drop/test_03.py b/tests/functional/exception/drop/test_03.py index 8c84b5eb..99007e16 100644 --- a/tests/functional/exception/drop/test_03.py +++ b/tests/functional/exception/drop/test_03.py @@ -1,34 +1,23 @@ #coding:utf-8 -# -# id: functional.exception.drop.03 -# title: DROP EXCEPTION - that doesn't exists -# decription: DROP EXCEPTION - that doesn't exists -# -# Dependencies: -# CREATE DATABASE -# tracker_id: -# min_versions: [] -# versions: 3.0 -# qmid: functional.exception.drop.drop_exception_03 + +""" +ID: exception.drop-03 +FBTEST: functional.exception.drop.03 +TITLE: DROP EXCEPTION - that doesn't exists +DESCRIPTION: +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """DROP EXCEPTION test; +test_script = """DROP EXCEPTION test; SHOW EXCEPTION test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stderr_1 = """Statement failed, SQLSTATE = 42000 +expected_stderr = """Statement failed, SQLSTATE = 42000 unsuccessful metadata update -DROP EXCEPTION TEST failed -Exception not found @@ -36,8 +25,7 @@ There is no exception TEST in this database """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - +def test_1(act: Action): + act.expected_stderr = expected_stderr + act.execute() + assert act.clean_stderr == act.clean_expected_stderr diff --git a/tests/functional/exception/test_handling_name_and_message.py 
b/tests/functional/exception/test_handling_name_and_message.py index d64be145..e737cb9d 100644 --- a/tests/functional/exception/test_handling_name_and_message.py +++ b/tests/functional/exception/test_handling_name_and_message.py @@ -1,64 +1,55 @@ #coding:utf-8 -# -# id: functional.exception.handling_name_and_message -# title: Context variables EXCEPTION and ERROR_MESSAGE for ability to log exception info (including call stack!) on server side -# decription: -# Testing new built-in context variables for exception handling (appearance: 06-sep-2016 21:12, 4.0 only): -# * 'exception' -- returns name of the active user-defined exception; -# * 'error_message' -- returns interpreted text for the active exception. -# See: https://github.com/FirebirdSQL/firebird/commit/ebd0d3c8133c62b5359100de5f1eec541e43da3b -# Explanation: doc\\sql.extensions\\README.context_variables -# -# GOOD NEWS: -# call stack now can be logged on database without calling of mon$ tables, -# simple by parsing 'error_message' content (part that follows by last newline character). -# -# WARNING-1. -# This test intentionally creates exception with non-ascii name and parametrized non-ascii message text. -# Length of exception *NAME* can be up to 63 non-ascii characters, but upper bound for length of exception -# *MESSAGE* is limited to 1023 *bytes* (NOT chars!) ==> it's max length for two-byte encoding (win1251 etc) -# will be usually much less, about 500...600 characters. This limit can not be overpassed nowadays. -# For database with default cset = utf8 table rdb$exception will have following DDL: -# RDB$EXCEPTION_NAME (RDB$EXCEPTION_NAME) CHAR(63) Nullable -# RDB$MESSAGE (RDB$MESSAGE) VARCHAR(1023) CHARACTER SET NONE Nullable -# Checked on 4.0.0.366 -# -# WARNING-2. -# It seems that handling of message with length = 1023 bytes (i.e. exactly upper limit) works wrong. -# Waiting for reply from dimitr, letter 09-sep-2016 18:27. -# -# ### NOTE ### 07.12.2016 -# 'exception' and 'error_message' context variables were replaced with calls RDB$ERROR(EXCEPTION) and RDB$ERROR(MESSAGE) -# (letter from dimitr, 06.12.2016 21:44; seems that this was done in 4.0.0.461, between 02-dec-2016 and 04-dec-2016) -# -# tracker_id: -# min_versions: ['4.0'] -# versions: 4.0 -# qmid: None + +""" +ID: exception.handling-name-and-message +FBTEST: functional.exception.handling_name_and_message +TITLE: Context variables EXCEPTION and ERROR_MESSAGE for ability to log exception info (including call stack!) on server side +DESCRIPTION: + Testing new built-in context variables for exception handling (appearance: 06-sep-2016 21:12, 4.0 only): + * 'exception' -- returns name of the active user-defined exception; + * 'error_message' -- returns interpreted text for the active exception. + See: https://github.com/FirebirdSQL/firebird/commit/ebd0d3c8133c62b5359100de5f1eec541e43da3b + Explanation: doc\\sql.extensions\\README.context_variables + + GOOD NEWS: + call stack now can be logged on database without calling of mon$ tables, + simple by parsing 'error_message' content (part that follows by last newline character). + + WARNING-1. + This test intentionally creates exception with non-ascii name and parametrized non-ascii message text. + Length of exception *NAME* can be up to 63 non-ascii characters, but upper bound for length of exception + *MESSAGE* is limited to 1023 *bytes* (NOT chars!) ==> it's max length for two-byte encoding (win1251 etc) + will be usually much less, about 500...600 characters. This limit can not be overpassed nowadays. 
+ For database with default cset = utf8 table rdb$exception will have following DDL: + RDB$EXCEPTION_NAME (RDB$EXCEPTION_NAME) CHAR(63) Nullable + RDB$MESSAGE (RDB$MESSAGE) VARCHAR(1023) CHARACTER SET NONE Nullable + Checked on 4.0.0.366 + + WARNING-2. + It seems that handling of message with length = 1023 bytes (i.e. exactly upper limit) works wrong. + Waiting for reply from dimitr, letter 09-sep-2016 18:27. +NOTES: +[07.12.2016] + 'exception' and 'error_message' context variables were replaced with calls RDB$ERROR(EXCEPTION) and RDB$ERROR(MESSAGE) + (letter from dimitr, 06.12.2016 21:44; seems that this was done in 4.0.0.461, between 02-dec-2016 and 04-dec-2016) +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory(charset='UTF8') -substitutions_1 = [('line:\\s[0-9]+,', 'line: x'), ('col:\\s[0-9]+', 'col: y')] - -init_script_1 = """""" - -db_1 = db_factory(charset='UTF8', sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ recreate exception "Что-то неправильно со складом" 'Остаток стал отрицательным: @1'; /* - + This will be added after trouble with length = 1023 characters will be solved (TODO after): - + recreate exception "ЙцуКенгШщзХъЭждЛорПавЫфЯчсмиТьбЮЪхЗщШШГнЕкУцЙФывААпрОолДжЭЭюБьТ" '*Лев Николаевич Толстой * *Анна Каренина * /Мне отмщение, и аз воздам/ *ЧАСТЬ ПЕРВАЯ* *I * - Все счастливые семьи похожи друг на друга, каждая несчастливая - семья несчастлива по-своему. + Все счастливые семьи похожи друг на друга, каждая несчастливая + семья несчастлива по-своему. Все смешалось в доме Облонских. Жена узнала, что муж был в связи с бывшею в их доме француженкою-гувернанткой, и объявила мужу, что не может жить с ним в одном доме. 
Положение это продолжалось уже @@ -70,13 +61,13 @@ test_script_1 = """ */ recreate table log_user_trouble( - e_declared_name varchar(63) character set utf8, + e_declared_name varchar(63) character set utf8, e_detailed_text varchar(2000) character set utf8 - ); + ); set term ^; create or alter procedure sp_log_user_trouble( - e_declared_name varchar(63) character set utf8, + e_declared_name varchar(63) character set utf8, e_detailed_text varchar(2000) character set utf8 ) as begin @@ -91,7 +82,7 @@ test_script_1 = """ /* show table rdb$exceptions; - select + select rdb$exception_name, char_length(trim(rdb$exception_name)) exc_name_char_len, octet_length(trim(rdb$exception_name)) exc_name_octt_len, @@ -108,7 +99,7 @@ test_script_1 = """ if (a_new_qty < 0) then exception "Что-то неправильно со складом" using( a_new_qty ); --exception "ЙцуКенгШщзХъЭждЛорПавЫфЯчсмиТьбЮЪхЗщШШГнЕкУцЙФывААпрОолДжЭЭюБьТ"; -- malformed string - + when any do if ( RDB$ERROR(EXCEPTION) is not null) then -- before 4.0.0.462 (04.12.2016): execute procedure sp_log_user_trouble(exception, error_message); @@ -116,8 +107,8 @@ test_script_1 = """ else exception; - end - + end + end ^ @@ -151,9 +142,10 @@ test_script_1 = """ select * from log_user_trouble; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('line:\\s[0-9]+,', 'line: x'), + ('col:\\s[0-9]+', 'col: y')]) -expected_stdout_1 = """ +expected_stdout = """ E_DECLARED_NAME Что-то неправильно со складом E_DETAILED_TEXT exception 1 Что-то неправильно со складом @@ -166,8 +158,7 @@ expected_stdout_1 = """ """ @pytest.mark.version('>=4.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/fkey/primary/test_insert_pk_01.py b/tests/functional/fkey/primary/test_insert_pk_01.py index d355c0f7..7e4c676d 100644 --- a/tests/functional/fkey/primary/test_insert_pk_01.py +++ b/tests/functional/fkey/primary/test_insert_pk_01.py @@ -1,26 +1,21 @@ #coding:utf-8 -# -# id: functional.fkey.primary.insert_pk_01 -# title: Check correct work fix with foreign key -# decription: Check foreign key work. -# Master transaction doesn't modify primary key. -# Detail transaction inserts record in detail_table. -# Expected: no errors -# tracker_id: -# min_versions: [] -# versions: 2.5 -# qmid: functional.fkey.primary.ins_01 + +""" +ID: fkey.primary.insert-01 +FBTEST: functional.fkey.primary.insert_pk_01 +TITLE: Check correct work fix with foreign key +DESCRIPTION: + Check foreign key work. + Master transaction doesn't modify primary key. + Detail transaction inserts record in detail_table. 
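The logging pattern exercised by functional.exception.handling_name_and_message above boils down to reading RDB$ERROR inside a WHEN ANY handler. A minimal sketch, assuming Firebird 4.0 with RDB$ERROR available and reusing sp_log_user_trouble from that test; the raised exception name is hypothetical:

    # illustrative PSQL fragment, kept as a Python string in the style of this suite
    log_on_error_psql = """
        create or alter procedure sp_do_work as
        begin
            begin
                exception exc_demo;  -- hypothetical user-defined exception
            when any do
                -- rdb$error(exception): name of the active user-defined exception
                -- rdb$error(message):   interpreted text; the call stack follows the last newline
                execute procedure sp_log_user_trouble(rdb$error(exception), rdb$error(message));
            end
        end
    """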
+ Expected: no errors +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * from firebird.driver import tpb, Isolation -# version: 2.5 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE MASTER_TABLE ( +init_script = """CREATE TABLE MASTER_TABLE ( ID INTEGER PRIMARY KEY, INT_F INTEGER ); @@ -35,53 +30,19 @@ COMMIT; INSERT INTO MASTER_TABLE (ID, INT_F) VALUES (1, 10); commit;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -# test_script_1 -#--- -# TPB_master = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# TPB_detail = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# -# db_conn.begin(tpb=TPB_master) -# c = db_conn.cursor() -# c.execute("update master_table set int_f = 10 WHERE ID=1") -# -# #Create second connection for change detail table -# con_detail = kdb.connect( -# dsn=dsn.encode(), -# user=user_name.encode(), -# password=user_password.encode() -# ) -# -# try: -# con_detail.begin(tpb=TPB_detail) -# c = con_detail.cursor() -# c.execute("INSERT INTO DETAIL_TABLE (ID, FKEY) VALUES (1,1)") -# con_detail.commit() -# except Exception, e: -# print (e[0]) -#--- +act = python_act('db') -act_1 = python_act('db_1', substitutions=substitutions_1) - - -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): +@pytest.mark.version('>=3') +def test_1(act: Action): cust_tpb = tpb(isolation=Isolation.READ_COMMITTED_RECORD_VERSION, lock_timeout=0) - with act_1.db.connect() as con: + with act.db.connect() as con: con.begin(cust_tpb) with con.cursor() as c: c.execute("update master_table set int_f = 10 WHERE ID=1") #Create second connection for change detail table - with act_1.db.connect() as con_detail: + with act.db.connect() as con_detail: con_detail.begin(cust_tpb) with con_detail.cursor() as cd: cd.execute("INSERT INTO DETAIL_TABLE (ID, FKEY) VALUES (1,1)") diff --git a/tests/functional/fkey/primary/test_insert_pk_02.py b/tests/functional/fkey/primary/test_insert_pk_02.py index 6f867021..80f233e0 100644 --- a/tests/functional/fkey/primary/test_insert_pk_02.py +++ b/tests/functional/fkey/primary/test_insert_pk_02.py @@ -1,26 +1,21 @@ #coding:utf-8 -# -# id: functional.fkey.primary.insert_pk_02 -# title: Check correct work fix with foreign key -# decription: Check foreign key work. -# Master transaction modifies primary key. -# Detail transaction inserts record in detail_table. -# Expected: error - primary key in master table has been changed -# tracker_id: -# min_versions: [] -# versions: 2.5.3 -# qmid: functional.fkey.primary.ins_02 + +""" +ID: fkey.primary.insert-02 +FBTEST: functional.fkey.primary.insert_pk_02 +TITLE: Check correct work fix with foreign key +DESCRIPTION: + Check foreign key work. + Master transaction modifies primary key. + Detail transaction inserts record in detail_table. 
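All fkey/primary tests in this patch share one transaction setup: the kdb-era TPB assembled by hand from isc_tpb_write, isc_tpb_read_committed, isc_tpb_rec_version and isc_tpb_nowait becomes a single firebird-driver call. A minimal sketch of that mapping, mirroring only calls already visible in this patch:

    from firebird.driver import tpb, Isolation

    # READ COMMITTED + record version; lock_timeout=0 is the NO WAIT counterpart of isc_tpb_nowait
    cust_tpb = tpb(isolation=Isolation.READ_COMMITTED_RECORD_VERSION, lock_timeout=0)
    # usage: con.begin(cust_tpb) starts the transaction with these parameters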
+ Expected: error - primary key in master table has been changed +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * from firebird.driver import DatabaseError, tpb, Isolation -# version: 2.5.3 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE MASTER_TABLE ( +init_script = """CREATE TABLE MASTER_TABLE ( ID INTEGER PRIMARY KEY, INT_F INTEGER ); @@ -35,52 +30,19 @@ COMMIT; INSERT INTO MASTER_TABLE (ID, INT_F) VALUES (1, 10); commit;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -# test_script_1 -#--- -# TPB_master = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# TPB_detail = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# -# db_conn.begin(tpb=TPB_master) -# c = db_conn.cursor() -# c.execute("uPDATE MASTER_TABLE SET ID = 2 WHERE ID=1") -# -# #Create second connection - update detail table -# con_detail = kdb.connect( -# dsn=dsn.encode(), -# user=user_name.encode(), -# password=user_password.encode() -# ) -# -# try: -# con_detail.begin(tpb=TPB_detail) -# c = con_detail.cursor() -# c.execute("INSERT INTO DETAIL_TABLE (ID, FKEY) VALUES (1,1)") -# con_detail.commit() -# except Exception, e: -# print (e[0]) -#--- +act = python_act('db') -act_1 = python_act('db_1', substitutions=substitutions_1) - -@pytest.mark.version('>=2.5.3') -def test_1(act_1: Action): +@pytest.mark.version('>3') +def test_1(act: Action): cust_tpb = tpb(isolation=Isolation.READ_COMMITTED_RECORD_VERSION, lock_timeout=0) - with act_1.db.connect() as con: + with act.db.connect() as con: con.begin(cust_tpb) with con.cursor() as c: c.execute("UPDATE MASTER_TABLE SET ID = 2 WHERE ID=1") #Create second connection for change detail table - with act_1.db.connect() as con_detail: + with act.db.connect() as con_detail: con_detail.begin(cust_tpb) with con_detail.cursor() as cd: with pytest.raises(DatabaseError, diff --git a/tests/functional/fkey/primary/test_insert_pk_03.py b/tests/functional/fkey/primary/test_insert_pk_03.py index bde35d9c..66a60f0b 100644 --- a/tests/functional/fkey/primary/test_insert_pk_03.py +++ b/tests/functional/fkey/primary/test_insert_pk_03.py @@ -1,30 +1,25 @@ #coding:utf-8 -# -# id: functional.fkey.primary.insert_pk_03 -# title: Check correct work fix with foreign key -# decription: Check foreign key work. -# Master transaction: -# 1) modifies non key field -# 2) create savepoint -# 3) modifies primary key -# 4) rollback to savepoint -# Detail transaction inserts record in detail_table. -# Expected: error - primary key has been changed -# tracker_id: -# min_versions: [] -# versions: 2.5.3 -# qmid: functional.fkey.primary.ins_03 + +""" +ID: fkey.primary.insert-03 +FBTEST: functional.fkey.primary.insert_pk_03 +TITLE: Check correct work fix with foreign key +DESCRIPTION: + Check foreign key work. + Master transaction: + 1) modifies non key field + 2) create savepoint + 3) modifies primary key + 4) rollback to savepoint + Detail transaction inserts record in detail_table. 
+ Expected: error - primary key has been changed +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * from firebird.driver import DatabaseError, tpb, Isolation -# version: 2.5.3 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE MASTER_TABLE ( +init_script = """CREATE TABLE MASTER_TABLE ( ID INTEGER PRIMARY KEY, INT_F INTEGER ); @@ -39,49 +34,13 @@ COMMIT; INSERT INTO MASTER_TABLE (ID, INT_F) VALUES (1, 10); commit;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -# test_script_1 -#--- -# TPB_master = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# TPB_detail = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# -# -# db_conn.begin(tpb=TPB_master) -# cm_1 = db_conn.cursor() -# cm_1.execute('UPDATE MASTER_TABLE SET INT_F=2') -# db_conn.savepoint('A') -# cm_1.execute('UPDATE MASTER_TABLE SET ID=2 WHERE ID=1') -# db_conn.rollback(savepoint='A') -# -# #Create second connection - update detail table -# con_detail = kdb.connect( -# dsn=dsn.encode(), -# user=user_name.encode(), -# password=user_password.encode() -# ) -# -# try: -# con_detail.begin(tpb=TPB_detail) -# cd = con_detail.cursor() -# cd.execute("INSERT INTO DETAIL_TABLE (ID, FKEY) VALUES (1,1)") -# con_detail.commit() -# except Exception, e: -# print (e[0]) -#--- -act_1 = python_act('db_1', substitutions=substitutions_1) +act = python_act('db') -@pytest.mark.version('>=2.5.3') -def test_1(act_1: Action): - with act_1.db.connect() as con: +@pytest.mark.version('>=3') +def test_1(act: Action): + with act.db.connect() as con: cust_tpb = tpb(isolation=Isolation.READ_COMMITTED_RECORD_VERSION, lock_timeout=0) con.begin(cust_tpb) with con.cursor() as c: @@ -90,7 +49,7 @@ def test_1(act_1: Action): c.execute('UPDATE MASTER_TABLE SET ID=2 WHERE ID=1') con.rollback(savepoint='A') #Create second connection for change detail table - with act_1.db.connect() as con_detail: + with act.db.connect() as con_detail: con_detail.begin(cust_tpb) with con_detail.cursor() as cd: with pytest.raises(DatabaseError, diff --git a/tests/functional/fkey/primary/test_insert_pk_04.py b/tests/functional/fkey/primary/test_insert_pk_04.py index ba4f7c72..42ff5206 100644 --- a/tests/functional/fkey/primary/test_insert_pk_04.py +++ b/tests/functional/fkey/primary/test_insert_pk_04.py @@ -1,30 +1,25 @@ #coding:utf-8 -# -# id: functional.fkey.primary.insert_pk_04 -# title: Check correct work fix with foreign key -# decription: Check foreign key work. -# Master transaction: -# 1) modifies primary key -# 2) create savepoint -# 3) modifies non key field -# 4) rollback to savepoint -# Detail transaction inserts record in detail_table. -# Expected: error because key field in master_table was changed -# tracker_id: -# min_versions: [] -# versions: 2.5.3 -# qmid: functional.fkey.primary.ins_04 + +""" +ID: fkey.primary.insert-04 +FBTEST: functional.fkey.primary.insert_pk_04 +TITLE: Check correct work fix with foreign key +DESCRIPTION: + Check foreign key work. + Master transaction: + 1) modifies primary key + 2) create savepoint + 3) modifies non key field + 4) rollback to savepoint + Detail transaction inserts record in detail_table. 
+ Expected: error because key field in master_table was changed +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * from firebird.driver import DatabaseError, tpb, Isolation -# version: 2.5.3 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE MASTER_TABLE ( +init_script = """CREATE TABLE MASTER_TABLE ( ID INTEGER PRIMARY KEY, INT_F INTEGER ); @@ -39,50 +34,13 @@ COMMIT; INSERT INTO MASTER_TABLE (ID, INT_F) VALUES (1, 10); commit;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -# test_script_1 -#--- -# TPB_master = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# TPB_detail = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# -# -# db_conn.begin(tpb=TPB_master) -# cm_1 = db_conn.cursor() -# cm_1.execute('UPDATE MASTER_TABLE SET ID=2 WHERE ID=1') -# db_conn.savepoint('A') -# cm_1.execute('UPDATE MASTER_TABLE SET INT_F=2 ') -# db_conn.rollback(savepoint='A') -# -# #Create second connection for change detail table -# con_detail = kdb.connect( -# dsn=dsn.encode(), -# user=user_name.encode(), -# password=user_password.encode() -# ) -# -# try: -# con_detail.begin(tpb=TPB_detail) -# cd = con_detail.cursor() -# cd.execute("INSERT INTO DETAIL_TABLE (ID, FKEY) VALUES (1,1)") -# con_detail.commit() -# except Exception, e: -# print (e[0]) -#--- +act = python_act('db') -act_1 = python_act('db_1', substitutions=substitutions_1) - -@pytest.mark.version('>=2.5.3') -def test_1(act_1: Action): - with act_1.db.connect() as con: +@pytest.mark.version('>=3') +def test_1(act: Action): + with act.db.connect() as con: cust_tpb = tpb(isolation=Isolation.READ_COMMITTED_RECORD_VERSION, lock_timeout=0) con.begin(cust_tpb) with con.cursor() as c: @@ -91,7 +49,7 @@ def test_1(act_1: Action): c.execute('UPDATE MASTER_TABLE SET INT_F=2') con.rollback(savepoint='A') #Create second connection for change detail table - with act_1.db.connect() as con_detail: + with act.db.connect() as con_detail: con_detail.begin(cust_tpb) with con_detail.cursor() as cd: with pytest.raises(DatabaseError, diff --git a/tests/functional/fkey/primary/test_insert_pk_05.py b/tests/functional/fkey/primary/test_insert_pk_05.py index a8d84983..c953f641 100644 --- a/tests/functional/fkey/primary/test_insert_pk_05.py +++ b/tests/functional/fkey/primary/test_insert_pk_05.py @@ -1,26 +1,21 @@ #coding:utf-8 -# -# id: functional.fkey.primary.insert_pk_05 -# title: Check correct work fix with foreign key -# decription: Check foreign key work. -# Master transaction modifies primary key and committed -# Detail transaction inserts record in detail_table. -# Expected: no errors -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.fkey.primary.ins_05 + +""" +ID: fkey.primary.insert-05 +FBTEST: functional.fkey.primary.insert_pk_05 +TITLE: Check correct work fix with foreign key +DESCRIPTION: + Check foreign key work. + Master transaction modifies primary key and committed + Detail transaction inserts record in detail_table. 
+ Expected: no errors +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * from firebird.driver import tpb, Isolation -# version: 2.1 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE MASTER_TABLE ( +init_script = """CREATE TABLE MASTER_TABLE ( ID INTEGER PRIMARY KEY, INT_F INTEGER ); @@ -35,54 +30,20 @@ COMMIT; INSERT INTO MASTER_TABLE (ID, INT_F) VALUES (1, 10); commit;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -# test_script_1 -#--- -# TPB_master = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# TPB_detail = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# -# db_conn.begin(tpb=TPB_master) -# c = db_conn.cursor() -# c.execute("UPDATE MASTER_TABLE SET ID=2 WHERE ID=1") -# db_conn.commit() -# -# #Create second connection for change detail table -# con_detail = kdb.connect( -# dsn=dsn.encode(), -# user=user_name.encode(), -# password=user_password.encode() -# ) -# -# try: -# con_detail.begin(tpb=TPB_detail) -# c = con_detail.cursor() -# c.execute("INSERT INTO DETAIL_TABLE (ID, FKEY) VALUES (1,2)") -# con_detail.commit() -# except Exception, e: -# print (e[0]) -#--- +act = python_act('db') -act_1 = python_act('db_1', substitutions=substitutions_1) - -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - with act_1.db.connect() as con: +@pytest.mark.version('>=3') +def test_1(act: Action): + with act.db.connect() as con: cust_tpb = tpb(isolation=Isolation.READ_COMMITTED_RECORD_VERSION, lock_timeout=0) con.begin(cust_tpb) with con.cursor() as c: c.execute("UPDATE MASTER_TABLE SET ID=2 WHERE ID=1") con.commit() #Create second connection for change detail table - with act_1.db.connect() as con_detail: + with act.db.connect() as con_detail: con_detail.begin(cust_tpb) with con_detail.cursor() as cd: cd.execute("INSERT INTO DETAIL_TABLE (ID, FKEY) VALUES (1,2)") diff --git a/tests/functional/fkey/primary/test_insert_pk_06.py b/tests/functional/fkey/primary/test_insert_pk_06.py index 853cb15a..594d53a8 100644 --- a/tests/functional/fkey/primary/test_insert_pk_06.py +++ b/tests/functional/fkey/primary/test_insert_pk_06.py @@ -1,26 +1,21 @@ #coding:utf-8 -# -# id: functional.fkey.primary.insert_pk_06 -# title: Check correct work fix with foreign key -# decription: Check foreign key work. -# Master transaction modifies primary key and committed. -# Detail transaction inserts record in detail_table. -# Expected: no errors -# tracker_id: -# min_versions: [] -# versions: 2.5 -# qmid: functional.fkey.primary.ins_06 + +""" +ID: fkey.primary.insert-06 +FBTEST: functional.fkey.primary.insert_pk_06 +TITLE: Check correct work fix with foreign key +DESCRIPTION: + Check foreign key work. + Master transaction modifies primary key and committed. + Detail transaction inserts record in detail_table. 
+ Expected: no errors +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * from firebird.driver import tpb, Isolation -# version: 2.5 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE MASTER_TABLE ( +init_script = """CREATE TABLE MASTER_TABLE ( ID INTEGER PRIMARY KEY, INT_F INTEGER ); @@ -35,50 +30,13 @@ COMMIT; INSERT INTO MASTER_TABLE (ID, INT_F) VALUES (1, 10); commit;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -# test_script_1 -#--- -# TPB_master = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# TPB_detail = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# -# db_conn.begin(tpb=TPB_master) -# c = db_conn.cursor() -# c.execute("UPDATE MASTER_TABLE SET ID=2 WHERE ID=1") -# db_conn.commit() -# db_conn.begin(tpb=TPB_master) -# c = db_conn.cursor() -# c.execute("UPDATE MASTER_TABLE SET INT_F=10") -# -# #Create second connection for change detail table -# con_detail = kdb.connect( -# dsn=dsn.encode(), -# user=user_name.encode(), -# password=user_password.encode() -# ) -# -# try: -# con_detail.begin(tpb=TPB_detail) -# c = con_detail.cursor() -# c.execute("INSERT INTO DETAIL_TABLE (ID, FKEY) VALUES (1,2)") -# con_detail.commit() -# except Exception, e: -# print (e[0]) -#--- +act = python_act('db') -act_1 = python_act('db_1', substitutions=substitutions_1) - -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - with act_1.db.connect() as con: +@pytest.mark.version('>=3') +def test_1(act: Action): + with act.db.connect() as con: cust_tpb = tpb(isolation=Isolation.READ_COMMITTED_RECORD_VERSION, lock_timeout=0) con.begin(cust_tpb) with con.cursor() as c: @@ -87,7 +45,7 @@ def test_1(act_1: Action): con.begin(cust_tpb) c.execute("UPDATE MASTER_TABLE SET INT_F=10") #Create second connection for change detail table - with act_1.db.connect() as con_detail: + with act.db.connect() as con_detail: con_detail.begin(cust_tpb) with con_detail.cursor() as cd: cd.execute("INSERT INTO DETAIL_TABLE (ID, FKEY) VALUES (1,2)") diff --git a/tests/functional/fkey/primary/test_insert_pk_07.py b/tests/functional/fkey/primary/test_insert_pk_07.py index e26eb970..cc4bcdd5 100644 --- a/tests/functional/fkey/primary/test_insert_pk_07.py +++ b/tests/functional/fkey/primary/test_insert_pk_07.py @@ -1,28 +1,25 @@ #coding:utf-8 -# -# id: functional.fkey.primary.insert_pk_07 -# title: Check correct work fix with foreign key -# decription: Check foreign key work. -# Master table has primary key consisting of several fields. -# Master transaction modifies non key fields. -# Detail transaction inserts record in detail_table. -# Expected: no errors. -# Related to: CORE-1606. Ability to insert child record if parent record is locked but foreign key target unchanged. -# tracker_id: -# min_versions: [] -# versions: 2.5 -# qmid: functional.fkey.primary.ins_07 + +""" +ID: fkey.primary.insert-07 +FBTEST: functional.fkey.primary.insert_pk_07 +ISSUE: 2027 +JIRA: CORE-1606 +TITLE: Check correct work fix with foreign key +DESCRIPTION: + Check foreign key work. + Master table has primary key consisting of several fields. + Master transaction modifies non key fields. + Detail transaction inserts record in detail_table. + Expected: no errors. + Related to #2027. 
Ability to insert child record if parent record is locked but foreign key target unchanged. +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * from firebird.driver import tpb, Isolation -# version: 2.5 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE MASTER_TABLE ( +init_script = """CREATE TABLE MASTER_TABLE ( ID_1 INTEGER NOT NULL, ID_2 VARCHAR(20) NOT NULL, INT_F INTEGER, @@ -40,51 +37,19 @@ COMMIT; INSERT INTO MASTER_TABLE (ID_1, ID_2, INT_F) VALUES (1, 'one', 10); COMMIT;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -# test_script_1 -#--- -# TPB_master = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# TPB_detail = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# db_conn.begin(tpb=TPB_master) -# cm_1 = db_conn.cursor() -# cm_1.execute('UPDATE MASTER_TABLE SET INT_F=2') -# -# #Create second connection for change detail table -# con_detail = kdb.connect( -# dsn=dsn.encode(), -# user=user_name.encode(), -# password=user_password.encode() -# ) -# -# try: -# con_detail.begin(tpb=TPB_detail) -# cd = con_detail.cursor() -# cd.execute("INSERT INTO DETAIL_TABLE (ID, FKEY_1, FKEY_2) VALUES (1, 1, 'one')") -# con_detail.commit() -# except Exception, e: -# print (e[0]) -#--- +act = python_act('db') -act_1 = python_act('db_1', substitutions=substitutions_1) - -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - with act_1.db.connect() as con: +@pytest.mark.version('>=3') +def test_1(act: Action): + with act.db.connect() as con: cust_tpb = tpb(isolation=Isolation.READ_COMMITTED_RECORD_VERSION, lock_timeout=0) con.begin(cust_tpb) with con.cursor() as c: c.execute('UPDATE MASTER_TABLE SET INT_F=2') #Create second connection for change detail table - with act_1.db.connect() as con_detail: + with act.db.connect() as con_detail: con_detail.begin(cust_tpb) with con_detail.cursor() as cd: cd.execute("INSERT INTO DETAIL_TABLE (ID, FKEY_1, FKEY_2) VALUES (1, 1, 'one')") diff --git a/tests/functional/fkey/primary/test_insert_pk_08.py b/tests/functional/fkey/primary/test_insert_pk_08.py index dae5883a..7c37d6e0 100644 --- a/tests/functional/fkey/primary/test_insert_pk_08.py +++ b/tests/functional/fkey/primary/test_insert_pk_08.py @@ -1,27 +1,22 @@ #coding:utf-8 -# -# id: functional.fkey.primary.insert_pk_08 -# title: Check correct work fix with foreign key -# decription: Check foreign key work. -# Master table has primary key consisting of several fields. -# Master transaction modifies one key field. -# Detail transaction inserts record in detail_table. -# Expected: error - primary key in master_table has been changed. -# tracker_id: -# min_versions: [] -# versions: 2.5.3 -# qmid: functional.fkey.primary.ins_08 + +""" +ID: fkey.primary.insert-08 +FBTEST: functional.fkey.primary.insert_pk_08 +TITLE: Check correct work fix with foreign key +DESCRIPTION: + Check foreign key work. + Master table has primary key consisting of several fields. + Master transaction modifies one key field. + Detail transaction inserts record in detail_table. + Expected: error - primary key in master_table has been changed. 
+""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * from firebird.driver import DatabaseError, tpb, Isolation -# version: 2.5.3 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE MASTER_TABLE ( +init_script = """CREATE TABLE MASTER_TABLE ( ID_1 INTEGER NOT NULL, ID_2 VARCHAR(20) NOT NULL, INT_F INTEGER, @@ -39,51 +34,19 @@ COMMIT; INSERT INTO MASTER_TABLE (ID_1, ID_2, INT_F) VALUES (1, 'one', 10); COMMIT;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -# test_script_1 -#--- -# TPB_master = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# TPB_detail = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# db_conn.begin(tpb=TPB_master) -# cm_1 = db_conn.cursor() -# cm_1.execute('UPDATE MASTER_TABLE SET ID_1=2 WHERE ID_1=1') -# -# #Create second connection for change detail table -# con_detail = kdb.connect( -# dsn=dsn.encode(), -# user=user_name.encode(), -# password=user_password.encode() -# ) -# -# try: -# con_detail.begin(tpb=TPB_detail) -# cd = con_detail.cursor() -# cd.execute("INSERT INTO DETAIL_TABLE (ID, FKEY_1, FKEY_2) VALUES (1, 1, 'one')") -# con_detail.commit() -# except Exception, e: -# print (e[0]) -#--- +act = python_act('db') -act_1 = python_act('db_1', substitutions=substitutions_1) - -@pytest.mark.version('>=2.5.3') -def test_1(act_1: Action): - with act_1.db.connect() as con: +@pytest.mark.version('>=3') +def test_1(act: Action): + with act.db.connect() as con: cust_tpb = tpb(isolation=Isolation.READ_COMMITTED_RECORD_VERSION, lock_timeout=0) con.begin(cust_tpb) with con.cursor() as c: c.execute('UPDATE MASTER_TABLE SET ID_1=2 WHERE ID_1=1') #Create second connection for change detail table - with act_1.db.connect() as con_detail: + with act.db.connect() as con_detail: con_detail.begin(cust_tpb) with con_detail.cursor() as cd: with pytest.raises(DatabaseError, diff --git a/tests/functional/fkey/primary/test_insert_pk_09.py b/tests/functional/fkey/primary/test_insert_pk_09.py index e3712108..80f82f9a 100644 --- a/tests/functional/fkey/primary/test_insert_pk_09.py +++ b/tests/functional/fkey/primary/test_insert_pk_09.py @@ -1,27 +1,22 @@ #coding:utf-8 -# -# id: functional.fkey.primary.insert_pk_09 -# title: Check correct work fix with foreign key -# decription: Check foreign key work. -# Master table has primary key consisting of several fields. -# Master transaction modifies all primary key fields. -# Detail transaction inserts record in detail_table. -# Expected: error - primary in master_table has been changed. -# tracker_id: -# min_versions: [] -# versions: 2.5.3 -# qmid: functional.fkey.primary.ins_09 + +""" +ID: fkey.primary.insert-09 +FBTEST: functional.fkey.primary.insert_pk_09 +TITLE: Check correct work fix with foreign key +DESCRIPTION: + Check foreign key work. + Master table has primary key consisting of several fields. + Master transaction modifies all primary key fields. + Detail transaction inserts record in detail_table. + Expected: error - primary in master_table has been changed. 
+""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * from firebird.driver import DatabaseError, tpb, Isolation -# version: 2.5.3 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE MASTER_TABLE ( +init_script = """CREATE TABLE MASTER_TABLE ( ID_1 INTEGER NOT NULL, ID_2 VARCHAR(20) NOT NULL, INT_F INTEGER, @@ -39,53 +34,20 @@ COMMIT; INSERT INTO MASTER_TABLE (ID_1, ID_2, INT_F) VALUES (1, 'one', 10); COMMIT;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -# test_script_1 -#--- -# TPB_master = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# TPB_detail = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# db_conn.begin(tpb=TPB_master) -# cm_1 = db_conn.cursor() -# cm_1.execute("UPDATE MASTER_TABLE SET ID_1=2 WHERE ID_1=1") -# cm_1.execute("UPDATE MASTER_TABLE SET ID_2='two' WHERE ID_2='one'") -# -# #Create second connection for change detail table -# con_detail = kdb.connect( -# dsn=dsn.encode(), -# user=user_name.encode(), -# password=user_password.encode() -# ) -# -# try: -# con_detail.begin(tpb=TPB_detail) -# cd = con_detail.cursor() -# cd.execute("INSERT INTO DETAIL_TABLE (ID, FKEY_1, FKEY_2) VALUES (1, 1, 'one')") -# con_detail.commit() -# except Exception, e: -# print (e[0]) -#--- +act = python_act('db') -act_1 = python_act('db_1', substitutions=substitutions_1) - -@pytest.mark.version('>=2.5.3') -def test_1(act_1: Action): - with act_1.db.connect() as con: +@pytest.mark.version('>=3') +def test_1(act: Action): + with act.db.connect() as con: cust_tpb = tpb(isolation=Isolation.READ_COMMITTED_RECORD_VERSION, lock_timeout=0) con.begin(cust_tpb) with con.cursor() as c: c.execute("UPDATE MASTER_TABLE SET ID_1=2 WHERE ID_1=1") c.execute("UPDATE MASTER_TABLE SET ID_2='two' WHERE ID_2='one'") #Create second connection for change detail table - with act_1.db.connect() as con_detail: + with act.db.connect() as con_detail: con_detail.begin(cust_tpb) with con_detail.cursor() as cd: with pytest.raises(DatabaseError, diff --git a/tests/functional/fkey/primary/test_insert_pk_10.py b/tests/functional/fkey/primary/test_insert_pk_10.py index 02849760..b402c0e5 100644 --- a/tests/functional/fkey/primary/test_insert_pk_10.py +++ b/tests/functional/fkey/primary/test_insert_pk_10.py @@ -1,27 +1,22 @@ #coding:utf-8 -# -# id: functional.fkey.primary.insert_pk_10 -# title: Check correct work fix with foreign key -# decription: Check foreign key work. -# Master table has primary key consisting of several fields. -# Master transaction modifies one field of primary key and committed. -# Detail transaction inserts record in detail_table. -# Expected: no errors -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.fkey.primary.ins_10 + +""" +ID: fkey.primary.insert-10 +FBTEST: functional.fkey.primary.insert_pk_10 +TITLE: Check correct work fix with foreign key +DESCRIPTION: + Check foreign key work. + Master table has primary key consisting of several fields. + Master transaction modifies one field of primary key and committed. + Detail transaction inserts record in detail_table. 
+ Expected: no errors +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * from firebird.driver import tpb, Isolation -# version: 2.1 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE MASTER_TABLE ( +init_script = """CREATE TABLE MASTER_TABLE ( ID_1 INTEGER NOT NULL, ID_2 VARCHAR(20) NOT NULL, INT_F INTEGER, @@ -39,53 +34,20 @@ COMMIT; INSERT INTO MASTER_TABLE (ID_1, ID_2, INT_F) VALUES (1, 'one', 10); COMMIT;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -# test_script_1 -#--- -# TPB_master = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# TPB_detail = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# db_conn.begin(tpb=TPB_master) -# cm_1 = db_conn.cursor() -# cm_1.execute('UPDATE MASTER_TABLE SET ID_1=2 WHERE ID_1=1') -# db_conn.commit(); -# -# #Create second connection for change detail table -# con_detail = kdb.connect( -# dsn=dsn.encode(), -# user=user_name.encode(), -# password=user_password.encode() -# ) -# -# try: -# con_detail.begin(tpb=TPB_detail) -# cd = con_detail.cursor() -# cd.execute("INSERT INTO DETAIL_TABLE (ID, FKEY_1, FKEY_2) VALUES (1, 2, 'one')") -# con_detail.commit() -# except Exception, e: -# print (e[0]) -#--- +act = python_act('db') -act_1 = python_act('db_1', substitutions=substitutions_1) - -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - with act_1.db.connect() as con: +@pytest.mark.version('>=3') +def test_1(act: Action): + with act.db.connect() as con: cust_tpb = tpb(isolation=Isolation.READ_COMMITTED_RECORD_VERSION, lock_timeout=0) con.begin(cust_tpb) with con.cursor() as c: c.execute('UPDATE MASTER_TABLE SET ID_1=2 WHERE ID_1=1') con.commit() #Create second connection for change detail table - with act_1.db.connect() as con_detail: + with act.db.connect() as con_detail: con_detail.begin(cust_tpb) with con_detail.cursor() as cd: cd.execute("INSERT INTO DETAIL_TABLE (ID, FKEY_1, FKEY_2) VALUES (1, 2, 'one')") diff --git a/tests/functional/fkey/primary/test_insert_pk_11.py b/tests/functional/fkey/primary/test_insert_pk_11.py index d298da9b..108e6d12 100644 --- a/tests/functional/fkey/primary/test_insert_pk_11.py +++ b/tests/functional/fkey/primary/test_insert_pk_11.py @@ -1,27 +1,22 @@ #coding:utf-8 -# -# id: functional.fkey.primary.insert_pk_11 -# title: Check correct work fix with foreign key -# decription: Check foreign key work. -# Master table has primary key consisting of several fields. -# Master transaction modifies all primary key fields and committed. -# Detail transaction inserts record in detail_table. -# Expected: no errors. -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.fkey.primary.ins_11 + +""" +ID: fkey.primary.insert-11 +FBTEST: functional.fkey.primary.insert_pk_11 +TITLE: Check correct work fix with foreign key +DESCRIPTION: + Check foreign key work. + Master table has primary key consisting of several fields. + Master transaction modifies all primary key fields and committed. + Detail transaction inserts record in detail_table. + Expected: no errors. 
+""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * from firebird.driver import tpb, Isolation -# version: 2.1 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE MASTER_TABLE ( +init_script = """CREATE TABLE MASTER_TABLE ( ID_1 INTEGER NOT NULL, ID_2 VARCHAR(20) NOT NULL, INT_F INTEGER, @@ -39,46 +34,13 @@ COMMIT; INSERT INTO MASTER_TABLE (ID_1, ID_2, INT_F) VALUES (1, 'one', 10); COMMIT;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -# test_script_1 -#--- -# TPB_master = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# TPB_detail = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# db_conn.begin(tpb=TPB_master) -# cm_1 = db_conn.cursor() -# cm_1.execute("UPDATE MASTER_TABLE SET ID_1=2 WHERE ID_1=1") -# cm_1.execute("UPDATE MASTER_TABLE SET ID_2='two' WHERE ID_2='one'") -# db_conn.commit(); -# #Create second connection for change detail table -# con_detail = kdb.connect( -# dsn=dsn.encode(), -# user=user_name.encode(), -# password=user_password.encode() -# ) -# -# try: -# con_detail.begin(tpb=TPB_detail) -# cd = con_detail.cursor() -# cd.execute("INSERT INTO DETAIL_TABLE (ID, FKEY_1, FKEY_2) VALUES (1, 2, 'two')") -# con_detail.commit() -# except Exception, e: -# print (e[0]) -#--- +act = python_act('db') -act_1 = python_act('db_1', substitutions=substitutions_1) - -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - with act_1.db.connect() as con: +@pytest.mark.version('>=3') +def test_1(act: Action): + with act.db.connect() as con: cust_tpb = tpb(isolation=Isolation.READ_COMMITTED_RECORD_VERSION, lock_timeout=0) con.begin(cust_tpb) with con.cursor() as c: @@ -86,7 +48,7 @@ def test_1(act_1: Action): c.execute("UPDATE MASTER_TABLE SET ID_2='two' WHERE ID_2='one'") con.commit() #Create second connection for change detail table - with act_1.db.connect() as con_detail: + with act.db.connect() as con_detail: con_detail.begin(cust_tpb) with con_detail.cursor() as cd: cd.execute("INSERT INTO DETAIL_TABLE (ID, FKEY_1, FKEY_2) VALUES (1, 2, 'two')") diff --git a/tests/functional/fkey/primary/test_insert_pk_12.py b/tests/functional/fkey/primary/test_insert_pk_12.py index 986e01ed..571ac7c0 100644 --- a/tests/functional/fkey/primary/test_insert_pk_12.py +++ b/tests/functional/fkey/primary/test_insert_pk_12.py @@ -1,27 +1,22 @@ #coding:utf-8 -# -# id: functional.fkey.primary.insert_pk_12 -# title: Check correct work fix with foreign key -# decription: Check foreign key work. -# Master table has primary key consisting of several fields. -# Master transaction modifies all primary key fields and committed. -# Detail transaction inserts record in detail_table. -# Expected: no errors. -# tracker_id: -# min_versions: [] -# versions: 2.5 -# qmid: functional.fkey.primary.ins_12 + +""" +ID: fkey.primary.insert-12 +FBTEST: functional.fkey.primary.insert_pk_12 +TITLE: Check correct work fix with foreign key +DESCRIPTION: + Check foreign key work. + Master table has primary key consisting of several fields. + Master transaction modifies all primary key fields and committed. + Detail transaction inserts record in detail_table. + Expected: no errors. 
+""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * from firebird.driver import tpb, Isolation -# version: 2.5 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE MASTER_TABLE ( +init_script = """CREATE TABLE MASTER_TABLE ( ID_1 INTEGER NOT NULL, ID_2 VARCHAR(20) NOT NULL, INT_F INTEGER, @@ -39,50 +34,13 @@ COMMIT; INSERT INTO MASTER_TABLE (ID_1, ID_2, INT_F) VALUES (1, 'one', 10); COMMIT;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -# test_script_1 -#--- -# TPB_master = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# TPB_detail = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# db_conn.begin(tpb=TPB_master) -# cm_1 = db_conn.cursor() -# cm_1.execute("UPDATE MASTER_TABLE SET ID_1=2 WHERE ID_1=1") -# cm_1.execute("UPDATE MASTER_TABLE SET ID_2='two' WHERE ID_2='one'") -# db_conn.commit(); -# db_conn.begin(tpb=TPB_master) -# cm_1 = db_conn.cursor() -# cm_1.execute("UPDATE MASTER_TABLE SET INT_F=2") -# -# #Create second connection for change detail table -# con_detail = kdb.connect( -# dsn=dsn.encode(), -# user=user_name.encode(), -# password=user_password.encode() -# ) -# -# try: -# con_detail.begin(tpb=TPB_detail) -# cd = con_detail.cursor() -# cd.execute("INSERT INTO DETAIL_TABLE (ID, FKEY_1, FKEY_2) VALUES (1, 2, 'two')") -# con_detail.commit() -# except Exception, e: -# print (e[0]) -#--- +act = python_act('db') -act_1 = python_act('db_1', substitutions=substitutions_1) - -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - with act_1.db.connect() as con: +@pytest.mark.version('>=3') +def test_1(act: Action): + with act.db.connect() as con: cust_tpb = tpb(isolation=Isolation.READ_COMMITTED_RECORD_VERSION, lock_timeout=0) con.begin(cust_tpb) with con.cursor() as c: @@ -92,7 +50,7 @@ def test_1(act_1: Action): con.begin(cust_tpb) c.execute("UPDATE MASTER_TABLE SET INT_F=2") #Create second connection for change detail table - with act_1.db.connect() as con_detail: + with act.db.connect() as con_detail: con_detail.begin(cust_tpb) with con_detail.cursor() as cd: cd.execute("INSERT INTO DETAIL_TABLE (ID, FKEY_1, FKEY_2) VALUES (1, 2, 'two')") diff --git a/tests/functional/fkey/primary/test_insert_pk_13.py b/tests/functional/fkey/primary/test_insert_pk_13.py index bfad05f6..9f01e387 100644 --- a/tests/functional/fkey/primary/test_insert_pk_13.py +++ b/tests/functional/fkey/primary/test_insert_pk_13.py @@ -1,25 +1,20 @@ #coding:utf-8 -# -# id: functional.fkey.primary.insert_pk_13 -# title: Check correct work fix with foreign key -# decription: Check foreign key work. -# Master transaction deletes record from master_table without commit. -# Detail transaction inserts record in detail_table -# Expected: error primary key field in master_table has been changed. -# tracker_id: -# min_versions: [] -# versions: 3.0 -# qmid: functional.fkey.primary.ins_13 + +""" +ID: fkey.primary.insert-13 +FBTEST: functional.fkey.primary.insert_pk_13 +TITLE: Check correct work fix with foreign key +DESCRIPTION: + Check foreign key work. + Master transaction deletes record from master_table without commit. + Detail transaction inserts record in detail_table + Expected: error primary key field in master_table has been changed. 
+""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None - -substitutions_1 = [('-At block line: [\\d]+, col: [\\d]+', '-At block line')] - -init_script_1 = """ +init_script = """ recreate table t_detl(id int); commit; recreate table t_main( @@ -35,9 +30,9 @@ init_script_1 = """ commit; """ -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """ +test_script = """ commit; set transaction no wait; set term ^; @@ -51,9 +46,10 @@ test_script_1 = """ set term ;^ """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('-At block line: [\\d]+, col: [\\d]+', + '-At block line')]) -expected_stderr_1 = """ +expected_stderr = """ Statement failed, SQLSTATE = 23000 violation of FOREIGN KEY constraint "FK_TDETL_TMAIN" on table "T_DETL" -Foreign key reference target does not exist @@ -62,8 +58,7 @@ expected_stderr_1 = """ """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - +def test_1(act: Action): + act.expected_stderr = expected_stderr + act.execute() + assert act.clean_stderr == act.clean_expected_stderr diff --git a/tests/functional/fkey/primary/test_insert_pk_14.py b/tests/functional/fkey/primary/test_insert_pk_14.py index 8a8b5159..3efb1b12 100644 --- a/tests/functional/fkey/primary/test_insert_pk_14.py +++ b/tests/functional/fkey/primary/test_insert_pk_14.py @@ -1,26 +1,21 @@ #coding:utf-8 -# -# id: functional.fkey.primary.insert_pk_14 -# title: Check correct work fix with foreign key -# decription: Check foreign key work. -# Master transaction deletes record from master_table and commit. -# Detail transaction inserts record in detail_table. -# Expected: referential integrity error. -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.fkey.primary.ins_14 + +""" +ID: fkey.primary.insert-14 +FBTEST: functional.fkey.primary.insert_pk_14 +TITLE: Check correct work fix with foreign key +DESCRIPTION: + Check foreign key work. + Master transaction doesn't modify primary key. + Detail transaction inserts record in detail_table. 
+ Expected: no errors +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * from firebird.driver import DatabaseError, tpb, Isolation -# version: 2.1 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE MASTER_TABLE ( +init_script = """CREATE TABLE MASTER_TABLE ( ID INTEGER PRIMARY KEY, INT_F INTEGER ); @@ -35,53 +30,20 @@ COMMIT; INSERT INTO MASTER_TABLE (ID, INT_F) VALUES (1, 10); COMMIT;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -# test_script_1 -#--- -# TPB_master = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# TPB_detail = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# db_conn.begin(tpb=TPB_master) -# cm_1 = db_conn.cursor() -# cm_1.execute('DELETE FROM MASTER_TABLE WHERE ID=1') -# db_conn.commit() -# -# #Create second connection for change detail table -# con_detail = kdb.connect( -# dsn=dsn.encode(), -# user=user_name.encode(), -# password=user_password.encode() -# ) -# -# try: -# con_detail.begin(tpb=TPB_detail) -# cd = con_detail.cursor() -# cd.execute("INSERT INTO DETAIL_TABLE (ID, FKEY) VALUES (1,1)") -# con_detail.commit() -# except Exception, e: -# print (e[0]) -#--- +act = python_act('db') -act_1 = python_act('db_1', substitutions=substitutions_1) - -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - with act_1.db.connect() as con: +@pytest.mark.version('>=3') +def test_1(act: Action): + with act.db.connect() as con: cust_tpb = tpb(isolation=Isolation.READ_COMMITTED_RECORD_VERSION, lock_timeout=0) con.begin(cust_tpb) with con.cursor() as c: c.execute('DELETE FROM MASTER_TABLE WHERE ID=1') con.commit() #Create second connection for change detail table - with act_1.db.connect() as con_detail: + with act.db.connect() as con_detail: con_detail.begin(cust_tpb) with con_detail.cursor() as cd: with pytest.raises(DatabaseError, diff --git a/tests/functional/fkey/primary/test_insert_pk_15.py b/tests/functional/fkey/primary/test_insert_pk_15.py index 0a4cdb7b..3005371b 100644 --- a/tests/functional/fkey/primary/test_insert_pk_15.py +++ b/tests/functional/fkey/primary/test_insert_pk_15.py @@ -1,25 +1,20 @@ #coding:utf-8 -# -# id: functional.fkey.primary.insert_pk_15 -# title: Check correct work fix with foreign key -# decription: Check foreign key work. -# Master transaction inserts record into master_table but DOES NOT commit. -# Detail transaction inserts record in detail_table and tries to COMMIT. -# Expected: referential integrity error. -# tracker_id: -# min_versions: ['2.5'] -# versions: 3.0 -# qmid: functional.fkey.primary.ins_15 + +""" +ID: fkey.primary.insert-15 +FBTEST: functional.fkey.primary.insert_pk_15 +TITLE: Check correct work fix with foreign key +DESCRIPTION: + Check foreign key work. + Master transaction inserts record into master_table but DOES NOT commit. + Detail transaction inserts record in detail_table and tries to COMMIT. + Expected: referential integrity error. 
+""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None - -substitutions_1 = [('-At block line: [\\d]+, col: [\\d]+', '-At block line')] - -init_script_1 = """ +init_script = """ recreate table t_detl(id int); commit; recreate table t_main( @@ -33,9 +28,9 @@ init_script_1 = """ commit; """ -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """ +test_script = """ commit; set transaction no wait; set term ^; @@ -49,9 +44,10 @@ test_script_1 = """ set term ;^ """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('-At block line: [\\d]+, col: [\\d]+', + '-At block line')]) -expected_stderr_1 = """ +expected_stderr = """ Statement failed, SQLSTATE = 23000 violation of FOREIGN KEY constraint "FK_TDETL_TMAIN" on table "T_DETL" -Foreign key reference target does not exist @@ -60,8 +56,7 @@ expected_stderr_1 = """ """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - +def test_1(act: Action): + act.expected_stderr = expected_stderr + act.execute() + assert act.clean_stderr == act.clean_expected_stderr diff --git a/tests/functional/fkey/primary/test_insert_pk_16.py b/tests/functional/fkey/primary/test_insert_pk_16.py index e0fcd2d6..a7b124c2 100644 --- a/tests/functional/fkey/primary/test_insert_pk_16.py +++ b/tests/functional/fkey/primary/test_insert_pk_16.py @@ -1,26 +1,21 @@ #coding:utf-8 -# -# id: functional.fkey.primary.insert_pk_16 -# title: Check correct work fix with foreign key -# decription: Check foreign key work. -# Master transaction inserts record into master_table and commit. -# Detail transaction inserts record in detail_table. -# Expected: no errors. -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.fkey.primary.ins_16 + +""" +ID: fkey.primary.insert-16 +FBTEST: functional.fkey.primary.insert_pk_16 +TITLE: Check correct work fix with foreign key +DESCRIPTION: + Check foreign key work. + Master transaction inserts record into master_table and commit. + Detail transaction inserts record in detail_table. + Expected: no errors. 
+""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * from firebird.driver import tpb, Isolation -# version: 2.1 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE MASTER_TABLE ( +init_script = """CREATE TABLE MASTER_TABLE ( ID INTEGER PRIMARY KEY, INT_F INTEGER ); @@ -33,53 +28,20 @@ CREATE TABLE DETAIL_TABLE ( ALTER TABLE DETAIL_TABLE ADD CONSTRAINT FK_DETAIL_TABLE FOREIGN KEY (FKEY) REFERENCES MASTER_TABLE (ID); COMMIT;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -# test_script_1 -#--- -# TPB_master = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# TPB_detail = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# db_conn.begin(tpb=TPB_master) -# cm_1 = db_conn.cursor() -# cm_1.execute('INSERT INTO MASTER_TABLE (ID, INT_F) VALUES (1, 10)') -# db_conn.commit() -# -# #Create second connection for change detail table -# con_detail = kdb.connect( -# dsn=dsn.encode(), -# user=user_name.encode(), -# password=user_password.encode() -# ) -# -# try: -# con_detail.begin(tpb=TPB_detail) -# cd = con_detail.cursor() -# cd.execute("INSERT INTO DETAIL_TABLE (ID, FKEY) VALUES (1,1)") -# con_detail.commit() -# except Exception, e: -# print (e[0]) -#--- +act = python_act('db') -act_1 = python_act('db_1', substitutions=substitutions_1) - -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - with act_1.db.connect() as con: +@pytest.mark.version('>=3') +def test_1(act: Action): + with act.db.connect() as con: cust_tpb = tpb(isolation=Isolation.READ_COMMITTED_RECORD_VERSION, lock_timeout=0) con.begin(cust_tpb) with con.cursor() as c: c.execute('INSERT INTO MASTER_TABLE (ID, INT_F) VALUES (1, 10)') con.commit() #Create second connection for change detail table - with act_1.db.connect() as con_detail: + with act.db.connect() as con_detail: con_detail.begin(cust_tpb) with con_detail.cursor() as cd: cd.execute("INSERT INTO DETAIL_TABLE (ID, FKEY) VALUES (1,1)") diff --git a/tests/functional/fkey/primary/test_insert_pk_17.py b/tests/functional/fkey/primary/test_insert_pk_17.py index 89257ae9..7c608457 100644 --- a/tests/functional/fkey/primary/test_insert_pk_17.py +++ b/tests/functional/fkey/primary/test_insert_pk_17.py @@ -1,27 +1,22 @@ #coding:utf-8 -# -# id: functional.fkey.primary.insert_pk_17 -# title: Check correct work fix with foreign key -# decription: Check foreign key work. -# 1 Master transaction modifies non key field. -# 2 Detail transaction inserts record. -# 3 Master transaction modifies primary key. -# Expected: referential integrity error -# tracker_id: -# min_versions: [] -# versions: 2.5.3 -# qmid: functional.fkey.primary.ins_17 + +""" +ID: fkey.primary.insert-17 +FBTEST: functional.fkey.primary.insert_pk_17 +TITLE: Check correct work fix with foreign key +DESCRIPTION: + Check foreign key work. + 1 Master transaction modifies non key field. + 2 Detail transaction inserts record. + 3 Master transaction modifies primary key. 
+ Expected: referential integrity error +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * from firebird.driver import DatabaseError, tpb, Isolation -# version: 2.5.3 -# resources: None - -substitutions_1 = [('lock.*', '')] - -init_script_1 = """CREATE TABLE MASTER_TABLE ( +init_script = """CREATE TABLE MASTER_TABLE ( ID INTEGER PRIMARY KEY, INT_F INTEGER ); @@ -36,56 +31,19 @@ COMMIT; INSERT INTO MASTER_TABLE (ID, INT_F) VALUES (1, 10); commit;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -# test_script_1 -#--- -# TPB_master = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# TPB_detail = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# -# db_conn.begin(tpb=TPB_master) -# c = db_conn.cursor() -# c.execute("update master_table set int_f = 10 WHERE ID=1") -# -# #Create second connection for change detail table -# con_detail = kdb.connect( -# dsn=dsn.encode(), -# user=user_name.encode(), -# password=user_password.encode() -# ) -# try: -# con_detail.begin(tpb=TPB_detail) -# c = con_detail.cursor() -# c.execute("INSERT INTO DETAIL_TABLE (ID, FKEY) VALUES (1,1)") -# except Exception, e: -# print (e[1]) -# -# try: -# c = db_conn.cursor() -# c.execute("update master_table set ID=10 WHERE ID=1") -# except Exception, e: -# print (e[0]) -#--- +act = python_act('db') -act_1 = python_act('db_1', substitutions=substitutions_1) - -@pytest.mark.version('>=2.5.3') -def test_1(act_1: Action): - with act_1.db.connect() as con: +@pytest.mark.version('>=3') +def test_1(act: Action): + with act.db.connect() as con: cust_tpb = tpb(isolation=Isolation.READ_COMMITTED_RECORD_VERSION, lock_timeout=0) con.begin(cust_tpb) with con.cursor() as c: c.execute("update master_table set int_f = 10 WHERE ID=1") #Create second connection for change detail table - with act_1.db.connect() as con_detail: + with act.db.connect() as con_detail: con_detail.begin(cust_tpb) with con_detail.cursor() as cd: cd.execute("INSERT INTO DETAIL_TABLE (ID, FKEY) VALUES (1,1)") diff --git a/tests/functional/fkey/primary/test_insert_pk_18.py b/tests/functional/fkey/primary/test_insert_pk_18.py index 2a06173b..59947ae8 100644 --- a/tests/functional/fkey/primary/test_insert_pk_18.py +++ b/tests/functional/fkey/primary/test_insert_pk_18.py @@ -1,28 +1,23 @@ #coding:utf-8 -# -# id: functional.fkey.primary.insert_pk_18 -# title: Check correct work fix with foreign key -# decription: Check foreign key work. -# 1 Master transaction modifies non key field. -# 2 Detail transaction add record. -# 3 Detail transaction commited -# 4 Master transaction modifies primary key. -# Expected: referential integrity error -# tracker_id: -# min_versions: [] -# versions: 2.5.3 -# qmid: functional.fkey.primary.ins_18 + +""" +ID: fkey.primary.insert-18 +FBTEST: functional.fkey.primary.insert_pk_18 +TITLE: Check correct work fix with foreign key +DESCRIPTION: + Check foreign key work. + 1 Master transaction modifies non key field. + 2 Detail transaction add record. + 3 Detail transaction commited + 4 Master transaction modifies primary key. 
+ Expected: referential integrity error +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * from firebird.driver import DatabaseError, tpb, Isolation -# version: 2.5.3 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE MASTER_TABLE ( +init_script = """CREATE TABLE MASTER_TABLE ( ID INTEGER PRIMARY KEY, INT_F INTEGER ); @@ -37,57 +32,19 @@ COMMIT; INSERT INTO MASTER_TABLE (ID, INT_F) VALUES (1, 10); commit;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -# test_script_1 -#--- -# TPB_master = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# TPB_detail = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# -# db_conn.begin(tpb=TPB_master) -# c = db_conn.cursor() -# c.execute("update master_table set int_f = 10 WHERE ID=1") -# -# #Create second connection for change detail table -# con_detail = kdb.connect( -# dsn=dsn.encode(), -# user=user_name.encode(), -# password=user_password.encode() -# ) -# try: -# con_detail.begin(tpb=TPB_detail) -# c = con_detail.cursor() -# c.execute("INSERT INTO DETAIL_TABLE (ID, FKEY) VALUES (1,1)") -# con_detail.commit() -# except Exception, e: -# print (e[1]) -# -# try: -# c = db_conn.cursor() -# c.execute("update master_table set ID=10 WHERE ID=1") -# except Exception, e: -# print (e[0]) -#--- +act = python_act('db') -act_1 = python_act('db_1', substitutions=substitutions_1) - -@pytest.mark.version('>=2.5.3') -def test_1(act_1: Action): - with act_1.db.connect() as con: +@pytest.mark.version('>=3') +def test_1(act: Action): + with act.db.connect() as con: cust_tpb = tpb(isolation=Isolation.READ_COMMITTED_RECORD_VERSION, lock_timeout=0) con.begin(cust_tpb) with con.cursor() as c: c.execute("update master_table set int_f = 10 WHERE ID=1") #Create second connection for change detail table - with act_1.db.connect() as con_detail: + with act.db.connect() as con_detail: con_detail.begin(cust_tpb) with con_detail.cursor() as cd: cd.execute("INSERT INTO DETAIL_TABLE (ID, FKEY) VALUES (1,1)") diff --git a/tests/functional/fkey/primary/test_insert_pk_19.py b/tests/functional/fkey/primary/test_insert_pk_19.py index 14e0c6c9..9e2994a0 100644 --- a/tests/functional/fkey/primary/test_insert_pk_19.py +++ b/tests/functional/fkey/primary/test_insert_pk_19.py @@ -1,30 +1,25 @@ #coding:utf-8 -# -# id: functional.fkey.primary.insert_pk_19 -# title: Check correct work fix with foreign key -# decription: Check foreign key work. -# Master transaction: -# 1) modified non key field -# 2) create savepoint -# 3) delete record from master_table -# 4) rollback to savepoint -# Detail transaction insert in detail_table record. -# Expected: no errors -# tracker_id: -# min_versions: [] -# versions: 2.5 -# qmid: functional.fkey.primary.ins_19 + +""" +ID: fkey.primary.insert-19 +FBTEST: functional.fkey.primary.insert_pk_19 +TITLE: Check correct work fix with foreign key +DESCRIPTION: + Check foreign key work. + Master transaction: + 1) modified non key field + 2) create savepoint + 3) delete record from master_table + 4) rollback to savepoint + Detail transaction insert in detail_table record. 
+ Expected: no errors +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * from firebird.driver import tpb, Isolation -# version: 2.5 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE MASTER_TABLE ( +init_script = """CREATE TABLE MASTER_TABLE ( ID INTEGER PRIMARY KEY, INT_F INTEGER ); @@ -39,49 +34,13 @@ COMMIT; INSERT INTO MASTER_TABLE (ID, INT_F) VALUES (1, 10); commit;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -# test_script_1 -#--- -# TPB_master = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# TPB_detail = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# -# db_conn.begin(tpb=TPB_master) -# cm_1 = db_conn.cursor() -# cm_1.execute('UPDATE MASTER_TABLE SET INT_F=2') -# db_conn.savepoint('A') -# cm_1.execute('DELETE FROM MASTER_TABLE WHERE ID=1') -# db_conn.rollback(savepoint='A') -# -# #Create second connection - update detail table -# con_detail = kdb.connect( -# dsn=dsn.encode(), -# user=user_name.encode(), -# password=user_password.encode() -# ) -# -# try: -# con_detail.begin(tpb=TPB_detail) -# cd = con_detail.cursor() -# cd.execute("INSERT INTO DETAIL_TABLE (ID, FKEY) VALUES (1,1)") -# con_detail.commit() -# except Exception, e: -# print (e[0]) -#--- +act = python_act('db') -act_1 = python_act('db_1', substitutions=substitutions_1) - -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - with act_1.db.connect() as con: +@pytest.mark.version('>=3') +def test_1(act: Action): + with act.db.connect() as con: cust_tpb = tpb(isolation=Isolation.READ_COMMITTED_RECORD_VERSION, lock_timeout=0) con.begin(cust_tpb) with con.cursor() as c: @@ -90,7 +49,7 @@ def test_1(act_1: Action): c.execute('DELETE FROM MASTER_TABLE WHERE ID=1') con.rollback(savepoint='A') #Create second connection for change detail table - with act_1.db.connect() as con_detail: + with act.db.connect() as con_detail: con_detail.begin(cust_tpb) with con_detail.cursor() as cd: cd.execute("INSERT INTO DETAIL_TABLE (ID, FKEY) VALUES (1,1)") diff --git a/tests/functional/fkey/primary/test_select_pk_01.py b/tests/functional/fkey/primary/test_select_pk_01.py index 2a3724f5..d947fbc0 100644 --- a/tests/functional/fkey/primary/test_select_pk_01.py +++ b/tests/functional/fkey/primary/test_select_pk_01.py @@ -1,26 +1,21 @@ #coding:utf-8 -# -# id: functional.fkey.primary.select_pk_01 -# title: Check correct work fix with foreign key -# decription: Check foreign key work. -# Master transaction is perform select with lock and fetch record. -# Detail transaction inserts record in detail_table. -# Expected: no errors. -# tracker_id: -# min_versions: [] -# versions: 2.5 -# qmid: functional.fkey.primary.select_01 + +""" +ID: fkey.primary.select-01 +FBTEST: functional.fkey.primary.select_pk_01 +TITLE: Check correct work fix with foreign key +DESCRIPTION: + Check foreign key work. + Master transaction is perform select with lock and fetch record. + Detail transaction inserts record in detail_table. + Expected: no errors. 
+""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * from firebird.driver import tpb, Isolation -# version: 2.5 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE MASTER_TABLE ( +init_script = """CREATE TABLE MASTER_TABLE ( ID INTEGER PRIMARY KEY, INT_F INTEGER ); @@ -35,53 +30,20 @@ COMMIT; INSERT INTO MASTER_TABLE (ID, INT_F) VALUES (1, 10); commit;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -# test_script_1 -#--- -# TPB_master = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# TPB_detail = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# -# db_conn.begin(tpb=TPB_master) -# c = db_conn.cursor() -# c.execute("SELECT INT_F FROM MASTER_TABLE WHERE ID=1 WITH LOCK") -# c.fetchall() -# #Create second connection for change detail table -# con_detail = kdb.connect( -# dsn=dsn.encode(), -# user=user_name.encode(), -# password=user_password.encode() -# ) -# -# try: -# con_detail.begin(tpb=TPB_detail) -# c = con_detail.cursor() -# c.execute("INSERT INTO DETAIL_TABLE (ID, FKEY) VALUES (1,1)") -# con_detail.commit() -# except Exception, e: -# print (e[0]) -#--- +act = python_act('db') -act_1 = python_act('db_1', substitutions=substitutions_1) - -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - with act_1.db.connect() as con: +@pytest.mark.version('>=3') +def test_1(act: Action): + with act.db.connect() as con: cust_tpb = tpb(isolation=Isolation.READ_COMMITTED_RECORD_VERSION, lock_timeout=0) con.begin(cust_tpb) with con.cursor() as c: c.execute("SELECT INT_F FROM MASTER_TABLE WHERE ID=1 WITH LOCK").fetchall() con.commit() #Create second connection for change detail table - with act_1.db.connect() as con_detail: + with act.db.connect() as con_detail: con_detail.begin(cust_tpb) with con_detail.cursor() as cd: cd.execute("INSERT INTO DETAIL_TABLE (ID, FKEY) VALUES (1,1)") diff --git a/tests/functional/fkey/primary/test_select_pk_02.py b/tests/functional/fkey/primary/test_select_pk_02.py index 2abd865f..34e87038 100644 --- a/tests/functional/fkey/primary/test_select_pk_02.py +++ b/tests/functional/fkey/primary/test_select_pk_02.py @@ -1,26 +1,21 @@ #coding:utf-8 -# -# id: functional.fkey.primary.select_pk_02 -# title: Check correct work fix with foreign key -# decription: Check foreign key work. -# Master transaction is perform select with lock but not perform fetch. -# Detail transaction inserts record in detail_table and commit; -# Master transaction fetched record and trying update it; -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.fkey.primary.select_02 + +""" +ID: fkey.primary.select-02 +FBTEST: functional.fkey.primary.select_pk_02 +TITLE: Check correct work fix with foreign key +DESCRIPTION: + Check foreign key work. + Master transaction is perform select with lock but not perform fetch. 
+ Detail transaction inserts record in detail_table and commit; + Master transaction fetched record and trying update it; +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * from firebird.driver import DatabaseError, tpb, Isolation -# version: 2.1 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE MASTER_TABLE ( +init_script = """CREATE TABLE MASTER_TABLE ( ID INTEGER PRIMARY KEY, INT_F INTEGER ); @@ -35,59 +30,19 @@ COMMIT; INSERT INTO MASTER_TABLE (ID, INT_F) VALUES (1, 10); commit;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -# test_script_1 -#--- -# TPB_master = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# TPB_detail = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# -# db_conn.begin(tpb=TPB_master) -# c = db_conn.cursor() -# c.execute("SELECT INT_F FROM MASTER_TABLE WHERE ID=1 WITH LOCK") -# -# #Create second connection for change detail table -# con_detail = kdb.connect( -# dsn=dsn.encode(), -# user=user_name.encode(), -# password=user_password.encode() -# ) -# -# try: -# con_detail.begin(tpb=TPB_detail) -# cd = con_detail.cursor() -# cd.execute("INSERT INTO DETAIL_TABLE (ID, FKEY) VALUES (1,1)") -# con_detail.commit() -# except Exception, e: -# print (e[0]) -# -# try: -# c.fetchall() -# c.execute("UPDATE MASTER_TABLE SET ID=2") -# db_conn.commit() -# except Exception, e: -# print (e[0]) -#--- +act = python_act('db') -act_1 = python_act('db_1', substitutions=substitutions_1) - -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - with act_1.db.connect() as con: +@pytest.mark.version('>=3') +def test_1(act: Action): + with act.db.connect() as con: cust_tpb = tpb(isolation=Isolation.READ_COMMITTED_RECORD_VERSION, lock_timeout=0) con.begin(cust_tpb) with con.cursor() as c: c.execute("SELECT INT_F FROM MASTER_TABLE WHERE ID=1 WITH LOCK") #Create second connection for change detail table - with act_1.db.connect() as con_detail: + with act.db.connect() as con_detail: con_detail.begin(cust_tpb) with con_detail.cursor() as cd: cd.execute("INSERT INTO DETAIL_TABLE (ID, FKEY) VALUES (1,1)") diff --git a/tests/functional/fkey/primary/test_upd_pk_01.py b/tests/functional/fkey/primary/test_upd_pk_01.py index 667b5045..67ec036f 100644 --- a/tests/functional/fkey/primary/test_upd_pk_01.py +++ b/tests/functional/fkey/primary/test_upd_pk_01.py @@ -1,26 +1,21 @@ #coding:utf-8 -# -# id: functional.fkey.primary.upd_pk_01 -# title: Check correct work fix with foreign key -# decription: Check foreign key work. -# Master table has one key field. Master transaction doesn't modify key field. -# Detail transaction updates record in detail_table record. -# Expected: no errors -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.fkey.primary.upd_01 + +""" +ID: fkey.primary.update-01 +FBTEST: functional.fkey.primary.upd_pk_01 +TITLE: Check correct work fix with foreign key +DESCRIPTION: + Check foreign key work. + Master table has one key field. Master transaction doesn't modify key field. + Detail transaction updates record in detail_table record. 
+ Expected: no errors +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * from firebird.driver import tpb, Isolation -# version: 2.1 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE MASTER_TABLE ( +init_script = """CREATE TABLE MASTER_TABLE ( ID INTEGER PRIMARY KEY, INT_F INTEGER ); @@ -36,52 +31,19 @@ INSERT INTO MASTER_TABLE (ID, INT_F) VALUES (1, 10); INSERT INTO DETAIL_TABLE (ID, FKEY) VALUES (1, 1); COMMIT;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -# test_script_1 -#--- -# TPB_master = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# TPB_detail = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# -# db_conn.begin(tpb=TPB_master) -# c = db_conn.cursor() -# c.execute("update master_table set int_f = 10") -# -# #Create second connection for change detail table -# con_detail = kdb.connect( -# dsn=dsn.encode(), -# user=user_name.encode(), -# password=user_password.encode() -# ) -# -# try: -# con_detail.begin(tpb=TPB_detail) -# c = con_detail.cursor() -# c.execute("UPDATE DETAIL_TABLE SET ID=2 WHERE ID=1") -# con_detail.commit() -# except Exception, e: -# print (e[0]) -#--- +act = python_act('db') -act_1 = python_act('db_1', substitutions=substitutions_1) - -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - with act_1.db.connect() as con: +@pytest.mark.version('>=3') +def test_1(act: Action): + with act.db.connect() as con: cust_tpb = tpb(isolation=Isolation.READ_COMMITTED_RECORD_VERSION, lock_timeout=0) con.begin(cust_tpb) with con.cursor() as c: c.execute("update master_table set int_f = 10") #Create second connection for change detail table - with act_1.db.connect() as con_detail: + with act.db.connect() as con_detail: con_detail.begin(cust_tpb) with con_detail.cursor() as cd: cd.execute("UPDATE DETAIL_TABLE SET ID=2 WHERE ID=1") diff --git a/tests/functional/fkey/primary/test_upd_pk_02.py b/tests/functional/fkey/primary/test_upd_pk_02.py index 1a95292e..e0297475 100644 --- a/tests/functional/fkey/primary/test_upd_pk_02.py +++ b/tests/functional/fkey/primary/test_upd_pk_02.py @@ -1,26 +1,21 @@ #coding:utf-8 -# -# id: functional.fkey.primary.upd_pk_02 -# title: Check correct work fix with foreign key -# decription: Check foreign key work. -# Master transaction modifies primary key and commited -# Detail transaction updates record in detail_table. -# Expected: no errors -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.fkey.primary.upd_02 + +""" +ID: fkey.primary.update-02 +FBTEST: functional.fkey.primary.upd_pk_02 +TITLE: Check correct work fix with foreign key +DESCRIPTION: + Check foreign key work. + Master transaction modifies primary key and commited + Detail transaction updates record in detail_table. 
+ Expected: no errors +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * from firebird.driver import tpb, Isolation -# version: 2.1 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE MASTER_TABLE ( +init_script = """CREATE TABLE MASTER_TABLE ( ID INTEGER PRIMARY KEY, INT_F INTEGER ); @@ -35,50 +30,13 @@ COMMIT; INSERT INTO MASTER_TABLE (ID, INT_F) VALUES (1, 10); commit;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -# test_script_1 -#--- -# TPB_master = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# TPB_detail = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# -# db_conn.begin(tpb=TPB_master) -# c = db_conn.cursor() -# c.execute("UPDATE MASTER_TABLE SET ID=2 WHERE ID=1") -# db_conn.commit() -# db_conn.begin(tpb=TPB_master) -# c = db_conn.cursor() -# c.execute("UPDATE MASTER_TABLE SET INT_F=10") -# -# #Create second connection for change detail table -# con_detail = kdb.connect( -# dsn=dsn.encode(), -# user=user_name.encode(), -# password=user_password.encode() -# ) -# -# try: -# con_detail.begin(tpb=TPB_detail) -# c = con_detail.cursor() -# c.execute("UPDATE DETAIL_TABLE SET FKEY = 2") -# con_detail.commit() -# except Exception, e: -# print (e[0]) -#--- +act = python_act('db') -act_1 = python_act('db_1', substitutions=substitutions_1) - -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - with act_1.db.connect() as con: +@pytest.mark.version('>=3') +def test_1(act: Action): + with act.db.connect() as con: cust_tpb = tpb(isolation=Isolation.READ_COMMITTED_RECORD_VERSION, lock_timeout=0) con.begin(cust_tpb) with con.cursor() as c: @@ -87,7 +45,7 @@ def test_1(act_1: Action): con.begin(cust_tpb) c.execute("UPDATE MASTER_TABLE SET INT_F=10") #Create second connection for change detail table - with act_1.db.connect() as con_detail: + with act.db.connect() as con_detail: con_detail.begin(cust_tpb) with con_detail.cursor() as cd: cd.execute("UPDATE DETAIL_TABLE SET FKEY = 2") diff --git a/tests/functional/fkey/unique/test_insert_01.py b/tests/functional/fkey/unique/test_insert_01.py index 84f824d2..d75c02f0 100644 --- a/tests/functional/fkey/unique/test_insert_01.py +++ b/tests/functional/fkey/unique/test_insert_01.py @@ -1,26 +1,21 @@ #coding:utf-8 -# -# id: functional.fkey.unique.insert_01 -# title: Check correct work fix with foreign key -# decription: Check foreign key work. -# Master table has one primary key field and one unique field. Master transaction doesn't modify primary key or unique field. -# Detail transaction inserts record in detail_table. -# Expected: no errors -# tracker_id: -# min_versions: [] -# versions: 2.5 -# qmid: functional.fkey.unique.ins_01 + +""" +ID: fkey.unique.insert-01 +FBTEST: functional.fkey.unique.insert_01 +TITLE: Check correct work fix with foreign key +DESCRIPTION: + Check foreign key work. + Master table has one primary key field and one unique field. Master transaction doesn't modify primary key or unique field. + Detail transaction inserts record in detail_table. 
+ Expected: no errors +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * from firebird.driver import tpb, Isolation -# version: 2.5 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE MASTER_TABLE ( +init_script = """CREATE TABLE MASTER_TABLE ( ID INTEGER PRIMARY KEY, UF INTEGER UNIQUE, INT_F INTEGER @@ -36,52 +31,19 @@ COMMIT; INSERT INTO MASTER_TABLE (ID, UF, INT_F) VALUES (1, 1, 10); commit;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -# test_script_1 -#--- -# TPB_master = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# TPB_detail = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# -# db_conn.begin(tpb=TPB_master) -# c = db_conn.cursor() -# c.execute("update master_table set int_f = 10 WHERE ID=1") -# -# #Create second connection for change detail table -# con_detail = kdb.connect( -# dsn=dsn.encode(), -# user=user_name.encode(), -# password=user_password.encode() -# ) -# -# try: -# con_detail.begin(tpb=TPB_detail) -# c = con_detail.cursor() -# c.execute("INSERT INTO DETAIL_TABLE (ID, FKEY) VALUES (1,1)") -# con_detail.commit() -# except Exception, e: -# print (e[0]) -#--- +act = python_act('db') -act_1 = python_act('db_1', substitutions=substitutions_1) - -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - with act_1.db.connect() as con: +@pytest.mark.version('>=3') +def test_1(act: Action): + with act.db.connect() as con: cust_tpb = tpb(isolation=Isolation.READ_COMMITTED_RECORD_VERSION, lock_timeout=0) con.begin(cust_tpb) with con.cursor() as c: c.execute("update master_table set int_f = 10 WHERE ID=1") #Create second connection for change detail table - with act_1.db.connect() as con_detail: + with act.db.connect() as con_detail: con_detail.begin(cust_tpb) with con_detail.cursor() as cd: cd.execute("INSERT INTO DETAIL_TABLE (ID, FKEY) VALUES (1,1)") diff --git a/tests/functional/fkey/unique/test_insert_02.py b/tests/functional/fkey/unique/test_insert_02.py index 38a020d9..20d05709 100644 --- a/tests/functional/fkey/unique/test_insert_02.py +++ b/tests/functional/fkey/unique/test_insert_02.py @@ -1,26 +1,21 @@ #coding:utf-8 -# -# id: functional.fkey.unique.insert_02 -# title: Check correct work fix with foreign key -# decription: Check foreign key work. -# Master table has one primary key field and one unique field. Master transaction modifies unique field -# Detail transaction inserts record in detail_table. -# Expected: error - unique field in master table has been changed -# tracker_id: -# min_versions: [] -# versions: 2.5.3 -# qmid: functional.fkey.unique.ins_02 + +""" +ID: fkey.unique.insert-02 +FBTEST: functional.fkey.unique.insert_02 +TITLE: Check correct work fix with foreign key +DESCRIPTION: + Check foreign key work. + Master table has one primary key field and one unique field. Master transaction modifies unique field + Detail transaction inserts record in detail_table. 
+ Expected: error - unique field in master table has been changed +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * from firebird.driver import DatabaseError, tpb, Isolation -# version: 2.5.3 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE MASTER_TABLE ( +init_script = """CREATE TABLE MASTER_TABLE ( ID INTEGER PRIMARY KEY, UF INTEGER UNIQUE, INT_F INTEGER @@ -36,52 +31,19 @@ COMMIT; INSERT INTO MASTER_TABLE (ID, UF, INT_F) VALUES (1, 1, 10); commit;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -# test_script_1 -#--- -# TPB_master = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# TPB_detail = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# -# db_conn.begin(tpb=TPB_master) -# c = db_conn.cursor() -# c.execute("UPDATE MASTER_TABLE SET UF=2 WHERE ID=1") -# -# #Create second connection - update detail table -# con_detail = kdb.connect( -# dsn=dsn.encode(), -# user=user_name.encode(), -# password=user_password.encode() -# ) -# -# try: -# con_detail.begin(tpb=TPB_detail) -# c = con_detail.cursor() -# c.execute("INSERT INTO DETAIL_TABLE (ID, FKEY) VALUES (1,2)") -# con_detail.commit() -# except Exception, e: -# print (e[0]) -#--- +act = python_act('db') -act_1 = python_act('db_1', substitutions=substitutions_1) - -@pytest.mark.version('>=2.5.3') -def test_1(act_1: Action): - with act_1.db.connect() as con: +@pytest.mark.version('>=3') +def test_1(act: Action): + with act.db.connect() as con: cust_tpb = tpb(isolation=Isolation.READ_COMMITTED_RECORD_VERSION, lock_timeout=0) con.begin(cust_tpb) with con.cursor() as c: c.execute("UPDATE MASTER_TABLE SET UF=2 WHERE ID=1") #Create second connection for change detail table - with act_1.db.connect() as con_detail: + with act.db.connect() as con_detail: con_detail.begin(cust_tpb) with con_detail.cursor() as cd: with pytest.raises(DatabaseError, diff --git a/tests/functional/fkey/unique/test_insert_03.py b/tests/functional/fkey/unique/test_insert_03.py index 1f4d3cec..fcbba28c 100644 --- a/tests/functional/fkey/unique/test_insert_03.py +++ b/tests/functional/fkey/unique/test_insert_03.py @@ -1,31 +1,26 @@ #coding:utf-8 -# -# id: functional.fkey.unique.insert_03 -# title: Check correct work fix with foreign key -# decription: Check foreign key work. -# Master table has one primary key field and one unique field. -# Master transaction: -# 1) modified non key field -# 2) create savepoint -# 3) modified unique field -# 4) rollback to savepoint -# Detail transaction inserts record in detail_table. -# Expected: Error - unique field was changed -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.fkey.unique.ins_03 + +""" +ID: fkey.unique.insert-03 +FBTEST: functional.fkey.unique.insert_03 +TITLE: Check correct work fix with foreign key +DESCRIPTION: + Check foreign key work. + Master table has one primary key field and one unique field. + Master transaction: + 1) modified non key field + 2) create savepoint + 3) modified unique field + 4) rollback to savepoint + Detail transaction inserts record in detail_table. 
+ Expected: Error - unique field was changed +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * from firebird.driver import DatabaseError, tpb, Isolation -# version: 2.1 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE MASTER_TABLE ( +init_script = """CREATE TABLE MASTER_TABLE ( ID INTEGER PRIMARY KEY, UF INTEGER UNIQUE, INT_F INTEGER @@ -41,50 +36,13 @@ COMMIT; INSERT INTO MASTER_TABLE (ID, UF, INT_F) VALUES (1, 1, 10); commit;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -# test_script_1 -#--- -# TPB_master = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# TPB_detail = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# -# -# db_conn.begin(tpb=TPB_master) -# cm_1 = db_conn.cursor() -# cm_1.execute('UPDATE MASTER_TABLE SET INT_F=2') -# db_conn.savepoint('A') -# cm_1.execute('UPDATE MASTER_TABLE SET UF=2 WHERE ID=1') -# db_conn.rollback(savepoint='A') -# -# #Create second connection - update detail table -# con_detail = kdb.connect( -# dsn=dsn.encode(), -# user=user_name.encode(), -# password=user_password.encode() -# ) -# -# try: -# con_detail.begin(tpb=TPB_detail) -# cd = con_detail.cursor() -# cd.execute("INSERT INTO DETAIL_TABLE (ID, FKEY) VALUES (1,2)") -# con_detail.commit() -# except Exception, e: -# print (e[0]) -#--- +act = python_act('db') -act_1 = python_act('db_1', substitutions=substitutions_1) - -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - with act_1.db.connect() as con: +@pytest.mark.version('>=3') +def test_1(act: Action): + with act.db.connect() as con: cust_tpb = tpb(isolation=Isolation.READ_COMMITTED_RECORD_VERSION, lock_timeout=0) con.begin(cust_tpb) with con.cursor() as c: @@ -93,7 +51,7 @@ def test_1(act_1: Action): c.execute('UPDATE MASTER_TABLE SET UF=2 WHERE ID=1') con.rollback(savepoint='A') #Create second connection for change detail table - with act_1.db.connect() as con_detail: + with act.db.connect() as con_detail: con_detail.begin(cust_tpb) with con_detail.cursor() as cd: with pytest.raises(DatabaseError, diff --git a/tests/functional/fkey/unique/test_insert_04.py b/tests/functional/fkey/unique/test_insert_04.py index b8cbc55b..70ab03c3 100644 --- a/tests/functional/fkey/unique/test_insert_04.py +++ b/tests/functional/fkey/unique/test_insert_04.py @@ -1,31 +1,26 @@ #coding:utf-8 -# -# id: functional.fkey.unique.insert_04 -# title: Check correct work fix with foreign key -# decription: Check foreign key work. -# Master table has one primary key field and one unique field. -# Master transaction: -# 1) modified unique field -# 2) create savepoint -# 3) modified non key and non unique field -# 4) rollback to savepoint -# Detail transaction inserts record in detail_table record -# Expected: error - unique field in master_table has been changed -# tracker_id: -# min_versions: [] -# versions: 2.5.3 -# qmid: functional.fkey.unique.ins_04 + +""" +ID: fkey.unique.insert-04 +FBTEST: functional.fkey.unique.insert_04 +TITLE: Check correct work fix with foreign key +DESCRIPTION: + Check foreign key work. + Master table has one primary key field and one unique field. 
+ Master transaction: + 1) modified unique field + 2) create savepoint + 3) modified non key and non unique field + 4) rollback to savepoint + Detail transaction inserts record in detail_table record + Expected: error - unique field in master_table has been changed +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * from firebird.driver import DatabaseError, tpb, Isolation -# version: 2.5.3 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE MASTER_TABLE ( +init_script = """CREATE TABLE MASTER_TABLE ( ID INTEGER PRIMARY KEY, UF INTEGER UNIQUE, INT_F INTEGER @@ -41,50 +36,13 @@ COMMIT; INSERT INTO MASTER_TABLE (ID, UF, INT_F) VALUES (1, 1, 10); commit;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -# test_script_1 -#--- -# TPB_master = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# TPB_detail = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# -# -# db_conn.begin(tpb=TPB_master) -# cm_1 = db_conn.cursor() -# cm_1.execute('UPDATE MASTER_TABLE SET UF=2 WHERE ID=1') -# db_conn.savepoint('A') -# cm_1.execute('UPDATE MASTER_TABLE SET INT_F=2 ') -# db_conn.rollback(savepoint='A') -# -# #Create second connection for change detail table -# con_detail = kdb.connect( -# dsn=dsn.encode(), -# user=user_name.encode(), -# password=user_password.encode() -# ) -# -# try: -# con_detail.begin(tpb=TPB_detail) -# cd = con_detail.cursor() -# cd.execute("INSERT INTO DETAIL_TABLE (ID, FKEY) VALUES (1,2)") -# con_detail.commit() -# except Exception, e: -# print (e[0]) -#--- +act = python_act('db') -act_1 = python_act('db_1', substitutions=substitutions_1) - -@pytest.mark.version('>=2.5.3') -def test_1(act_1: Action): - with act_1.db.connect() as con: +@pytest.mark.version('>=3') +def test_1(act: Action): + with act.db.connect() as con: cust_tpb = tpb(isolation=Isolation.READ_COMMITTED_RECORD_VERSION, lock_timeout=0) con.begin(cust_tpb) with con.cursor() as c: @@ -93,7 +51,7 @@ def test_1(act_1: Action): c.execute('UPDATE MASTER_TABLE SET INT_F=2 ') con.rollback(savepoint='A') #Create second connection for change detail table - with act_1.db.connect() as con_detail: + with act.db.connect() as con_detail: con_detail.begin(cust_tpb) with con_detail.cursor() as cd: with pytest.raises(DatabaseError, diff --git a/tests/functional/fkey/unique/test_insert_05.py b/tests/functional/fkey/unique/test_insert_05.py index 1bfc36f1..2134d774 100644 --- a/tests/functional/fkey/unique/test_insert_05.py +++ b/tests/functional/fkey/unique/test_insert_05.py @@ -1,26 +1,21 @@ #coding:utf-8 -# -# id: functional.fkey.unique.insert_05 -# title: Check correct work fix with foreign key -# decription: Check foreign key work. -# Master table has one primary key field and one unique field. Master transaction modifies unique field and commit. -# Detail transaction inserts record in detail_table. -# Expected: no errors -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.fkey.unique.ins_05 + +""" +ID: fkey.unique.insert-05 +FBTEST: functional.fkey.unique.insert_05 +TITLE: Check correct work fix with foreign key +DESCRIPTION: + Check foreign key work. + Master table has one primary key field and one unique field. Master transaction modifies unique field and commit. + Detail transaction inserts record in detail_table. 
+ Expected: no errors +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * from firebird.driver import tpb, Isolation -# version: 2.1 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE MASTER_TABLE ( +init_script = """CREATE TABLE MASTER_TABLE ( ID INTEGER PRIMARY KEY, UF INTEGER UNIQUE, INT_F INTEGER @@ -36,54 +31,20 @@ COMMIT; INSERT INTO MASTER_TABLE (ID, UF, INT_F) VALUES (1, 1, 10); commit;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -# test_script_1 -#--- -# TPB_master = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# TPB_detail = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# -# db_conn.begin(tpb=TPB_master) -# c = db_conn.cursor() -# c.execute("UPDATE MASTER_TABLE SET UF=2 WHERE ID=1") -# db_conn.commit() -# -# #Create second connection for change detail table -# con_detail = kdb.connect( -# dsn=dsn.encode(), -# user=user_name.encode(), -# password=user_password.encode() -# ) -# -# try: -# con_detail.begin(tpb=TPB_detail) -# c = con_detail.cursor() -# c.execute("INSERT INTO DETAIL_TABLE (ID, FKEY) VALUES (1,2)") -# con_detail.commit() -# except Exception, e: -# print (e[0]) -#--- +act = python_act('db') -act_1 = python_act('db_1', substitutions=substitutions_1) - -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - with act_1.db.connect() as con: +@pytest.mark.version('>=3') +def test_1(act: Action): + with act.db.connect() as con: cust_tpb = tpb(isolation=Isolation.READ_COMMITTED_RECORD_VERSION, lock_timeout=0) con.begin(cust_tpb) with con.cursor() as c: c.execute("UPDATE MASTER_TABLE SET UF=2 WHERE ID=1") con.commit() #Create second connection for change detail table - with act_1.db.connect() as con_detail: + with act.db.connect() as con_detail: con_detail.begin(cust_tpb) with con_detail.cursor() as cd: cd.execute("INSERT INTO DETAIL_TABLE (ID, FKEY) VALUES (1,2)") diff --git a/tests/functional/fkey/unique/test_insert_06.py b/tests/functional/fkey/unique/test_insert_06.py index fc94d348..6c95ff62 100644 --- a/tests/functional/fkey/unique/test_insert_06.py +++ b/tests/functional/fkey/unique/test_insert_06.py @@ -1,26 +1,21 @@ #coding:utf-8 -# -# id: functional.fkey.unique.insert_06 -# title: Check correct work fix with foreign key -# decription: Check foreign key work. -# Master table has one primary key field and one unique field. Master transaction doesn't modifiy unique field and commit -# Detail transaction inserts record in detail_table. -# Expected: no errors -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.fkey.unique.ins_06 + +""" +ID: fkey.unique.insert-06 +FBTEST: functional.fkey.unique.insert_06 +TITLE: Check correct work fix with foreign key +DESCRIPTION: + Check foreign key work. + Master table has one primary key field and one unique field. Master transaction doesn't modifiy unique field and commit + Detail transaction inserts record in detail_table. 
+ Expected: no errors +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * from firebird.driver import tpb, Isolation -# version: 2.1 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE MASTER_TABLE ( +init_script = """CREATE TABLE MASTER_TABLE ( ID INTEGER PRIMARY KEY, UF INTEGER UNIQUE, INT_F INTEGER @@ -36,50 +31,13 @@ COMMIT; INSERT INTO MASTER_TABLE (ID, UF, INT_F) VALUES (1, 1, 10); commit;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -# test_script_1 -#--- -# TPB_master = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# TPB_detail = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# -# db_conn.begin(tpb=TPB_master) -# c = db_conn.cursor() -# c.execute("UPDATE MASTER_TABLE SET UF=null WHERE ID=1") -# db_conn.commit() -# db_conn.begin(tpb=TPB_master) -# c = db_conn.cursor() -# c.execute("UPDATE MASTER_TABLE SET INT_F=10") -# -# #Create second connection for change detail table -# con_detail = kdb.connect( -# dsn=dsn.encode(), -# user=user_name.encode(), -# password=user_password.encode() -# ) -# -# try: -# con_detail.begin(tpb=TPB_detail) -# c = con_detail.cursor() -# c.execute("INSERT INTO DETAIL_TABLE (ID, FKEY) VALUES (1,null)") -# con_detail.commit() -# except Exception, e: -# print (e[0]) -#--- +act = python_act('db') -act_1 = python_act('db_1', substitutions=substitutions_1) - -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - with act_1.db.connect() as con: +@pytest.mark.version('>=3') +def test_1(act: Action): + with act.db.connect() as con: cust_tpb = tpb(isolation=Isolation.READ_COMMITTED_RECORD_VERSION, lock_timeout=0) con.begin(cust_tpb) with con.cursor() as c: @@ -88,7 +46,7 @@ def test_1(act_1: Action): con.begin(cust_tpb) c.execute("UPDATE MASTER_TABLE SET INT_F=10") #Create second connection for change detail table - with act_1.db.connect() as con_detail: + with act.db.connect() as con_detail: con_detail.begin(cust_tpb) with con_detail.cursor() as cd: cd.execute("INSERT INTO DETAIL_TABLE (ID, FKEY) VALUES (1,null)") diff --git a/tests/functional/fkey/unique/test_insert_07.py b/tests/functional/fkey/unique/test_insert_07.py index ad3a06e9..443911e8 100644 --- a/tests/functional/fkey/unique/test_insert_07.py +++ b/tests/functional/fkey/unique/test_insert_07.py @@ -1,26 +1,21 @@ #coding:utf-8 -# -# id: functional.fkey.unique.insert_07 -# title: Check correct work fix with foreign key -# decription: Check foreign key work. -# Master table has primary key consisting of several fields. -# Master transaction deletes record from master_table without commit. -# Detail transaction inserts record in detail_table. -# Expected: error - unique field in master_table has been changed. -# tracker_id: -# min_versions: [] -# versions: 3.0 -# qmid: functional.fkey.unique.ins_07 + +""" +ID: fkey.unique.insert-07 +FBTEST: functional.fkey.unique.insert_07 +TITLE: Check correct work fix with foreign key +DESCRIPTION: + Check foreign key work. + Master table has primary key consisting of several fields. + Master transaction deletes record from master_table without commit. + Detail transaction inserts record in detail_table. + Expected: error - unique field in master_table has been changed. 
+""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None - -substitutions_1 = [('-At block line: [\\d]+, col: [\\d]+', '-At block line')] - -init_script_1 = """ +init_script = """ recreate table t_detl(id int); commit; recreate table t_main( @@ -33,14 +28,14 @@ init_script_1 = """ master_uniq_ref int constraint t_detl_fk_mur references t_main(uniq_ref) using index t_detl_fk_mur ); commit; - + insert into t_main(id, uniq_ref) values(1, 1); commit; """ -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """ +test_script = """ commit; set transaction read committed record_version no wait; set term ^; @@ -54,9 +49,10 @@ test_script_1 = """ set term ;^ """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('-At block line: [\\d]+, col: [\\d]+', + '-At block line')]) -expected_stderr_1 = """ +expected_stderr = """ Statement failed, SQLSTATE = 23000 violation of FOREIGN KEY constraint "T_DETL_FK_MUR" on table "T_DETL" -Foreign key reference target does not exist @@ -65,8 +61,7 @@ expected_stderr_1 = """ """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - +def test_1(act: Action): + act.expected_stderr = expected_stderr + act.execute() + assert act.clean_stderr == act.clean_expected_stderr diff --git a/tests/functional/fkey/unique/test_insert_08.py b/tests/functional/fkey/unique/test_insert_08.py index b7d80fad..ab2276b2 100644 --- a/tests/functional/fkey/unique/test_insert_08.py +++ b/tests/functional/fkey/unique/test_insert_08.py @@ -1,27 +1,22 @@ #coding:utf-8 -# -# id: functional.fkey.unique.insert_08 -# title: Check correct work fix with foreign key -# decription: Check foreign key work. -# Master table has primary key and unique fields. -# Master transaction deletes record from master_table and commit. -# Detail transaction inserts record in detail_table. -# Expected: referential integrity error. -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.fkey.unique.ins_08 + +""" +ID: fkey.unique.insert-08 +FBTEST: functional.fkey.unique.insert_08 +TITLE: Check correct work fix with foreign key +DESCRIPTION: + Check foreign key work. + Master table has primary key and unique fields. + Master transaction deletes record from master_table and commit. + Detail transaction inserts record in detail_table. + Expected: referential integrity error. 
+""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * from firebird.driver import DatabaseError, tpb, Isolation -# version: 2.1 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE MASTER_TABLE ( +init_script = """CREATE TABLE MASTER_TABLE ( ID INTEGER PRIMARY KEY, UF INTEGER UNIQUE, INT_F INTEGER @@ -37,53 +32,20 @@ COMMIT; INSERT INTO MASTER_TABLE (ID, UF, INT_F) VALUES (1, 1, 10); commit;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -# test_script_1 -#--- -# TPB_master = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# TPB_detail = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# db_conn.begin(tpb=TPB_master) -# cm_1 = db_conn.cursor() -# cm_1.execute('DELETE FROM MASTER_TABLE WHERE ID=1') -# db_conn.commit() -# -# #Create second connection for change detail table -# con_detail = kdb.connect( -# dsn=dsn.encode(), -# user=user_name.encode(), -# password=user_password.encode() -# ) -# -# try: -# con_detail.begin(tpb=TPB_detail) -# cd = con_detail.cursor() -# cd.execute("INSERT INTO DETAIL_TABLE (ID, FKEY) VALUES (1,1)") -# con_detail.commit() -# except Exception, e: -# print (e[0]) -#--- +act = python_act('db') -act_1 = python_act('db_1', substitutions=substitutions_1) - -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - with act_1.db.connect() as con: +@pytest.mark.version('>=3') +def test_1(act: Action): + with act.db.connect() as con: cust_tpb = tpb(isolation=Isolation.READ_COMMITTED_RECORD_VERSION, lock_timeout=0) con.begin(cust_tpb) with con.cursor() as c: c.execute('DELETE FROM MASTER_TABLE WHERE ID=1') con.commit() #Create second connection for change detail table - with act_1.db.connect() as con_detail: + with act.db.connect() as con_detail: con_detail.begin(cust_tpb) with con_detail.cursor() as cd: with pytest.raises(DatabaseError, diff --git a/tests/functional/fkey/unique/test_insert_09.py b/tests/functional/fkey/unique/test_insert_09.py index c5b097fd..a1bba8dd 100644 --- a/tests/functional/fkey/unique/test_insert_09.py +++ b/tests/functional/fkey/unique/test_insert_09.py @@ -1,26 +1,21 @@ #coding:utf-8 -# -# id: functional.fkey.unique.insert_09 -# title: Check correct work fix with foreign key -# decription: Check foreign key work. -# Master table has unique field. -# Master transaction inserts record into master_table without commit. -# Detail transaction inserts record in detail_table. -# Expected: referential integrity error. -# tracker_id: -# min_versions: [] -# versions: 3.0 -# qmid: functional.fkey.unique.ins_09 + +""" +ID: fkey.unique.insert-09 +FBTEST: functional.fkey.unique.insert_09 +TITLE: Check correct work fix with foreign key +DESCRIPTION: + Check foreign key work. + Master table has unique field. + Master transaction inserts record into master_table without commit. + Detail transaction inserts record in detail_table. + Expected: referential integrity error. 
+""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None - -substitutions_1 = [('-At block line: [\\d]+, col: [\\d]+', '-At block line')] - -init_script_1 = """ +init_script = """ recreate table t_detl(id int); commit; recreate table t_main( @@ -35,9 +30,9 @@ init_script_1 = """ commit; """ -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """ +test_script = """ commit; set transaction read committed record_version no wait; set term ^; @@ -51,9 +46,10 @@ test_script_1 = """ set term ;^ """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('-At block line: [\\d]+, col: [\\d]+', + '-At block line')]) -expected_stderr_1 = """ +expected_stderr = """ Statement failed, SQLSTATE = 23000 violation of FOREIGN KEY constraint "T_DETL_FK_MUR" on table "T_DETL" -Foreign key reference target does not exist @@ -62,8 +58,7 @@ expected_stderr_1 = """ """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - +def test_1(act: Action): + act.expected_stderr = expected_stderr + act.execute() + assert act.clean_stderr == act.clean_expected_stderr diff --git a/tests/functional/fkey/unique/test_insert_11.py b/tests/functional/fkey/unique/test_insert_11.py index 63e08d89..3ec027ee 100644 --- a/tests/functional/fkey/unique/test_insert_11.py +++ b/tests/functional/fkey/unique/test_insert_11.py @@ -1,25 +1,20 @@ #coding:utf-8 -# -# id: functional.fkey.unique.insert_11 -# title: Check correct work fix with foreign key -# decription: Check foreign key work. -# Update unique field with that not assigned foreign key -# Expected: no errors -# tracker_id: -# min_versions: [] -# versions: 2.5 -# qmid: functional.fkey.unique.ins_11 + +""" +ID: fkey.unique.insert-10 +FBTEST: functional.fkey.unique.insert_11 +TITLE: Check correct work fix with foreign key +DESCRIPTION: + Check foreign key work. 
+ Update unique field with that not assigned foreign key + Expected: no errors +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * from firebird.driver import tpb, Isolation -# version: 2.5 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE MASTER_TABLE ( +init_script = """CREATE TABLE MASTER_TABLE ( ID INTEGER PRIMARY KEY, UF INTEGER UNIQUE, INT_F INTEGER @@ -35,52 +30,19 @@ COMMIT; INSERT INTO MASTER_TABLE (ID, UF, INT_F) VALUES (1, 1, 10); commit;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -# test_script_1 -#--- -# TPB_master = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# TPB_detail = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# -# db_conn.begin(tpb=TPB_master) -# c = db_conn.cursor() -# c.execute("update master_table set UF=10 WHERE ID=1") -# -# #Create second connection for change detail table -# con_detail = kdb.connect( -# dsn=dsn.encode(), -# user=user_name.encode(), -# password=user_password.encode() -# ) -# -# try: -# con_detail.begin(tpb=TPB_detail) -# c = con_detail.cursor() -# c.execute("INSERT INTO DETAIL_TABLE (ID, FKEY) VALUES (1,1)") -# con_detail.commit() -# except Exception, e: -# print (e[0]) -#--- +act = python_act('db') -act_1 = python_act('db_1', substitutions=substitutions_1) - -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - with act_1.db.connect() as con: +@pytest.mark.version('>=3') +def test_1(act: Action): + with act.db.connect() as con: cust_tpb = tpb(isolation=Isolation.READ_COMMITTED_RECORD_VERSION, lock_timeout=0) con.begin(cust_tpb) with con.cursor() as c: c.execute("update master_table set UF=10 WHERE ID=1") #Create second connection for change detail table - with act_1.db.connect() as con_detail: + with act.db.connect() as con_detail: con_detail.begin(cust_tpb) with con_detail.cursor() as cd: cd.execute("INSERT INTO DETAIL_TABLE (ID, FKEY) VALUES (1,1)") diff --git a/tests/functional/fkey/unique/test_insert_12.py b/tests/functional/fkey/unique/test_insert_12.py index 2fa3886f..a5ce76ba 100644 --- a/tests/functional/fkey/unique/test_insert_12.py +++ b/tests/functional/fkey/unique/test_insert_12.py @@ -1,29 +1,24 @@ #coding:utf-8 -# -# id: functional.fkey.unique.insert_12 -# title: Check correct work fix with foreign key -# decription: Check foreign key work. -# Master table has one primary key field and one unique field. -# 1 Master transaction modifies non key or unique field. -# 2 Detail transaction inserts record. -# 3 Master transaction modifies unique field. -# Detail transaction inserts record in detail_table. -# Expected: referential integrity error -# tracker_id: -# min_versions: [] -# versions: 2.5.3 -# qmid: functional.fkey.unique.ins_12 + +""" +ID: fkey.unique.insert-11 +FBTEST: functional.fkey.unique.insert_12 +TITLE: Check correct work fix with foreign key +DESCRIPTION: + Check foreign key work. + Master table has one primary key field and one unique field. + 1 Master transaction modifies non key or unique field. + 2 Detail transaction inserts record. + 3 Master transaction modifies unique field. + Detail transaction inserts record in detail_table. 
+ Expected: referential integrity error +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * from firebird.driver import DatabaseError, tpb, Isolation -# version: 2.5.3 -# resources: None - -substitutions_1 = [('lock.*', '')] - -init_script_1 = """CREATE TABLE MASTER_TABLE ( +init_script = """CREATE TABLE MASTER_TABLE ( ID INTEGER PRIMARY KEY, UF INTEGER UNIQUE, INT_F INTEGER @@ -39,57 +34,19 @@ COMMIT; INSERT INTO MASTER_TABLE (ID, UF, INT_F) VALUES (1, 1, 10); commit;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -# test_script_1 -#--- -# TPB_master = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# TPB_detail = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# -# db_conn.begin(tpb=TPB_master) -# c = db_conn.cursor() -# c.execute("update master_table set int_f = 10 WHERE ID=1") -# -# #Create second connection for change detail table -# con_detail = kdb.connect( -# dsn=dsn.encode(), -# user=user_name.encode(), -# password=user_password.encode() -# ) -# -# try: -# con_detail.begin(tpb=TPB_detail) -# c = con_detail.cursor() -# c.execute("INSERT INTO DETAIL_TABLE (ID, FKEY) VALUES (1,1)") -# except Exception, e: -# print (e[1]) -# -# try: -# c = db_conn.cursor() -# c.execute("update master_table set UF=10 WHERE ID=1") -# except Exception, e: -# print (e[0]) -#--- - -act_1 = python_act('db_1', substitutions=substitutions_1) +act = python_act('db') @pytest.mark.version('>=2.5.3') -def test_1(act_1: Action): - with act_1.db.connect() as con: +def test_1(act: Action): + with act.db.connect() as con: cust_tpb = tpb(isolation=Isolation.READ_COMMITTED_RECORD_VERSION, lock_timeout=0) con.begin(cust_tpb) with con.cursor() as c: c.execute("update master_table set int_f = 10 WHERE ID=1") #Create second connection for change detail table - with act_1.db.connect() as con_detail: + with act.db.connect() as con_detail: con_detail.begin(cust_tpb) with con_detail.cursor() as cd: cd.execute("INSERT INTO DETAIL_TABLE (ID, FKEY) VALUES (1,1)") diff --git a/tests/functional/fkey/unique/test_insert_13.py b/tests/functional/fkey/unique/test_insert_13.py index e1255d4b..33a4ecf0 100644 --- a/tests/functional/fkey/unique/test_insert_13.py +++ b/tests/functional/fkey/unique/test_insert_13.py @@ -1,30 +1,25 @@ #coding:utf-8 -# -# id: functional.fkey.unique.insert_13 -# title: Check correct work fix with foreign key -# decription: Check foreign key work. -# Master table has one primary key field and one unique field. -# 1 Master transaction modifies non key or unique field. -# 2 Detail transaction inserts record. -# 3 Detail transaction committed. -# 4 Master transaction modifies unique field. -# Detail transaction inserts record in detail_table record. -# Expected: referential integrity error -# tracker_id: -# min_versions: [] -# versions: 2.5.3 -# qmid: functional.fkey.unique.ins_13 + +""" +ID: fkey.unique.insert-12 +FBTEST: functional.fkey.unique.insert_13 +TITLE: Check correct work fix with foreign key +DESCRIPTION: + Check foreign key work. + Master table has one primary key field and one unique field. + 1 Master transaction modifies non key or unique field. + 2 Detail transaction inserts record. + 3 Detail transaction committed. + 4 Master transaction modifies unique field. + Detail transaction inserts record in detail_table record. 
+ Expected: referential integrity error +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * from firebird.driver import DatabaseError, tpb, Isolation -# version: 2.5.3 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE MASTER_TABLE ( +init_script = """CREATE TABLE MASTER_TABLE ( ID INTEGER PRIMARY KEY, UF INTEGER UNIQUE, INT_F INTEGER @@ -40,64 +35,19 @@ COMMIT; INSERT INTO MASTER_TABLE (ID, UF, INT_F) VALUES (1, 1, 10); commit;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -# test_script_1 -#--- -# TPB_master = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# TPB_detail = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# -# db_conn.begin(tpb=TPB_master) -# c = db_conn.cursor() -# c.execute("update master_table set int_f = 10 WHERE ID=1") -# -# #Create second connection for change detail table -# con_detail = kdb.connect( -# dsn=dsn.encode(), -# user=user_name.encode(), -# password=user_password.encode() -# ) -# -# try: -# con_detail.begin(tpb=TPB_detail) -# c = con_detail.cursor() -# c.execute("INSERT INTO DETAIL_TABLE (ID, FKEY) VALUES (1,1)") -# con_detail.commit() -# except Exception, e: -# print (e[1]) -# -# try: -# c = db_conn.cursor() -# c.execute("update master_table set UF=10 WHERE ID=1") -# except Exception, e: -# print (e[0]) -#--- +act = python_act('db') -act_1 = python_act('db_1', substitutions=substitutions_1) - -expected_stdout_1 = """Error while executing SQL statement: -- SQLCODE: -530 -- violation of FOREIGN KEY constraint "FK_DETAIL_TABLE" on table "DETAIL_TABLE" -- Foreign key references are present for the record -- Problematic key value is ("UF" = 1)""" - -@pytest.mark.version('>=2.5.3') -def test_1(act_1: Action): - with act_1.db.connect() as con: +@pytest.mark.version('>=3') +def test_1(act: Action): + with act.db.connect() as con: cust_tpb = tpb(isolation=Isolation.READ_COMMITTED_RECORD_VERSION, lock_timeout=0) con.begin(cust_tpb) with con.cursor() as c: c.execute("update master_table set int_f = 10 WHERE ID=1") #Create second connection for change detail table - with act_1.db.connect() as con_detail: + with act.db.connect() as con_detail: con_detail.begin(cust_tpb) with con_detail.cursor() as cd: cd.execute("INSERT INTO DETAIL_TABLE (ID, FKEY) VALUES (1,1)") diff --git a/tests/functional/fkey/unique/test_insert_14.py b/tests/functional/fkey/unique/test_insert_14.py index c1a8fd2b..bb03ec3d 100644 --- a/tests/functional/fkey/unique/test_insert_14.py +++ b/tests/functional/fkey/unique/test_insert_14.py @@ -1,27 +1,22 @@ #coding:utf-8 -# -# id: functional.fkey.unique.insert_14 -# title: Check correct work fix with foreign key -# decription: Check foreign key work. -# Master table has unique field. -# Master transaction inserts record into master_table and commit. -# Detail transaction inserts record in detail_table. -# Expected: no errors. -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.fkey.unique.ins_10 + +""" +ID: fkey.unique.insert-13 +FBTEST: functional.fkey.unique.insert_14 +TITLE: Check correct work fix with foreign key +DESCRIPTION: + Check foreign key work. + Master table has unique field. + Master transaction inserts record into master_table and commit. + Detail transaction inserts record in detail_table. + Expected: no errors. 
+""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * from firebird.driver import tpb, Isolation -# version: 2.1 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE MASTER_TABLE ( +init_script = """CREATE TABLE MASTER_TABLE ( ID INTEGER PRIMARY KEY, UF INTEGER UNIQUE, INT_F INTEGER @@ -35,53 +30,20 @@ CREATE TABLE DETAIL_TABLE ( ALTER TABLE DETAIL_TABLE ADD CONSTRAINT FK_DETAIL_TABLE FOREIGN KEY (FKEY) REFERENCES MASTER_TABLE (UF); COMMIT;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -# test_script_1 -#--- -# TPB_master = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# TPB_detail = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# db_conn.begin(tpb=TPB_master) -# cm_1 = db_conn.cursor() -# cm_1.execute('INSERT INTO MASTER_TABLE (ID, UF, INT_F) VALUES (1, 1, 10)') -# db_conn.commit() -# -# #Create second connection for change detail table -# con_detail = kdb.connect( -# dsn=dsn.encode(), -# user=user_name.encode(), -# password=user_password.encode() -# ) -# -# try: -# con_detail.begin(tpb=TPB_detail) -# cd = con_detail.cursor() -# cd.execute("INSERT INTO DETAIL_TABLE (ID, FKEY) VALUES (1,1)") -# con_detail.commit() -# except Exception, e: -# print (e[0]) -#--- +act = python_act('db') -act_1 = python_act('db_1', substitutions=substitutions_1) - -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - with act_1.db.connect() as con: +@pytest.mark.version('>=3') +def test_1(act: Action): + with act.db.connect() as con: cust_tpb = tpb(isolation=Isolation.READ_COMMITTED_RECORD_VERSION, lock_timeout=0) con.begin(cust_tpb) with con.cursor() as c: c.execute('INSERT INTO MASTER_TABLE (ID, UF, INT_F) VALUES (1, 1, 10)') con.commit() #Create second connection for change detail table - with act_1.db.connect() as con_detail: + with act.db.connect() as con_detail: con_detail.begin(cust_tpb) with con_detail.cursor() as cd: cd.execute("INSERT INTO DETAIL_TABLE (ID, FKEY) VALUES (1,1)") diff --git a/tests/functional/fkey/unique/test_select_uf_01.py b/tests/functional/fkey/unique/test_select_uf_01.py index ee545888..3b1f7e06 100644 --- a/tests/functional/fkey/unique/test_select_uf_01.py +++ b/tests/functional/fkey/unique/test_select_uf_01.py @@ -1,26 +1,21 @@ #coding:utf-8 -# -# id: functional.fkey.unique.select_uf_01 -# title: Check correct work fix with foreign key -# decription: Check foreign key work. -# Master transaction is perform select with lock and fetch record. -# Detail transaction inserts record in detail_table. -# Expected: no errors. -# tracker_id: -# min_versions: [] -# versions: 2.5 -# qmid: functional.fkey.unique.select_01 + +""" +ID: fkey.unique.select-01 +FBTEST: functional.fkey.unique.select_uf_01 +TITLE: Check correct work fix with foreign key +DESCRIPTION: + Check foreign key work. + Master transaction is perform select with lock and fetch record. + Detail transaction inserts record in detail_table. + Expected: no errors. 
+""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * from firebird.driver import tpb, Isolation -# version: 2.5 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE MASTER_TABLE ( +init_script = """CREATE TABLE MASTER_TABLE ( ID INTEGER PRIMARY KEY, UF INTEGER UNIQUE, INT_F INTEGER @@ -36,52 +31,19 @@ COMMIT; INSERT INTO MASTER_TABLE (ID, UF, INT_F) VALUES (1, 1, 10); commit;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -# test_script_1 -#--- -# TPB_master = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# TPB_detail = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# -# db_conn.begin(tpb=TPB_master) -# c = db_conn.cursor() -# c.execute("SELECT INT_F FROM MASTER_TABLE WHERE ID=1 WITH LOCK") -# c.fetchall() -# #Create second connection for change detail table -# con_detail = kdb.connect( -# dsn=dsn.encode(), -# user=user_name.encode(), -# password=user_password.encode() -# ) -# -# try: -# con_detail.begin(tpb=TPB_detail) -# c = con_detail.cursor() -# c.execute("INSERT INTO DETAIL_TABLE (ID, FKEY) VALUES (1,1)") -# con_detail.commit() -# except Exception, e: -# print (e[0]) -#--- +act = python_act('db') -act_1 = python_act('db_1', substitutions=substitutions_1) - -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - with act_1.db.connect() as con: +@pytest.mark.version('>=3') +def test_1(act: Action): + with act.db.connect() as con: cust_tpb = tpb(isolation=Isolation.READ_COMMITTED_RECORD_VERSION, lock_timeout=0) con.begin(cust_tpb) with con.cursor() as c: c.execute("SELECT INT_F FROM MASTER_TABLE WHERE ID=1 WITH LOCK").fetchall() #Create second connection for change detail table - with act_1.db.connect() as con_detail: + with act.db.connect() as con_detail: con_detail.begin(cust_tpb) with con_detail.cursor() as cd: cd.execute("INSERT INTO DETAIL_TABLE (ID, FKEY) VALUES (1,1)") diff --git a/tests/functional/fkey/unique/test_select_uf_02.py b/tests/functional/fkey/unique/test_select_uf_02.py index a5bc1ed3..42c88edf 100644 --- a/tests/functional/fkey/unique/test_select_uf_02.py +++ b/tests/functional/fkey/unique/test_select_uf_02.py @@ -1,26 +1,21 @@ #coding:utf-8 -# -# id: functional.fkey.unique.select_uf_02 -# title: Check correct work fix with foreign key -# decription: Check foreign key work. -# Master transaction is perform select with lock but not perform fetch. -# Detail transaction inserts record in detail_table and commit; -# Master transaction fetched record and trying update it; -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.fkey.unique.select_02 + +""" +ID: fkey.unique.select-02 +FBTEST: functional.fkey.unique.select_uf_02 +TITLE: Check correct work fix with foreign key +DESCRIPTION: + Check foreign key work. + Master transaction is perform select with lock but not perform fetch. 
+ Detail transaction inserts record in detail_table and commit; + Master transaction fetched record and trying update it; +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * from firebird.driver import DatabaseError, tpb, Isolation -# version: 2.1 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE MASTER_TABLE ( +init_script = """CREATE TABLE MASTER_TABLE ( ID INTEGER PRIMARY KEY, UF INTEGER UNIQUE, INT_F INTEGER @@ -36,58 +31,19 @@ COMMIT; INSERT INTO MASTER_TABLE (ID, UF, INT_F) VALUES (1, 1, 10); commit;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -# test_script_1 -#--- -# TPB_master = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# TPB_detail = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# -# db_conn.begin(tpb=TPB_master) -# c = db_conn.cursor() -# c.execute("SELECT UF, INT_F FROM MASTER_TABLE WHERE ID=1 WITH LOCK") -# -# #Create second connection for change detail table -# con_detail = kdb.connect( -# dsn=dsn.encode(), -# user=user_name.encode(), -# password=user_password.encode() -# ) -# -# try: -# con_detail.begin(tpb=TPB_detail) -# cd = con_detail.cursor() -# cd.execute("INSERT INTO DETAIL_TABLE (ID, FKEY) VALUES (1,1)") -# con_detail.commit() -# except Exception, e: -# print (e[1]) -# try: -# c.fetchall() -# c.execute("UPDATE MASTER_TABLE SET UF=2") -# db_conn.commit() -# except Exception, e: -# print (e[0]) -#--- +act = python_act('db') -act_1 = python_act('db_1', substitutions=substitutions_1) - -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - with act_1.db.connect() as con: +@pytest.mark.version('>=3') +def test_1(act: Action): + with act.db.connect() as con: cust_tpb = tpb(isolation=Isolation.READ_COMMITTED_RECORD_VERSION, lock_timeout=0) con.begin(cust_tpb) with con.cursor() as c: c.execute("SELECT UF, INT_F FROM MASTER_TABLE WHERE ID=1 WITH LOCK") #Create second connection for change detail table - with act_1.db.connect() as con_detail: + with act.db.connect() as con_detail: con_detail.begin(cust_tpb) with con_detail.cursor() as cd: cd.execute("INSERT INTO DETAIL_TABLE (ID, FKEY) VALUES (1,1)") diff --git a/tests/functional/fkey/unique/test_upd_01.py b/tests/functional/fkey/unique/test_upd_01.py index 03f1223c..d5ba477a 100644 --- a/tests/functional/fkey/unique/test_upd_01.py +++ b/tests/functional/fkey/unique/test_upd_01.py @@ -1,26 +1,21 @@ #coding:utf-8 -# -# id: functional.fkey.unique.upd_01 -# title: Check correct work fix with foreign key -# decription: Check foreign key work. -# Master table has unique field. Master transaction doesn't modified it. -# Detail transaction updates record in detail_table record. -# Expected: no errors -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.fkey.unique.upd_01 + +""" +ID: fkey.unique.update-01 +FBTEST: functional.fkey.unique.upd_01 +TITLE: Check correct work fix with foreign key +DESCRIPTION: + Check foreign key work. + Master table has unique field. Master transaction doesn't modified it. + Detail transaction updates record in detail_table record. 
+ Expected: no errors +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * from firebird.driver import tpb, Isolation -# version: 2.1 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE MASTER_TABLE ( +init_script = """CREATE TABLE MASTER_TABLE ( ID INTEGER PRIMARY KEY, UF INTEGER UNIQUE, INT_F INTEGER @@ -37,52 +32,19 @@ INSERT INTO MASTER_TABLE (ID, UF, INT_F) VALUES (1, 1, 10); INSERT INTO DETAIL_TABLE (ID, FKEY) VALUES (1, 1); COMMIT;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -# test_script_1 -#--- -# TPB_master = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# TPB_detail = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# -# db_conn.begin(tpb=TPB_master) -# c = db_conn.cursor() -# c.execute("UPDATE MASTER_TABLE SET INT_F=10") -# -# #Create second connection for change detail table -# con_detail = kdb.connect( -# dsn=dsn.encode(), -# user=user_name.encode(), -# password=user_password.encode() -# ) -# -# try: -# con_detail.begin(tpb=TPB_detail) -# c = con_detail.cursor() -# c.execute("UPDATE DETAIL_TABLE SET ID=2 WHERE ID=1") -# con_detail.commit() -# except Exception, e: -# print (e[0]) -#--- +act = python_act('db') -act_1 = python_act('db_1', substitutions=substitutions_1) - -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - with act_1.db.connect() as con: +@pytest.mark.version('>=3') +def test_1(act: Action): + with act.db.connect() as con: cust_tpb = tpb(isolation=Isolation.READ_COMMITTED_RECORD_VERSION, lock_timeout=0) con.begin(cust_tpb) with con.cursor() as c: c.execute("UPDATE MASTER_TABLE SET INT_F=10") #Create second connection for change detail table - with act_1.db.connect() as con_detail: + with act.db.connect() as con_detail: con_detail.begin(cust_tpb) with con_detail.cursor() as cd: cd.execute("UPDATE DETAIL_TABLE SET ID=2 WHERE ID=1") diff --git a/tests/functional/fkey/unique/test_upd_02.py b/tests/functional/fkey/unique/test_upd_02.py index c8aa0d8c..738fe1ff 100644 --- a/tests/functional/fkey/unique/test_upd_02.py +++ b/tests/functional/fkey/unique/test_upd_02.py @@ -1,26 +1,21 @@ #coding:utf-8 -# -# id: functional.fkey.unique.upd_02 -# title: Check correct work fix with foreign key -# decription: Check foreign key work. -# Master table has unique field. Master transaction modifies it and commit. -# Detail transaction updates record in detail_table. -# Expected: no errors -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.fkey.unique.upd_02 + +""" +ID: fkey.unique.update-02 +FBTEST: functional.fkey.unique.upd_02 +TITLE: Check correct work fix with foreign key +DESCRIPTION: + Check foreign key work. + Master table has unique field. Master transaction modifies it and commit. + Detail transaction updates record in detail_table. 
+ Expected: no errors +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * from firebird.driver import tpb, Isolation -# version: 2.1 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE MASTER_TABLE ( +init_script = """CREATE TABLE MASTER_TABLE ( ID INTEGER PRIMARY KEY, UF INTEGER UNIQUE, INT_F INTEGER @@ -36,50 +31,13 @@ COMMIT; INSERT INTO MASTER_TABLE (ID, UF, INT_F) VALUES (1, 1, 10); commit;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -# test_script_1 -#--- -# TPB_master = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# TPB_detail = ( -# chr(kdb.isc_tpb_write) -# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version) -# + chr(kdb.isc_tpb_nowait) -# ) -# -# db_conn.begin(tpb=TPB_master) -# c = db_conn.cursor() -# c.execute("UPDATE MASTER_TABLE SET UF=2 WHERE ID=1") -# db_conn.commit() -# db_conn.begin(tpb=TPB_master) -# c = db_conn.cursor() -# c.execute("UPDATE MASTER_TABLE SET INT_F=10") -# -# #Create second connection for change detail table -# con_detail = kdb.connect( -# dsn=dsn.encode(), -# user=user_name.encode(), -# password=user_password.encode() -# ) -# -# try: -# con_detail.begin(tpb=TPB_detail) -# c = con_detail.cursor() -# c.execute("UPDATE DETAIL_TABLE SET FKEY = 2") -# con_detail.commit() -# except Exception, e: -# print (e[0]) -#--- +act = python_act('db') -act_1 = python_act('db_1', substitutions=substitutions_1) - -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - with act_1.db.connect() as con: +@pytest.mark.version('>=3') +def test_1(act: Action): + with act.db.connect() as con: cust_tpb = tpb(isolation=Isolation.READ_COMMITTED_RECORD_VERSION, lock_timeout=0) con.begin(cust_tpb) with con.cursor() as c: @@ -88,7 +46,7 @@ def test_1(act_1: Action): con.begin(cust_tpb) c.execute("UPDATE MASTER_TABLE SET INT_F=10") #Create second connection for change detail table - with act_1.db.connect() as con_detail: + with act.db.connect() as con_detail: con_detail.begin(cust_tpb) with con_detail.cursor() as cd: cd.execute("UPDATE DETAIL_TABLE SET FKEY = 2") diff --git a/tests/functional/generator/alter/test_01.py b/tests/functional/generator/alter/test_01.py index 15c497e5..b8690034 100644 --- a/tests/functional/generator/alter/test_01.py +++ b/tests/functional/generator/alter/test_01.py @@ -1,37 +1,33 @@ #coding:utf-8 -# -# id: functional.generator.alter.01 -# title: Run ALTER SEQUENCE -# decription: -# Create sequence and try several cases of ALTER SEQUENCE statement. -# Then check result that is stored in RDB$GENERATORS table and gen_id(, 0) value. -# NB: we have to issue 'COMMIT' after each ALTER SEQUENCE statement in order to see new values in RDB. -# -# 07-aug-2020: we have to separate test for 3.0 and 4.0 because INITIAL value of new sequence -# in FB 4.x now differs from "old good zero" (this is so since CORE-6084 was fixed). -# -# 13-aug-2020: changed code for FB 4.x after introduction of fix for CORE-6386: value that was initially -# written into RDB$GENERATORD.RDB$INITIAL_VALUE column must NOT changed on any kind of ALTER EQUENCE -# statement, even when it contains 'RESTART WITH' clause. Checked on 4.0.0.2151. 
-#
-#  See also: doc/README.incompatibilities.3to4.txt
-#
-# tracker_id:
-# min_versions: ['3.0']
-# versions: 3.0, 4.0
-# qmid:
+
+"""
+ID: generator.alter-01
+FBTEST: functional.generator.alter.01
+TITLE: ALTER SEQUENCE
+DESCRIPTION:
+  Create sequence and try several cases of ALTER SEQUENCE statement.
+  Then check result that is stored in RDB$GENERATORS table and gen_id(, 0) value.
+  NB: we have to issue 'COMMIT' after each ALTER SEQUENCE statement in order to see new values in RDB.
+NOTES:
+[07.08.2020]
+  we have to separate test for 3.0 and 4.0 because INITIAL value of new sequence
+  in FB 4.x now differs from "old good zero" (this is so since CORE-6084 was fixed).
+[13.08.2020]
+  changed code for FB 4.x after introduction of fix for CORE-6386: value that was initially
+  written into RDB$GENERATORS.RDB$INITIAL_VALUE column must NOT be changed on any kind of ALTER SEQUENCE
+  statement, even when it contains 'RESTART WITH' clause. Checked on 4.0.0.2151.
+
+  See also: doc/README.incompatibilities.3to4.txt
+"""

 import pytest
-from firebird.qa import db_factory, isql_act, Action
+from firebird.qa import *
+
+substitutions = [('===.*', ''), ('[ \t]+', ' ')]
+
+db = db_factory()

 # version: 3.0
-# resources: None
-
-substitutions_1 = [('===.*', ''), ('[ \t]+', ' ')]
-
-init_script_1 = """"""
-
-db_1 = db_factory(sql_dialect=3, init=init_script_1)

 test_script_1 = """
    recreate generator g;
@@ -68,7 +64,7 @@ test_script_1 = """
 """

-act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
+act_1 = isql_act('db', test_script_1, substitutions=substitutions)

 expected_stdout_1 = """
    MSG RDB_INIT RDB_INCR GEN_ID_0
@@ -91,13 +87,6 @@ def test_1(act_1: Action):
     assert act_1.clean_stdout == act_1.clean_expected_stdout

 # version: 4.0
-# resources: None
-
-substitutions_2 = [('===.*', ''), ('[ \t]+', ' ')]
-
-init_script_2 = """"""
-
-db_2 = db_factory(sql_dialect=3, init=init_script_2)

 test_script_2 = """
    recreate generator g start with 7654321;
@@ -132,7 +121,7 @@ test_script_2 = """
    select 'point-02' as msg, p.* from sp_gen_info p;
    -----------------------------------------------------------------------
-
+
    -- Test when only RESTART clause presents:
    recreate generator g start with 7654321; commit;
    alter sequence g restart with -1234567; commit;
@@ -145,7 +134,7 @@ test_script_2 = """
    -----------------------------------------------------------------------

    -- Test when both RESTART and INCREMENT BY clauses present:
-
+
    recreate generator g start with 7654321; commit;
    alter sequence g restart with -1234567 increment by -23456789; commit;
    select 'point-05' as msg, p.* from sp_gen_info p;
@@ -162,20 +151,20 @@ test_script_2 = """
    select 'point-08' as msg, p.* from sp_gen_info p;
 """

-act_2 = isql_act('db_2', test_script_2, substitutions=substitutions_2)
+act_2 = isql_act('db', test_script_2, substitutions=substitutions)

 expected_stdout_2 = """
-    MSG RDB_INIT RDB_INCR GEN_ID_CURR GEN_ID_NEXT
-    ======== ===================== ===================== ===================== =====================
-    point-00 7654321 1 7654320 7654321
-    point-01 7654321 -23456789 7654320 -15802469
-    point-02 7654321 23456789 7654320 31111109
-    point-03 7654321 1 -1234568 -1234567
-    point-04 7654321 1 1234566 1234567
-    point-05 7654321 -23456789 22222222 -1234567
-    point-06 7654321 23456789 -24691356 -1234567
-    point-07 7654321 -23456789 24691356 1234567
-    point-08 7654321 23456789 -22222222 1234567
+    MSG RDB_INIT RDB_INCR GEN_ID_CURR GEN_ID_NEXT
+    ======== ===================== ===================== =====================
===================== + point-00 7654321 1 7654320 7654321 + point-01 7654321 -23456789 7654320 -15802469 + point-02 7654321 23456789 7654320 31111109 + point-03 7654321 1 -1234568 -1234567 + point-04 7654321 1 1234566 1234567 + point-05 7654321 -23456789 22222222 -1234567 + point-06 7654321 23456789 -24691356 -1234567 + point-07 7654321 -23456789 24691356 1234567 + point-08 7654321 23456789 -22222222 1234567 """ @pytest.mark.version('>=4.0') @@ -183,4 +172,3 @@ def test_2(act_2: Action): act_2.expected_stdout = expected_stdout_2 act_2.execute() assert act_2.clean_stdout == act_2.clean_expected_stdout - diff --git a/tests/functional/generator/create/test_01.py b/tests/functional/generator/create/test_01.py index 2840783c..3fb4591a 100644 --- a/tests/functional/generator/create/test_01.py +++ b/tests/functional/generator/create/test_01.py @@ -1,39 +1,34 @@ #coding:utf-8 -# -# id: functional.generator.create.01 -# title: Run CREATE GENERATOR and query related data from RDB$GENERATORS. -# decription: -# Run 'CREATE GENERATOR' statement and obtain data about it from system table (rdb$generators). -# 07-aug-2020: we have to separate test for 3.0 and 4.0 because INITIAL value of new sequence -# in FB 4.x now differs from "old good zero" (this is so since CORE-6084 was fixed). -# -# See also: doc/README.incompatibilities.3to4.txt -# -# tracker_id: -# min_versions: [] -# versions: 3.0, 4.0 -# qmid: functional.generator.create.create_generator_01 + +""" +ID: generator.create-01 +FBTEST: functional.generator.create.01 +TITLE: CREATE GENERATOR and query related data from RDB$GENERATORS +DESCRIPTION: + Run 'CREATE GENERATOR' statement and obtain data about it from system table (rdb$generators). +NOTES: +[07.08.2020] + we have to separate test for 3.0 and 4.0 because INITIAL value of new sequence + in FB 4.x now differs from "old good zero" (this is so since CORE-6084 was fixed). 
+ + See also: doc/README.incompatibilities.3to4.txt +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory() -substitutions_1 = [('RDB\\$SECURITY_CLASS[ ]+SQL\\$.*', ''), ('RDB\\$GENERATOR_ID.*', '')] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ create generator test; commit; set list on; - select * from rdb$generators where rdb$generator_name=upper('test'); + select * from rdb$generators where rdb$generator_name=upper('test'); """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('RDB\\$SECURITY_CLASS[ ]+SQL\\$.*', ''), ('RDB\\$GENERATOR_ID.*', '')]) + +# version: 3.0 expected_stdout_1 = """ RDB$GENERATOR_NAME TEST @@ -47,28 +42,12 @@ expected_stdout_1 = """ """ @pytest.mark.version('>=3.0,<4.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +def test_1(act: Action): + act.expected_stdout = expected_stdout_1 + act.execute() + assert act.clean_stdout == act.clean_expected_stdout # version: 4.0 -# resources: None - -substitutions_2 = [('RDB\\$SECURITY_CLASS[ ]+SQL\\$.*', ''), ('RDB\\$GENERATOR_ID.*', '')] - -init_script_2 = """""" - -db_2 = db_factory(sql_dialect=3, init=init_script_2) - -test_script_2 = """ - create generator test; - commit; - set list on; - select * from rdb$generators where rdb$generator_name=upper('test'); -""" - -act_2 = isql_act('db_2', test_script_2, substitutions=substitutions_2) expected_stdout_2 = """ RDB$GENERATOR_NAME TEST @@ -82,8 +61,7 @@ expected_stdout_2 = """ """ @pytest.mark.version('>=4.0') -def test_2(act_2: Action): - act_2.expected_stdout = expected_stdout_2 - act_2.execute() - assert act_2.clean_stdout == act_2.clean_expected_stdout - +def test_2(act: Action): + act.expected_stdout = expected_stdout_2 + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/generator/create/test_02.py b/tests/functional/generator/create/test_02.py index 5f26a024..31f63281 100644 --- a/tests/functional/generator/create/test_02.py +++ b/tests/functional/generator/create/test_02.py @@ -1,47 +1,35 @@ #coding:utf-8 -# -# id: functional.generator.create.02 -# title: CREATE GENERATOR - try create gen with same name -# decription: CREATE GENERATOR - try create gen with same name -# -# Dependencies: -# CREATE DATABASE -# CREATE GENERATOR -# tracker_id: -# min_versions: [] -# versions: 3.0 -# qmid: functional.generator.create.create_generator_02 +""" +ID: generator.create-02 +FBTEST: functional.generator.create.02 +TITLE: CREATE GENERATOR - try create gen with same name +DESCRIPTION: +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """ +init_script = """ CREATE GENERATOR test; commit; """ -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """ +test_script = """ CREATE GENERATOR test; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stderr_1 = """Statement failed, SQLSTATE = 42000 +expected_stderr = """Statement failed, SQLSTATE = 42000 unsuccessful metadata update -CREATE SEQUENCE TEST failed -Sequence TEST already exists """ 
@pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - +def test_1(act: Action): + act.expected_stderr = expected_stderr + act.execute() + assert act.clean_stderr == act.clean_expected_stderr diff --git a/tests/functional/generator/drop/test_01.py b/tests/functional/generator/drop/test_01.py index ff18ba71..9b4d4b82 100644 --- a/tests/functional/generator/drop/test_01.py +++ b/tests/functional/generator/drop/test_01.py @@ -1,40 +1,27 @@ #coding:utf-8 -# -# id: functional.generator.drop.01 -# title: DROP GENERATOR -# decription: DROP GENERATOR -# -# Dependencies: -# CREATE DATABASE -# CREATE GENERATOR -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.generator.drop.drop_generator_01 + +""" +ID: generator.drop-01 +FBTEST: functional.generator.drop.01 +TITLE: DROP GENERATOR +DESCRIPTION: +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE GENERATOR test; +init_script = """CREATE GENERATOR test; commit;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """DROP GENERATOR test; +test_script = """DROP GENERATOR test; SHOW GENERATOR TEST;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stderr_1 = """There is no generator TEST in this database""" +act = isql_act('db', test_script) @pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - +def test_1(act: Action): + act.expected_stderr = "There is no generator TEST in this database" + act.execute() + assert act.clean_stderr == act.clean_expected_stderr diff --git a/tests/functional/generator/drop/test_02.py b/tests/functional/generator/drop/test_02.py index 2760c115..af6976f0 100644 --- a/tests/functional/generator/drop/test_02.py +++ b/tests/functional/generator/drop/test_02.py @@ -1,27 +1,16 @@ #coding:utf-8 -# -# id: functional.generator.drop.02 -# title: DROP GENERATOR - in use -# decription: DROP GENERATOR -# -# Dependencies: -# CREATE DATABASE -# CREATE GENERATOR -# CREATE PROCEDURE -# tracker_id: -# min_versions: [] -# versions: 2.5.0 -# qmid: functional.generator.drop.drop_generator_02 + +""" +ID: generator.drop-02 +FBTEST: functional.generator.drop.02 +TITLE: DROP GENERATOR - in use +DESCRIPTION: +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE GENERATOR test; +init_script = """CREATE GENERATOR test; SET TERM ^; CREATE PROCEDURE a AS DECLARE VARIABLE id INT; @@ -31,22 +20,19 @@ END ^ SET TERM ;^ commit;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """DROP GENERATOR test;""" +act = isql_act('db', "DROP GENERATOR test;") -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stderr_1 = """Statement failed, SQLSTATE = 42000 +expected_stderr = """Statement failed, SQLSTATE = 42000 unsuccessful metadata update -cannot delete -GENERATOR TEST -there are 1 dependencies""" -@pytest.mark.version('>=2.5.0') -def test_1(act_1: Action): - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == 
act_1.clean_expected_stderr - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stderr = expected_stderr + act.execute() + assert act.clean_stderr == act.clean_expected_stderr diff --git a/tests/functional/generator/drop/test_03.py b/tests/functional/generator/drop/test_03.py index d5a31ce7..0d50f3ea 100644 --- a/tests/functional/generator/drop/test_03.py +++ b/tests/functional/generator/drop/test_03.py @@ -1,41 +1,27 @@ #coding:utf-8 -# -# id: functional.generator.drop.03 -# title: DROP GENERATOR - generator does not exists -# decription: DROP GENERATOR - generator does not exists -# -# Dependencies: -# CREATE DATABASE -# tracker_id: -# min_versions: [] -# versions: 3.0 -# qmid: functional.generator.drop.drop_generator_03 + +""" +ID: generator.drop-03 +FBTEST: functional.generator.drop.03 +TITLE: DROP GENERATOR - generator does not exists +DESCRIPTION: +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory() -substitutions_1 = [] +act = isql_act('db', "DROP GENERATOR test;") -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """DROP GENERATOR test;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stderr_1 = """Statement failed, SQLSTATE = 42000 +expected_stderr = """Statement failed, SQLSTATE = 42000 unsuccessful metadata update -DROP SEQUENCE TEST failed -generator TEST is not defined """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - +def test_1(act: Action): + act.expected_stderr = expected_stderr + act.execute() + assert act.clean_stderr == act.clean_expected_stderr diff --git a/tests/functional/gtcs/test_computed_fields_01.py b/tests/functional/gtcs/test_computed_fields_01.py index 2b130bdf..70893cd4 100644 --- a/tests/functional/gtcs/test_computed_fields_01.py +++ b/tests/functional/gtcs/test_computed_fields_01.py @@ -1,32 +1,22 @@ #coding:utf-8 -# -# id: functional.gtcs.computed_fields_01 -# title: computed-fields-01 -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_01.script -# SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script -# Checked on: 4.0.0.1803 SS; 3.0.6.33265 SS; 2.5.9.27149 SC. 
-# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: computed-fields-01 +FBTEST: functional.gtcs.computed_fields_01 +TITLE: Computed fields +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_01.script + SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory() -substitutions_1 = [('=', ''), ('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set heading off; /*-----------------*/ /* Computed by (i) */ @@ -174,9 +164,9 @@ test_script_1 = """ select 'Passed 11 - Update' from t50 where j = (4*2-4+4)/2 having count(*) = 2; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('=', ''), ('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ Passed 1 - Insert Passed 1 - Update Passed 2 - Insert @@ -203,9 +193,8 @@ expected_stdout_1 = """ Passed 11 - Update """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/gtcs/test_computed_fields_02.py b/tests/functional/gtcs/test_computed_fields_02.py index f0591bd0..daf7dbf0 100644 --- a/tests/functional/gtcs/test_computed_fields_02.py +++ b/tests/functional/gtcs/test_computed_fields_02.py @@ -1,32 +1,22 @@ #coding:utf-8 -# -# id: functional.gtcs.computed_fields_02 -# title: computed-fields-02 -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_02.script -# SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script -# Checked on: 4.0.0.1803 SS; 3.0.6.33265 SS; 2.5.9.27149 SC. 
-# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: computed-fields-02 +FBTEST: functional.gtcs.computed_fields_02 +TITLE: Computed fields +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_02.script + SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory() -substitutions_1 = [('=', ''), ('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set heading off; /* ** Syntax test cases - Valid Arithmetic operations on @@ -232,23 +222,23 @@ test_script_1 = """ """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('=', ''), ('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ Passed 1(s) - Insert Passed 1(s) - Update Passed 2(s) - Insert Passed 2(s) - Update Passed 3(s) - Insert Passed 3(s) - Update - + Passed 1(d) - Insert Passed 1(d) - Update Passed 2(d) - Insert Passed 2(d) - Update Passed 3(d) - Insert Passed 3(d) - Update - + Passed 1(n) - Insert Passed 1(n) - Update Passed 2(n) - Insert @@ -271,9 +261,8 @@ expected_stdout_1 = """ Passed 3(dp) - Update """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/gtcs/test_computed_fields_03.py b/tests/functional/gtcs/test_computed_fields_03.py index b17c57cf..ec46cfa5 100644 --- a/tests/functional/gtcs/test_computed_fields_03.py +++ b/tests/functional/gtcs/test_computed_fields_03.py @@ -1,32 +1,22 @@ #coding:utf-8 -# -# id: functional.gtcs.computed_fields_03 -# title: computed-fields-03 -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_03.script -# SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script -# Checked on: 4.0.0.1803 SS; 3.0.6.33265 SS; 2.5.9.27149 SC. 
-# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: computed-fields-03 +FBTEST: functional.gtcs.computed_fields_03 +TITLE: Computed fields +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_03.script + SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory() -substitutions_1 = [('=', ''), ('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set heading off; /* @@ -135,9 +125,9 @@ test_script_1 = """ """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('=', ''), ('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ Passed 1 - Insert Passed 1 - Update Passed 2 - Insert @@ -154,9 +144,8 @@ expected_stdout_1 = """ Passed 8 - Update """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/gtcs/test_computed_fields_04.py b/tests/functional/gtcs/test_computed_fields_04.py index e5a18ff1..a2799cde 100644 --- a/tests/functional/gtcs/test_computed_fields_04.py +++ b/tests/functional/gtcs/test_computed_fields_04.py @@ -1,32 +1,22 @@ #coding:utf-8 -# -# id: functional.gtcs.computed_fields_04 -# title: computed-fields-04 -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_04.script -# SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script -# Checked on: 4.0.0.1803 SS; 3.0.6.33265 SS; 2.5.9.27149 SC. 
-# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: computed-fields-04 +FBTEST: functional.gtcs.computed_fields_04 +TITLE: Computed fields +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_04.script + SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory() -substitutions_1 = [('=', ''), ('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set heading off; /* @@ -135,9 +125,9 @@ test_script_1 = """ """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('=', ''), ('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ Passed 1 - Insert Passed 1 - Update Passed 2 - Insert @@ -154,9 +144,8 @@ expected_stdout_1 = """ Passed 8 - Update """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/gtcs/test_computed_fields_05.py b/tests/functional/gtcs/test_computed_fields_05.py index 02760507..4f9fd89f 100644 --- a/tests/functional/gtcs/test_computed_fields_05.py +++ b/tests/functional/gtcs/test_computed_fields_05.py @@ -1,32 +1,22 @@ #coding:utf-8 -# -# id: functional.gtcs.computed_fields_05 -# title: computed-fields-05 -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_05.script -# SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script -# Checked on: 4.0.0.1803 SS; 3.0.6.33265 SS; 2.5.9.27149 SC. -# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: computed-fields-05 +FBTEST: functional.gtcs.computed_fields_05 +TITLE: Computed fields +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_05.script + SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory() -substitutions_1 = [('=', ''), ('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set heading off; /* @@ -46,11 +36,11 @@ test_script_1 = """ /*----------------------------------------------------------------*/ /* Note: Using cast() to convert 'today' first to a date literal. */ - /* And, then doing date arithmetic. The proper behaviour */ - /* should just allow straight date addition as */ + /* And, then doing date arithmetic. The proper behaviour */ + /* should just allow straight date addition as */ /* set d = 'today' + 5. There is already a bug entered about*/ /* this (Bug No. xxxx). 
Change this test case once this */ - /* bug is fixed. */ + /* bug is fixed. */ /*----------------------------------------------------------------*/ update t0 set d = cast('today'as date) + 5 where d = 'today'; select 'Passed 1 - Update' from t0 where dc = d having count(*) = 4; @@ -126,9 +116,9 @@ test_script_1 = """ """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('=', ''), ('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ Passed 1 - Insert Passed 1 - Update Passed 2 - Insert @@ -143,9 +133,8 @@ expected_stdout_1 = """ Passed 6 - Update """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/gtcs/test_computed_fields_06.py b/tests/functional/gtcs/test_computed_fields_06.py index 480bc45e..67ab7a44 100644 --- a/tests/functional/gtcs/test_computed_fields_06.py +++ b/tests/functional/gtcs/test_computed_fields_06.py @@ -1,32 +1,22 @@ #coding:utf-8 -# -# id: functional.gtcs.computed_fields_06 -# title: computed-fields-06 -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_06.script -# SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script -# Checked on: 4.0.0.1803 SS; 3.0.6.33265 SS; 2.5.9.27149 SC. -# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: computed-fields-06 +FBTEST: functional.gtcs.computed_fields_06 +TITLE: Computed fields +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_06.script + SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory() -substitutions_1 = [('=', ''), ('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set heading off; /* @@ -74,9 +64,9 @@ test_script_1 = """ """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('=', ''), ('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ Passed 1 - Insert Passed 1 - Update Passed 2 - Insert @@ -85,9 +75,8 @@ expected_stdout_1 = """ Passed 3 - Update """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/gtcs/test_computed_fields_07.py b/tests/functional/gtcs/test_computed_fields_07.py index a1c2b61e..73d9307c 100644 --- a/tests/functional/gtcs/test_computed_fields_07.py +++ b/tests/functional/gtcs/test_computed_fields_07.py @@ -1,42 +1,23 @@ #coding:utf-8 -# -# id:
functional.gtcs.computed_fields_07 -# title: computed-fields-07 -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_07.script -# SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script -# Checked on: 4.0.0.1803 SS; 3.0.6.33265 SS; 2.5.9.27149 SC. -# -# ::: NOTE :::: -# DATABASE MUST BE CREATED IN DIALECT 1 FOR THIS TEST -# ::::::::::::: -# Otherwise some of cases will get error related to forbidden actions, e.g.: -# Statement failed, SQLSTATE = 42000 -# Dynamic SQL Error -# -expression evaluation not supported -# -Strings cannot be added or subtracted in dialect 3 -# -# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: computed-fields-07 +FBTEST: functional.gtcs.computed_fields_07 +TITLE: Computed fields +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_07.script + SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +# SQL Dialect 1 required! +db = db_factory(sql_dialect=1) -substitutions_1 = [('=', ''), ('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=1, init=init_script_1) - -test_script_1 = """ +test_script = """ set bail on; set warnings off; set heading off; @@ -58,7 +39,7 @@ test_script_1 = """ select * from t0; /*-------------------------------*/ - /* Computed by integer*character */ + /* Computed by integer*character */ /*-------------------------------*/ create table t5 (f_integer integer, f_char char(5), integer_char computed by (f_integer*f_char)); commit; -- t5; @@ -73,7 +54,7 @@ test_script_1 = """ select * from t5; /*-----------------------------*/ - /* Computed by float*character */ + /* Computed by float*character */ /*-----------------------------*/ create table t10 (f_float float, f_char char(5), float_char computed by (f_float*f_char)); commit; -- t10; @@ -114,7 +95,7 @@ test_script_1 = """ update t20 set f_char = '03/20/93', f_date = '03/01/93' where f_char = '01/20/93'; update t20 set f_char = '02/26/93' where f_char = '02/27/93'; select 'Passed 4 - Update' from t20 where char_date = f_char - f_date having count(*) = 2; - + /*----------------------------*/ /* Computed by "10" + integer */ @@ -123,7 +104,7 @@ test_script_1 = """ commit; -- t25; insert into t25(f_integer) values(10); insert into t25(f_integer) values(11); - select 'Passed 5 - Insert' from t25 where literal_integer = '10' + f_integer having count(*) = 2; + select 'Passed 5 - Insert' from t25 where literal_integer = '10' + f_integer having count(*) = 2; update t25 set f_integer = 12 where f_integer = 10; select 'Passed 5 - Update' from t25 where literal_integer = '10' + f_integer having count(*) = 2; @@ -139,7 +120,7 @@ test_script_1 = """ update t30 set f_float = 12.12 where f_float = 10.12; select 'Passed 6 - Update' from t30 where literal_float = '1.12' + f_float having count(*) = 2; - + /*-------------------------------*/ /* Computed by "01/01/95" - date */ /*-------------------------------*/ @@ -153,9 +134,9 @@ test_script_1 = """ select 'Passed 7 - Update' from t35 where literal_date = '01/01/95' - f_date having count(*) = 2; """ -act_1 = isql_act('db_1', 
test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('=', ''), ('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ Passed 1 - Insert 10 10.120000 101.1999988555908 11 11.120000 122.3199987411499 @@ -186,9 +167,8 @@ expected_stdout_1 = """ Passed 7 - Update """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/gtcs/test_computed_fields_08.py b/tests/functional/gtcs/test_computed_fields_08.py index 409cb434..f2681d81 100644 --- a/tests/functional/gtcs/test_computed_fields_08.py +++ b/tests/functional/gtcs/test_computed_fields_08.py @@ -1,32 +1,22 @@ #coding:utf-8 -# -# id: functional.gtcs.computed_fields_08 -# title: computed-fields-08 -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_08.script -# SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script -# Checked on: 4.0.0.1803 SS; 3.0.6.33265 SS; 2.5.9.27149 SC. -# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: computed-fields-08 +FBTEST: functional.gtcs.computed_fields_08 +TITLE: Computed fields +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_08.script + SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory() -substitutions_1 = [('=', ''), ('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set bail on; set heading off; /*----------------------*/ @@ -58,18 +48,17 @@ test_script_1 = """ select 'Passed 2 - Update' from t5 where upper_const = a || upper('upper()') having count(*) = 4; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('=', ''), ('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ Passed 1 - Insert Passed 1 - Update Passed 2 - Insert Passed 2 - Update """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/gtcs/test_computed_fields_09.py b/tests/functional/gtcs/test_computed_fields_09.py index 7fe5e5de..d732a643 100644 --- a/tests/functional/gtcs/test_computed_fields_09.py +++ b/tests/functional/gtcs/test_computed_fields_09.py @@ -1,32 +1,22 @@ #coding:utf-8 -# -# id: functional.gtcs.computed_fields_09 -# title: computed-fields-09 -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_09.script -# SQL script for creating test database ('gtcs_sp1.fbk') and fill it 
with some data: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script -# Checked on: 4.0.0.1803 SS; 3.0.6.33265 SS; 2.5.9.27149 SC. -# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: computed-fields-09 +FBTEST: functional.gtcs.computed_fields_09 +TITLE: Computed fields +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_09.script + SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory() -substitutions_1 = [('=', ''), ('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set bail on; set heading off; @@ -107,9 +97,9 @@ test_script_1 = """ """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('=', ''), ('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ Passed 1 - Insert Passed 1 - Update Passed 2 - Insert @@ -124,9 +114,8 @@ expected_stdout_1 = """ Passed 6 - Update """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/gtcs/test_computed_fields_10.py b/tests/functional/gtcs/test_computed_fields_10.py index da90897a..3d845962 100644 --- a/tests/functional/gtcs/test_computed_fields_10.py +++ b/tests/functional/gtcs/test_computed_fields_10.py @@ -1,32 +1,22 @@ #coding:utf-8 -# -# id: functional.gtcs.computed_fields_10 -# title: computed-fields-10 -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_10.script -# SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script -# Checked on: 4.0.0.1803 SS; 3.0.6.33265 SS; 2.5.9.27149 SC. -# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: computed-fields-10 +FBTEST: functional.gtcs.computed_fields_10 +TITLE: Computed fields +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_10.script + SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory() -substitutions_1 = [('=', ''), ('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set bail on; set heading off; @@ -52,16 +42,16 @@ test_script_1 = """ ** Since computed fields are evaluated during run-time, the computed ** field with gen_id() will be different every-time. So, the following ** select will never have a match. 
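Side note on the substitutions lists used throughout these tests (here [('=', ''), ('[ \t]+', ' ')]): they are regular-expression replacements, and judging by the clean_stdout / clean_expected_stdout names they are presumably applied to both the actual and the expected text before comparison. A stdlib-only sketch of that normalization follows; it is an assumption about the framework's behaviour, not its real code, and the sample strings are invented.

import re

def normalize(text: str, substitutions) -> str:
    # Apply each (pattern, replacement) pair, multiline, like the tests' lists suggest.
    for pattern, repl in substitutions:
        text = re.sub(pattern, repl, text, flags=re.MULTILINE)
    return text

subs = [('=', ''), ('[ \t]+', ' ')]
actual   = "F01   =   10"      # hypothetical isql output line
expected = "F01 10"            # hypothetical expected line
assert normalize(actual, subs).strip() == normalize(expected, subs).strip()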
- */ + */ set generator gen1 to 1000; select * from t0 where genid_field = gen_id(gen1, 1); """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('=', ''), ('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ 10 1011 12 1014 @@ -69,9 +59,8 @@ expected_stdout_1 = """ 12 1014 """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/gtcs/test_computed_fields_11.py b/tests/functional/gtcs/test_computed_fields_11.py index ad4c199f..3f14da40 100644 --- a/tests/functional/gtcs/test_computed_fields_11.py +++ b/tests/functional/gtcs/test_computed_fields_11.py @@ -1,32 +1,25 @@ #coding:utf-8 -# -# id: functional.gtcs.computed_fields_11 -# title: computed-fields-11 -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_11.script -# SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script -# Checked on: 4.0.0.1803 SS; 3.0.6.33265 SS; 2.5.9.27149 SC. -# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: computed-fields-11 +FBTEST: functional.gtcs.computed_fields_11 +TITLE: Computed fields +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_11.script + SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +substitutions = [('^((?!Statement failed|SQL error code|Column unknown|F01|F02|REL_NAME|Records).)*$', ''), + ('[ \t]+', ' ')] -substitutions_1 = [('^((?!Statement failed|SQL error code|Column unknown|F01|F02|REL_NAME|Records).)*$', ''), ('[ \t]+', ' ')] +db = db_factory() -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set heading off; set list on; set count on; @@ -56,13 +49,14 @@ test_script_1 = """ ; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=substitutions) -expected_stdout_1 = """ +expected_stdout = """ REL_NAME T2 Records affected: 1 """ -expected_stderr_1 = """ + +expected_stderr = """ Statement failed, SQLSTATE = 42S22 unsuccessful metadata update -CREATE TABLE T0 failed @@ -81,11 +75,9 @@ expected_stderr_1 = """ """ @pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.expected_stderr = expected_stderr + act.execute() + assert (act.clean_stderr == act.clean_expected_stderr and + act.clean_stdout == act.clean_expected_stdout) diff --git a/tests/functional/gtcs/test_computed_fields_12.py b/tests/functional/gtcs/test_computed_fields_12.py 
index 0de12c9a..ae83c63b 100644 --- a/tests/functional/gtcs/test_computed_fields_12.py +++ b/tests/functional/gtcs/test_computed_fields_12.py @@ -1,34 +1,26 @@ #coding:utf-8 -# -# id: functional.gtcs.computed_fields_12 -# title: computed-fields-12 -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_12.script -# SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script -# Checked on: 4.0.0.1803 SS; 3.0.6.33265 SS; 2.5.9.27149 SC. -# -# 25.09.2021: moved code for 4.0+ into separate secion because of fixed gh-6845. Use SET_SQLDA_DISPLAY ON for check datatypes. -# (seel also commit for apropriate GTCS-tests: e617f3d70be5018de6e6ee8624da6358d52a9ce0, 20-aug-2021 14:11) -# -# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5, 4.0 -# qmid: None + +""" +ID: computed-fields-12 +FBTEST: functional.gtcs.computed_fields_12 +TITLE: Computed fields +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_12.script + SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script +NOTES: +[25.09.2021] + Moved code for 4.0+ into a separate section because gh-6845 was fixed. Use SET_SQLDA_DISPLAY ON to check datatypes. + (see also commit for appropriate GTCS-tests: e617f3d70be5018de6e6ee8624da6358d52a9ce0, 20-aug-2021 14:11) +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory() -substitutions_1 = [('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) +# version: 3.0 test_script_1 = """ set heading off; @@ -42,7 +34,7 @@ test_script_1 = """ select * from t3; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act_1 = isql_act('db', test_script_1, substitutions=[('[ \t]+', ' ')]) expected_stdout_1 = """ A 10 @@ -50,20 +42,13 @@ expected_stdout_1 = """ AFAF 60 """ -@pytest.mark.version('>=2.5,<4.0') +@pytest.mark.version('>=3.0,<4.0') def test_1(act_1: Action): act_1.expected_stdout = expected_stdout_1 act_1.execute() assert act_1.clean_stdout == act_1.clean_expected_stdout # version: 4.0 -# resources: None - -substitutions_2 = [('^((?!(sqltype|FLD_)).)*$', ''), ('[ \t]+', ' '), ('.*alias.*', '')] - -init_script_2 = """""" - -db_2 = db_factory(sql_dialect=3, init=init_script_2) test_script_2 = """ set list on; @@ -72,14 +57,15 @@ test_script_2 = """ set sqlda_display on; - -- expected output for 3rd column: + -- expected output for 3rd column: -- 03: sqltype: 32752 INT128 Nullable scale: 0 subtype: 0 len: 16" (confirm on build 4.0.1.2613; 5.0.0.220) -- build 4.0.1.2536 (last before fix) issues here "03: sqltype: 580 INT64 Nullable scale: 0 subtype: 0 len: 8" select * from test; """ -act_2 = isql_act('db_2', test_script_2, substitutions=substitutions_2) +act_2 = isql_act('db', test_script_2, substitutions=[('^((?!(sqltype|FLD_)).)*$', ''), + ('[ \t]+', ' '), ('.*alias.*', '')]) expected_stdout_2 = """ 01: sqltype: 496 LONG Nullable scale: 0 subtype: 0 len: 4 @@ -95,4 +81,3 @@ def test_2(act_2: Action): act_2.expected_stdout = expected_stdout_2 act_2.execute() assert act_2.clean_stdout == act_2.clean_expected_stdout - diff --git a/tests/functional/gtcs/test_computed_fields_13.py
b/tests/functional/gtcs/test_computed_fields_13.py index c2abda98..a6230d6b 100644 --- a/tests/functional/gtcs/test_computed_fields_13.py +++ b/tests/functional/gtcs/test_computed_fields_13.py @@ -1,36 +1,24 @@ #coding:utf-8 -# -# id: functional.gtcs.computed_fields_13 -# title: computed-fields-13 -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_13.script -# SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script -# -# Check that it is not allowed to drop column which is referenced by computed-by column. -# -# -# Checked on: 4.0.0.1803 SS; 3.0.6.33265 SS; 2.5.9.27149 SC. -# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: computed-fields-13 +FBTEST: functional.gtcs.computed_fields_13 +TITLE: Computed fields +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_13.script + SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script + + Check that it is not allowed to drop column which is referenced by computed-by column. +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory() -substitutions_1 = [('=', ''), ('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set heading off; /*---------------------------------------------*/ /* Create a table with computed field. */ @@ -55,20 +43,21 @@ test_script_1 = """ /*---------------------------------------------------------------------*/ /* Now alter table and drop the computed field which is used in other */ - /* computed field. */ + /* computed field. */ /* It shouldn't allow you to drop the field. 
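One review note on the tests that check both streams (computed_fields_11 above, _13 and _14 below): folding the two comparisons into a single assert with "and" works, but the failure report for a compound boolean is typically harder to read than for a plain equality assert. A hedged alternative, a drop-in replacement for the test body in those modules (it reuses their module-level expected_stdout / expected_stderr), keeps the two checks separate at the cost of stopping at the first mismatching stream:

# Sketch of an alternative test body, same fixtures as in this patch.
@pytest.mark.version('>=3')
def test_1(act: Action):
    act.expected_stdout = expected_stdout
    act.expected_stderr = expected_stderr
    act.execute()
    assert act.clean_stderr == act.clean_expected_stderr   # stderr first, as in the old code
    assert act.clean_stdout == act.clean_expected_stdout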
*/ /*---------------------------------------------------------------------*/ alter table t1 drop af; select 'point-2' msg, p.* from t1 p; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('=', ''), ('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ point-1 10 30 point-2 11 44 220 """ -expected_stderr_1 = """ + +expected_stderr = """ Statement failed, SQLSTATE 42000 unsuccessful metadata update -cannot delete @@ -82,12 +71,10 @@ expected_stderr_1 = """ -there are 1 dependencies """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.expected_stderr = expected_stderr + act.execute() + assert (act.clean_stderr == act.clean_expected_stderr and + act.clean_stdout == act.clean_expected_stdout) diff --git a/tests/functional/gtcs/test_computed_fields_14.py b/tests/functional/gtcs/test_computed_fields_14.py index ff5c7089..a47a800a 100644 --- a/tests/functional/gtcs/test_computed_fields_14.py +++ b/tests/functional/gtcs/test_computed_fields_14.py @@ -1,32 +1,22 @@ #coding:utf-8 -# -# id: functional.gtcs.computed_fields_14 -# title: GTCS/tests/CF_ISQL_14; computed-fields-14 -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_14.script -# SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script -# Checked on: 4.0.0.1803 SS; 3.0.6.33265 SS; 2.5.9.27149 SC. -# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: computed-fields-14 +FBTEST: functional.gtcs.computed_fields_14 +TITLE: Computed fields +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_14.script + SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory() -substitutions_1 = [('=', ''), ('[ \t]+', ' '), ('attempted update of read-only column.*', 'attempted update of read-only column')] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set heading off; /*---------------------------------------------*/ /* Create a table with computed field. 
*/ @@ -59,13 +49,16 @@ test_script_1 = """ insert into t6 values(10, 12); """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('=', ''), ('[ \t]+', ' '), + ('attempted update of read-only column.*', + 'attempted update of read-only column')]) -expected_stdout_1 = """ +expected_stdout = """ point-1 10 30 point-2 10 30 """ -expected_stderr_1 = """ + +expected_stderr = """ Statement failed, SQLSTATE 42000 attempted update of read-only column @@ -83,12 +76,10 @@ expected_stderr_1 = """ -Count of read-write columns does not equal count of values """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.expected_stderr = expected_stderr + act.execute() + assert (act.clean_stderr == act.clean_expected_stderr and + act.clean_stdout == act.clean_expected_stdout) diff --git a/tests/functional/gtcs/test_computed_fields_15.py b/tests/functional/gtcs/test_computed_fields_15.py index 7414e1c2..b01473f3 100644 --- a/tests/functional/gtcs/test_computed_fields_15.py +++ b/tests/functional/gtcs/test_computed_fields_15.py @@ -1,32 +1,22 @@ #coding:utf-8 -# -# id: functional.gtcs.computed_fields_15 -# title: computed-fields-15 -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_15.script -# SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script -# Checked on: 4.0.0.1803 SS; 3.0.6.33265 SS; 2.5.9.27149 SC. -# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: computed-fields-15 +FBTEST: functional.gtcs.computed_fields_15 +TITLE: Computed fields +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_15.script + SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory() -substitutions_1 = [('^((?!Statement failed|SQL error code).)*$', ''), (' = ', ' '), ('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set heading off; /*-----------------------------------------------------------------------------*/ /* Create a table with computed field which is defined using non-existing UDF. 
*/ @@ -34,9 +24,10 @@ test_script_1 = """ create table t0 (a integer, af computed by ( non_exist_udf(a) )); """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('^((?!Statement failed|SQL error code).)*$', ''), + (' = ', ' '), ('[ \t]+', ' ')]) -expected_stderr_1 = """ +expected_stderr = """ Statement failed, SQLSTATE 39000 Dynamic SQL Error -SQL error code -804 @@ -44,9 +35,8 @@ expected_stderr_1 = """ -NON_EXIST_UDF """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stderr = expected_stderr + act.execute() + assert act.clean_stderr == act.clean_expected_stderr diff --git a/tests/functional/gtcs/test_computed_fields_16.py b/tests/functional/gtcs/test_computed_fields_16.py index 1d777bb3..badf83ef 100644 --- a/tests/functional/gtcs/test_computed_fields_16.py +++ b/tests/functional/gtcs/test_computed_fields_16.py @@ -1,32 +1,22 @@ #coding:utf-8 -# -# id: functional.gtcs.computed_fields_16 -# title: computed-fields-16 -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_16.script -# SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script -# Checked on: 4.0.0.1803 SS; 3.0.6.33265 SS; 2.5.9.27149 SC. -# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: computed-fields-16 +FBTEST: functional.gtcs.computed_fields_16 +TITLE: Computed fields +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_16.script + SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ /*-------------------------------------------------------------*/ /* Create a table with computed field and improper attributes. 
*/ /*-------------------------------------------------------------*/ @@ -51,9 +41,9 @@ test_script_1 = """ """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stderr_1 = """ +expected_stderr = """ Statement failed, SQLSTATE = 42000 Dynamic SQL Error -SQL error code = -104 @@ -97,9 +87,8 @@ expected_stderr_1 = """ -primary """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stderr = expected_stderr + act.execute() + assert act.clean_stderr == act.clean_expected_stderr diff --git a/tests/functional/gtcs/test_computed_fields_17.py b/tests/functional/gtcs/test_computed_fields_17.py index 99e6d926..9e90bdb3 100644 --- a/tests/functional/gtcs/test_computed_fields_17.py +++ b/tests/functional/gtcs/test_computed_fields_17.py @@ -1,32 +1,22 @@ #coding:utf-8 -# -# id: functional.gtcs.computed_fields_17 -# title: computed-fields-17 -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_17.script -# SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script -# Checked on: 4.0.0.1803 SS; 3.0.6.33265 SS; 2.5.9.27149 SC. -# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: computed-fields-17 +FBTEST: functional.gtcs.computed_fields_17 +TITLE: Computed fields +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_17.script + SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory() -substitutions_1 = [('=', ''), ('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ create generator gen1; set generator gen1 to 999; @@ -120,9 +110,9 @@ test_script_1 = """ """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('=', ''), ('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ MSG A GENID_FIELD3 point-1 4 1228 point-1 1 3428 @@ -132,9 +122,8 @@ expected_stdout_1 = """ point-2 1 1007 6216 11840 """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/gtcs/test_conversion_error_from_string.py b/tests/functional/gtcs/test_conversion_error_from_string.py index 182a1d19..8fae8cca 100644 --- a/tests/functional/gtcs/test_conversion_error_from_string.py +++ b/tests/functional/gtcs/test_conversion_error_from_string.py @@ -1,38 +1,26 @@ #coding:utf-8 -# -# id: functional.gtcs.conversion_error_from_string -# title: GTCS/tests/CF_ISQL_31. 
Script issues conversion error from string "" -# decription: -# ::: NB ::: -# ### Name of original test has no any relation with actual task of this test: ### -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_31.script -# -# Source description of problem with script for reproducing: -# https://sourceforge.net/p/firebird/mailman/message/17016915/ -# -# Issue in original test: -# bug in devel-list / Reported by lobolo2000 18-May-2004 -# -# Checked on: 4.0.0.1804 SS; 3.0.6.33271 SS; 2.5.9.27149 SC. -# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: gtcs.conversion-error-from-string +FBTEST: functional.gtcs.conversion_error_from_string +TITLE: Script issues conversion error from string +DESCRIPTION: + ::: NB ::: + ### Name of original test has no any relation with actual task of this test: ### + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_31.script + + Source description of problem with script for reproducing: + https://sourceforge.net/p/firebird/mailman/message/17016915/ + + Issue in original test: bug in devel-list / Reported by lobolo2000 18-May-2004 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory() -substitutions_1 = [('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ create domain dm_id as bigint not null; @@ -252,9 +240,9 @@ test_script_1 = """ select 'point-2' msg, m.qoh, m.qoh, m.qoh, m.qoh, m.qoh, m.qoh, m.qoh, m.qoh, m.qoh, m.qoh from items m; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ MSG point-1 UID 1 DESCRIPTION pa @@ -430,9 +418,8 @@ expected_stdout_1 = """ Records affected: 6 """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/gtcs/test_crash_of_group_by_varchar_4000.py b/tests/functional/gtcs/test_crash_of_group_by_varchar_4000.py index 8029b7ae..a06a9282 100644 --- a/tests/functional/gtcs/test_crash_of_group_by_varchar_4000.py +++ b/tests/functional/gtcs/test_crash_of_group_by_varchar_4000.py @@ -1,40 +1,30 @@ #coding:utf-8 -# -# id: functional.gtcs.crash_of_group_by_varchar_4000 -# title: GTCS/tests/CF_ISQL_33. Crash on attempt to GROUP BY on table with varchar(4000) field -# decription: -# ::: NB ::: -# ### Name of original test has no any relation with actual task of this test: ### -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_33.script -# -# Source description (dominikfaessler, message of 2004-05-27 13:11:09; FB 1.5.1.4443): -# https://sourceforge.net/p/firebird/mailman/message/17071981/ -# -# Issue in original test: -# bug #961543 Server Crash ISO8859_1 and DE_DE -# -# Checked on: 4.0.0.1804 SS; 3.0.6.33271 SS; 2.5.9.27149 SC. -# NB: it is enough in 'expected_stdout' to show only name of resulting field ('F01') -# rather than the whole output. 
-# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: gtcs.crash-of-group-by-varchar-4000 +FBTEST: functional.gtcs.crash_of_group_by_varchar_4000 +TITLE: Crash on attempt to GROUP BY on table with varchar(4000) field +DESCRIPTION: + ::: NB ::: + ### Name of original test has no any relation with actual task of this test: ### + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_33.script + + Source description (dominikfaessler, message of 2004-05-27 13:11:09; FB 1.5.1.4443): + https://sourceforge.net/p/firebird/mailman/message/17071981/ + + Issue in original test: + bug #961543 Server Crash ISO8859_1 and DE_DE + + NB: it is enough in 'expected_stdout' to show only name of resulting field ('F01') + rather than the whole output. +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory(charset='ISO8859_1') -substitutions_1 = [('[ \t]+', ' '), ('^((?!F01|Records affected).)*$', '')] - -init_script_1 = """""" - -db_1 = db_factory(charset='ISO8859_1', sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ CREATE TABLE SNIPPETS ( f01 VARCHAR(4000) COLLATE DE_DE ); @@ -86,16 +76,15 @@ test_script_1 = """ """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('[ \t]+', ' '), ('^((?!F01|Records affected).)*$', '')]) -expected_stdout_1 = """ +expected_stdout = """ F01 Records affected: 1 """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/gtcs/test_division_by_zero_corrupts_db.py b/tests/functional/gtcs/test_division_by_zero_corrupts_db.py index 1e067b55..8c8bb583 100644 --- a/tests/functional/gtcs/test_division_by_zero_corrupts_db.py +++ b/tests/functional/gtcs/test_division_by_zero_corrupts_db.py @@ -1,35 +1,24 @@ #coding:utf-8 -# -# id: functional.gtcs.division_by_zero_corrupts_db -# title: GTCS/tests/CF_ISQL_29. Zero divide in SP can crash database when call this SP several times. -# decription: -# ::: NB ::: -# ### Name of original test has no any relation with actual task of this test: ### -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_29.script -# -# Issue in original test: -# Division by 0 corrupt database -# -# Checked on: 4.0.0.1803 SS; 3.0.6.33265 SS; 2.5.9.27149 SC. 
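Several of these tests (computed_fields_11 and _12 above, crash_of_group_by_varchar_4000 here, the dsql_domain tests below) rely on the same filtering idiom in their substitutions: a negative-lookahead pattern such as ('^((?!F01|Records affected).)*$', '') that wipes every output line not containing one of the listed tokens, so only the interesting lines take part in the comparison. A stdlib-only illustration of what that regex matches follows; the sketch drops non-matching lines for clarity, whereas the framework presumably blanks them via substitution, and the sample text is invented.

import re

pattern = r'^((?!F01|Records affected).)*$'   # matches only lines WITHOUT the listed tokens
raw = "F01 some value\nnoise line\nRecords affected: 1\nmore noise"
kept = [line for line in raw.splitlines()
        if not re.fullmatch(pattern, line)]
print(kept)   # ['F01 some value', 'Records affected: 1']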
-# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: gtcs.division-by-zero-corrupts-db +FBTEST: functional.gtcs.division_by_zero_corrupts_db +TITLE: Zero divide in SP can crash database when call this SP several times +DESCRIPTION: + ::: NB ::: + ### Name of original test has no any relation with actual task of this test: ### + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_29.script + + Issue in original test: + Division by 0 corrupt database +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory() -substitutions_1 = [("-At procedure 'SPX_AUX_TEST' line: .*", ''), ('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set term ^ ; create procedure spx_aux_test (par1 bigint) returns (ret1 bigint) as @@ -73,9 +62,10 @@ test_script_1 = """ execute procedure spx_aux_test (1); """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[("-At procedure 'SPX_AUX_TEST' line: .*", ''), + ('[ \t]+', ' ')]) -expected_stderr_1 = """ +expected_stderr = """ Statement failed, SQLSTATE = 22012 arithmetic exception, numeric overflow, or string truncation -Integer divide by zero. The code attempted to divide an integer value by an integer divisor of zero. @@ -89,9 +79,8 @@ expected_stderr_1 = """ -Integer divide by zero. The code attempted to divide an integer value by an integer divisor of zero. """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stderr = expected_stderr + act.execute() + assert act.clean_stderr == act.clean_expected_stderr diff --git a/tests/functional/gtcs/test_dsql_domain_01.py b/tests/functional/gtcs/test_dsql_domain_01.py index a411c8ed..3b4e6203 100644 --- a/tests/functional/gtcs/test_dsql_domain_01.py +++ b/tests/functional/gtcs/test_dsql_domain_01.py @@ -1,34 +1,23 @@ #coding:utf-8 -# -# id: functional.gtcs.dsql_domain_01 -# title: GTCS/tests/DSQL_DOMAIN_01. Test the level 0 syntax for SQL create domain defining only the datatype. -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/DSQL_DOMAIN_01.script -# -# NB: avoid usage of ISQL command 'SHOW DOMAIN' because of unstable output. -# We display info about domains using common VIEW based on RDB$FIELDS table. -# -# Checked on 4.0.0.1896; 3.0.6.33288; 2.5.9.27149 -# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: gtcs.dsql-domain-01 +FBTEST: functional.gtcs.dsql_domain_01 +TITLE: Test the level 0 syntax for SQL create domain defining only the datatype +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/DSQL_DOMAIN_01.script + + NB: avoid usage of ISQL command 'SHOW DOMAIN' because of unstable output. + We display info about domains using common VIEW based on RDB$FIELDS table. 
+""" import pytest from firebird.qa import db_factory, isql_act, Action -# version: 2.5 -# resources: None +db = db_factory() -substitutions_1 = [('^((?!Statement failed|SQL error code).)*$', ''), (' = ', ' '), ('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ create view v_test as select ff.rdb$field_name as dm_name @@ -78,243 +67,243 @@ test_script_1 = """ select * from v_test order by dm_name; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('^((?!Statement failed|SQL error code).)*$', ''), + (' = ', ' '), ('[ \t]+', ' ')]) -expected_stdout_1 = """ - DM_NAME DOM01A_1 - DM_TYPE 7 - DM_SUBTYPE 0 - DM_FLEN 2 - DM_FSCALE 0 - DM_FPREC 0 - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL - DM_FVALID - DM_FDEFAULT - DM_NAME DOM01A_2 - DM_TYPE 7 - DM_SUBTYPE 1 - DM_FLEN 2 - DM_FSCALE -1 - DM_FPREC 3 - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL - DM_FVALID - DM_FDEFAULT - DM_NAME DOM01B_1 - DM_TYPE 8 - DM_SUBTYPE 0 - DM_FLEN 4 - DM_FSCALE 0 - DM_FPREC 0 - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL - DM_FVALID - DM_FDEFAULT - DM_NAME DOM01B_2 - DM_TYPE 8 - DM_SUBTYPE 0 - DM_FLEN 4 - DM_FSCALE 0 - DM_FPREC 0 - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL - DM_FVALID - DM_FDEFAULT - DM_NAME DOM01B_3 - DM_TYPE 8 - DM_SUBTYPE 1 - DM_FLEN 4 - DM_FSCALE 0 - DM_FPREC 9 - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL - DM_FVALID - DM_FDEFAULT - DM_NAME DOM01B_4 - DM_TYPE 8 - DM_SUBTYPE 1 - DM_FLEN 4 - DM_FSCALE -2 - DM_FPREC 6 - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL - DM_FVALID - DM_FDEFAULT - DM_NAME DOM01C - DM_TYPE 12 - DM_SUBTYPE - DM_FLEN 4 - DM_FSCALE 0 - DM_FPREC - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL - DM_FVALID - DM_FDEFAULT - DM_NAME DOM01D_1 - DM_TYPE 14 - DM_SUBTYPE 0 - DM_FLEN 20 - DM_FSCALE 0 - DM_FPREC - DM_FCSET 0 - DM_FCOLL 0 - DM_FCHRLEN 20 - DM_FNULL - DM_FVALID - DM_FDEFAULT - DM_NAME DOM01D_2 - DM_TYPE 14 - DM_SUBTYPE 0 - DM_FLEN 99 - DM_FSCALE 0 - DM_FPREC - DM_FCSET 0 - DM_FCOLL 0 - DM_FCHRLEN 99 - DM_FNULL - DM_FVALID - DM_FDEFAULT - DM_NAME DOM01E_1 - DM_TYPE 37 - DM_SUBTYPE 0 - DM_FLEN 25 - DM_FSCALE 0 - DM_FPREC - DM_FCSET 0 - DM_FCOLL 0 - DM_FCHRLEN 25 - DM_FNULL - DM_FVALID - DM_FDEFAULT - DM_NAME DOM01E_2 - DM_TYPE 37 - DM_SUBTYPE 0 - DM_FLEN 100 - DM_FSCALE 0 - DM_FPREC - DM_FCSET 0 - DM_FCOLL 0 - DM_FCHRLEN 100 - DM_FNULL - DM_FVALID - DM_FDEFAULT - DM_NAME DOM01E_3 - DM_TYPE 37 - DM_SUBTYPE 0 - DM_FLEN 2 - DM_FSCALE 0 - DM_FPREC - DM_FCSET 0 - DM_FCOLL 0 - DM_FCHRLEN 2 - DM_FNULL - DM_FVALID - DM_FDEFAULT - DM_NAME DOM01F_1 - DM_TYPE 8 - DM_SUBTYPE 2 - DM_FLEN 4 - DM_FSCALE -2 - DM_FPREC 6 - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL - DM_FVALID - DM_FDEFAULT - DM_NAME DOM01G_1 - DM_TYPE 10 - DM_SUBTYPE - DM_FLEN 4 - DM_FSCALE 0 - DM_FPREC - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL - DM_FVALID - DM_FDEFAULT - DM_NAME DOM01G_2 - DM_TYPE 27 - DM_SUBTYPE - DM_FLEN 8 - DM_FSCALE 0 - DM_FPREC - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL - DM_FVALID - DM_FDEFAULT - DM_NAME DOM01G_3 - DM_TYPE 10 - DM_SUBTYPE - DM_FLEN 4 - DM_FSCALE 0 - DM_FPREC - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL - DM_FVALID - DM_FDEFAULT - DM_NAME DOM01H - DM_TYPE 27 - DM_SUBTYPE - DM_FLEN 8 - DM_FSCALE 0 - DM_FPREC - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL - DM_FVALID - DM_FDEFAULT - DM_NAME DOM01I_1 - DM_TYPE 261 - DM_SUBTYPE 0 - DM_FLEN 8 - DM_FSCALE 0 - DM_FPREC - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL - 
DM_FVALID - DM_FDEFAULT - DM_NAME DOM01I_2 - DM_TYPE 261 - DM_SUBTYPE 1 - DM_FLEN 8 - DM_FSCALE 0 - DM_FPREC - DM_FCSET 0 - DM_FCOLL 0 - DM_FCHRLEN - DM_FNULL - DM_FVALID - DM_FDEFAULT - Records affected: 19 +expected_stdout = """ + DM_NAME DOM01A_1 + DM_TYPE 7 + DM_SUBTYPE 0 + DM_FLEN 2 + DM_FSCALE 0 + DM_FPREC 0 + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL + DM_FVALID + DM_FDEFAULT + DM_NAME DOM01A_2 + DM_TYPE 7 + DM_SUBTYPE 1 + DM_FLEN 2 + DM_FSCALE -1 + DM_FPREC 3 + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL + DM_FVALID + DM_FDEFAULT + DM_NAME DOM01B_1 + DM_TYPE 8 + DM_SUBTYPE 0 + DM_FLEN 4 + DM_FSCALE 0 + DM_FPREC 0 + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL + DM_FVALID + DM_FDEFAULT + DM_NAME DOM01B_2 + DM_TYPE 8 + DM_SUBTYPE 0 + DM_FLEN 4 + DM_FSCALE 0 + DM_FPREC 0 + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL + DM_FVALID + DM_FDEFAULT + DM_NAME DOM01B_3 + DM_TYPE 8 + DM_SUBTYPE 1 + DM_FLEN 4 + DM_FSCALE 0 + DM_FPREC 9 + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL + DM_FVALID + DM_FDEFAULT + DM_NAME DOM01B_4 + DM_TYPE 8 + DM_SUBTYPE 1 + DM_FLEN 4 + DM_FSCALE -2 + DM_FPREC 6 + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL + DM_FVALID + DM_FDEFAULT + DM_NAME DOM01C + DM_TYPE 12 + DM_SUBTYPE + DM_FLEN 4 + DM_FSCALE 0 + DM_FPREC + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL + DM_FVALID + DM_FDEFAULT + DM_NAME DOM01D_1 + DM_TYPE 14 + DM_SUBTYPE 0 + DM_FLEN 20 + DM_FSCALE 0 + DM_FPREC + DM_FCSET 0 + DM_FCOLL 0 + DM_FCHRLEN 20 + DM_FNULL + DM_FVALID + DM_FDEFAULT + DM_NAME DOM01D_2 + DM_TYPE 14 + DM_SUBTYPE 0 + DM_FLEN 99 + DM_FSCALE 0 + DM_FPREC + DM_FCSET 0 + DM_FCOLL 0 + DM_FCHRLEN 99 + DM_FNULL + DM_FVALID + DM_FDEFAULT + DM_NAME DOM01E_1 + DM_TYPE 37 + DM_SUBTYPE 0 + DM_FLEN 25 + DM_FSCALE 0 + DM_FPREC + DM_FCSET 0 + DM_FCOLL 0 + DM_FCHRLEN 25 + DM_FNULL + DM_FVALID + DM_FDEFAULT + DM_NAME DOM01E_2 + DM_TYPE 37 + DM_SUBTYPE 0 + DM_FLEN 100 + DM_FSCALE 0 + DM_FPREC + DM_FCSET 0 + DM_FCOLL 0 + DM_FCHRLEN 100 + DM_FNULL + DM_FVALID + DM_FDEFAULT + DM_NAME DOM01E_3 + DM_TYPE 37 + DM_SUBTYPE 0 + DM_FLEN 2 + DM_FSCALE 0 + DM_FPREC + DM_FCSET 0 + DM_FCOLL 0 + DM_FCHRLEN 2 + DM_FNULL + DM_FVALID + DM_FDEFAULT + DM_NAME DOM01F_1 + DM_TYPE 8 + DM_SUBTYPE 2 + DM_FLEN 4 + DM_FSCALE -2 + DM_FPREC 6 + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL + DM_FVALID + DM_FDEFAULT + DM_NAME DOM01G_1 + DM_TYPE 10 + DM_SUBTYPE + DM_FLEN 4 + DM_FSCALE 0 + DM_FPREC + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL + DM_FVALID + DM_FDEFAULT + DM_NAME DOM01G_2 + DM_TYPE 27 + DM_SUBTYPE + DM_FLEN 8 + DM_FSCALE 0 + DM_FPREC + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL + DM_FVALID + DM_FDEFAULT + DM_NAME DOM01G_3 + DM_TYPE 10 + DM_SUBTYPE + DM_FLEN 4 + DM_FSCALE 0 + DM_FPREC + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL + DM_FVALID + DM_FDEFAULT + DM_NAME DOM01H + DM_TYPE 27 + DM_SUBTYPE + DM_FLEN 8 + DM_FSCALE 0 + DM_FPREC + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL + DM_FVALID + DM_FDEFAULT + DM_NAME DOM01I_1 + DM_TYPE 261 + DM_SUBTYPE 0 + DM_FLEN 8 + DM_FSCALE 0 + DM_FPREC + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL + DM_FVALID + DM_FDEFAULT + DM_NAME DOM01I_2 + DM_TYPE 261 + DM_SUBTYPE 1 + DM_FLEN 8 + DM_FSCALE 0 + DM_FPREC + DM_FCSET 0 + DM_FCOLL 0 + DM_FCHRLEN + DM_FNULL + DM_FVALID + DM_FDEFAULT + Records affected: 19 """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + 
act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/gtcs/test_dsql_domain_02.py b/tests/functional/gtcs/test_dsql_domain_02.py index 7d058365..34129b9b 100644 --- a/tests/functional/gtcs/test_dsql_domain_02.py +++ b/tests/functional/gtcs/test_dsql_domain_02.py @@ -1,35 +1,25 @@ #coding:utf-8 -# -# id: functional.gtcs.dsql_domain_02 -# title: GTCS/tests/DSQL_DOMAIN_02. Test the level 0 syntax for SQL "CREATE DOMAIN" statement using datatype and DEFAULT clauses -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/DSQL_DOMAIN_02.script -# -# NB: avoid usage of ISQL command 'SHOW DOMAIN' because of unstable output. -# We display info about domains using common VIEW based on RDB$FIELDS table. -# Columns with rdb$validation_source and rdb$default_source contain BLOB data thus we have to skip from showing their blob ID - see substitution. -# -# Checked on 4.0.0.1896; 3.0.6.33288; 2.5.9.27149 -# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: gtcs.dsql-domain-02 +FBTEST: functional.gtcs.dsql_domain_02 +TITLE: Test the level 0 syntax for SQL "CREATE DOMAIN" statement using datatype and DEFAULT clauses +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/DSQL_DOMAIN_02.script + + NB: avoid usage of ISQL command 'SHOW DOMAIN' because of unstable output. + We display info about domains using common VIEW based on RDB$FIELDS table. + Columns with rdb$validation_source and rdb$default_source contain BLOB data thus we have + to skip from showing their blob ID - see substitution. +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory() -substitutions_1 = [('[ \t]+', ' '), ('DM_FDEFAULT_BLOB_ID.*', ''), ('DM_FVALID_BLOB_ID.*', '')] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ create view v_test as select ff.rdb$field_name as dm_name @@ -83,293 +73,293 @@ test_script_1 = """ set list on; set count on; select * from v_test order by dm_name; ---('[ ]+', ' '), +--('[ ]+', ' '), """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('[ \t]+', ' '), ('DM_FDEFAULT_BLOB_ID.*', ''), + ('DM_FVALID_BLOB_ID.*', '')]) -expected_stdout_1 = """ - DM_NAME DOM02A1 - DM_TYPE 7 - DM_SUBTYPE 0 - DM_FLEN 2 - DM_FSCALE 0 - DM_FPREC 0 - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL - default 0 - DM_NAME DOM02B1 - DM_TYPE 8 - DM_SUBTYPE 0 - DM_FLEN 4 - DM_FSCALE 0 - DM_FPREC 0 - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL - default 0 - DM_NAME DOM02C3_1 - DM_TYPE 12 - DM_SUBTYPE - DM_FLEN 4 - DM_FSCALE 0 - DM_FPREC - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL - default '27-JAN-1992' - DM_NAME DOM02C3_2 - DM_TYPE 12 - DM_SUBTYPE - DM_FLEN 4 - DM_FSCALE 0 - DM_FPREC - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL - default 'today' - DM_NAME DOM02C3_3 - DM_TYPE 12 - DM_SUBTYPE - DM_FLEN 4 - DM_FSCALE 0 - DM_FPREC - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL - default '01/27/92' - DM_NAME DOM02D1 - DM_TYPE 14 - DM_SUBTYPE 0 - DM_FLEN 30 - DM_FSCALE 0 - DM_FPREC - DM_FCSET 0 - DM_FCOLL 0 - DM_FCHRLEN 30 - DM_FNULL - default 0 - DM_NAME DOM02D2 - DM_TYPE 14 - DM_SUBTYPE 0 - DM_FLEN 30 - DM_FSCALE 0 - DM_FPREC - DM_FCSET 0 - DM_FCOLL 0 - DM_FCHRLEN 30 - DM_FNULL - default 'def' - DM_NAME DOM02D3 - DM_TYPE 14 - 
DM_SUBTYPE 0 - DM_FLEN 30 - DM_FSCALE 0 - DM_FPREC - DM_FCSET 0 - DM_FCOLL 0 - DM_FCHRLEN 30 - DM_FNULL - default '28-OCT-1990' - DM_NAME DOM02E1 - DM_TYPE 37 - DM_SUBTYPE 0 - DM_FLEN 4 - DM_FSCALE 0 - DM_FPREC - DM_FCSET 0 - DM_FCOLL 0 - DM_FCHRLEN 4 - DM_FNULL - default 0 - DM_NAME DOM02E2 - DM_TYPE 37 - DM_SUBTYPE 0 - DM_FLEN 4 - DM_FSCALE 0 - DM_FPREC - DM_FCSET 0 - DM_FCOLL 0 - DM_FCHRLEN 4 - DM_FNULL - default 'def' - DM_NAME DOM02E3 - DM_TYPE 37 - DM_SUBTYPE 0 - DM_FLEN 8 - DM_FSCALE 0 - DM_FPREC - DM_FCSET 0 - DM_FCOLL 0 - DM_FCHRLEN 8 - DM_FNULL - default '09/01/82' - DM_NAME DOM02F1 - DM_TYPE 16 - DM_SUBTYPE 2 - DM_FLEN 8 - DM_FSCALE -1 - DM_FPREC 10 - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL - default 0 - DM_NAME DOM02G1 - DM_TYPE 10 - DM_SUBTYPE - DM_FLEN 4 - DM_FSCALE 0 - DM_FPREC - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL - default 0 - DM_NAME DOM02H1 - DM_TYPE 27 - DM_SUBTYPE - DM_FLEN 8 - DM_FSCALE 0 - DM_FPREC - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL - default 0 - DM_NAME DOM02J - DM_TYPE 7 - DM_SUBTYPE 0 - DM_FLEN 2 - DM_FSCALE 0 - DM_FPREC 0 - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL - default null - DM_NAME DOM02K - DM_TYPE 8 - DM_SUBTYPE 0 - DM_FLEN 4 - DM_FSCALE 0 - DM_FPREC 0 - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL - default null - DM_NAME DOM02L - DM_TYPE 12 - DM_SUBTYPE - DM_FLEN 4 - DM_FSCALE 0 - DM_FPREC - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL - default null - DM_NAME DOM02M - DM_TYPE 14 - DM_SUBTYPE 0 - DM_FLEN 2 - DM_FSCALE 0 - DM_FPREC - DM_FCSET 0 - DM_FCOLL 0 - DM_FCHRLEN 2 - DM_FNULL - default null - DM_NAME DOM02N - DM_TYPE 37 - DM_SUBTYPE 0 - DM_FLEN 15 - DM_FSCALE 0 - DM_FPREC - DM_FCSET 0 - DM_FCOLL 0 - DM_FCHRLEN 15 - DM_FNULL - default null - DM_NAME DOM02O - DM_TYPE 8 - DM_SUBTYPE 2 - DM_FLEN 4 - DM_FSCALE -1 - DM_FPREC 4 - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL - default null - DM_NAME DOM02P - DM_TYPE 10 - DM_SUBTYPE - DM_FLEN 4 - DM_FSCALE 0 - DM_FPREC - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL - default null - DM_NAME DOM02Q - DM_TYPE 27 - DM_SUBTYPE - DM_FLEN 8 - DM_FSCALE 0 - DM_FPREC - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL - default null - DM_NAME DOM02R - DM_TYPE 261 - DM_SUBTYPE 0 - DM_FLEN 8 - DM_FSCALE 0 - DM_FPREC - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL - default null - DM_NAME DOM02V - DM_TYPE 14 - DM_SUBTYPE 0 - DM_FLEN 15 - DM_FSCALE 0 - DM_FPREC - DM_FCSET 0 - DM_FCOLL 0 - DM_FCHRLEN 15 - DM_FNULL - default user - DM_NAME DOM02W - DM_TYPE 37 - DM_SUBTYPE 0 - DM_FLEN 60 - DM_FSCALE 0 - DM_FPREC - DM_FCSET 0 - DM_FCOLL 0 - DM_FCHRLEN 60 - DM_FNULL - default user - Records affected: 25 +expected_stdout = """ + DM_NAME DOM02A1 + DM_TYPE 7 + DM_SUBTYPE 0 + DM_FLEN 2 + DM_FSCALE 0 + DM_FPREC 0 + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL + default 0 + DM_NAME DOM02B1 + DM_TYPE 8 + DM_SUBTYPE 0 + DM_FLEN 4 + DM_FSCALE 0 + DM_FPREC 0 + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL + default 0 + DM_NAME DOM02C3_1 + DM_TYPE 12 + DM_SUBTYPE + DM_FLEN 4 + DM_FSCALE 0 + DM_FPREC + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL + default '27-JAN-1992' + DM_NAME DOM02C3_2 + DM_TYPE 12 + DM_SUBTYPE + DM_FLEN 4 + DM_FSCALE 0 + DM_FPREC + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL + default 'today' + DM_NAME DOM02C3_3 + DM_TYPE 12 + DM_SUBTYPE + DM_FLEN 4 + DM_FSCALE 0 + DM_FPREC + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL + default '01/27/92' + DM_NAME DOM02D1 + DM_TYPE 14 + DM_SUBTYPE 0 + DM_FLEN 30 + DM_FSCALE 0 + DM_FPREC + DM_FCSET 0 + DM_FCOLL 0 + DM_FCHRLEN 30 + DM_FNULL + default 0 + DM_NAME DOM02D2 
+ DM_TYPE 14 + DM_SUBTYPE 0 + DM_FLEN 30 + DM_FSCALE 0 + DM_FPREC + DM_FCSET 0 + DM_FCOLL 0 + DM_FCHRLEN 30 + DM_FNULL + default 'def' + DM_NAME DOM02D3 + DM_TYPE 14 + DM_SUBTYPE 0 + DM_FLEN 30 + DM_FSCALE 0 + DM_FPREC + DM_FCSET 0 + DM_FCOLL 0 + DM_FCHRLEN 30 + DM_FNULL + default '28-OCT-1990' + DM_NAME DOM02E1 + DM_TYPE 37 + DM_SUBTYPE 0 + DM_FLEN 4 + DM_FSCALE 0 + DM_FPREC + DM_FCSET 0 + DM_FCOLL 0 + DM_FCHRLEN 4 + DM_FNULL + default 0 + DM_NAME DOM02E2 + DM_TYPE 37 + DM_SUBTYPE 0 + DM_FLEN 4 + DM_FSCALE 0 + DM_FPREC + DM_FCSET 0 + DM_FCOLL 0 + DM_FCHRLEN 4 + DM_FNULL + default 'def' + DM_NAME DOM02E3 + DM_TYPE 37 + DM_SUBTYPE 0 + DM_FLEN 8 + DM_FSCALE 0 + DM_FPREC + DM_FCSET 0 + DM_FCOLL 0 + DM_FCHRLEN 8 + DM_FNULL + default '09/01/82' + DM_NAME DOM02F1 + DM_TYPE 16 + DM_SUBTYPE 2 + DM_FLEN 8 + DM_FSCALE -1 + DM_FPREC 10 + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL + default 0 + DM_NAME DOM02G1 + DM_TYPE 10 + DM_SUBTYPE + DM_FLEN 4 + DM_FSCALE 0 + DM_FPREC + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL + default 0 + DM_NAME DOM02H1 + DM_TYPE 27 + DM_SUBTYPE + DM_FLEN 8 + DM_FSCALE 0 + DM_FPREC + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL + default 0 + DM_NAME DOM02J + DM_TYPE 7 + DM_SUBTYPE 0 + DM_FLEN 2 + DM_FSCALE 0 + DM_FPREC 0 + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL + default null + DM_NAME DOM02K + DM_TYPE 8 + DM_SUBTYPE 0 + DM_FLEN 4 + DM_FSCALE 0 + DM_FPREC 0 + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL + default null + DM_NAME DOM02L + DM_TYPE 12 + DM_SUBTYPE + DM_FLEN 4 + DM_FSCALE 0 + DM_FPREC + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL + default null + DM_NAME DOM02M + DM_TYPE 14 + DM_SUBTYPE 0 + DM_FLEN 2 + DM_FSCALE 0 + DM_FPREC + DM_FCSET 0 + DM_FCOLL 0 + DM_FCHRLEN 2 + DM_FNULL + default null + DM_NAME DOM02N + DM_TYPE 37 + DM_SUBTYPE 0 + DM_FLEN 15 + DM_FSCALE 0 + DM_FPREC + DM_FCSET 0 + DM_FCOLL 0 + DM_FCHRLEN 15 + DM_FNULL + default null + DM_NAME DOM02O + DM_TYPE 8 + DM_SUBTYPE 2 + DM_FLEN 4 + DM_FSCALE -1 + DM_FPREC 4 + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL + default null + DM_NAME DOM02P + DM_TYPE 10 + DM_SUBTYPE + DM_FLEN 4 + DM_FSCALE 0 + DM_FPREC + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL + default null + DM_NAME DOM02Q + DM_TYPE 27 + DM_SUBTYPE + DM_FLEN 8 + DM_FSCALE 0 + DM_FPREC + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL + default null + DM_NAME DOM02R + DM_TYPE 261 + DM_SUBTYPE 0 + DM_FLEN 8 + DM_FSCALE 0 + DM_FPREC + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL + default null + DM_NAME DOM02V + DM_TYPE 14 + DM_SUBTYPE 0 + DM_FLEN 15 + DM_FSCALE 0 + DM_FPREC + DM_FCSET 0 + DM_FCOLL 0 + DM_FCHRLEN 15 + DM_FNULL + default user + DM_NAME DOM02W + DM_TYPE 37 + DM_SUBTYPE 0 + DM_FLEN 60 + DM_FSCALE 0 + DM_FPREC + DM_FCSET 0 + DM_FCOLL 0 + DM_FCHRLEN 60 + DM_FNULL + default user + Records affected: 25 """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/gtcs/test_dsql_domain_03.py b/tests/functional/gtcs/test_dsql_domain_03.py index 8ed4960c..82c08b5f 100644 --- a/tests/functional/gtcs/test_dsql_domain_03.py +++ b/tests/functional/gtcs/test_dsql_domain_03.py @@ -1,37 +1,27 @@ #coding:utf-8 -# -# id: functional.gtcs.dsql_domain_03 -# title: GTCS/tests/DSQL_DOMAIN_03. 
Test the level 0 syntax for SQL "CREATE DOMAIN" statement using datatype and NOT NULL constraint. -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/DSQL_DOMAIN_03.script -# -# NB: avoid usage of ISQL command 'SHOW DOMAIN' because of unstable output. -# We display info about domains using common VIEW based on RDB$FIELDS table. -# Columns with rdb$validation_source and rdb$default_source contain BLOB data thus we have to skip from showing their blob ID - see substitution. -# -# ::: NOTE ::: -# Added domains with datatype that did appear only in FB 4.0: DECFLOAT and TIME[STAMP] WITH TIME ZONE. For this reason only FB 4.0+ can be tested. -# Checked on 4.0.0.1896. -# -# tracker_id: -# min_versions: ['4.0'] -# versions: 4.0 -# qmid: None + +""" +ID: gtcs.dsql-domain-03 +FBTEST: functional.gtcs.dsql_domain_03 +TITLE: Test the level 0 syntax for SQL "CREATE DOMAIN" statement using datatype and NOT NULL constraint +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/DSQL_DOMAIN_03.script + + NB: avoid usage of ISQL command 'SHOW DOMAIN' because of unstable output. + We display info about domains using common VIEW based on RDB$FIELDS table. + Columns with rdb$validation_source and rdb$default_source contain BLOB data thus we have to skip from showing their blob ID - see substitution. +NOTES: + Added domains with datatype that did appear only in FB 4.0: DECFLOAT and + TIME[STAMP] WITH TIME ZONE. For this reason only FB 4.0+ can be tested. +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() -substitutions_1 = [('[ \t]+', ' '), ('DM_FDEFAULT_BLOB_ID.*', ''), ('DM_FVALID_BLOB_ID.*', '')] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ create view v_test as select ff.rdb$field_name as dm_name @@ -81,332 +71,332 @@ test_script_1 = """ create domain dom03_25 as decfloat(16) not null; create domain dom03_26 as decfloat(34) not null; commit; - set list on; + set list on; set count on; select * from v_test order by dm_name; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('[ \t]+', ' '), ('DM_FDEFAULT_BLOB_ID.*', ''), + ('DM_FVALID_BLOB_ID.*', '')]) -expected_stdout_1 = """ - DM_NAME DOM03_01 - DM_TYPE 7 - DM_SUBTYPE 0 - DM_FLEN 2 - DM_FSCALE 0 - DM_FPREC 0 - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL 1 - DM_FVALID_BLOB_ID - DM_FDEFAULT_BLOB_ID - DM_NAME DOM03_02 - DM_TYPE 8 - DM_SUBTYPE 0 - DM_FLEN 4 - DM_FSCALE 0 - DM_FPREC 0 - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL 1 - DM_FVALID_BLOB_ID - DM_FDEFAULT_BLOB_ID - DM_NAME DOM03_03 - DM_TYPE 16 - DM_SUBTYPE 0 - DM_FLEN 8 - DM_FSCALE 0 - DM_FPREC 0 - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL 1 - DM_FVALID_BLOB_ID - DM_FDEFAULT_BLOB_ID - DM_NAME DOM03_04 - DM_TYPE 12 - DM_SUBTYPE - DM_FLEN 4 - DM_FSCALE 0 - DM_FPREC - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL 1 - DM_FVALID_BLOB_ID - DM_FDEFAULT_BLOB_ID - DM_NAME DOM03_05 - DM_TYPE 14 - DM_SUBTYPE 0 - DM_FLEN 20 - DM_FSCALE 0 - DM_FPREC - DM_FCSET 0 - DM_FCOLL 0 - DM_FCHRLEN 20 - DM_FNULL 1 - DM_FVALID_BLOB_ID - DM_FDEFAULT_BLOB_ID - DM_NAME DOM03_06 - DM_TYPE 37 - DM_SUBTYPE 0 - DM_FLEN 25 - DM_FSCALE 0 - DM_FPREC - DM_FCSET 0 - DM_FCOLL 0 - DM_FCHRLEN 25 - DM_FNULL 1 - DM_FVALID_BLOB_ID - DM_FDEFAULT_BLOB_ID - DM_NAME DOM03_07 - DM_TYPE 8 - DM_SUBTYPE 2 
- DM_FLEN 4 - DM_FSCALE -2 - DM_FPREC 6 - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL 1 - DM_FVALID_BLOB_ID - DM_FDEFAULT_BLOB_ID - DM_NAME DOM03_08 - DM_TYPE 8 - DM_SUBTYPE 1 - DM_FLEN 4 - DM_FSCALE -2 - DM_FPREC 6 - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL 1 - DM_FVALID_BLOB_ID - DM_FDEFAULT_BLOB_ID - DM_NAME DOM03_09 - DM_TYPE 10 - DM_SUBTYPE - DM_FLEN 4 - DM_FSCALE 0 - DM_FPREC - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL 1 - DM_FVALID_BLOB_ID - DM_FDEFAULT_BLOB_ID - DM_NAME DOM03_10 - DM_TYPE 10 - DM_SUBTYPE - DM_FLEN 4 - DM_FSCALE 0 - DM_FPREC - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL 1 - DM_FVALID_BLOB_ID - DM_FDEFAULT_BLOB_ID - DM_NAME DOM03_11 - DM_TYPE 27 - DM_SUBTYPE - DM_FLEN 8 - DM_FSCALE 0 - DM_FPREC - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL 1 - DM_FVALID_BLOB_ID - DM_FDEFAULT_BLOB_ID - DM_NAME DOM03_12 - DM_TYPE 27 - DM_SUBTYPE - DM_FLEN 8 - DM_FSCALE 0 - DM_FPREC - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL 1 - DM_FVALID_BLOB_ID - DM_FDEFAULT_BLOB_ID - DM_NAME DOM03_13 - DM_TYPE 261 - DM_SUBTYPE 0 - DM_FLEN 8 - DM_FSCALE 0 - DM_FPREC - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL 1 - DM_FVALID_BLOB_ID - DM_FDEFAULT_BLOB_ID - DM_NAME DOM03_14 - DM_TYPE 261 - DM_SUBTYPE 1 - DM_FLEN 8 - DM_FSCALE 0 - DM_FPREC - DM_FCSET 0 - DM_FCOLL 0 - DM_FCHRLEN - DM_FNULL 1 - DM_FVALID_BLOB_ID - DM_FDEFAULT_BLOB_ID - DM_NAME DOM03_15 - DM_TYPE 261 - DM_SUBTYPE 0 - DM_FLEN 8 - DM_FSCALE 0 - DM_FPREC - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL 1 - DM_FVALID_BLOB_ID - DM_FDEFAULT_BLOB_ID - DM_NAME DOM03_16 - DM_TYPE 23 - DM_SUBTYPE - DM_FLEN 1 - DM_FSCALE 0 - DM_FPREC - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL 1 - DM_FVALID_BLOB_ID - DM_FDEFAULT_BLOB_ID - DM_NAME DOM03_17 - DM_TYPE 13 - DM_SUBTYPE - DM_FLEN 4 - DM_FSCALE 0 - DM_FPREC - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL 1 - DM_FVALID_BLOB_ID - DM_FDEFAULT_BLOB_ID - DM_NAME DOM03_18 - DM_TYPE 28 - DM_SUBTYPE - DM_FLEN 8 - DM_FSCALE 0 - DM_FPREC - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL 1 - DM_FVALID_BLOB_ID - DM_FDEFAULT_BLOB_ID - DM_NAME DOM03_19 - DM_TYPE 35 - DM_SUBTYPE - DM_FLEN 8 - DM_FSCALE 0 - DM_FPREC - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL 1 - DM_FVALID_BLOB_ID - DM_FDEFAULT_BLOB_ID - DM_NAME DOM03_20 - DM_TYPE 29 - DM_SUBTYPE - DM_FLEN 12 - DM_FSCALE 0 - DM_FPREC - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL 1 - DM_FVALID_BLOB_ID - DM_FDEFAULT_BLOB_ID - DM_NAME DOM03_21 - DM_TYPE 14 - DM_SUBTYPE 0 - DM_FLEN 20 - DM_FSCALE 0 - DM_FPREC - DM_FCSET 21 - DM_FCOLL 0 - DM_FCHRLEN 20 - DM_FNULL 1 - DM_FVALID_BLOB_ID - DM_FDEFAULT_BLOB_ID - DM_NAME DOM03_22 - DM_TYPE 14 - DM_SUBTYPE 1 - DM_FLEN 20 - DM_FSCALE 0 - DM_FPREC - DM_FCSET 1 - DM_FCOLL 0 - DM_FCHRLEN 20 - DM_FNULL 1 - DM_FVALID_BLOB_ID - DM_FDEFAULT_BLOB_ID - DM_NAME DOM03_23 - DM_TYPE 37 - DM_SUBTYPE 1 - DM_FLEN 20 - DM_FSCALE 0 - DM_FPREC - DM_FCSET 1 - DM_FCOLL 0 - DM_FCHRLEN 20 - DM_FNULL 1 - DM_FVALID_BLOB_ID - DM_FDEFAULT_BLOB_ID - DM_NAME DOM03_24 - DM_TYPE 25 - DM_SUBTYPE - DM_FLEN 16 - DM_FSCALE 0 - DM_FPREC 34 - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL 1 - DM_FVALID_BLOB_ID - DM_FDEFAULT_BLOB_ID - DM_NAME DOM03_25 - DM_TYPE 24 - DM_SUBTYPE - DM_FLEN 8 - DM_FSCALE 0 - DM_FPREC 16 - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL 1 - DM_FVALID_BLOB_ID - DM_FDEFAULT_BLOB_ID - DM_NAME DOM03_26 - DM_TYPE 25 - DM_SUBTYPE - DM_FLEN 16 - DM_FSCALE 0 - DM_FPREC 34 - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL 1 - DM_FVALID_BLOB_ID - DM_FDEFAULT_BLOB_ID - Records affected: 26 +expected_stdout = """ + DM_NAME DOM03_01 + DM_TYPE 7 + DM_SUBTYPE 0 + 
DM_FLEN 2 + DM_FSCALE 0 + DM_FPREC 0 + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL 1 + DM_FVALID_BLOB_ID + DM_FDEFAULT_BLOB_ID + DM_NAME DOM03_02 + DM_TYPE 8 + DM_SUBTYPE 0 + DM_FLEN 4 + DM_FSCALE 0 + DM_FPREC 0 + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL 1 + DM_FVALID_BLOB_ID + DM_FDEFAULT_BLOB_ID + DM_NAME DOM03_03 + DM_TYPE 16 + DM_SUBTYPE 0 + DM_FLEN 8 + DM_FSCALE 0 + DM_FPREC 0 + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL 1 + DM_FVALID_BLOB_ID + DM_FDEFAULT_BLOB_ID + DM_NAME DOM03_04 + DM_TYPE 12 + DM_SUBTYPE + DM_FLEN 4 + DM_FSCALE 0 + DM_FPREC + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL 1 + DM_FVALID_BLOB_ID + DM_FDEFAULT_BLOB_ID + DM_NAME DOM03_05 + DM_TYPE 14 + DM_SUBTYPE 0 + DM_FLEN 20 + DM_FSCALE 0 + DM_FPREC + DM_FCSET 0 + DM_FCOLL 0 + DM_FCHRLEN 20 + DM_FNULL 1 + DM_FVALID_BLOB_ID + DM_FDEFAULT_BLOB_ID + DM_NAME DOM03_06 + DM_TYPE 37 + DM_SUBTYPE 0 + DM_FLEN 25 + DM_FSCALE 0 + DM_FPREC + DM_FCSET 0 + DM_FCOLL 0 + DM_FCHRLEN 25 + DM_FNULL 1 + DM_FVALID_BLOB_ID + DM_FDEFAULT_BLOB_ID + DM_NAME DOM03_07 + DM_TYPE 8 + DM_SUBTYPE 2 + DM_FLEN 4 + DM_FSCALE -2 + DM_FPREC 6 + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL 1 + DM_FVALID_BLOB_ID + DM_FDEFAULT_BLOB_ID + DM_NAME DOM03_08 + DM_TYPE 8 + DM_SUBTYPE 1 + DM_FLEN 4 + DM_FSCALE -2 + DM_FPREC 6 + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL 1 + DM_FVALID_BLOB_ID + DM_FDEFAULT_BLOB_ID + DM_NAME DOM03_09 + DM_TYPE 10 + DM_SUBTYPE + DM_FLEN 4 + DM_FSCALE 0 + DM_FPREC + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL 1 + DM_FVALID_BLOB_ID + DM_FDEFAULT_BLOB_ID + DM_NAME DOM03_10 + DM_TYPE 10 + DM_SUBTYPE + DM_FLEN 4 + DM_FSCALE 0 + DM_FPREC + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL 1 + DM_FVALID_BLOB_ID + DM_FDEFAULT_BLOB_ID + DM_NAME DOM03_11 + DM_TYPE 27 + DM_SUBTYPE + DM_FLEN 8 + DM_FSCALE 0 + DM_FPREC + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL 1 + DM_FVALID_BLOB_ID + DM_FDEFAULT_BLOB_ID + DM_NAME DOM03_12 + DM_TYPE 27 + DM_SUBTYPE + DM_FLEN 8 + DM_FSCALE 0 + DM_FPREC + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL 1 + DM_FVALID_BLOB_ID + DM_FDEFAULT_BLOB_ID + DM_NAME DOM03_13 + DM_TYPE 261 + DM_SUBTYPE 0 + DM_FLEN 8 + DM_FSCALE 0 + DM_FPREC + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL 1 + DM_FVALID_BLOB_ID + DM_FDEFAULT_BLOB_ID + DM_NAME DOM03_14 + DM_TYPE 261 + DM_SUBTYPE 1 + DM_FLEN 8 + DM_FSCALE 0 + DM_FPREC + DM_FCSET 0 + DM_FCOLL 0 + DM_FCHRLEN + DM_FNULL 1 + DM_FVALID_BLOB_ID + DM_FDEFAULT_BLOB_ID + DM_NAME DOM03_15 + DM_TYPE 261 + DM_SUBTYPE 0 + DM_FLEN 8 + DM_FSCALE 0 + DM_FPREC + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL 1 + DM_FVALID_BLOB_ID + DM_FDEFAULT_BLOB_ID + DM_NAME DOM03_16 + DM_TYPE 23 + DM_SUBTYPE + DM_FLEN 1 + DM_FSCALE 0 + DM_FPREC + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL 1 + DM_FVALID_BLOB_ID + DM_FDEFAULT_BLOB_ID + DM_NAME DOM03_17 + DM_TYPE 13 + DM_SUBTYPE + DM_FLEN 4 + DM_FSCALE 0 + DM_FPREC + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL 1 + DM_FVALID_BLOB_ID + DM_FDEFAULT_BLOB_ID + DM_NAME DOM03_18 + DM_TYPE 28 + DM_SUBTYPE + DM_FLEN 8 + DM_FSCALE 0 + DM_FPREC + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL 1 + DM_FVALID_BLOB_ID + DM_FDEFAULT_BLOB_ID + DM_NAME DOM03_19 + DM_TYPE 35 + DM_SUBTYPE + DM_FLEN 8 + DM_FSCALE 0 + DM_FPREC + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL 1 + DM_FVALID_BLOB_ID + DM_FDEFAULT_BLOB_ID + DM_NAME DOM03_20 + DM_TYPE 29 + DM_SUBTYPE + DM_FLEN 12 + DM_FSCALE 0 + DM_FPREC + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL 1 + DM_FVALID_BLOB_ID + DM_FDEFAULT_BLOB_ID + DM_NAME DOM03_21 + DM_TYPE 14 + DM_SUBTYPE 0 + DM_FLEN 20 + DM_FSCALE 0 + DM_FPREC + DM_FCSET 21 + DM_FCOLL 
0 + DM_FCHRLEN 20 + DM_FNULL 1 + DM_FVALID_BLOB_ID + DM_FDEFAULT_BLOB_ID + DM_NAME DOM03_22 + DM_TYPE 14 + DM_SUBTYPE 1 + DM_FLEN 20 + DM_FSCALE 0 + DM_FPREC + DM_FCSET 1 + DM_FCOLL 0 + DM_FCHRLEN 20 + DM_FNULL 1 + DM_FVALID_BLOB_ID + DM_FDEFAULT_BLOB_ID + DM_NAME DOM03_23 + DM_TYPE 37 + DM_SUBTYPE 1 + DM_FLEN 20 + DM_FSCALE 0 + DM_FPREC + DM_FCSET 1 + DM_FCOLL 0 + DM_FCHRLEN 20 + DM_FNULL 1 + DM_FVALID_BLOB_ID + DM_FDEFAULT_BLOB_ID + DM_NAME DOM03_24 + DM_TYPE 25 + DM_SUBTYPE + DM_FLEN 16 + DM_FSCALE 0 + DM_FPREC 34 + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL 1 + DM_FVALID_BLOB_ID + DM_FDEFAULT_BLOB_ID + DM_NAME DOM03_25 + DM_TYPE 24 + DM_SUBTYPE + DM_FLEN 8 + DM_FSCALE 0 + DM_FPREC 16 + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL 1 + DM_FVALID_BLOB_ID + DM_FDEFAULT_BLOB_ID + DM_NAME DOM03_26 + DM_TYPE 25 + DM_SUBTYPE + DM_FLEN 16 + DM_FSCALE 0 + DM_FPREC 34 + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL 1 + DM_FVALID_BLOB_ID + DM_FDEFAULT_BLOB_ID + Records affected: 26 """ @pytest.mark.version('>=4.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/gtcs/test_dsql_domain_04.py b/tests/functional/gtcs/test_dsql_domain_04.py index 8550f995..4f840bfa 100644 --- a/tests/functional/gtcs/test_dsql_domain_04.py +++ b/tests/functional/gtcs/test_dsql_domain_04.py @@ -1,37 +1,28 @@ #coding:utf-8 -# -# id: functional.gtcs.dsql_domain_04 -# title: GTCS/tests/DSQL_DOMAIN_04. Test the level 0 syntax for SQL "CREATE DOMAIN" statement using datatype and CHECK constraint clause. -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/DSQL_DOMAIN_04.script -# -# NB: avoid usage of ISQL command 'SHOW DOMAIN' because of unstable output. -# We display info about domains using common VIEW based on RDB$FIELDS table. -# Columns with rdb$validation_source and rdb$default_source contain BLOB data thus we have to skip from showing their blob ID - see substitution. -# -# ::: NOTE ::: -# Added domains with datatype that did appear only in FB 4.0: DECFLOAT and TIME[STAMP] WITH TIME ZONE. For this reason only FB 4.0+ can be tested. -# Checked on 4.0.0.1896. -# -# tracker_id: -# min_versions: ['4.0'] -# versions: 4.0 -# qmid: None + +""" +ID: gtcs.dsql-domain-04 +FBTEST: functional.gtcs.dsql_domain_04 +TITLE: Test the level 0 syntax for SQL "CREATE DOMAIN" statement using datatype and CHECK constraint clause +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/DSQL_DOMAIN_04.script + + NB: avoid usage of ISQL command 'SHOW DOMAIN' because of unstable output. + We display info about domains using common VIEW based on RDB$FIELDS table. + Columns with rdb$validation_source and rdb$default_source contain BLOB data thus we have + to skip from showing their blob ID - see substitution. +NOTES: + Added domains with datatype that did appear only in FB 4.0: DECFLOAT and + TIME[STAMP] WITH TIME ZONE. For this reason only FB 4.0+ can be tested. 
+""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() -substitutions_1 = [('[ \t]+', ' '), ('DM_FDEFAULT_BLOB_ID.*', ''), ('DM_FVALID_BLOB_ID.*', '')] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ create view v_test as select ff.rdb$field_name as dm_name @@ -221,429 +212,429 @@ test_script_1 = """ create domain dom03_26 as decfloat(34) check (value in(-9.999999999999999999999999999999999E+6144, 9.999999999999999999999999999999999E+6144) ); commit; - set list on; + set list on; set count on; select * from v_test order by dm_name; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('[ \t]+', ' '), ('DM_FDEFAULT_BLOB_ID.*', ''), + ('DM_FVALID_BLOB_ID.*', '')]) -expected_stdout_1 = """ - DM_NAME DOM03_01 - DM_TYPE 7 - DM_SUBTYPE 0 - DM_FLEN 2 - DM_FSCALE 0 - DM_FPREC 0 - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL - check (value between 0 and 99) - DM_NAME DOM03_02 - DM_TYPE 8 - DM_SUBTYPE 0 - DM_FLEN 4 - DM_FSCALE 0 - DM_FPREC 0 - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL - check ( - value in ( - 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41, - 42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79, - 80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112, - 113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140, - 141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168, - 169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196, - 197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224, - 225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252, - 253,254,255,256,257,258,259,260,261,262,263,264,265,266,267,268,269,270,271,272,273,274,275,276,277,278,279,280, - 281,282,283,284,285,286,287,288,289,290,291,292,293,294,295,296,297,298,299,300,301,302,303,304,305,306,307,308, - 309,310,311,312,313,314,315,316,317,318,319,320,321,322,323,324,325,326,327,328,329,330,331,332,333,334,335,336, - 337,338,339,340,341,342,343,344,345,346,347,348,349,350,351,352,353,354,355,356,357,358,359,360,361,362,363,364, - 365,366,367,368,369,370,371,372,373,374,375,376,377,378,379,380,381,382,383,384,385,386,387,388,389,390,391,392, - 393,394,395,396,397,398,399,400,401,402,403,404,405,406,407,408,409,410,411,412,413,414,415,416,417,418,419,420, - 421,422,423,424,425,426,427,428,429,430,431,432,433,434,435,436,437,438,439,440,441,442,443,444,445,446,447,448, - 449,450,451,452,453,454,455,456,457,458,459,460,461,462,463,464,465,466,467,468,469,470,471,472,473,474,475,476, - 477,478,479,480,481,482,483,484,485,486,487,488,489,490,491,492,493,494,495,496,497,498,499,500,501,502,503,504, - 505,506,507,508,509,510,511,512,513,514,515,516,517,518,519,520,521,522,523,524,525,526,527,528,529,530,531,532, - 533,534,535,536,537,538,539,540,541,542,543,544,545,546,547,548,549,550,551,552,553,554,555,556,557,558,559,560, - 
561,562,563,564,565,566,567,568,569,570,571,572,573,574,575,576,577,578,579,580,581,582,583,584,585,586,587,588, - 589,590,591,592,593,594,595,596,597,598,599,600,601,602,603,604,605,606,607,608,609,610,611,612,613,614,615,616, - 617,618,619,620,621,622,623,624,625,626,627,628,629,630,631,632,633,634,635,636,637,638,639,640,641,642,643,644, - 645,646,647,648,649,650,651,652,653,654,655,656,657,658,659,660,661,662,663,664,665,666,667,668,669,670,671,672, - 673,674,675,676,677,678,679,680,681,682,683,684,685,686,687,688,689,690,691,692,693,694,695,696,697,698,699,700, - 701,702,703,704,705,706,707,708,709,710,711,712,713,714,715,716,717,718,719,720,721,722,723,724,725,726,727,728, - 729,730,731,732,733,734,735,736,737,738,739,740,741,742,743,744,745,746,747,748,749,750,751,752,753,754,755,756, - 757,758,759,760,761,762,763,764,765,766,767,768,769,770,771,772,773,774,775,776,777,778,779,780,781,782,783,784, - 785,786,787,788,789,790,791,792,793,794,795,796,797,798,799,800,801,802,803,804,805,806,807,808,809,810,811,812, - 813,814,815,816,817,818,819,820,821,822,823,824,825,826,827,828,829,830,831,832,833,834,835,836,837,838,839,840, - 841,842,843,844,845,846,847,848,849,850,851,852,853,854,855,856,857,858,859,860,861,862,863,864,865,866,867,868, - 869,870,871,872,873,874,875,876,877,878,879,880,881,882,883,884,885,886,887,888,889,890,891,892,893,894,895,896, - 897,898,899,900,901,902,903,904,905,906,907,908,909,910,911,912,913,914,915,916,917,918,919,920,921,922,923,924, - 925,926,927,928,929,930,931,932,933,934,935,936,937,938,939,940,941,942,943,944,945,946,947,948,949,950,951,952, - 953,954,955,956,957,958,959,960,961,962,963,964,965,966,967,968,969,970,971,972,973,974,975,976,977,978,979,980, - 981,982,983,984,985,986,987,988,989,990,991,992,993,994,995,996,997,998,999,1000,1001,1002,1003,1004,1005,1006, - 1007,1008,1009,1010,1011,1012,1013,1014,1015,1016,1017,1018,1019,1020,1021,1022,1023,1024,1025,1026,1027,1028, - 1029,1030,1031,1032,1033,1034,1035,1036,1037,1038,1039,1040,1041,1042,1043,1044,1045,1046,1047,1048,1049,1050, - 1051,1052,1053,1054,1055,1056,1057,1058,1059,1060,1061,1062,1063,1064,1065,1066,1067,1068,1069,1070,1071,1072, - 1073,1074,1075,1076,1077,1078,1079,1080,1081,1082,1083,1084,1085,1086,1087,1088,1089,1090,1091,1092,1093,1094, - 1095,1096,1097,1098,1099,1100,1101,1102,1103,1104,1105,1106,1107,1108,1109,1110,1111,1112,1113,1114,1115,1116, - 1117,1118,1119,1120,1121,1122,1123,1124,1125,1126,1127,1128,1129,1130,1131,1132,1133,1134,1135,1136,1137,1138, - 1139,1140,1141,1142,1143,1144,1145,1146,1147,1148,1149,1150,1151,1152,1153,1154,1155,1156,1157,1158,1159,1160, - 1161,1162,1163,1164,1165,1166,1167,1168,1169,1170,1171,1172,1173,1174,1175,1176,1177,1178,1179,1180,1181,1182, - 1183,1184,1185,1186,1187,1188,1189,1190,1191,1192,1193,1194,1195,1196,1197,1198,1199,1200,1201,1202,1203,1204, - 1205,1206,1207,1208,1209,1210,1211,1212,1213,1214,1215,1216,1217,1218,1219,1220,1221,1222,1223,1224,1225,1226, - 1227,1228,1229,1230,1231,1232,1233,1234,1235,1236,1237,1238,1239,1240,1241,1242,1243,1244,1245,1246,1247,1248, - 1249,1250,1251,1252,1253,1254,1255,1256,1257,1258,1259,1260,1261,1262,1263,1264,1265,1266,1267,1268,1269,1270, - 1271,1272,1273,1274,1275,1276,1277,1278,1279,1280,1281,1282,1283,1284,1285,1286,1287,1288,1289,1290,1291,1292, - 1293,1294,1295,1296,1297,1298,1299,1300,1301,1302,1303,1304,1305,1306,1307,1308,1309,1310,1311,1312,1313,1314, - 1315,1316,1317,1318,1319,1320,1321,1322,1323,1324,1325,1326,1327,1328,1329,1330,1331,1332,1333,1334,1335,1336, - 
1337,1338,1339,1340,1341,1342,1343,1344,1345,1346,1347,1348,1349,1350,1351,1352,1353,1354,1355,1356,1357,1358, - 1359,1360,1361,1362,1363,1364,1365,1366,1367,1368,1369,1370,1371,1372,1373,1374,1375,1376,1377,1378,1379,1380, - 1381,1382,1383,1384,1385,1386,1387,1388,1389,1390,1391,1392,1393,1394,1395,1396,1397,1398,1399,1400,1401,1402, - 1403,1404,1405,1406,1407,1408,1409,1410,1411,1412,1413,1414,1415,1416,1417,1418,1419,1420,1421,1422,1423,1424, - 1425,1426,1427,1428,1429,1430,1431,1432,1433,1434,1435,1436,1437,1438,1439,1440,1441,1442,1443,1444,1445,1446, - 1447,1448,1449,1450,1451,1452,1453,1454,1455,1456,1457,1458,1459,1460,1461,1462,1463,1464,1465,1466,1467,1468, - 1469,1470,1471,1472,1473,1474,1475,1476,1477,1478,1479,1480,1481,1482,1483,1484,1485,1486,1487,1488,1489,1490, - 1491,1492,1493,1494,1495,1496,1497,1498,1499,1500 - ) - ) - DM_NAME DOM03_03 - DM_TYPE 16 - DM_SUBTYPE 0 - DM_FLEN 8 - DM_FSCALE 0 - DM_FPREC 0 - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL - check ( - value NOT in ( - 1500,1499,1498,1497,1496,1495,1494,1493,1492,1491,1490,1489,1488,1487,1486,1485,1484,1483,1482,1481,1480,1479, - 1478,1477,1476,1475,1474,1473,1472,1471,1470,1469,1468,1467,1466,1465,1464,1463,1462,1461,1460,1459,1458,1457, - 1456,1455,1454,1453,1452,1451,1450,1449,1448,1447,1446,1445,1444,1443,1442,1441,1440,1439,1438,1437,1436,1435, - 1434,1433,1432,1431,1430,1429,1428,1427,1426,1425,1424,1423,1422,1421,1420,1419,1418,1417,1416,1415,1414,1413, - 1412,1411,1410,1409,1408,1407,1406,1405,1404,1403,1402,1401,1400,1399,1398,1397,1396,1395,1394,1393,1392,1391, - 1390,1389,1388,1387,1386,1385,1384,1383,1382,1381,1380,1379,1378,1377,1376,1375,1374,1373,1372,1371,1370,1369, - 1368,1367,1366,1365,1364,1363,1362,1361,1360,1359,1358,1357,1356,1355,1354,1353,1352,1351,1350,1349,1348,1347, - 1346,1345,1344,1343,1342,1341,1340,1339,1338,1337,1336,1335,1334,1333,1332,1331,1330,1329,1328,1327,1326,1325, - 1324,1323,1322,1321,1320,1319,1318,1317,1316,1315,1314,1313,1312,1311,1310,1309,1308,1307,1306,1305,1304,1303, - 1302,1301,1300,1299,1298,1297,1296,1295,1294,1293,1292,1291,1290,1289,1288,1287,1286,1285,1284,1283,1282,1281, - 1280,1279,1278,1277,1276,1275,1274,1273,1272,1271,1270,1269,1268,1267,1266,1265,1264,1263,1262,1261,1260,1259, - 1258,1257,1256,1255,1254,1253,1252,1251,1250,1249,1248,1247,1246,1245,1244,1243,1242,1241,1240,1239,1238,1237, - 1236,1235,1234,1233,1232,1231,1230,1229,1228,1227,1226,1225,1224,1223,1222,1221,1220,1219,1218,1217,1216,1215, - 1214,1213,1212,1211,1210,1209,1208,1207,1206,1205,1204,1203,1202,1201,1200,1199,1198,1197,1196,1195,1194,1193, - 1192,1191,1190,1189,1188,1187,1186,1185,1184,1183,1182,1181,1180,1179,1178,1177,1176,1175,1174,1173,1172,1171, - 1170,1169,1168,1167,1166,1165,1164,1163,1162,1161,1160,1159,1158,1157,1156,1155,1154,1153,1152,1151,1150,1149, - 1148,1147,1146,1145,1144,1143,1142,1141,1140,1139,1138,1137,1136,1135,1134,1133,1132,1131,1130,1129,1128,1127, - 1126,1125,1124,1123,1122,1121,1120,1119,1118,1117,1116,1115,1114,1113,1112,1111,1110,1109,1108,1107,1106,1105, - 1104,1103,1102,1101,1100,1099,1098,1097,1096,1095,1094,1093,1092,1091,1090,1089,1088,1087,1086,1085,1084,1083, - 1082,1081,1080,1079,1078,1077,1076,1075,1074,1073,1072,1071,1070,1069,1068,1067,1066,1065,1064,1063,1062,1061, - 1060,1059,1058,1057,1056,1055,1054,1053,1052,1051,1050,1049,1048,1047,1046,1045,1044,1043,1042,1041,1040,1039, - 1038,1037,1036,1035,1034,1033,1032,1031,1030,1029,1028,1027,1026,1025,1024,1023,1022,1021,1020,1019,1018,1017, - 
1016,1015,1014,1013,1012,1011,1010,1009,1008,1007,1006,1005,1004,1003,1002,1001,1000,999,998,997,996,995,994, - 993,992,991,990,989,988,987,986,985,984,983,982,981,980,979,978,977,976,975,974,973,972,971,970,969,968,967, - 966,965,964,963,962,961,960,959,958,957,956,955,954,953,952,951,950,949,948,947,946,945,944,943,942,941,940, - 939,938,937,936,935,934,933,932,931,930,929,928,927,926,925,924,923,922,921,920,919,918,917,916,915,914,913, - 912,911,910,909,908,907,906,905,904,903,902,901,900,899,898,897,896,895,894,893,892,891,890,889,888,887,886, - 885,884,883,882,881,880,879,878,877,876,875,874,873,872,871,870,869,868,867,866,865,864,863,862,861,860,859, - 858,857,856,855,854,853,852,851,850,849,848,847,846,845,844,843,842,841,840,839,838,837,836,835,834,833,832, - 831,830,829,828,827,826,825,824,823,822,821,820,819,818,817,816,815,814,813,812,811,810,809,808,807,806,805, - 804,803,802,801,800,799,798,797,796,795,794,793,792,791,790,789,788,787,786,785,784,783,782,781,780,779,778, - 777,776,775,774,773,772,771,770,769,768,767,766,765,764,763,762,761,760,759,758,757,756,755,754,753,752,751, - 750,749,748,747,746,745,744,743,742,741,740,739,738,737,736,735,734,733,732,731,730,729,728,727,726,725,724, - 723,722,721,720,719,718,717,716,715,714,713,712,711,710,709,708,707,706,705,704,703,702,701,700,699,698,697, - 696,695,694,693,692,691,690,689,688,687,686,685,684,683,682,681,680,679,678,677,676,675,674,673,672,671,670, - 669,668,667,666,665,664,663,662,661,660,659,658,657,656,655,654,653,652,651,650,649,648,647,646,645,644,643, - 642,641,640,639,638,637,636,635,634,633,632,631,630,629,628,627,626,625,624,623,622,621,620,619,618,617,616, - 615,614,613,612,611,610,609,608,607,606,605,604,603,602,601,600,599,598,597,596,595,594,593,592,591,590,589, - 588,587,586,585,584,583,582,581,580,579,578,577,576,575,574,573,572,571,570,569,568,567,566,565,564,563,562, - 561,560,559,558,557,556,555,554,553,552,551,550,549,548,547,546,545,544,543,542,541,540,539,538,537,536,535, - 534,533,532,531,530,529,528,527,526,525,524,523,522,521,520,519,518,517,516,515,514,513,512,511,510,509,508, - 507,506,505,504,503,502,501,500,499,498,497,496,495,494,493,492,491,490,489,488,487,486,485,484,483,482,481, - 480,479,478,477,476,475,474,473,472,471,470,469,468,467,466,465,464,463,462,461,460,459,458,457,456,455,454, - 453,452,451,450,449,448,447,446,445,444,443,442,441,440,439,438,437,436,435,434,433,432,431,430,429,428,427, - 426,425,424,423,422,421,420,419,418,417,416,415,414,413,412,411,410,409,408,407,406,405,404,403,402,401,400, - 399,398,397,396,395,394,393,392,391,390,389,388,387,386,385,384,383,382,381,380,379,378,377,376,375,374,373, - 372,371,370,369,368,367,366,365,364,363,362,361,360,359,358,357,356,355,354,353,352,351,350,349,348,347,346, - 345,344,343,342,341,340,339,338,337,336,335,334,333,332,331,330,329,328,327,326,325,324,323,322,321,320,319, - 318,317,316,315,314,313,312,311,310,309,308,307,306,305,304,303,302,301,300,299,298,297,296,295,294,293,292, - 291,290,289,288,287,286,285,284,283,282,281,280,279,278,277,276,275,274,273,272,271,270,269,268,267,266,265, - 264,263,262,261,260,259,258,257,256,255,254,253,252,251,250,249,248,247,246,245,244,243,242,241,240,239,238, - 237,236,235,234,233,232,231,230,229,228,227,226,225,224,223,222,221,220,219,218,217,216,215,214,213,212,211, - 210,209,208,207,206,205,204,203,202,201,200,199,198,197,196,195,194,193,192,191,190,189,188,187,186,185,184, - 183,182,181,180,179,178,177,176,175,174,173,172,171,170,169,168,167,166,165,164,163,162,161,160,159,158,157, - 
156,155,154,153,152,151,150,149,148,147,146,145,144,143,142,141,140,139,138,137,136,135,134,133,132,131,130, - 129,128,127,126,125,124,123,122,121,120,119,118,117,116,115,114,113,112,111,110,109,108,107,106,105,104,103, - 102,101,100,99,98,97,96,95,94,93,92,91,90,89,88,87,86,85,84,83,82,81,80,79,78,77,76,75,74,73,72,71,70,69,68, - 67,66,65,64,63,62,61,60,59,58,57,56,55,54,53,52,51,50,49,48,47,46,45,44,43,42,41,40,39,38,37,36,35,34,33,32, - 31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1 - ) - ) - DM_NAME DOM03_04 - DM_TYPE 12 - DM_SUBTYPE - DM_FLEN 4 - DM_FSCALE 0 - DM_FPREC - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL - check (value >='01.01.0001' and value <= '31.12.9999') - DM_NAME DOM03_05 - DM_TYPE 14 - DM_SUBTYPE 0 - DM_FLEN 31 - DM_FSCALE 0 - DM_FPREC - DM_FCSET 0 - DM_FCOLL 0 - DM_FCHRLEN 31 - DM_FNULL - check (value in (select rdb$field_name from rdb$fields)) - DM_NAME DOM03_06 - DM_TYPE 37 - DM_SUBTYPE 0 - DM_FLEN 31 - DM_FSCALE 0 - DM_FPREC - DM_FCSET 0 - DM_FCOLL 0 - DM_FCHRLEN 31 - DM_FNULL - check ( exists(select 1 from rdb$database) ) - DM_NAME DOM03_07 - DM_TYPE 8 - DM_SUBTYPE 2 - DM_FLEN 4 - DM_FSCALE -2 - DM_FPREC 6 - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL - check (value between 2.71 and 3.14) - DM_NAME DOM03_08 - DM_TYPE 8 - DM_SUBTYPE 1 - DM_FLEN 4 - DM_FSCALE -2 - DM_FPREC 6 - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL - check (value not between 2.71 and 3.14) - DM_NAME DOM03_09 - DM_TYPE 10 - DM_SUBTYPE - DM_FLEN 4 - DM_FSCALE 0 - DM_FPREC - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL - check (value = 3.1415926) - DM_NAME DOM03_10 - DM_TYPE 10 - DM_SUBTYPE - DM_FLEN 4 - DM_FSCALE 0 - DM_FPREC - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL - check (value = 3.1415926) - DM_NAME DOM03_11 - DM_TYPE 27 - DM_SUBTYPE - DM_FLEN 8 - DM_FSCALE 0 - DM_FPREC - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL - check (value <= 1.7976931348623155e308) - DM_NAME DOM03_12 - DM_TYPE 27 - DM_SUBTYPE - DM_FLEN 8 - DM_FSCALE 0 - DM_FPREC - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL - check ( abs(value) > exp(-745.1332191)) - DM_NAME DOM03_13 - DM_TYPE 261 - DM_SUBTYPE 0 - DM_FLEN 8 - DM_FSCALE 0 - DM_FPREC - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL - check (value = '0xadef') - DM_NAME DOM03_14 - DM_TYPE 261 - DM_SUBTYPE 1 - DM_FLEN 8 - DM_FSCALE 0 - DM_FPREC - DM_FCSET 0 - DM_FCOLL 0 - DM_FCHRLEN - DM_FNULL - check (value > '') - DM_NAME DOM03_15 - DM_TYPE 261 - DM_SUBTYPE 0 - DM_FLEN 8 - DM_FSCALE 0 - DM_FPREC - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL - check (value similar to '([0-9]|[a-f]){1,}') - DM_NAME DOM03_16 - DM_TYPE 23 - DM_SUBTYPE - DM_FLEN 1 - DM_FSCALE 0 - DM_FPREC - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL - check (value in(true, false)) - DM_NAME DOM03_17 - DM_TYPE 13 - DM_SUBTYPE - DM_FLEN 4 - DM_FSCALE 0 - DM_FPREC - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL - check (value between '00:00:00.000' and '23:59:59.999' ) - DM_NAME DOM03_18 - DM_TYPE 28 - DM_SUBTYPE - DM_FLEN 8 - DM_FSCALE 0 - DM_FPREC - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL - check (value >= time '10:00 America/Los_Angeles') - DM_NAME DOM03_19 - DM_TYPE 35 - DM_SUBTYPE - DM_FLEN 8 - DM_FSCALE 0 - DM_FPREC - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL - check (value between '01.01.0001 00:00:00.000' and '31.12.9999 23:59:59.999' ) - DM_NAME DOM03_20 - DM_TYPE 29 - DM_SUBTYPE - DM_FLEN 12 - DM_FSCALE 0 - DM_FPREC - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL - check (value >= timestamp '01.01.2020 10:00 America/Los_Angeles') - DM_NAME DOM03_21 - DM_TYPE 
14 - DM_SUBTYPE 0 - DM_FLEN 20 - DM_FSCALE 0 - DM_FPREC - DM_FCSET 21 - DM_FCOLL 0 - DM_FCHRLEN 20 - DM_FNULL - check (singular(select 1 from rdb$database)) - DM_NAME DOM03_22 - DM_TYPE 14 - DM_SUBTYPE 1 - DM_FLEN 20 - DM_FSCALE 0 - DM_FPREC - DM_FCSET 1 - DM_FCOLL 0 - DM_FCHRLEN 20 - DM_FNULL - check (value = 'qwerty' ) - DM_NAME DOM03_23 - DM_TYPE 37 - DM_SUBTYPE 1 - DM_FLEN 20 - DM_FSCALE 0 - DM_FPREC - DM_FCSET 1 - DM_FCOLL 0 - DM_FCHRLEN 20 - DM_FNULL - check (value = 'mnbvcxz' ) - DM_NAME DOM03_24 - DM_TYPE 25 - DM_SUBTYPE - DM_FLEN 16 - DM_FSCALE 0 - DM_FPREC 34 - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL - check (value in(-9.999999999999999999999999999999999E+6144, 9.999999999999999999999999999999999E+6144) ) - DM_NAME DOM03_25 - DM_TYPE 24 - DM_SUBTYPE - DM_FLEN 8 - DM_FSCALE 0 - DM_FPREC 16 - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL - check (value in(-9.999999999999999E+384, 9.999999999999999E+384) ) - DM_NAME DOM03_26 - DM_TYPE 25 - DM_SUBTYPE - DM_FLEN 16 - DM_FSCALE 0 - DM_FPREC 34 - DM_FCSET - DM_FCOLL - DM_FCHRLEN - DM_FNULL - check (value in(-9.999999999999999999999999999999999E+6144, 9.999999999999999999999999999999999E+6144) ) - Records affected: 26 +expected_stdout = """ + DM_NAME DOM03_01 + DM_TYPE 7 + DM_SUBTYPE 0 + DM_FLEN 2 + DM_FSCALE 0 + DM_FPREC 0 + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL + check (value between 0 and 99) + DM_NAME DOM03_02 + DM_TYPE 8 + DM_SUBTYPE 0 + DM_FLEN 4 + DM_FSCALE 0 + DM_FPREC 0 + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL + check ( + value in ( + 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41, + 42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79, + 80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112, + 113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140, + 141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168, + 169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196, + 197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224, + 225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252, + 253,254,255,256,257,258,259,260,261,262,263,264,265,266,267,268,269,270,271,272,273,274,275,276,277,278,279,280, + 281,282,283,284,285,286,287,288,289,290,291,292,293,294,295,296,297,298,299,300,301,302,303,304,305,306,307,308, + 309,310,311,312,313,314,315,316,317,318,319,320,321,322,323,324,325,326,327,328,329,330,331,332,333,334,335,336, + 337,338,339,340,341,342,343,344,345,346,347,348,349,350,351,352,353,354,355,356,357,358,359,360,361,362,363,364, + 365,366,367,368,369,370,371,372,373,374,375,376,377,378,379,380,381,382,383,384,385,386,387,388,389,390,391,392, + 393,394,395,396,397,398,399,400,401,402,403,404,405,406,407,408,409,410,411,412,413,414,415,416,417,418,419,420, + 421,422,423,424,425,426,427,428,429,430,431,432,433,434,435,436,437,438,439,440,441,442,443,444,445,446,447,448, + 449,450,451,452,453,454,455,456,457,458,459,460,461,462,463,464,465,466,467,468,469,470,471,472,473,474,475,476, + 477,478,479,480,481,482,483,484,485,486,487,488,489,490,491,492,493,494,495,496,497,498,499,500,501,502,503,504, + 
505,506,507,508,509,510,511,512,513,514,515,516,517,518,519,520,521,522,523,524,525,526,527,528,529,530,531,532, + 533,534,535,536,537,538,539,540,541,542,543,544,545,546,547,548,549,550,551,552,553,554,555,556,557,558,559,560, + 561,562,563,564,565,566,567,568,569,570,571,572,573,574,575,576,577,578,579,580,581,582,583,584,585,586,587,588, + 589,590,591,592,593,594,595,596,597,598,599,600,601,602,603,604,605,606,607,608,609,610,611,612,613,614,615,616, + 617,618,619,620,621,622,623,624,625,626,627,628,629,630,631,632,633,634,635,636,637,638,639,640,641,642,643,644, + 645,646,647,648,649,650,651,652,653,654,655,656,657,658,659,660,661,662,663,664,665,666,667,668,669,670,671,672, + 673,674,675,676,677,678,679,680,681,682,683,684,685,686,687,688,689,690,691,692,693,694,695,696,697,698,699,700, + 701,702,703,704,705,706,707,708,709,710,711,712,713,714,715,716,717,718,719,720,721,722,723,724,725,726,727,728, + 729,730,731,732,733,734,735,736,737,738,739,740,741,742,743,744,745,746,747,748,749,750,751,752,753,754,755,756, + 757,758,759,760,761,762,763,764,765,766,767,768,769,770,771,772,773,774,775,776,777,778,779,780,781,782,783,784, + 785,786,787,788,789,790,791,792,793,794,795,796,797,798,799,800,801,802,803,804,805,806,807,808,809,810,811,812, + 813,814,815,816,817,818,819,820,821,822,823,824,825,826,827,828,829,830,831,832,833,834,835,836,837,838,839,840, + 841,842,843,844,845,846,847,848,849,850,851,852,853,854,855,856,857,858,859,860,861,862,863,864,865,866,867,868, + 869,870,871,872,873,874,875,876,877,878,879,880,881,882,883,884,885,886,887,888,889,890,891,892,893,894,895,896, + 897,898,899,900,901,902,903,904,905,906,907,908,909,910,911,912,913,914,915,916,917,918,919,920,921,922,923,924, + 925,926,927,928,929,930,931,932,933,934,935,936,937,938,939,940,941,942,943,944,945,946,947,948,949,950,951,952, + 953,954,955,956,957,958,959,960,961,962,963,964,965,966,967,968,969,970,971,972,973,974,975,976,977,978,979,980, + 981,982,983,984,985,986,987,988,989,990,991,992,993,994,995,996,997,998,999,1000,1001,1002,1003,1004,1005,1006, + 1007,1008,1009,1010,1011,1012,1013,1014,1015,1016,1017,1018,1019,1020,1021,1022,1023,1024,1025,1026,1027,1028, + 1029,1030,1031,1032,1033,1034,1035,1036,1037,1038,1039,1040,1041,1042,1043,1044,1045,1046,1047,1048,1049,1050, + 1051,1052,1053,1054,1055,1056,1057,1058,1059,1060,1061,1062,1063,1064,1065,1066,1067,1068,1069,1070,1071,1072, + 1073,1074,1075,1076,1077,1078,1079,1080,1081,1082,1083,1084,1085,1086,1087,1088,1089,1090,1091,1092,1093,1094, + 1095,1096,1097,1098,1099,1100,1101,1102,1103,1104,1105,1106,1107,1108,1109,1110,1111,1112,1113,1114,1115,1116, + 1117,1118,1119,1120,1121,1122,1123,1124,1125,1126,1127,1128,1129,1130,1131,1132,1133,1134,1135,1136,1137,1138, + 1139,1140,1141,1142,1143,1144,1145,1146,1147,1148,1149,1150,1151,1152,1153,1154,1155,1156,1157,1158,1159,1160, + 1161,1162,1163,1164,1165,1166,1167,1168,1169,1170,1171,1172,1173,1174,1175,1176,1177,1178,1179,1180,1181,1182, + 1183,1184,1185,1186,1187,1188,1189,1190,1191,1192,1193,1194,1195,1196,1197,1198,1199,1200,1201,1202,1203,1204, + 1205,1206,1207,1208,1209,1210,1211,1212,1213,1214,1215,1216,1217,1218,1219,1220,1221,1222,1223,1224,1225,1226, + 1227,1228,1229,1230,1231,1232,1233,1234,1235,1236,1237,1238,1239,1240,1241,1242,1243,1244,1245,1246,1247,1248, + 1249,1250,1251,1252,1253,1254,1255,1256,1257,1258,1259,1260,1261,1262,1263,1264,1265,1266,1267,1268,1269,1270, + 1271,1272,1273,1274,1275,1276,1277,1278,1279,1280,1281,1282,1283,1284,1285,1286,1287,1288,1289,1290,1291,1292, + 
1293,1294,1295,1296,1297,1298,1299,1300,1301,1302,1303,1304,1305,1306,1307,1308,1309,1310,1311,1312,1313,1314, + 1315,1316,1317,1318,1319,1320,1321,1322,1323,1324,1325,1326,1327,1328,1329,1330,1331,1332,1333,1334,1335,1336, + 1337,1338,1339,1340,1341,1342,1343,1344,1345,1346,1347,1348,1349,1350,1351,1352,1353,1354,1355,1356,1357,1358, + 1359,1360,1361,1362,1363,1364,1365,1366,1367,1368,1369,1370,1371,1372,1373,1374,1375,1376,1377,1378,1379,1380, + 1381,1382,1383,1384,1385,1386,1387,1388,1389,1390,1391,1392,1393,1394,1395,1396,1397,1398,1399,1400,1401,1402, + 1403,1404,1405,1406,1407,1408,1409,1410,1411,1412,1413,1414,1415,1416,1417,1418,1419,1420,1421,1422,1423,1424, + 1425,1426,1427,1428,1429,1430,1431,1432,1433,1434,1435,1436,1437,1438,1439,1440,1441,1442,1443,1444,1445,1446, + 1447,1448,1449,1450,1451,1452,1453,1454,1455,1456,1457,1458,1459,1460,1461,1462,1463,1464,1465,1466,1467,1468, + 1469,1470,1471,1472,1473,1474,1475,1476,1477,1478,1479,1480,1481,1482,1483,1484,1485,1486,1487,1488,1489,1490, + 1491,1492,1493,1494,1495,1496,1497,1498,1499,1500 + ) + ) + DM_NAME DOM03_03 + DM_TYPE 16 + DM_SUBTYPE 0 + DM_FLEN 8 + DM_FSCALE 0 + DM_FPREC 0 + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL + check ( + value NOT in ( + 1500,1499,1498,1497,1496,1495,1494,1493,1492,1491,1490,1489,1488,1487,1486,1485,1484,1483,1482,1481,1480,1479, + 1478,1477,1476,1475,1474,1473,1472,1471,1470,1469,1468,1467,1466,1465,1464,1463,1462,1461,1460,1459,1458,1457, + 1456,1455,1454,1453,1452,1451,1450,1449,1448,1447,1446,1445,1444,1443,1442,1441,1440,1439,1438,1437,1436,1435, + 1434,1433,1432,1431,1430,1429,1428,1427,1426,1425,1424,1423,1422,1421,1420,1419,1418,1417,1416,1415,1414,1413, + 1412,1411,1410,1409,1408,1407,1406,1405,1404,1403,1402,1401,1400,1399,1398,1397,1396,1395,1394,1393,1392,1391, + 1390,1389,1388,1387,1386,1385,1384,1383,1382,1381,1380,1379,1378,1377,1376,1375,1374,1373,1372,1371,1370,1369, + 1368,1367,1366,1365,1364,1363,1362,1361,1360,1359,1358,1357,1356,1355,1354,1353,1352,1351,1350,1349,1348,1347, + 1346,1345,1344,1343,1342,1341,1340,1339,1338,1337,1336,1335,1334,1333,1332,1331,1330,1329,1328,1327,1326,1325, + 1324,1323,1322,1321,1320,1319,1318,1317,1316,1315,1314,1313,1312,1311,1310,1309,1308,1307,1306,1305,1304,1303, + 1302,1301,1300,1299,1298,1297,1296,1295,1294,1293,1292,1291,1290,1289,1288,1287,1286,1285,1284,1283,1282,1281, + 1280,1279,1278,1277,1276,1275,1274,1273,1272,1271,1270,1269,1268,1267,1266,1265,1264,1263,1262,1261,1260,1259, + 1258,1257,1256,1255,1254,1253,1252,1251,1250,1249,1248,1247,1246,1245,1244,1243,1242,1241,1240,1239,1238,1237, + 1236,1235,1234,1233,1232,1231,1230,1229,1228,1227,1226,1225,1224,1223,1222,1221,1220,1219,1218,1217,1216,1215, + 1214,1213,1212,1211,1210,1209,1208,1207,1206,1205,1204,1203,1202,1201,1200,1199,1198,1197,1196,1195,1194,1193, + 1192,1191,1190,1189,1188,1187,1186,1185,1184,1183,1182,1181,1180,1179,1178,1177,1176,1175,1174,1173,1172,1171, + 1170,1169,1168,1167,1166,1165,1164,1163,1162,1161,1160,1159,1158,1157,1156,1155,1154,1153,1152,1151,1150,1149, + 1148,1147,1146,1145,1144,1143,1142,1141,1140,1139,1138,1137,1136,1135,1134,1133,1132,1131,1130,1129,1128,1127, + 1126,1125,1124,1123,1122,1121,1120,1119,1118,1117,1116,1115,1114,1113,1112,1111,1110,1109,1108,1107,1106,1105, + 1104,1103,1102,1101,1100,1099,1098,1097,1096,1095,1094,1093,1092,1091,1090,1089,1088,1087,1086,1085,1084,1083, + 1082,1081,1080,1079,1078,1077,1076,1075,1074,1073,1072,1071,1070,1069,1068,1067,1066,1065,1064,1063,1062,1061, + 
1060,1059,1058,1057,1056,1055,1054,1053,1052,1051,1050,1049,1048,1047,1046,1045,1044,1043,1042,1041,1040,1039, + 1038,1037,1036,1035,1034,1033,1032,1031,1030,1029,1028,1027,1026,1025,1024,1023,1022,1021,1020,1019,1018,1017, + 1016,1015,1014,1013,1012,1011,1010,1009,1008,1007,1006,1005,1004,1003,1002,1001,1000,999,998,997,996,995,994, + 993,992,991,990,989,988,987,986,985,984,983,982,981,980,979,978,977,976,975,974,973,972,971,970,969,968,967, + 966,965,964,963,962,961,960,959,958,957,956,955,954,953,952,951,950,949,948,947,946,945,944,943,942,941,940, + 939,938,937,936,935,934,933,932,931,930,929,928,927,926,925,924,923,922,921,920,919,918,917,916,915,914,913, + 912,911,910,909,908,907,906,905,904,903,902,901,900,899,898,897,896,895,894,893,892,891,890,889,888,887,886, + 885,884,883,882,881,880,879,878,877,876,875,874,873,872,871,870,869,868,867,866,865,864,863,862,861,860,859, + 858,857,856,855,854,853,852,851,850,849,848,847,846,845,844,843,842,841,840,839,838,837,836,835,834,833,832, + 831,830,829,828,827,826,825,824,823,822,821,820,819,818,817,816,815,814,813,812,811,810,809,808,807,806,805, + 804,803,802,801,800,799,798,797,796,795,794,793,792,791,790,789,788,787,786,785,784,783,782,781,780,779,778, + 777,776,775,774,773,772,771,770,769,768,767,766,765,764,763,762,761,760,759,758,757,756,755,754,753,752,751, + 750,749,748,747,746,745,744,743,742,741,740,739,738,737,736,735,734,733,732,731,730,729,728,727,726,725,724, + 723,722,721,720,719,718,717,716,715,714,713,712,711,710,709,708,707,706,705,704,703,702,701,700,699,698,697, + 696,695,694,693,692,691,690,689,688,687,686,685,684,683,682,681,680,679,678,677,676,675,674,673,672,671,670, + 669,668,667,666,665,664,663,662,661,660,659,658,657,656,655,654,653,652,651,650,649,648,647,646,645,644,643, + 642,641,640,639,638,637,636,635,634,633,632,631,630,629,628,627,626,625,624,623,622,621,620,619,618,617,616, + 615,614,613,612,611,610,609,608,607,606,605,604,603,602,601,600,599,598,597,596,595,594,593,592,591,590,589, + 588,587,586,585,584,583,582,581,580,579,578,577,576,575,574,573,572,571,570,569,568,567,566,565,564,563,562, + 561,560,559,558,557,556,555,554,553,552,551,550,549,548,547,546,545,544,543,542,541,540,539,538,537,536,535, + 534,533,532,531,530,529,528,527,526,525,524,523,522,521,520,519,518,517,516,515,514,513,512,511,510,509,508, + 507,506,505,504,503,502,501,500,499,498,497,496,495,494,493,492,491,490,489,488,487,486,485,484,483,482,481, + 480,479,478,477,476,475,474,473,472,471,470,469,468,467,466,465,464,463,462,461,460,459,458,457,456,455,454, + 453,452,451,450,449,448,447,446,445,444,443,442,441,440,439,438,437,436,435,434,433,432,431,430,429,428,427, + 426,425,424,423,422,421,420,419,418,417,416,415,414,413,412,411,410,409,408,407,406,405,404,403,402,401,400, + 399,398,397,396,395,394,393,392,391,390,389,388,387,386,385,384,383,382,381,380,379,378,377,376,375,374,373, + 372,371,370,369,368,367,366,365,364,363,362,361,360,359,358,357,356,355,354,353,352,351,350,349,348,347,346, + 345,344,343,342,341,340,339,338,337,336,335,334,333,332,331,330,329,328,327,326,325,324,323,322,321,320,319, + 318,317,316,315,314,313,312,311,310,309,308,307,306,305,304,303,302,301,300,299,298,297,296,295,294,293,292, + 291,290,289,288,287,286,285,284,283,282,281,280,279,278,277,276,275,274,273,272,271,270,269,268,267,266,265, + 264,263,262,261,260,259,258,257,256,255,254,253,252,251,250,249,248,247,246,245,244,243,242,241,240,239,238, + 237,236,235,234,233,232,231,230,229,228,227,226,225,224,223,222,221,220,219,218,217,216,215,214,213,212,211, 
+ 210,209,208,207,206,205,204,203,202,201,200,199,198,197,196,195,194,193,192,191,190,189,188,187,186,185,184, + 183,182,181,180,179,178,177,176,175,174,173,172,171,170,169,168,167,166,165,164,163,162,161,160,159,158,157, + 156,155,154,153,152,151,150,149,148,147,146,145,144,143,142,141,140,139,138,137,136,135,134,133,132,131,130, + 129,128,127,126,125,124,123,122,121,120,119,118,117,116,115,114,113,112,111,110,109,108,107,106,105,104,103, + 102,101,100,99,98,97,96,95,94,93,92,91,90,89,88,87,86,85,84,83,82,81,80,79,78,77,76,75,74,73,72,71,70,69,68, + 67,66,65,64,63,62,61,60,59,58,57,56,55,54,53,52,51,50,49,48,47,46,45,44,43,42,41,40,39,38,37,36,35,34,33,32, + 31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1 + ) + ) + DM_NAME DOM03_04 + DM_TYPE 12 + DM_SUBTYPE + DM_FLEN 4 + DM_FSCALE 0 + DM_FPREC + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL + check (value >='01.01.0001' and value <= '31.12.9999') + DM_NAME DOM03_05 + DM_TYPE 14 + DM_SUBTYPE 0 + DM_FLEN 31 + DM_FSCALE 0 + DM_FPREC + DM_FCSET 0 + DM_FCOLL 0 + DM_FCHRLEN 31 + DM_FNULL + check (value in (select rdb$field_name from rdb$fields)) + DM_NAME DOM03_06 + DM_TYPE 37 + DM_SUBTYPE 0 + DM_FLEN 31 + DM_FSCALE 0 + DM_FPREC + DM_FCSET 0 + DM_FCOLL 0 + DM_FCHRLEN 31 + DM_FNULL + check ( exists(select 1 from rdb$database) ) + DM_NAME DOM03_07 + DM_TYPE 8 + DM_SUBTYPE 2 + DM_FLEN 4 + DM_FSCALE -2 + DM_FPREC 6 + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL + check (value between 2.71 and 3.14) + DM_NAME DOM03_08 + DM_TYPE 8 + DM_SUBTYPE 1 + DM_FLEN 4 + DM_FSCALE -2 + DM_FPREC 6 + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL + check (value not between 2.71 and 3.14) + DM_NAME DOM03_09 + DM_TYPE 10 + DM_SUBTYPE + DM_FLEN 4 + DM_FSCALE 0 + DM_FPREC + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL + check (value = 3.1415926) + DM_NAME DOM03_10 + DM_TYPE 10 + DM_SUBTYPE + DM_FLEN 4 + DM_FSCALE 0 + DM_FPREC + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL + check (value = 3.1415926) + DM_NAME DOM03_11 + DM_TYPE 27 + DM_SUBTYPE + DM_FLEN 8 + DM_FSCALE 0 + DM_FPREC + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL + check (value <= 1.7976931348623155e308) + DM_NAME DOM03_12 + DM_TYPE 27 + DM_SUBTYPE + DM_FLEN 8 + DM_FSCALE 0 + DM_FPREC + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL + check ( abs(value) > exp(-745.1332191)) + DM_NAME DOM03_13 + DM_TYPE 261 + DM_SUBTYPE 0 + DM_FLEN 8 + DM_FSCALE 0 + DM_FPREC + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL + check (value = '0xadef') + DM_NAME DOM03_14 + DM_TYPE 261 + DM_SUBTYPE 1 + DM_FLEN 8 + DM_FSCALE 0 + DM_FPREC + DM_FCSET 0 + DM_FCOLL 0 + DM_FCHRLEN + DM_FNULL + check (value > '') + DM_NAME DOM03_15 + DM_TYPE 261 + DM_SUBTYPE 0 + DM_FLEN 8 + DM_FSCALE 0 + DM_FPREC + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL + check (value similar to '([0-9]|[a-f]){1,}') + DM_NAME DOM03_16 + DM_TYPE 23 + DM_SUBTYPE + DM_FLEN 1 + DM_FSCALE 0 + DM_FPREC + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL + check (value in(true, false)) + DM_NAME DOM03_17 + DM_TYPE 13 + DM_SUBTYPE + DM_FLEN 4 + DM_FSCALE 0 + DM_FPREC + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL + check (value between '00:00:00.000' and '23:59:59.999' ) + DM_NAME DOM03_18 + DM_TYPE 28 + DM_SUBTYPE + DM_FLEN 8 + DM_FSCALE 0 + DM_FPREC + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL + check (value >= time '10:00 America/Los_Angeles') + DM_NAME DOM03_19 + DM_TYPE 35 + DM_SUBTYPE + DM_FLEN 8 + DM_FSCALE 0 + DM_FPREC + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL + check (value between '01.01.0001 00:00:00.000' and '31.12.9999 23:59:59.999' ) + 
DM_NAME DOM03_20 + DM_TYPE 29 + DM_SUBTYPE + DM_FLEN 12 + DM_FSCALE 0 + DM_FPREC + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL + check (value >= timestamp '01.01.2020 10:00 America/Los_Angeles') + DM_NAME DOM03_21 + DM_TYPE 14 + DM_SUBTYPE 0 + DM_FLEN 20 + DM_FSCALE 0 + DM_FPREC + DM_FCSET 21 + DM_FCOLL 0 + DM_FCHRLEN 20 + DM_FNULL + check (singular(select 1 from rdb$database)) + DM_NAME DOM03_22 + DM_TYPE 14 + DM_SUBTYPE 1 + DM_FLEN 20 + DM_FSCALE 0 + DM_FPREC + DM_FCSET 1 + DM_FCOLL 0 + DM_FCHRLEN 20 + DM_FNULL + check (value = 'qwerty' ) + DM_NAME DOM03_23 + DM_TYPE 37 + DM_SUBTYPE 1 + DM_FLEN 20 + DM_FSCALE 0 + DM_FPREC + DM_FCSET 1 + DM_FCOLL 0 + DM_FCHRLEN 20 + DM_FNULL + check (value = 'mnbvcxz' ) + DM_NAME DOM03_24 + DM_TYPE 25 + DM_SUBTYPE + DM_FLEN 16 + DM_FSCALE 0 + DM_FPREC 34 + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL + check (value in(-9.999999999999999999999999999999999E+6144, 9.999999999999999999999999999999999E+6144) ) + DM_NAME DOM03_25 + DM_TYPE 24 + DM_SUBTYPE + DM_FLEN 8 + DM_FSCALE 0 + DM_FPREC 16 + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL + check (value in(-9.999999999999999E+384, 9.999999999999999E+384) ) + DM_NAME DOM03_26 + DM_TYPE 25 + DM_SUBTYPE + DM_FLEN 16 + DM_FSCALE 0 + DM_FPREC 34 + DM_FCSET + DM_FCOLL + DM_FCHRLEN + DM_FNULL + check (value in(-9.999999999999999999999999999999999E+6144, 9.999999999999999999999999999999999E+6144) ) + Records affected: 26 """ @pytest.mark.version('>=4.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/gtcs/test_dsql_domain_06.py b/tests/functional/gtcs/test_dsql_domain_06.py index 0d3ed20b..dbf8679a 100644 --- a/tests/functional/gtcs/test_dsql_domain_06.py +++ b/tests/functional/gtcs/test_dsql_domain_06.py @@ -1,56 +1,47 @@ #coding:utf-8 -# -# id: functional.gtcs.dsql_domain_06 -# title: GTCS/tests/DSQL_DOMAIN_06. Test the level 0 syntax for SQL "CREATE DOMAIN" statement using datatype and CHECK constraint clause. -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/DSQL_DOMAIN_06.script -# -# NB: avoid usage of ISQL command 'SHOW DOMAIN' because of unstable output. -# We display info about domains using common VIEW based on RDB$FIELDS table. -# Columns with rdb$validation_source and rdb$default_source contain BLOB data thus we have to skip from showing their blob ID - see substitution. -# -# ::: NOTE ::: -# Added domains with datatype that did appear only in FB 4.0: DECFLOAT and TIME[STAMP] WITH TIME ZONE. For this reason only FB 4.0+ can be tested. -# -# For each base datatype we: -# * create domain and set default value; -# * alter domain in order to drop default; -# * alter domain in order to set new default; -# * alter domain with doing TWO changes in ONE statement: set new default + drop default; -# * alter domain with doing TWO changes in ONE statement: drop default + set new default. -# -# For some datatypes (float, double precision) we also verify ability to use boundary values for datatype itself. -# For character datatypes we use non-asci characters (currency signs: euro, cent, pound, yena). 
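
The v_test view these scripts rely on is only partially visible in the hunk context above (the diff shows just its opening line). Below is a minimal sketch of how such a helper view over RDB$FIELDS could be shaped, assuming it simply aliases system-table columns to the DM_* names seen in the expected output; the exact column list and the filter that excludes system domains are assumptions for illustration, not the patch's actual DDL.

# Hypothetical reconstruction of the v_test helper view; the column mapping is
# inferred from the DM_* names in expected_stdout, not copied from the patch.
v_test_ddl = """
    create view v_test as
    select
        ff.rdb$field_name        as dm_name,
        ff.rdb$field_type        as dm_type,
        ff.rdb$field_sub_type    as dm_subtype,
        ff.rdb$field_length      as dm_flen,
        ff.rdb$field_scale       as dm_fscale,
        ff.rdb$field_precision   as dm_fprec,
        ff.rdb$character_set_id  as dm_fcset,
        ff.rdb$collation_id      as dm_fcoll,
        ff.rdb$character_length  as dm_fchrlen,
        ff.rdb$null_flag         as dm_fnull,
        ff.rdb$validation_source as dm_fvalid_blob_id,
        ff.rdb$default_source    as dm_fdefault_blob_id
    from rdb$fields ff
    where ff.rdb$field_name not starting with 'RDB$'
"""

Selecting rdb$validation_source and rdb$default_source (both BLOB columns) is what makes ISQL print the volatile blob IDs, which is why every act definition in this patch carries the DM_FVALID_BLOB_ID / DM_FDEFAULT_BLOB_ID substitutions.
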
-# -# Currently following datatypes are NOT checked: -# blob sub_type text not null; -# blob sub_type binary not null; // byt test *does* check BLOB without sub_type specified -# long float not null; -# nchar(20) not null; -# binary(20) not null; -# varbinary(20) not null; -# -# Checked on 4.0.0.1926. -# -# tracker_id: -# min_versions: ['4.0'] -# versions: 4.0 -# qmid: None + +""" +ID: gtcs.dsql-domain-06 +FBTEST: functional.gtcs.dsql_domain_06 +TITLE: Test the level 0 syntax for SQL "CREATE DOMAIN" statement using datatype and CHECK constraint clause. +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/DSQL_DOMAIN_06.script + + NB: avoid usage of ISQL command 'SHOW DOMAIN' because of unstable output. + We display info about domains using common VIEW based on RDB$FIELDS table. + Columns with rdb$validation_source and rdb$default_source contain BLOB data thus we have + to skip from showing their blob ID - see substitution. + + ::: NOTE ::: + Added domains with datatype that did appear only in FB 4.0: DECFLOAT and + TIME[STAMP] WITH TIME ZONE. For this reason only FB 4.0+ can be tested. + + For each base datatype we: + * create domain and set default value; + * alter domain in order to drop default; + * alter domain in order to set new default; + * alter domain with doing TWO changes in ONE statement: set new default + drop default; + * alter domain with doing TWO changes in ONE statement: drop default + set new default. + + For some datatypes (float, double precision) we also verify ability to use boundary values for datatype itself. + For character datatypes we use non-asci characters (currency signs: euro, cent, pound, yena). + + Currently following datatypes are NOT checked: + blob sub_type text not null; + blob sub_type binary not null; // byt test *does* check BLOB without sub_type specified + long float not null; + nchar(20) not null; + binary(20) not null; + varbinary(20) not null; +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() -substitutions_1 = [('[ \t]+', ' '), ('DM_FDEFAULT_BLOB_ID.*', ''), ('DM_FVALID_BLOB_ID.*', '')] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set bail on; set list on; @@ -156,7 +147,7 @@ test_script_1 = """ ------------------------------------------------------------------------------------------------ -- https://en.wikipedia.org/wiki/Single-precision_floating-point_format, power(2,-149): -- https://www.wolframalpha.com - + -- (largest normal number): (2-power(2,-23)) * power(2,127) create domain dom06_14 as float default 340282346638528859811704183484516925440; alter domain dom06_14 drop default; @@ -175,7 +166,7 @@ test_script_1 = """ alter domain dom06_15 set default 1.40129846432481707092372958328991613128026194187651577175706828388979108268586060148663818836212158203125e-45; alter domain dom06_15 set default 0.999999940395355224609375 drop default; alter domain dom06_15 drop default set default 1.00000011920928955078125; - + ------------------------------------------------------------------------------------------------ -- https://en.wikipedia.org/wiki/Double-precision_floating-point_format @@ -196,13 +187,13 @@ test_script_1 = """ -- 2.225073858507200889024586876085859887650423112240959... 
× 10^-308 -- alter domain dom06_16 set default 2.225073858507200889024586876085859887650423112240959e-308; -- 0.00000000 alter domain dom06_16 set default 2e-308; - -- 1 + power(2,-52) = 1.0000000000000002, the smallest number > 1 + -- 1 + power(2,-52) = 1.0000000000000002, the smallest number > 1 -- 1.0000000000000002220446049250313080847263336181640625 alter domain dom06_16 set default 1.0000000000000002220446049250313080847263336181640625 drop default; alter domain dom06_16 drop default set default 1.0000000000000006; ----------------------------------------------------------------------------------------------- create domain dom06_17 as blob default -' +' 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 @@ -370,7 +361,7 @@ test_script_1 = """ ; alter domain dom06_17 drop default; - alter domain dom06_17 set default + alter domain dom06_17 set default ' @@ -378,7 +369,7 @@ test_script_1 = """ alter domain dom06_17 set default null drop default; alter domain dom06_17 drop default set default -' +' 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 @@ -543,7 +534,7 @@ test_script_1 = """ 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 012345678901234567 ' - ; + ; ---------------------------------------------------------------------------------------------------- create domain dom06_18 as boolean default false; alter domain dom06_18 drop default; @@ -557,15 +548,16 @@ test_script_1 = """ alter domain dom06_19 set default null drop default; alter domain dom06_19 drop default set default -1.0E-6143; ---------------------------------------------------------------------------------------------------- - commit; + commit; set count on; select * from v_test; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('[ \t]+', ' '), ('DM_FDEFAULT_BLOB_ID.*', ''), + ('DM_FVALID_BLOB_ID.*', '')]) -expected_stdout_1 = """ -DM_NAME DOM06_01 +expected_stdout = """ +DM_NAME DOM06_01 DM_TYPE 7 DM_SUBTYPE 0 DM_FLEN 2 @@ -579,7 +571,7 @@ DM_FVALID_BLOB_ID DM_FDEFAULT_BLOB_ID 2:1e8 default 3333 -DM_NAME DOM06_02 +DM_NAME DOM06_02 DM_TYPE 8 DM_SUBTYPE 0 DM_FLEN 4 @@ -593,7 +585,7 @@ DM_FVALID_BLOB_ID DM_FDEFAULT_BLOB_ID 2:1ec default 33333 -DM_NAME DOM06_03 +DM_NAME DOM06_03 DM_TYPE 16 DM_SUBTYPE 0 DM_FLEN 8 @@ -607,7 +599,7 @@ DM_FVALID_BLOB_ID DM_FDEFAULT_BLOB_ID 2:1f0 default 333333 -DM_NAME DOM06_04 +DM_NAME DOM06_04 DM_TYPE 12 DM_SUBTYPE DM_FLEN 4 @@ -621,7 +613,7 @@ DM_FVALID_BLOB_ID DM_FDEFAULT_BLOB_ID 2:1f4 default 'YESTERDAY' -DM_NAME DOM06_05 +DM_NAME DOM06_05 DM_TYPE 13 DM_SUBTYPE DM_FLEN 4 @@ -635,7 +627,7 @@ DM_FVALID_BLOB_ID DM_FDEFAULT_BLOB_ID 2:1f8 default current_time -DM_NAME DOM06_06 +DM_NAME DOM06_06 DM_TYPE 28 DM_SUBTYPE DM_FLEN 8 @@ -649,7 +641,7 @@ DM_FVALID_BLOB_ID DM_FDEFAULT_BLOB_ID 2:1fc default '01:02:03.456 Antarctica/South_Pole' -DM_NAME DOM06_07 +DM_NAME DOM06_07 DM_TYPE 35 DM_SUBTYPE DM_FLEN 8 @@ -663,7 +655,7 @@ DM_FVALID_BLOB_ID DM_FDEFAULT_BLOB_ID 2:200 
default current_timestamp -DM_NAME DOM06_08 +DM_NAME DOM06_08 DM_TYPE 29 DM_SUBTYPE DM_FLEN 12 @@ -677,7 +669,7 @@ DM_FVALID_BLOB_ID DM_FDEFAULT_BLOB_ID 2:204 default '29.05.2017 01:02:03.456 Antarctica/South_Pole' -DM_NAME DOM06_09 +DM_NAME DOM06_09 DM_TYPE 14 DM_SUBTYPE 0 DM_FLEN 4 @@ -691,7 +683,7 @@ DM_FVALID_BLOB_ID DM_FDEFAULT_BLOB_ID 2:208 default '¥' -DM_NAME DOM06_10 +DM_NAME DOM06_10 DM_TYPE 37 DM_SUBTYPE 0 DM_FLEN 4 @@ -705,7 +697,7 @@ DM_FVALID_BLOB_ID DM_FDEFAULT_BLOB_ID 2:20c default '¥' -DM_NAME DOM06_11 +DM_NAME DOM06_11 DM_TYPE 14 DM_SUBTYPE 0 DM_FLEN 1 @@ -719,7 +711,7 @@ DM_FVALID_BLOB_ID DM_FDEFAULT_BLOB_ID 2:210 default '¡' -DM_NAME DOM06_12 +DM_NAME DOM06_12 DM_TYPE 7 DM_SUBTYPE 1 DM_FLEN 2 @@ -733,7 +725,7 @@ DM_FVALID_BLOB_ID DM_FDEFAULT_BLOB_ID 2:214 default 327.67 -DM_NAME DOM06_13 +DM_NAME DOM06_13 DM_TYPE 26 DM_SUBTYPE 2 DM_FLEN 16 @@ -747,7 +739,7 @@ DM_FVALID_BLOB_ID DM_FDEFAULT_BLOB_ID 2:218 default 99999999999999999999999999999999 -DM_NAME DOM06_14 +DM_NAME DOM06_14 DM_TYPE 10 DM_SUBTYPE DM_FLEN 4 @@ -761,7 +753,7 @@ DM_FVALID_BLOB_ID DM_FDEFAULT_BLOB_ID 2:21c default 1.00000011920928955078125 -DM_NAME DOM06_15 +DM_NAME DOM06_15 DM_TYPE 10 DM_SUBTYPE DM_FLEN 4 @@ -775,7 +767,7 @@ DM_FVALID_BLOB_ID DM_FDEFAULT_BLOB_ID 2:220 default 1.00000011920928955078125 -DM_NAME DOM06_16 +DM_NAME DOM06_16 DM_TYPE 27 DM_SUBTYPE DM_FLEN 8 @@ -789,7 +781,7 @@ DM_FVALID_BLOB_ID DM_FDEFAULT_BLOB_ID 2:224 default 1.0000000000000006 -DM_NAME DOM06_17 +DM_NAME DOM06_17 DM_TYPE 261 DM_SUBTYPE 0 DM_FLEN 8 @@ -802,7 +794,7 @@ DM_FNULL DM_FVALID_BLOB_ID DM_FDEFAULT_BLOB_ID 2:228 default -' +' 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 @@ -968,7 +960,7 @@ default 012345678901234567 ' -DM_NAME DOM06_18 +DM_NAME DOM06_18 DM_TYPE 23 DM_SUBTYPE DM_FLEN 1 @@ -982,7 +974,7 @@ DM_FVALID_BLOB_ID DM_FDEFAULT_BLOB_ID 2:22c default false -DM_NAME DOM06_19 +DM_NAME DOM06_19 DM_TYPE 25 DM_SUBTYPE DM_FLEN 16 @@ -1000,8 +992,7 @@ Records affected: 19 """ @pytest.mark.version('>=4.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/gtcs/test_dsql_domain_07.py b/tests/functional/gtcs/test_dsql_domain_07.py index 9bf95b11..14d3e14e 100644 --- a/tests/functional/gtcs/test_dsql_domain_07.py +++ b/tests/functional/gtcs/test_dsql_domain_07.py @@ -1,56 +1,48 @@ #coding:utf-8 -# -# id: functional.gtcs.dsql_domain_07 -# title: GTCS/tests/DSQL_DOMAIN_07. Test CREATE / ALTER domain statement with ADD/DROP CONSTRAINT clauses, together and separately. -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/DSQL_DOMAIN_07.script -# -# NB: avoid usage of ISQL command 'SHOW DOMAIN' because of unstable output. -# We display info about domains using common VIEW based on RDB$FIELDS table. -# Columns with rdb$validation_source and rdb$default_source contain BLOB data thus we have to skip from showing their blob ID - see substitution. -# -# ::: NOTE ::: -# Added domains with datatype that did appear only in FB 4.0: DECFLOAT and TIME[STAMP] WITH TIME ZONE. 
For this reason only FB 4.0+ can be tested. -# -# For each base datatype we: -# * create domain and set initial CHECK constraint; -# * alter domain in order to add new constraint. This must FAIL with message "Only one constraint allowed for a domain" (SQLSTATE = 42000) -# * alter domain with requirement ADD CONSTRAINT and DROP it. -# ########## -# ### NB ### Clause 'DROP CONSTRAINT' will be executed FIRST in this case, regardless where it is specified. -# ########## -# For this reason such statement must PASS. -# * alter domain and try to add again new CHECK constraint. This thould fail again with SQLSTATE=42000. -# -# Currently following datatypes are NOT checked: -# blob sub_type text not null; -# blob sub_type binary not null; // byt test *does* check BLOB without sub_type specified -# long float not null; -# nchar not null; -# binary not null; -# varbinary not null; -# -# Checked on 4.0.0.1931. -# -# tracker_id: -# min_versions: ['4.0'] -# versions: 4.0 -# qmid: None + +""" +ID: gtcs.dsql-domain-07 +FBTEST: functional.gtcs.dsql_domain_07 +TITLE: Test CREATE / ALTER domain statement with ADD/DROP CONSTRAINT clauses, together and separately. +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/DSQL_DOMAIN_07.script + + NB: avoid usage of ISQL command 'SHOW DOMAIN' because of unstable output. + We display info about domains using common VIEW based on RDB$FIELDS table. + Columns with rdb$validation_source and rdb$default_source contain BLOB data thus we have + to skip from showing their blob ID - see substitution. + + ::: NOTE ::: + Added domains with datatype that did appear only in FB 4.0: DECFLOAT and + TIME[STAMP] WITH TIME ZONE. For this reason only FB 4.0+ can be tested. + + For each base datatype we: + * create domain and set initial CHECK constraint; + * alter domain in order to add new constraint. This must FAIL with message + "Only one constraint allowed for a domain" (SQLSTATE = 42000) + * alter domain with requirement ADD CONSTRAINT and DROP it. + ########## + ### NB ### Clause 'DROP CONSTRAINT' will be executed FIRST in this case, regardless where it is specified. + ########## + For this reason such statement must PASS. + * alter domain and try to add again new CHECK constraint. This thould fail again with SQLSTATE=42000. 
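A minimal sketch of that four-step check, using a hypothetical domain name dom_demo in place of the dom06_* domains created by the script:

    -- dom_demo is a placeholder, not part of the test suite
    create domain dom_demo as int check (value > 0);
    alter domain dom_demo add constraint check (value < 100);
    -- fails: "Only one constraint allowed for a domain", SQLSTATE = 42000
    alter domain dom_demo add constraint check (value < 100) drop constraint;
    -- passes: DROP CONSTRAINT is applied first, so only one constraint remains
    alter domain dom_demo add constraint check (value > 0);
    -- fails again with SQLSTATE = 42000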
+ + Currently following datatypes are NOT checked: + blob sub_type text not null; + blob sub_type binary not null; // byt test *does* check BLOB without sub_type specified + long float not null; + nchar not null; + binary not null; + varbinary not null; +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() -substitutions_1 = [('[ \t]+', ' '), ('DM_FDEFAULT_BLOB_ID.*', ''), ('DM_FVALID_BLOB_ID.*', '')] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set list on; create view v_test as select @@ -153,8 +145,8 @@ test_script_1 = """ alter domain dom06_16 add constraint check( value >= 2e-308 ) drop constraint; alter domain dom06_16 add constraint check( value = 1.0000000000000002220446049250313080847263336181640625 ); ----------------------------------------------------------------------------------------------- - create domain dom06_17 as blob check (value = -' + create domain dom06_17 as blob check (value = +' 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 @@ -321,13 +313,13 @@ test_script_1 = """ ' ); - alter domain dom06_17 add constraint check( value = + alter domain dom06_17 add constraint check( value = ' '); -- several empty lines here - alter domain dom06_17 add constraint check( value = + alter domain dom06_17 add constraint check( value = ' @@ -347,16 +339,17 @@ test_script_1 = """ alter domain dom06_19 add constraint check( value is not null ) drop constraint; alter domain dom06_19 add constraint check( value >= -1.0E-6143 ); ---------------------------------------------------------------------------------------------------- - commit; + commit; set count on; select * from v_test; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('[ \t]+', ' '), ('DM_FDEFAULT_BLOB_ID.*', ''), + ('DM_FVALID_BLOB_ID.*', '')]) -expected_stdout_1 = """ - DM_NAME DOM06_01 +expected_stdout = """ + DM_NAME DOM06_01 DM_TYPE 7 DM_SUBTYPE 0 DM_FLEN 2 @@ -370,7 +363,7 @@ expected_stdout_1 = """ check( value = 3 ) DM_FDEFAULT_BLOB_ID - DM_NAME DOM06_02 + DM_NAME DOM06_02 DM_TYPE 8 DM_SUBTYPE 0 DM_FLEN 4 @@ -384,7 +377,7 @@ expected_stdout_1 = """ check( value = 3 ) DM_FDEFAULT_BLOB_ID - DM_NAME DOM06_03 + DM_NAME DOM06_03 DM_TYPE 16 DM_SUBTYPE 0 DM_FLEN 8 @@ -398,7 +391,7 @@ expected_stdout_1 = """ check( value = 3 ) DM_FDEFAULT_BLOB_ID - DM_NAME DOM06_04 + DM_NAME DOM06_04 DM_TYPE 12 DM_SUBTYPE DM_FLEN 4 @@ -412,7 +405,7 @@ expected_stdout_1 = """ check( value < current_date ) DM_FDEFAULT_BLOB_ID - DM_NAME DOM06_05 + DM_NAME DOM06_05 DM_TYPE 13 DM_SUBTYPE DM_FLEN 4 @@ -426,7 +419,7 @@ expected_stdout_1 = """ check( value < current_time ) DM_FDEFAULT_BLOB_ID - DM_NAME DOM06_06 + DM_NAME DOM06_06 DM_TYPE 28 DM_SUBTYPE DM_FLEN 8 @@ -440,7 +433,7 @@ expected_stdout_1 = """ check( value < '23:34:45.678 Pacific/Galapagos' ) DM_FDEFAULT_BLOB_ID - DM_NAME DOM06_07 + DM_NAME DOM06_07 DM_TYPE 13 DM_SUBTYPE DM_FLEN 4 @@ -454,7 +447,7 @@ expected_stdout_1 = """ check( value < current_timestamp ) DM_FDEFAULT_BLOB_ID - DM_NAME DOM06_08 + DM_NAME DOM06_08 DM_TYPE 29 DM_SUBTYPE DM_FLEN 12 @@ -468,7 +461,7 @@ expected_stdout_1 
= """ check( value < '27.03.2015 23:34:45.678 Pacific/Galapagos' ) DM_FDEFAULT_BLOB_ID - DM_NAME DOM06_09 + DM_NAME DOM06_09 DM_TYPE 14 DM_SUBTYPE 0 DM_FLEN 4 @@ -482,7 +475,7 @@ expected_stdout_1 = """ check( value = '¢' ) DM_FDEFAULT_BLOB_ID - DM_NAME DOM06_10 + DM_NAME DOM06_10 DM_TYPE 37 DM_SUBTYPE 0 DM_FLEN 4 @@ -496,7 +489,7 @@ expected_stdout_1 = """ check( value = '¢' ) DM_FDEFAULT_BLOB_ID - DM_NAME DOM06_11 + DM_NAME DOM06_11 DM_TYPE 14 DM_SUBTYPE 0 DM_FLEN 1 @@ -510,7 +503,7 @@ expected_stdout_1 = """ check( value = 'Ÿ' ) DM_FDEFAULT_BLOB_ID - DM_NAME DOM06_12 + DM_NAME DOM06_12 DM_TYPE 7 DM_SUBTYPE 1 DM_FLEN 2 @@ -524,7 +517,7 @@ expected_stdout_1 = """ check( value = -327.68 ) DM_FDEFAULT_BLOB_ID - DM_NAME DOM06_13 + DM_NAME DOM06_13 DM_TYPE 8 DM_SUBTYPE 2 DM_FLEN 4 @@ -538,7 +531,7 @@ expected_stdout_1 = """ check( value = -327.68 ) DM_FDEFAULT_BLOB_ID - DM_NAME DOM06_14 + DM_NAME DOM06_14 DM_TYPE 10 DM_SUBTYPE DM_FLEN 4 @@ -552,7 +545,7 @@ expected_stdout_1 = """ check( value = 1.40129846432481707092372958328991613128026194187651577175706828388979108268586060148663818836212158203125e-45 ) DM_FDEFAULT_BLOB_ID - DM_NAME DOM06_15 + DM_NAME DOM06_15 DM_TYPE 10 DM_SUBTYPE DM_FLEN 4 @@ -566,7 +559,7 @@ expected_stdout_1 = """ check( value = 1.40129846432481707092372958328991613128026194187651577175706828388979108268586060148663818836212158203125e-45 ) DM_FDEFAULT_BLOB_ID - DM_NAME DOM06_16 + DM_NAME DOM06_16 DM_TYPE 27 DM_SUBTYPE DM_FLEN 8 @@ -580,7 +573,7 @@ expected_stdout_1 = """ check( value >= 2e-308 ) DM_FDEFAULT_BLOB_ID - DM_NAME DOM06_17 + DM_NAME DOM06_17 DM_TYPE 261 DM_SUBTYPE 0 DM_FLEN 8 @@ -591,7 +584,7 @@ expected_stdout_1 = """ DM_FCHRLEN DM_FNULL DM_FVALID_BLOB_ID 2:226 - check( value = + check( value = ' @@ -599,7 +592,7 @@ expected_stdout_1 = """ ') DM_FDEFAULT_BLOB_ID - DM_NAME DOM06_18 + DM_NAME DOM06_18 DM_TYPE 23 DM_SUBTYPE DM_FLEN 1 @@ -613,7 +606,7 @@ expected_stdout_1 = """ check( value is not null ) DM_FDEFAULT_BLOB_ID - DM_NAME DOM06_19 + DM_NAME DOM06_19 DM_TYPE 25 DM_SUBTYPE DM_FLEN 16 @@ -629,7 +622,8 @@ expected_stdout_1 = """ Records affected: 19 """ -expected_stderr_1 = """ + +expected_stderr = """ Statement failed, SQLSTATE = 42000 unsuccessful metadata update -ALTER DOMAIN DOM06_01 failed @@ -822,11 +816,9 @@ expected_stderr_1 = """ """ @pytest.mark.version('>=4.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.expected_stderr = expected_stderr + act.execute() + assert (act.clean_stderr == act.clean_expected_stderr and + act.clean_stdout == act.clean_expected_stdout) diff --git a/tests/functional/gtcs/test_dsql_domain_12.py b/tests/functional/gtcs/test_dsql_domain_12.py index b8871b00..a1ba4454 100644 --- a/tests/functional/gtcs/test_dsql_domain_12.py +++ b/tests/functional/gtcs/test_dsql_domain_12.py @@ -1,50 +1,39 @@ #coding:utf-8 -# -# id: functional.gtcs.dsql_domain_12 -# title: GTCS/tests/DSQL_DOMAIN_12. Verify result of INSERT DEFAULT VALUES into a table with domain fields when domains are defined with DEFAULT value. -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/DSQL_DOMAIN_12.script -# -# ::: NOTE ::: -# Added domains with datatype that did appear only in FB 4.0: DECFLOAT and TIME[STAMP] WITH TIME ZONE. 
For this reason only FB 4.0+ can be tested. -# -# # Comment: This script will run level 1 testing, checking syntax of create -# # domain using datatype and default clauses, then creating a table -# # using the domain fields and inserting data to those fields, -# # allowing missing data to be supplied by the default definitions -# # for the domain. -# -# -# For some datatypes (float, double precision) we also verify ability to use boundary values for datatype itself. -# For character datatypes we use non-asci characters (currency signs: euro, cent, pound, yena). -# -# Currently following datatypes are NOT checked: -# blob sub_type text|binary -# long float; -# binary(20); -# varbinary(20); -# -# Checked on 4.0.0.1935. -# -# tracker_id: -# min_versions: ['4.0'] -# versions: 4.0 -# qmid: None + +""" +ID: gtcs.dsql-domain-12 +FBTEST: functional.gtcs.dsql_domain_12 +TITLE: Verify result of INSERT DEFAULT VALUES into a table with domain fields when domains are defined with DEFAULT value. +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/DSQL_DOMAIN_12.script + + ::: NOTE ::: + Added domains with datatype that did appear only in FB 4.0: DECFLOAT and + TIME[STAMP] WITH TIME ZONE. For this reason only FB 4.0+ can be tested. + + This script will run level 1 testing, checking syntax of create + domain using datatype and default clauses, then creating a table + using the domain fields and inserting data to those fields, + allowing missing data to be supplied by the default definitions + for the domain. + + For some datatypes (float, double precision) we also verify ability to use boundary values for datatype itself. + For character datatypes we use non-asci characters (currency signs: euro, cent, pound, yena). + + Currently following datatypes are NOT checked: + blob sub_type text|binary + long float; + binary(20); + varbinary(20); +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() -substitutions_1 = [('[ \t]+', ' '), ('F16_BLOB_ID.*', '')] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set bail on; set list on; set blob all; @@ -66,19 +55,19 @@ test_script_1 = """ create domain dom12_11 as nchar(1) default 'Ö' ; create domain dom12_12 as numeric(2,2) default -327.68; create domain dom12_13 as decimal(20,2) default -999999999999999999; - + -- Online evaluation of expressions: https://www.wolframalpha.com -- https://en.wikipedia.org/wiki/Single-precision_floating-point_format -- (largest number less than one): 1 - power(2,-24) create domain dom12_14 as float default 0.999999940395355224609375; - + -- https://en.wikipedia.org/wiki/Double-precision_floating-point_format -- Max Double: power(2,1023) * ( 1+(1-power(2,-52) ) create domain dom12_15 as double precision default 1.7976931348623157e308; - - - create domain dom12_16 as blob default + + + create domain dom12_16 as blob default ' 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 @@ -248,7 +237,7 @@ test_script_1 = """ create domain dom12_17 as boolean default false; create domain dom12_18 as decfloat(16) default -9.999999999999999E+384; create domain dom12_19 as decfloat default -9.999999999999999999999999999999999E6144; - commit; + commit; recreate table test( f01 dom12_01 
@@ -277,9 +266,9 @@ test_script_1 = """ select * from test; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('[ \t]+', ' '), ('F16_BLOB_ID.*', '')]) -expected_stdout_1 = """ +expected_stdout = """ F01 -32768 F02 -2147483648 F03 -9223372036854775807 @@ -288,9 +277,9 @@ F05 23:59:59.9990 F06 11:11:11.1110 Indian/Cocos F07 0001-01-01 00:00:01.0010 F08 2013-12-21 11:11:11.1110 Indian/Cocos -F09 € +F09 € F00 € -F11 Ö +F11 Ö F12 -327.68 F13 -999999999999999999.00 F14 0.99999994 @@ -469,8 +458,7 @@ Records affected: 1 """ @pytest.mark.version('>=4.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/gtcs/test_dsql_domain_13.py b/tests/functional/gtcs/test_dsql_domain_13.py index 42460504..609ee75e 100644 --- a/tests/functional/gtcs/test_dsql_domain_13.py +++ b/tests/functional/gtcs/test_dsql_domain_13.py @@ -1,51 +1,41 @@ #coding:utf-8 -# -# id: functional.gtcs.dsql_domain_13 -# title: GTCS/tests/DSQL_DOMAIN_13. Verify result of INSERT DEFAULT for domain-based fields which have their own default values. -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/DSQL_DOMAIN_13.script -# -# Comment in GTCS -# This script will test level 1 syntax checking for create domain -# statement using datatype and default clauses. The domains are then -# used to create a table where column defaults are also specified. -# Data is then inserted into the table allowing the missing fields -# to be supplied by the column defaults (where specified) and the -# domain defaults (where no column default exists). -# -# ::: NOTE ::: -# Added domains with datatype that did appear only in FB 4.0: DECFLOAT and TIME[STAMP] WITH TIME ZONE. For this reason only FB 4.0+ can be tested. -# -# Fields without default values have names 'F1xx': f101, f102, ... -# Fields with their own default values are 'F2xx': f201, f202, ... -# -# Currently following datatypes are NOT checked: -# blob sub_type text|binary -# long float; -# binary(20); -# varbinary(20); -# -# Checked on 4.0.0.1954. -# -# tracker_id: -# min_versions: ['4.0'] -# versions: 4.0 -# qmid: None + +""" +ID: gtcs.dsql-domain-13 +FBTEST: functional.gtcs.dsql_domain_13 +TITLE: Verify result of INSERT DEFAULT for domain-based fields which have their own default values +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/DSQL_DOMAIN_13.script + + Comment in GTCS + This script will test level 1 syntax checking for create domain + statement using datatype and default clauses. The domains are then + used to create a table where column defaults are also specified. + Data is then inserted into the table allowing the missing fields + to be supplied by the column defaults (where specified) and the + domain defaults (where no column default exists). + + ::: NOTE ::: + Added domains with datatype that did appear only in FB 4.0: DECFLOAT and + TIME[STAMP] WITH TIME ZONE. For this reason only FB 4.0+ can be tested. + + Fields without default values have names 'F1xx': f101, f102, ... + Fields with their own default values are 'F2xx': f201, f202, ... 
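The precedence being verified is: a column-level default, when present, wins over the domain default; otherwise the domain default is used. A minimal sketch with hypothetical names (dom_demo, t_demo):

    -- placeholder names, not part of the test suite
    create domain dom_demo as int default 1;
    create table t_demo (
        f101 dom_demo,              -- no column default: domain default (1) applies
        f201 dom_demo default 2     -- column default overrides the domain default
    );
    insert into t_demo default values;
    select * from t_demo;           -- expected: F101 = 1, F201 = 2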
+ + Currently following datatypes are NOT checked: + blob sub_type text|binary + long float; + binary(20); + varbinary(20); +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() -substitutions_1 = [('[ \t]+', ' '), ('F116_BLOB_ID.*', ''), ('F216_BLOB_ID.*', '')] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set bail on; set list on; set blob all; @@ -67,23 +57,23 @@ test_script_1 = """ create domain dom13_11 as nchar(1) default 'Ö' ; create domain dom13_12 as numeric(2,2) default -327.68; create domain dom13_13 as decimal(20,2) default -999999999999999999; - + -- Online evaluation of expressions: https://www.wolframalpha.com -- https://en.wikipedia.org/wiki/Single-precision_floating-point_format -- (largest number less than one): 1 - power(2,-24) create domain dom13_14 as float default 0.999999940395355224609375; - + -- https://en.wikipedia.org/wiki/Double-precision_floating-point_format -- Max Double: power(2,1023) * ( 1+(1-power(2,-52) ) create domain dom13_15 as double precision default 1.7976931348623157e308; - + create domain dom13_16 as blob default 'Ø'; create domain dom13_17 as boolean default false; create domain dom13_18 as decfloat(16) default -9.999999999999999E+384; create domain dom13_19 as decfloat default -9.999999999999999999999999999999999E6144; - commit; + commit; recreate table test( f101 dom13_01 @@ -120,7 +110,7 @@ test_script_1 = """ ,f211 dom13_11 default 'Ç' ,f212 dom13_12 default 327.67 ,f213 dom13_13 default 999999999999999999 - ,f214 dom13_14 default 1.0000001192 + ,f214 dom13_14 default 1.0000001192 ,f215 dom13_15 default 1.4012984643e-45 ,f216_blob_id dom13_16 default 'Ö' ,f217 dom13_17 default true @@ -129,15 +119,15 @@ test_script_1 = """ ); commit; - + insert into test default values; set count on; select * from test; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('[ \t]+', ' '), ('F116_BLOB_ID.*', ''), ('F216_BLOB_ID.*', '')]) -expected_stdout_1 = """ +expected_stdout = """ F101 -32768 F102 -2147483648 F103 -9223372036854775807 @@ -146,9 +136,9 @@ expected_stdout_1 = """ F106 11:11:11.1110 Indian/Cocos F107 0001-01-01 00:00:01.0010 F108 2013-12-21 11:11:11.1110 Indian/Cocos - F109 € + F109 € F110 ¢ - F111 Ö + F111 Ö F112 -327.68 F113 -999999999999999999.00 F114 0.99999994 @@ -166,9 +156,9 @@ expected_stdout_1 = """ F206 22:22:22.2220 Pacific/Fiji F207 1234-12-15 12:34:56.7890 F208 2222-12-22 22:22:22.2220 Pacific/Fiji - F209 ¥ + F209 ¥ F210 £ - F211 Ç + F211 Ç F212 327.67 F213 999999999999999999.00 F214 1.0000001 @@ -179,12 +169,11 @@ expected_stdout_1 = """ F218 9.999999999999999E+384 F219 9.999999999999999999999999999999999E+6144 - Records affected: 1 + Records affected: 1 """ @pytest.mark.version('>=4.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/gtcs/test_dsql_domain_15.py b/tests/functional/gtcs/test_dsql_domain_15.py index 4736019b..d2ae154b 100644 --- a/tests/functional/gtcs/test_dsql_domain_15.py +++ b/tests/functional/gtcs/test_dsql_domain_15.py @@ -1,47 +1,37 @@ #coding:utf-8 -# -# id: functional.gtcs.dsql_domain_15 -# 
title: GTCS/tests/DSQL_DOMAIN_15. Verify result of INSERT DEFAULT for domain-based fields which are declared as NOT NULL and have their own default values. -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/DSQL_DOMAIN_15.script -# -# Comment in GTCS -# This script will utilize the datatype, default and not null -# clauses in the create domain statement. A table is then -# created using the domain definitions with overriding column -# deafults, then data is added to the table with missing fields -# being supplied by the column or domain defaults. -# -# ::: NOTE ::: -# Added domains with datatype that did appear only in FB 4.0: DECFLOAT and TIME[STAMP] WITH TIME ZONE. For this reason only FB 4.0+ can be tested. -# -# Currently following datatypes are NOT checked: -# blob sub_type text|binary -# long float; -# binary(20); -# varbinary(20); -# -# Checked on 4.0.0.2425. -# -# tracker_id: -# min_versions: ['4.0'] -# versions: 4.0 -# qmid: None + +""" +ID: gtcs.dsql-domain-15 +FBTEST: functional.gtcs.dsql_domain_15 +TITLE: Verify result of INSERT DEFAULT for domain-based fields which are declared as NOT NULL and have their own default values +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/DSQL_DOMAIN_15.script + + Comment in GTCS + This script will utilize the datatype, default and not null + clauses in the create domain statement. A table is then + created using the domain definitions with overriding column + deafults, then data is added to the table with missing fields + being supplied by the column or domain defaults. + + ::: NOTE ::: + Added domains with datatype that did appear only in FB 4.0: DECFLOAT and + TIME[STAMP] WITH TIME ZONE. For this reason only FB 4.0+ can be tested. 
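The same default precedence is exercised here, with the addition that every domain is declared NOT NULL, so INSERT ... DEFAULT VALUES succeeds only because a column or domain default supplies each value. A minimal sketch with hypothetical names (dom_demo, t_demo):

    -- placeholder names, not part of the test suite
    create domain dom_demo as int default 0 not null;
    create table t_demo (
        fld01 dom_demo default 5000,  -- column default overrides the domain default
        fld02 dom_demo                -- domain default keeps the NOT NULL column satisfied
    );
    insert into t_demo default values;
    select * from t_demo;             -- expected: FLD01 = 5000, FLD02 = 0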
+ + Currently following datatypes are NOT checked: + blob sub_type text|binary + long float; + binary(20); + varbinary(20); +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() -substitutions_1 = [('[ \t]+', ' '), ('FLD17_BLOB_ID.*', ''), ('O_17_BLOB_ID.*', '')] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set bail on; set list on; set blob all; @@ -71,11 +61,11 @@ test_script_1 = """ commit; create table tab15a ( - fld01 dom15_01 default 5000 + fld01 dom15_01 default 5000 ,fld02 dom15_02 default 50000000 ,fld03 dom15_03 default '01/01/90' ,fld04 dom15_04 default 'FIXCHAR DEF' - ,fld05 dom15_05 default 'VARCHAR DEF' + ,fld05 dom15_05 default 'VARCHAR DEF' ,fld06 dom15_06 default 3.1415926 ,fld07 dom15_07 default 500.2 ,fld08 dom15_08 default 2.718281828 @@ -174,9 +164,10 @@ test_script_1 = """ """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('[ \t]+', ' '), ('FLD17_BLOB_ID.*', ''), + ('O_17_BLOB_ID.*', '')]) -expected_stdout_1 = """ +expected_stdout = """ FLD01 5000 FLD02 50000000 FLD03 1990-01-01 @@ -220,8 +211,7 @@ expected_stdout_1 = """ """ @pytest.mark.version('>=4.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/gtcs/test_dsql_domain_20.py b/tests/functional/gtcs/test_dsql_domain_20.py index 28d342bd..126f8c05 100644 --- a/tests/functional/gtcs/test_dsql_domain_20.py +++ b/tests/functional/gtcs/test_dsql_domain_20.py @@ -1,44 +1,34 @@ #coding:utf-8 -# -# id: functional.gtcs.dsql_domain_20 -# title: GTCS/tests/DSQL_DOMAIN_20. Verify result of ALTER DOMAIN SET/DROP DEFAULT when a table exists with field based on this domain. -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/DSQL_DOMAIN_20.script -# -# Comment in GTCS -# This script will test using the alter domain statement on domains that are already in use in table definitions. -# Related bugs: have to exit db for changes made to domains to affect data being entered into tables. -# -# ::: NOTE ::: -# Added domains with datatype that did appear only in FB 4.0: DECFLOAT and TIME[STAMP] WITH TIME ZONE. For this reason only FB 4.0+ can be tested. -# -# Currently following datatypes are NOT checked: -# blob sub_type text|binary -# long float; -# binary(20); -# varbinary(20); -# -# Checked on 4.0.0.1954. -# -# tracker_id: -# min_versions: ['4.0'] -# versions: 4.0 -# qmid: None + +""" +ID: gtcs.dsql-domain-20 +FBTEST: functional.gtcs.dsql_domain_20 +TITLE: Verify result of ALTER DOMAIN SET/DROP DEFAULT when a table exists with field based on this domain +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/DSQL_DOMAIN_20.script + + Comment in GTCS + This script will test using the alter domain statement on domains that are already in use in table definitions. + Related bugs: have to exit db for changes made to domains to affect data being entered into tables. + + ::: NOTE ::: + Added domains with datatype that did appear only in FB 4.0: DECFLOAT and + TIME[STAMP] WITH TIME ZONE. For this reason only FB 4.0+ can be tested. 
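The behaviour under test: once a table already uses the domain, changing or dropping the domain default affects subsequent INSERT ... DEFAULT VALUES. A minimal sketch with hypothetical names (dom_demo, t_demo):

    -- placeholder names, not part of the test suite
    create domain dom_demo as int default 1;
    create table t_demo (f01 dom_demo);
    insert into t_demo default values;   -- stores 1
    alter domain dom_demo set default 2;
    insert into t_demo default values;   -- stores 2, the new default is picked up
    alter domain dom_demo drop default;
    insert into t_demo default values;   -- stores NULL, no default is left
    select * from t_demo;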
+ + Currently following datatypes are NOT checked: + blob sub_type text|binary + long float; + binary(20); + varbinary(20); +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() -substitutions_1 = [('[ \t]+', ' '), ('F16_BLOB_ID.*', '')] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set bail on; set list on; set blob all; @@ -60,23 +50,23 @@ test_script_1 = """ create domain dom20_11 as nchar(1) default 'Ö' ; create domain dom20_12 as numeric(2,2) default -327.68; create domain dom20_13 as decimal(20,2) default -999999999999999999; - + -- Online evaluation of expressions: https://www.wolframalpha.com -- https://en.wikipedia.org/wiki/Single-precision_floating-point_format -- (largest number less than one): 1 - power(2,-24) create domain dom20_14 as float default 0.999999940395355224609375; - + -- https://en.wikipedia.org/wiki/Double-precision_floating-point_format -- Max Double: power(2,1023) * ( 1+(1-power(2,-52) ) create domain dom20_15 as double precision default 1.7976931348623157e308; - + create domain dom20_16 as blob default 'Ø'; create domain dom20_17 as boolean default false; create domain dom20_18 as decfloat(16) default -9.999999999999999E+384; create domain dom20_19 as decfloat default -9.999999999999999999999999999999999E6144; - commit; + commit; recreate table test( f01 dom20_01 @@ -100,7 +90,7 @@ test_script_1 = """ ,f19 dom20_19 ); commit; - + insert into test default values; select 'point-1' as msg, t.* from test t; rollback; @@ -153,12 +143,12 @@ test_script_1 = """ insert into test default values; select 'point-3' as msg, t.* from test t; rollback; - + """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('[ \t]+', ' '), ('F16_BLOB_ID.*', '')]) -expected_stdout_1 = """ +expected_stdout = """ MSG point-1 F01 -32768 F02 -2147483648 @@ -226,12 +216,11 @@ expected_stdout_1 = """ Ö F17 F18 9.999999999999999E+384 - F19 9.999999999999999999999999999999999E+6144 + F19 9.999999999999999999999999999999999E+6144 """ @pytest.mark.version('>=4.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/gtcs/test_dsql_domain_21.py b/tests/functional/gtcs/test_dsql_domain_21.py index 3578c66b..75657d39 100644 --- a/tests/functional/gtcs/test_dsql_domain_21.py +++ b/tests/functional/gtcs/test_dsql_domain_21.py @@ -1,53 +1,50 @@ #coding:utf-8 -# -# id: functional.gtcs.dsql_domain_21 -# title: GTCS/tests/DSQL_DOMAIN_21. Verify result of ALTER DOMAIN with changing DEFAULT values and DROP constraints when a table exists with field based on this domain. -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/DSQL_DOMAIN_21.script -# -# Comment in GTCS: -# This script will test using the alter domain statement on domains that are already in use in table definitions, -# with domain defaults and check constraints. -# Related bugs: have to exit db for changes made to domains to affect data being entered into tables. -# -# We create domains with default values and constraints. 
Initially we use such default values that PASS requirements of check-constraints. -# Statement INSERT DEFAULT and query to the test table is used in order to ensure that we have ability to use such values. -# -# Then we change values in DEFAULT clause so that all of them will VILOLATE check expressions. Here take domains one-by-one and try to user -# INSERT DEFAULT after each such change of DEFAULT value. Every such attempt must fail. -# -# Then we drop CHECK constraints in all domains and again try INSERT DEFAULT. It must pass and new default values must be stored in the test table. -# Finally, we drop DEFAULT in all domains and try INSERT DEFAULT one more time. It must result to NULL value in all fields. -# -# ::: NB::: Changing default value for BLOB field to one that violates CHECK-expression of domain leads to strange message that does not -# relates to actual problem: SQLSTATE = 22018 / conversion error from string "BLOB". See CORE-6297 for details. -# -# ::: NOTE ::: -# Added domains with datatype that did appear only in FB 4.0: DECFLOAT and TIME[STAMP] WITH TIME ZONE. For this reason only FB 4.0+ can be tested. -# -# Checked on 4.0.0.1954. -# -# 08.04.2021: changed expected output for date 01-jan-0001 after discuss with Adriano. -# -# tracker_id: -# min_versions: ['4.0'] -# versions: 4.0 -# qmid: None + +""" +ID: gtcs.dsql-domain-21 +FBTEST: functional.gtcs.dsql_domain_21 +TITLE: Verify result of ALTER DOMAIN with changing DEFAULT values and DROP constraints when a table exists with field based on this domain +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/DSQL_DOMAIN_21.script + + Comment in GTCS: + This script will test using the alter domain statement on domains that are already in use in table definitions, + with domain defaults and check constraints. + Related bugs: have to exit db for changes made to domains to affect data being entered into tables. + + We create domains with default values and constraints. Initially we use such default + values that PASS requirements of check-constraints. + Statement INSERT DEFAULT and query to the test table is used in order to ensure that we + have ability to use such values. + + Then we change values in DEFAULT clause so that all of them will VILOLATE check expressions. + Here take domains one-by-one and try to user + INSERT DEFAULT after each such change of DEFAULT value. Every such attempt must fail. + + Then we drop CHECK constraints in all domains and again try INSERT DEFAULT. It must pass + and new default values must be stored in the test table. + Finally, we drop DEFAULT in all domains and try INSERT DEFAULT one more time. It must + result to NULL value in all fields. + + ::: NB::: Changing default value for BLOB field to one that violates CHECK-expression + of domain leads to strange message that does not relates to actual + problem: SQLSTATE = 22018 / conversion error from string "BLOB". See CORE-6297 for details. + + ::: NOTE ::: + Added domains with datatype that did appear only in FB 4.0: DECFLOAT and + TIME[STAMP] WITH TIME ZONE. For this reason only FB 4.0+ can be tested. +NOTES: +[08.04.2021] + changed expected output for date 01-jan-0001 after discuss with Adriano. 
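Condensed to a single hypothetical domain (dom_demo and t_demo are placeholder names), the scenario reads as follows; the script itself walks dom21_01 ... dom21_23 through the same steps:

    create domain dom_demo as int default 10 check (value > 0);
    create table t_demo (f01 dom_demo);
    insert into t_demo default values;   -- passes: 10 satisfies the check
    alter domain dom_demo set default -1;
    insert into t_demo default values;   -- fails: SQLSTATE = 23000, validation error
    alter domain dom_demo drop constraint;
    insert into t_demo default values;   -- passes now: -1 is stored
    alter domain dom_demo drop default;
    insert into t_demo default values;   -- passes: NULL is stored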
+""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() -substitutions_1 = [('[ \t]+', ' '), ('F18_BLOB_ID.*', ''), ('F19_BLOB_ID.*', ''), ('F20_BLOB_ID.*', '')] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set bail on; set list on; set blob all; @@ -75,7 +72,7 @@ test_script_1 = """ commit; create domain dom21_01 as smallint default -32768 check (value not in ( select r.rdb$relation_id from rdb$relations r where r.rdb$system_flag = 1 ) ); - + create domain dom21_02 as int default 1500 check ( value in ( @@ -203,13 +200,13 @@ test_script_1 = """ 67,66,65,64,63,62,61,60,59,58,57,56,55,54,53,52,51,50,49,48,47,46,45,44,43,42,41,40,39,38,37,36,35,34,33,32, 31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1 ) - ) + ) ; - + create domain dom21_04 as date default '01.01.1980' check ( value >='01.01.0001' and value <= '30.12.9999'); - + create domain dom21_05 as time default '23:59:59.999' check ( extract(hour from value) >=21 ); - + create domain dom21_06 as time with time zone default '11:11:11.111 Indian/Cocos' check ( extract(hour from value) <=12 ); create domain dom21_07 as timestamp default '01.01.0001 00:00:01.001' check ( extract(minute from value) = 0 ); create domain dom21_08 as timestamp with time zone default '21.12.2013 11:11:11.111 Indian/Cocos' check ( extract(minute from value) <=30 ); @@ -221,30 +218,30 @@ test_script_1 = """ create domain dom21_11 as nchar(1) default 'Ö' check( value in ('Ö', 'Ø') ); create domain dom21_12 as binary(2) default 'Œ' check( value in ('Œ', 'Ÿ', '¿') ); -- this datatype is alias for char(N) character set octets create domain dom21_13 as varbinary(2) default 'Œ' check( value in ('Œ', 'Ÿ', '¿') ); - + create domain dom21_14 as numeric(2,2) default -327.68 check ( value < 0 ); create domain dom21_15 as decimal(20,2) default -999999999999999999 check( value < 0 ); - + -- Online evaluation of expressions: https://www.wolframalpha.com -- https://en.wikipedia.org/wiki/Single-precision_floating-point_format -- (largest number less than one): 1 - power(2,-24) create domain dom21_16 as float default 0.999999940395355224609375 check( abs(value) < 1 ); - + -- https://en.wikipedia.org/wiki/Double-precision_floating-point_format -- Max Double: power(2,1023) * ( 1+(1-power(2,-52) ) create domain dom21_17 as double precision default 1.7976931348623157e308 check( abs(value) > 1 ); - + create domain dom21_18 as blob default 'Ø' check( value in ('Ö', 'Ø') ); create domain dom21_19 as blob sub_type text default 'W' check (value > ''); create domain dom21_20 as blob sub_type binary default 'f' check (value similar to '([0-9]|[a-f]){1,}'); - + create domain dom21_21 as boolean default false check ( value is not true ); create domain dom21_22 as decfloat(16) default -9.999999999999999E+384 check( log10(abs(value)) >= 384 ); create domain dom21_23 as decfloat default -9.999999999999999999999999999999999E6144 check( log10(abs(value)) >= 6144 ); - commit; - + commit; + --select * from v_test; recreate table test( @@ -273,19 +270,19 @@ test_script_1 = """ ,f23 dom21_23 ); commit; - + set bail off; -- ### NB ### - + insert into test default values; -- this must PASS select 'point-1' as msg, t.* from test t; rollback; - + ---------------------------------- -- Now we change DEFAULT values of domains so that they become violate CHECK expressions: alter domain dom21_01 set 
default 1; insert into test default values; -- this must FAIL with SQLSTATE = 23000 / validation error for column "TEST"."F01", value "1" alter domain dom21_01 drop constraint; -- in order to have ability to test next domain and field - + alter domain dom21_02 set default -1; insert into test default values; -- validation error for column "TEST"."F02", value "-1" alter domain dom21_02 drop constraint; @@ -293,7 +290,7 @@ test_script_1 = """ alter domain dom21_03 set default 1; insert into test default values; -- validation error for column "TEST"."F03", value "1" alter domain dom21_03 drop constraint; - + alter domain dom21_04 set default '31.12.9999'; insert into test default values; -- validation error for column "TEST"."F04", value "9999-12-31" alter domain dom21_04 drop constraint; @@ -305,7 +302,7 @@ test_script_1 = """ alter domain dom21_06 set default '13:00:00 Indian/Cocos'; insert into test default values; -- validation error for column "TEST"."F06", value "13:00:00.0000 Indian/Cocos" alter domain dom21_06 drop constraint; - + alter domain dom21_07 set default '01.01.0001 01:01:01.001'; insert into test default values; -- validation error for column "TEST"."F07", value "01-JAN-1 1:01:01.0010" // changed 08.04.2021, was: '1-jan' alter domain dom21_07 drop constraint; @@ -375,12 +372,12 @@ test_script_1 = """ alter domain dom21_23 drop constraint; --------------------------------------- - -- Now we have NO constraints in any domain. + -- Now we have NO constraints in any domain. -- We can run again INSERT DEFAULT and verify that new values appear in the table insert into test default values; -- this must PASS select 'point-2' as msg, t.* from test t; -- all values must have now NEW defaults for domains rollback; - + alter domain dom21_01 drop default; alter domain dom21_02 drop default; alter domain dom21_03 drop default; @@ -408,13 +405,14 @@ test_script_1 = """ insert into test default values; -- this must PASS select 'point-3' as msg, t.* from test t; -- all values now must be NULL rollback; - + """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('[ \t]+', ' '), ('F18_BLOB_ID.*', ''), + ('F19_BLOB_ID.*', ''), ('F20_BLOB_ID.*', '')]) -expected_stdout_1 = """ - MSG point-1 +expected_stdout = """ + MSG point-1 F01 -32768 F02 1500 F03 -9223372036854775807 @@ -423,9 +421,9 @@ expected_stdout_1 = """ F06 11:11:11.1110 Indian/Cocos F07 0001-01-01 00:00:01.0010 F08 2013-12-21 11:11:11.1110 Indian/Cocos - F09 € + F09 € F10 ¢ - F11 Ö + F11 Ö F12 C592 F13 C592 F14 -327.68 @@ -444,7 +442,7 @@ expected_stdout_1 = """ - MSG point-2 + MSG point-2 F01 1 F02 -1 F03 1 @@ -453,9 +451,9 @@ expected_stdout_1 = """ F06 13:00:00.0000 Indian/Cocos F07 0001-01-01 01:01:01.0010 F08 2013-12-21 10:31:00.0000 Indian/Cocos - F09 Ő - F10 - F11 + F09 Ő + F10 + F11 F12 C398 F13 C2A2 F14 327.67 @@ -474,7 +472,7 @@ expected_stdout_1 = """ - MSG point-3 + MSG point-3 F01 F02 F03 @@ -499,7 +497,8 @@ expected_stdout_1 = """ F22 F23 """ -expected_stderr_1 = """ + +expected_stderr = """ Statement failed, SQLSTATE = 23000 validation error for column "TEST"."F01", value "1" @@ -571,11 +570,9 @@ expected_stderr_1 = """ """ @pytest.mark.version('>=4.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + 
act.expected_stdout = expected_stdout + act.expected_stderr = expected_stderr + act.execute() + assert (act.clean_stderr == act.clean_expected_stderr and + act.clean_stdout == act.clean_expected_stdout) diff --git a/tests/functional/gtcs/test_dsql_domain_22.py b/tests/functional/gtcs/test_dsql_domain_22.py index 6a906407..1b1208fd 100644 --- a/tests/functional/gtcs/test_dsql_domain_22.py +++ b/tests/functional/gtcs/test_dsql_domain_22.py @@ -1,558 +1,57 @@ #coding:utf-8 -# -# id: functional.gtcs.dsql_domain_22 -# title: GTCS/tests/DSQL_DOMAIN_22. Verify result of ALTER DOMAIN with changing NOT NULL flag and CHECK constraints when a table exists with field based on this domain. -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/DSQL_DOMAIN_22.script -# -# Comment in GTCS: -# test for error conditions when using the alter domain statement on domains -# that are already in use in table definitions, -# with domain defaults and check constraints. -# -# Test creates domain with DEFAULT value and CHECK constraint. -# Initially domain definition: -# 1) allows insertion of NULLs; -# 2) have DEFAULT value which meets CHECK requirements. -# -# Then we create table and insert one record with DEFAULT value (it must pass) and second record with NULL. -# -# After this we try to change domain definition by adding NOT NULL clause - and it must fail because of existing record with null. -# Finally, we replace CHECK constraint so that its new expression will opposite to previous one, and try again to insert record with DEFAULT value. -# It must fail because of new domain CHECK violation. -# -# This is performed separately for each datatype (smallint, int, ...). -# -# ::: NB-1 ::: -# Test uses datatypes that did appear only in FB 4.0: INT128, DECFLOAT and TIME[STAMP] WITH TIME ZONE. For this reason only FB 4.0+ can be tested. -# -# ::: NB-2 ::: -# Domain CHECK constraint *can* be changed so that existing data will not satisfy new expression. -# Only NOT NULL is verified against data that were inserted in the table. -# -# Checked on 4.0.0.2425 (Windows), 4.0.0.2422 (Linux) -# -# -# tracker_id: -# min_versions: ['4.0'] -# versions: 4.0 -# qmid: None + +""" +ID: gtcs.dsql-domain-22 +FBTEST: functional.gtcs.dsql_domain_22 +TITLE: Verify result of ALTER DOMAIN with changing NOT NULL flag and CHECK constraints + when a table exists with field based on this domain +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/DSQL_DOMAIN_22.script + + Comment in GTCS: + test for error conditions when using the alter domain statement on domains + that are already in use in table definitions, + with domain defaults and check constraints. + + Test creates domain with DEFAULT value and CHECK constraint. + Initially domain definition: + 1) allows insertion of NULLs; + 2) have DEFAULT value which meets CHECK requirements. + + Then we create table and insert one record with DEFAULT value (it must pass) and second record with NULL. + + After this we try to change domain definition by adding NOT NULL clause - and it must + fail because of existing record with null. Finally, we replace CHECK constraint so that + its new expression will opposite to previous one, and try again to insert record with DEFAULT value. + It must fail because of new domain CHECK violation. + + This is performed separately for each datatype (smallint, int, ...). 
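For one hypothetical domain (dom_demo and t_demo are placeholder names) the sequence boils down to:

    create domain dom_demo as int default 0 check (value >= 0);
    create table t_demo (x dom_demo);
    insert into t_demo default values;   -- passes: 0 satisfies the check
    insert into t_demo values (null);    -- passes: the domain still allows NULL
    alter domain dom_demo set not null;
    -- fails: SQLSTATE = 22006, field cannot be made NOT NULL while NULLs exist
    alter domain dom_demo drop constraint add constraint check (value < 0);
    insert into t_demo default values;
    -- fails: SQLSTATE = 23000, the default (0) violates the new check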
+ + ::: NB-1 ::: + Test uses datatypes that did appear only in FB 4.0: INT128, DECFLOAT and + TIME[STAMP] WITH TIME ZONE. For this reason only FB 4.0+ can be tested. + + ::: NB-2 ::: + Domain CHECK constraint *can* be changed so that existing data will not satisfy new expression. + Only NOT NULL is verified against data that were inserted in the table. +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +substitutions = [('After line.*', ''), ('X_BLOB_20.*', ''), ('X_BLOB_21.*', ''), + ('X_BLOB_22.*', ''), ('DM_FVALID.*', ''), ('DM_FDEFAULT.*', ''), + ('0.0000000000000000', '0.000000000000000'), + ('X_DATE 20.*', 'X_DATE 20'), + ('validation error for column "TEST"."X_DATE", value .*', + 'validation error for column "TEST"."X_DATE"')] -substitutions_1 = [('After line.*', ''), ('X_BLOB_20.*', ''), ('X_BLOB_21.*', ''), ('X_BLOB_22.*', ''), ('DM_FVALID.*', ''), ('DM_FDEFAULT.*', ''), ('0.0000000000000000', '0.000000000000000'), ('X_DATE 20.*', 'X_DATE 20'), ('validation error for column "TEST"."X_DATE", value .*', 'validation error for column "TEST"."X_DATE"')] +db = db_factory() -init_script_1 = """""" +act = python_act('db', substitutions=substitutions) -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -# test_script_1 -#--- -# -# -# import os -# -# os.environ["ISC_USER"] = user_name -# os.environ["ISC_PASSWORD"] = user_password -# db_conn.close() -# -# #-------------------------------------------- -# -# def flush_and_close( file_handle ): -# # https://docs.python.org/2/library/os.html#os.fsync -# # If you're starting with a Python file object f, -# # first do f.flush(), and -# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. -# global os -# -# file_handle.flush() -# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: -# # otherwise: "OSError: [Errno 9] Bad file descriptor"! 
-# os.fsync(file_handle.fileno()) -# file_handle.close() -# -# #-------------------------------------------- -# -# def cleanup( f_names_list ): -# global os -# for f in f_names_list: -# if type(f) == file: -# del_name = f.name -# elif type(f) == str: -# del_name = f -# else: -# print('Unrecognized type of element:', f, ' - can not be treated as file.') -# del_name = None -# -# if del_name and os.path.isfile( del_name ): -# os.remove( del_name ) -# -# #-------------------------------------------- -# -# sql_init=''' -# set list on; -# set blob all; -# set names utf8; -# connect '%(dsn)s' user '%(user_name)s' password '%(user_password)s'; -# -# create collation nm_coll for utf8 from unicode case insensitive accent insensitive; -# commit; -# -# create domain dom20u as varchar(20) character set utf8 collate nm_coll; -# commit; -# -# -# create table rivers( -# id int -# ,river dom20u -# ); -# insert into rivers(id, river) values(1, 'Волга'); -# insert into rivers(id, river) values(2, 'Дніпро'); -# insert into rivers(id, river) values(3, 'Wisła'); -# insert into rivers(id, river) values(4, 'Dunărea'); -# insert into rivers(id, river) values(5, 'Rhône'); -# commit; -# -# create domain dom22_01 as smallint default 0 check (value >= 0 and value < 100); -# create domain dom22_02 as integer default 500 check (value >= 500); -# create domain dom22_03 as date default 'TODAY' check (value >= 'today'); -# -# -- CHECK-expression of this domain will be changed to -# -- "check (value in (select river from rivers))" - see below: -# create domain dom22_04 as char(20) default 'Wisła' check ( value in ('Волга','Дніпро','Wisła','Dunărea','Rhône') ); -# -# -- CHECK-expression of this domain will be changed to -# -- "check (value NOT in (select river from rivers))" - see below: -# create domain dom22_05 as varchar(25) default 'Norrström' check ( value NOT in ('Волга','Дніпро','Wisła','Dunărea','Rhône') ); -# -# create domain dom22_06 as numeric(2,2) default -327.68 check (value < 0); -# create domain dom22_07 as decimal(6,2) default -999.99 check (value < 0); -# -# -- exp(-745.1332192) is max. 
double precision value that will be NOT distinguish from zero: -# create domain dom22_08 as double precision default 0 check (value is null or value is not distinct from exp(-745.1332192)); -# -# ----------------------------- -# -# -- Additional datataypes (they either not present in original test or did appear since FB 3.x): -# create domain dom22_09 as bigint default 9223372036854775807 check (value > 0); -# create domain dom22_10 as nchar(1) default 'Y' check( value in ('Y', 'y') ); -- alias for ISO8859_1 -# create domain dom22_11 as binary(2) default 'Ÿ' check( value in ('Ÿ', 'ÿ') ); -- this datatype is alias for char(N) character set octets -# create domain dom22_12 as varbinary(2) default 'Ÿ' check( value in ('Ÿ', 'ÿ') ); -# create domain dom22_13 as boolean default false check ( value is not true ); -# -# create domain dom22_14 as decfloat(16) default -9.999999999999999E+384 check( log10(abs(value)) >= 384 ); -# create domain dom22_15 as decfloat default -9.999999999999999999999999999999999E6144 check( log10(abs(value)) >= 6144 ); -# create domain dom22_16 as int128 default 170141183460469231731687303715884105727 check( value in(-170141183460469231731687303715884105728, 170141183460469231731687303715884105727) ); -# -# create domain dom22_17 as time with time zone default '11:11:11.111 Indian/Cocos' check ( extract(hour from value) <=12 ); -# create domain dom22_18 as timestamp default '01.01.0001 00:00:01.001' check ( extract(minute from value) = 0 ); -# create domain dom22_19 as timestamp with time zone default '21.12.2013 11:11:11.111 Indian/Cocos' check ( extract(minute from value) <=30 ); -# -# create domain dom22_20 as blob default 'Ÿ' check( value in ('Ÿ', 'ÿ') ); -# create domain dom22_21 as blob sub_type text character set utf8 default 'Ätran' check (value is null or value NOT in (select river from rivers)) collate nm_coll; -# create domain dom22_22 as blob sub_type binary default 0x10 check (value > 0x01); -# -# create or alter view v_test as -# select -# ff.rdb$field_name as dm_name -# ,ff.rdb$field_type as dm_type -# ,ff.rdb$field_sub_type as dm_subtype -# ,ff.rdb$field_length as dm_flen -# ,ff.rdb$field_scale as dm_fscale -# ,ff.rdb$field_precision as dm_fprec -# ,ff.rdb$character_set_id as dm_fcset -# ,ff.rdb$collation_id as dm_fcoll -# ,ff.rdb$character_length dm_fchrlen -# ,ff.rdb$null_flag as dm_fnull -# ,ff.rdb$validation_source as dm_fvalid -# ,ff.rdb$default_source as dm_fdefault -# from rdb$fields ff -# where -# ff.rdb$system_flag is distinct from 1 -# and ff.rdb$field_name starting with upper( 'dom22_' ) -# order by dm_name -# ; -# commit; -# -# select * from v_test; -# -- ############################################################ -# -# set bail off; -# -# ------------------------------------------------ -# -# recreate table test(x_sml dom22_01); -- smallint default 0 check (value >= 0 and value < 100); -# -# insert into test default values returning x_sml; -# insert into test values(null); -# commit; -# -# alter domain dom22_01 set not null; -- must fail with SQLSTATE = 22006 / -Cannot make field ... NOT NULL because there are NULLs -# alter domain dom22_01 drop constraint add constraint check(value < 0); -# commit; -# -# insert into test default values returning x_sml; -- must fail with SQLSTATE = 23000 / validation error ... value "0" -# update test set x_sml = default where x_sml is null; -- must fail with SQLSTATE = 23000 / validation error ... 
value "0" -# commit; -# -# ------------------------------------------------ -# -# recreate table test(x_int dom22_02); -- integer default 500 check (value >= 500); -# -# insert into test default values returning x_int; -# insert into test values(null); -# commit; -# -# alter domain dom22_02 set not null; -- must fail with SQLSTATE = 22006 / -Cannot make field ... NOT NULL because there are NULLs -# alter domain dom22_02 drop constraint add constraint check(value < 0); -# commit; -# -# insert into test default values returning x_int; -- must fail with SQLSTATE = 23000 / validation error ... value "500" -# update test set x_int = default where x_int is null; -- must fail with SQLSTATE = 23000 / validation error ... value "500" -# commit; -# -# ------------------------------------------------ -# -# recreate table test(x_date dom22_03); -- date default 'TODAY' check (value >= 'today'); -# -# insert into test default values returning x_date; -# insert into test values(null); -# commit; -# -# alter domain dom22_03 set not null; -- must fail with SQLSTATE = 22006 / -Cannot make field ... NOT NULL because there are NULLs -# alter domain dom22_03 drop constraint add constraint check(value < 'today'); -# commit; -# -# insert into test default values returning x_date; -- must fail with SQLSTATE = 23000 / validation error ... value -# update test set x_date = default where x_date is null; -- must fail with SQLSTATE = 23000 / validation error ... value -# commit; -# -# ------------------------------------------------ -# -# recreate table test(x_char dom22_04); -- char(20) default 'Wisła' check (value in (select river from rivers)); -# -# insert into test default values returning x_char; -# insert into test values(null); -# commit; -# -# alter domain dom22_04 set not null; -- must fail with SQLSTATE = 22006 / -Cannot make field ... NOT NULL because there are NULLs -# alter domain dom22_04 drop constraint add constraint check(value NOT in (select river from rivers)); -# commit; -# -# insert into test default values returning x_char; -- must fail with SQLSTATE = 23000 / validation error ... value "Wisła " -# update test set x_char = default where x_char is null; -- must fail with SQLSTATE = 23000 / validation error ... value "Wisła " -# commit; -# -# ------------------------------------------------ -# -# recreate table test(x_vchr dom22_05); -- varchar(25) default 'Norrström' check (value NOT in (select river from rivers)); -# -# insert into test default values returning x_vchr; -# insert into test values(null); -# commit; -# -# alter domain dom22_05 set not null; -- must fail with SQLSTATE = 22006 / -Cannot make field ... NOT NULL because there are NULLs -# alter domain dom22_05 drop constraint add constraint check(value in (select river from rivers)); -# commit; -# -# insert into test default values returning x_vchr; -- must fail with SQLSTATE = 23000 / validation error ... value "Norrström" -# update test set x_vchr = default where x_vchr is null; -- must fail with SQLSTATE = 23000 / validation error ... value "Norrström" -# commit; -# -# ------------------------------------------------ -# -# recreate table test(x_num dom22_06); -- numeric(2,2) default -327.68 check (value < 0); -# -# insert into test default values returning x_num; -# insert into test values(null); -# commit; -# -# alter domain dom22_06 set not null; -- must fail with SQLSTATE = 22006 / -Cannot make field ... 
NOT NULL because there are NULLs -# alter domain dom22_06 drop constraint add constraint check(value >0); -# commit; -# -# insert into test default values returning x_num; -- must fail with SQLSTATE = 23000 / validation error ... value "-327.68" -# update test set x_num = default where x_num is null; -- must fail with SQLSTATE = 23000 / validation error ... value "-327.68" -# commit; -# -# ------------------------------------------------ -# -# recreate table test(x_dec dom22_07); -- decimal(6,2) default -999.99 check (value < 0); -# -# insert into test default values returning x_dec; -# insert into test values(null); -# commit; -# -# alter domain dom22_07 set not null; -# -# -- numeric(2,2) can hold values from the scope -327.68 to +327.68. -# -- cast of -999.99 to numeric(2,2) (being valid for decimal(2,2)) must fail with 'numeric value is out of range': -# alter domain dom22_07 drop constraint add constraint check(cast(value as numeric(2,2)) < 0 ); -# commit; -# -# insert into test default values returning x_dec; -- must fail with SQLSTATE = 23000 / numeric value is out of range -# update test set x_dec = default where x_dec is null returning x_dec; -- must fail with SQLSTATE = 23000 / numeric value is out of range -# commit; -# -# ------------------------------------------------ -# -# recreate table test(x_dp dom22_08); -- default 0 check (value is not distinct from exp(-745.1332192)) -# -# insert into test default values returning x_dp; -# insert into test values(null); -# commit; -# -# alter domain dom22_08 set not null; -# alter domain dom22_08 drop constraint add constraint check(value is not distinct from exp(-745.1332191) ); -- minimal DP value that can be distinguished from zero -# commit; -# -# insert into test default values returning x_dp; -- must fail with SQLSTATE = 23000 / validation error ... value "0.0000000000000000" -# update test set x_dp = default where x_dp is null returning x_dp; -- must fail with SQLSTATE = 23000 / validation error ... value "0.0000000000000000" -# commit; -# -# ------------------------------------------------ -# -# recreate table test(x_big dom22_09); -- default 9223372036854775807 check (value > 0); -# -# insert into test default values returning x_big; -# insert into test values(null); -# commit; -# -# alter domain dom22_09 set not null; -# alter domain dom22_09 drop constraint add constraint check(value between bin_shr(-9223372036854775808,63) and bin_shr(9223372036854775807,63)); -- -1...0 -# commit; -# -# insert into test default values returning x_big; -- must fail with SQLSTATE = 23000 / validation error ... value "9223372036854775807" -# update test set x_big = default where x_big is null returning x_big; -- must fail with SQLSTATE = 23000 / validation error ... 
value "9223372036854775807" -# commit; -# -# ------------------------------------------------ -# -# recreate table test(x_nc dom22_10); -- nchar(1) default 'Y' check( value in ('Y', 'y') ); -# insert into test default values returning x_nc; -# insert into test values(null); -# commit; -# -# alter domain dom22_10 set not null; -# alter domain dom22_10 drop constraint add constraint check( value similar to 'U'); -# commit; -# -# insert into test default values returning x_nc; -# update test set x_nc = default where x_nc is null returning x_nc; -# commit; -# -# ------------------------------------------------ -# -# recreate table test(x_bin dom22_11); -- binary(2) default 'Ÿ' check( value in ('Ÿ', 'ÿ') ) -# insert into test default values returning x_bin; -# insert into test values(null); -# commit; -# -# alter domain dom22_11 set not null; -# alter domain dom22_11 drop constraint add constraint check( value is not distinct from 'ł' or value is not distinct from 'ă' or value is not distinct from 'ô' ); -# commit; -# -# insert into test default values returning x_bin; -- must fail with SQLSTATE = 23000 / validation error ... value "Ÿ" -# update test set x_bin = default where x_bin is null returning x_bin; -- must fail with SQLSTATE = 23000 / validation error ... value "Ÿ" -# commit; -# -# ------------------------------------------------ -# -# recreate table test(x_vb dom22_12); -- varbinary(2) default 'Ÿ' check( value in ('Ÿ', 'ÿ') ) -# insert into test default values returning x_vb; -# insert into test values(null); -# commit; -# -# alter domain dom22_12 set not null; -# alter domain dom22_12 drop constraint add constraint check( value = any(select 'ł' from rdb$database union all select 'ă' from rdb$database) ); -# commit; -# -# insert into test default values returning x_vb; -- must fail with SQLSTATE = 23000 / validation error ... value "Ÿ" -# update test set x_vb = default where x_vb is null returning x_vb; -- must fail with SQLSTATE = 23000 / validation error ... 
value "Ÿ" -# commit; -# -# ------------------------------------------------ -# -# recreate table test(x_boo dom22_13); -- boolean default false check ( value is not true ); -# insert into test default values returning x_boo; -# insert into test values(null); -# commit; -# -# alter domain dom22_13 set not null; -# alter domain dom22_13 drop constraint add constraint check( value NOT in (false) ); -# commit; -# -# insert into test default values returning x_boo; -# update test set x_boo = default where x_boo is null returning x_boo; -# commit; -# -# ------------------------------------------------ -# -# recreate table test(x_df16 dom22_14); -- decfloat(16) default -9.999999999999999E+384 check( log10(abs(value)) >= 384 ); -# -# insert into test default values returning x_df16; -# insert into test values(null); -# commit; -# -# alter domain dom22_14 set not null; -# alter domain dom22_14 drop constraint add constraint check( value = 0 ); -# commit; -# -# insert into test default values returning x_df16; -# update test set x_df16 = default where x_df16 is null returning x_df16; -# commit; -# -# ------------------------------------------------ -# -# recreate table test(x_df34 dom22_15); -- default -9.999999999999999999999999999999999E6144 check( log10(abs(value)) >= 6144 ); -# -# insert into test default values returning x_df34; -# insert into test values(null); -# commit; -# -# alter domain dom22_15 set not null; -# alter domain dom22_15 set default 0; -# alter domain dom22_15 drop constraint add constraint check( log10(abs(value)) < 0 ); -# commit; -# -# insert into test default values returning x_df34; -- must fail with SQLSTATE = 42000 / -Argument for LOG10 must be positive -# update test set x_df34 = default where x_df34 is null returning x_df34; -- must fail with SQLSTATE = 42000 / -Argument for LOG10 must be positive -# commit; -# -# ------------------------------------------------ -# -# recreate table test(x_i128 dom22_16); -- int128 default 170141183460469231731687303715884105727 check( value in(-170141183460469231731687303715884105728, 170141183460469231731687303715884105727) ); -# -# insert into test default values returning x_i128; -# insert into test values(null); -# commit; -# -# alter domain dom22_16 set not null; -# alter domain dom22_16 drop constraint add constraint check(value between bin_shr(-170141183460469231731687303715884105727,127) and bin_shr(170141183460469231731687303715884105727,127)); -- -1...0 -# commit; -# -# insert into test default values returning x_i128; -- must fail with SQLSTATE = 23000 / validation error ... value "170141183460469231731687303715884105727" -# update test set x_i128 = default where x_i128 is null returning x_i128; -- must fail with SQLSTATE = 23000 / validation error ... value "170141183460469231731687303715884105727" -# commit; -# -# ------------------------------------------------ -# -# recreate table test(x_tmtz dom22_17); -- time with time zone default '11:11:11.111 Indian/Cocos' check ( extract(hour from value) <=12 ); -# -# insert into test default values returning x_tmtz; -# insert into test values(null); -# commit; -# -# alter domain dom22_17 set not null; -# alter domain dom22_17 drop constraint add constraint check( extract(minute from value) < 10 ); -# commit; -# -# insert into test default values returning x_tmtz; -- must fail with SQLSTATE = 23000 / validation error ... 
value "11:11:11.1110 Indian/Cocos" -# update test set x_tmtz = default where x_tmtz is null returning x_tmtz; -- must fail with SQLSTATE = 23000 / validation error ... value "11:11:11.1110 Indian/Cocos" -# commit; -# -# ------------------------------------------------ -# -# recreate table test(x_dts dom22_18); -- timestamp default '01.01.0001 00:00:01.001' check ( extract(minute from value) = 0 ); -# -# insert into test default values returning x_dts; -# insert into test values(null); -# commit; -# -# alter domain dom22_18 set not null; -# alter domain dom22_18 drop constraint add constraint check( extract(hour from value) > 7 ); -# commit; -# -# insert into test default values returning x_dts; -- must fail with SQLSTATE = 23000 / validation error ... value "01-JAN-0001 0:00:01.0010" -# update test set x_dts = default where x_dts is null returning x_dts; -- must fail with SQLSTATE = 23000 / validation error ... value "01-JAN-0001 0:00:01.0010" -# commit; -# -# ------------------------------------------------ -# -# recreate table test(x_tstz dom22_19); -- timestamp with time zone default '21.12.2013 11:11:11.111 Indian/Cocos' check ( extract(minute from value) <=30 ); -# insert into test default values returning x_tstz; -# insert into test values(null); -# commit; -# -# alter domain dom22_19 set not null; -# alter domain dom22_19 drop constraint add constraint check( value = '21.12.2013 11:11:11.111 Indian/Comoro' ); -# commit; -# -# insert into test default values returning x_tstz; -- must fail with SQLSTATE = 23000 / validation error ... value "01-JAN-0001 0:00:01.0010" -# update test set x_tstz = default where x_tstz is null returning x_tstz; -- must fail with SQLSTATE = 23000 / validation error ... value "01-JAN-0001 0:00:01.0010" -# commit; -# -# ------------------------------------------------ -# -# recreate table test(x_blob_20 dom22_20); -- default 'Ÿ' check( value in ('Ÿ', 'ÿ') ); -# -# insert into test default values returning x_blob_20; -# insert into test values(null); -# commit; -# -# alter domain dom22_20 set not null; -# alter domain dom22_20 drop constraint add constraint check( value in ('ă', 'ô') ); -# commit; -# -# insert into test default values returning x_blob_20; -- must fail with SQLSTATE = 22018 / conversion error ... value "BLOB" -# update test set x_blob_20 = default where x_blob_20 is null returning x_blob_20; -- must fail with SQLSTATE = 22018 / conversion error ... value "BLOB" -# commit; -# -# ------------------------------------------------ -# -# recreate table test(x_blob_21 dom22_21); -- blob sub_type text character set utf8 default 'Ätran' check (value is null or value NOT in (select river from rivers)) collate nm_coll; -# -# insert into test default values returning x_blob_21; -# insert into test values(null); -# commit; -# -# alter domain dom22_21 set not null; -# alter domain dom22_21 drop constraint add constraint check( value in (select river from rivers) ); -# commit; -# -# insert into test default values returning x_blob_21; -- must fail with SQLSTATE = 22018 / conversion error ... value "BLOB" -# update test set x_blob_21 = default where x_blob_21 is null returning x_blob_21; -- must fail with SQLSTATE = 22018 / conversion error ... 
value "BLOB" -# commit; -# -# ------------------------------------------------ -# -# recreate table test(x_blob_22 dom22_22); -- blob sub_type binary default 0x10 check (value > 0x01); -# -# insert into test default values returning x_blob_22; -# insert into test values(null); -# commit; -# -# alter domain dom22_22 set not null; -# alter domain dom22_22 drop constraint add constraint check( value < 0x01 ); -# commit; -# -# insert into test default values returning x_blob_22; -- must fail with SQLSTATE = 22018 / conversion error ... value "BLOB" -# update test set x_blob_22 = default where x_blob_22 is null returning x_blob_22; -- must fail with SQLSTATE = 22018 / conversion error ... value "BLOB" -# commit; -# ''' % dict(globals(), **locals()) -# -# f_run_sql=open( os.path.join(context['temp_directory'],'tmp_gtcs_domain_22.sql'), 'w') -# f_run_sql.write(sql_init) -# flush_and_close(f_run_sql) -# -# -# #f_run_log = open( os.path.join(context['temp_directory'],'tmp_gtcs_domain_22.log'), 'w') -# #f_run_err = open( os.path.join(context['temp_directory'],'tmp_gtcs_domain_22.err'), 'w') -# #subprocess.call( [context['isql_path'], dsn, '-q', '-i', f_run_sql.name, '-ch', 'utf8'], stdout = f_run_log, stderr = f_run_err ) -# #flush_and_close( f_run_log ) -# #flush_and_close( f_run_err ) -# -# runProgram( 'isql', ['-q', '-i', f_run_sql.name] ) -# -#--- -act_1 = python_act('db_1', substitutions=substitutions_1) - -expected_stderr_1 = """ +expected_stderr = """ Statement failed, SQLSTATE = 22006 unsuccessful metadata update -Cannot make field X_SML of table TEST NOT NULL because there are NULLs present @@ -579,138 +78,138 @@ expected_stderr_1 = """ Statement failed, SQLSTATE = 23000 validation error for column "TEST"."X_DATE", value "2021-04-20" - + Statement failed, SQLSTATE = 23000 validation error for column "TEST"."X_DATE", value "2021-04-20" - + Statement failed, SQLSTATE = 22006 unsuccessful metadata update -Cannot make field X_CHAR of table TEST NOT NULL because there are NULLs present - + Statement failed, SQLSTATE = 23000 validation error for column "TEST"."X_CHAR", value "Wisła " - + Statement failed, SQLSTATE = 23000 validation error for column "TEST"."X_CHAR", value "Wisła " - + Statement failed, SQLSTATE = 22006 unsuccessful metadata update -Cannot make field X_VCHR of table TEST NOT NULL because there are NULLs present - + Statement failed, SQLSTATE = 23000 validation error for column "TEST"."X_VCHR", value "Norrström" - + Statement failed, SQLSTATE = 23000 validation error for column "TEST"."X_VCHR", value "Norrström" - + Statement failed, SQLSTATE = 22006 unsuccessful metadata update -Cannot make field X_NUM of table TEST NOT NULL because there are NULLs present - + Statement failed, SQLSTATE = 23000 validation error for column "TEST"."X_NUM", value "-327.68" - + Statement failed, SQLSTATE = 23000 validation error for column "TEST"."X_NUM", value "-327.68" - + Statement failed, SQLSTATE = 22006 unsuccessful metadata update -Cannot make field X_DEC of table TEST NOT NULL because there are NULLs present - + Statement failed, SQLSTATE = 22003 arithmetic exception, numeric overflow, or string truncation -numeric value is out of range - + Statement failed, SQLSTATE = 22003 arithmetic exception, numeric overflow, or string truncation -numeric value is out of range - + Statement failed, SQLSTATE = 22006 unsuccessful metadata update -Cannot make field X_DP of table TEST NOT NULL because there are NULLs present - + Statement failed, SQLSTATE = 23000 validation error for column "TEST"."X_DP", 
value "0.0000000000000000" - + Statement failed, SQLSTATE = 23000 validation error for column "TEST"."X_DP", value "0.0000000000000000" - + Statement failed, SQLSTATE = 22006 unsuccessful metadata update -Cannot make field X_BIG of table TEST NOT NULL because there are NULLs present - + Statement failed, SQLSTATE = 23000 validation error for column "TEST"."X_BIG", value "9223372036854775807" - + Statement failed, SQLSTATE = 23000 validation error for column "TEST"."X_BIG", value "9223372036854775807" - + Statement failed, SQLSTATE = 22006 unsuccessful metadata update -Cannot make field X_NC of table TEST NOT NULL because there are NULLs present - + Statement failed, SQLSTATE = 23000 validation error for column "TEST"."X_NC", value "Y" - + Statement failed, SQLSTATE = 23000 validation error for column "TEST"."X_NC", value "Y" - + Statement failed, SQLSTATE = 22006 unsuccessful metadata update -Cannot make field X_BIN of table TEST NOT NULL because there are NULLs present - + Statement failed, SQLSTATE = 23000 validation error for column "TEST"."X_BIN", value "Ÿ" - + Statement failed, SQLSTATE = 23000 validation error for column "TEST"."X_BIN", value "Ÿ" - + Statement failed, SQLSTATE = 22006 unsuccessful metadata update -Cannot make field X_VB of table TEST NOT NULL because there are NULLs present - + Statement failed, SQLSTATE = 23000 validation error for column "TEST"."X_VB", value "Ÿ" - + Statement failed, SQLSTATE = 23000 validation error for column "TEST"."X_VB", value "Ÿ" - + Statement failed, SQLSTATE = 22006 unsuccessful metadata update -Cannot make field X_BOO of table TEST NOT NULL because there are NULLs present - + Statement failed, SQLSTATE = 23000 validation error for column "TEST"."X_BOO", value "FALSE" - + Statement failed, SQLSTATE = 23000 validation error for column "TEST"."X_BOO", value "FALSE" - + Statement failed, SQLSTATE = 22006 unsuccessful metadata update -Cannot make field X_DF16 of table TEST NOT NULL because there are NULLs present - + Statement failed, SQLSTATE = 23000 validation error for column "TEST"."X_DF16", value "-9.999999999999999E+384" - + Statement failed, SQLSTATE = 23000 validation error for column "TEST"."X_DF16", value "-9.999999999999999E+384" - + Statement failed, SQLSTATE = 22006 unsuccessful metadata update -Cannot make field X_DF34 of table TEST NOT NULL because there are NULLs present - + Statement failed, SQLSTATE = 42000 expression evaluation not supported -Argument for LOG10 must be positive - + Statement failed, SQLSTATE = 42000 expression evaluation not supported -Argument for LOG10 must be positive - + Statement failed, SQLSTATE = 22006 unsuccessful metadata update -Cannot make field X_I128 of table TEST NOT NULL because there are NULLs present - + Statement failed, SQLSTATE = 23000 validation error for column "TEST"."X_I128", value "170141183460469231731687303715884105727" @@ -779,7 +278,7 @@ expected_stderr_1 = """ """ -expected_stdout_1 = """ +expected_stdout = """ DM_NAME DOM22_01 DM_TYPE 7 DM_SUBTYPE 0 @@ -1117,9 +616,507 @@ expected_stdout_1 = """ 16 """ +@pytest.mark.skip('FIXME: Not IMPLEMENTED') @pytest.mark.version('>=4.0') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") +# test_script_1 +#--- +# +# +# import os +# +# os.environ["ISC_USER"] = user_name +# os.environ["ISC_PASSWORD"] = user_password +# db_conn.close() +# +# #-------------------------------------------- +# +# def flush_and_close( file_handle ): +# # 
https://docs.python.org/2/library/os.html#os.fsync +# # If you're starting with a Python file object f, +# # first do f.flush(), and +# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. +# global os +# +# file_handle.flush() +# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: +# # otherwise: "OSError: [Errno 9] Bad file descriptor"! +# os.fsync(file_handle.fileno()) +# file_handle.close() +# +# #-------------------------------------------- +# +# def cleanup( f_names_list ): +# global os +# for f in f_names_list: +# if type(f) == file: +# del_name = f.name +# elif type(f) == str: +# del_name = f +# else: +# print('Unrecognized type of element:', f, ' - can not be treated as file.') +# del_name = None +# +# if del_name and os.path.isfile( del_name ): +# os.remove( del_name ) +# +# #-------------------------------------------- +# +# sql_init=''' +# set list on; +# set blob all; +# set names utf8; +# connect '%(dsn)s' user '%(user_name)s' password '%(user_password)s'; +# +# create collation nm_coll for utf8 from unicode case insensitive accent insensitive; +# commit; +# +# create domain dom20u as varchar(20) character set utf8 collate nm_coll; +# commit; +# +# +# create table rivers( +# id int +# ,river dom20u +# ); +# insert into rivers(id, river) values(1, 'Волга'); +# insert into rivers(id, river) values(2, 'Дніпро'); +# insert into rivers(id, river) values(3, 'Wisła'); +# insert into rivers(id, river) values(4, 'Dunărea'); +# insert into rivers(id, river) values(5, 'Rhône'); +# commit; +# +# create domain dom22_01 as smallint default 0 check (value >= 0 and value < 100); +# create domain dom22_02 as integer default 500 check (value >= 500); +# create domain dom22_03 as date default 'TODAY' check (value >= 'today'); +# +# -- CHECK-expression of this domain will be changed to +# -- "check (value in (select river from rivers))" - see below: +# create domain dom22_04 as char(20) default 'Wisła' check ( value in ('Волга','Дніпро','Wisła','Dunărea','Rhône') ); +# +# -- CHECK-expression of this domain will be changed to +# -- "check (value NOT in (select river from rivers))" - see below: +# create domain dom22_05 as varchar(25) default 'Norrström' check ( value NOT in ('Волга','Дніпро','Wisła','Dunărea','Rhône') ); +# +# create domain dom22_06 as numeric(2,2) default -327.68 check (value < 0); +# create domain dom22_07 as decimal(6,2) default -999.99 check (value < 0); +# +# -- exp(-745.1332192) is max. 
double precision value that will be NOT distinguish from zero: +# create domain dom22_08 as double precision default 0 check (value is null or value is not distinct from exp(-745.1332192)); +# +# ----------------------------- +# +# -- Additional datataypes (they either not present in original test or did appear since FB 3.x): +# create domain dom22_09 as bigint default 9223372036854775807 check (value > 0); +# create domain dom22_10 as nchar(1) default 'Y' check( value in ('Y', 'y') ); -- alias for ISO8859_1 +# create domain dom22_11 as binary(2) default 'Ÿ' check( value in ('Ÿ', 'ÿ') ); -- this datatype is alias for char(N) character set octets +# create domain dom22_12 as varbinary(2) default 'Ÿ' check( value in ('Ÿ', 'ÿ') ); +# create domain dom22_13 as boolean default false check ( value is not true ); +# +# create domain dom22_14 as decfloat(16) default -9.999999999999999E+384 check( log10(abs(value)) >= 384 ); +# create domain dom22_15 as decfloat default -9.999999999999999999999999999999999E6144 check( log10(abs(value)) >= 6144 ); +# create domain dom22_16 as int128 default 170141183460469231731687303715884105727 check( value in(-170141183460469231731687303715884105728, 170141183460469231731687303715884105727) ); +# +# create domain dom22_17 as time with time zone default '11:11:11.111 Indian/Cocos' check ( extract(hour from value) <=12 ); +# create domain dom22_18 as timestamp default '01.01.0001 00:00:01.001' check ( extract(minute from value) = 0 ); +# create domain dom22_19 as timestamp with time zone default '21.12.2013 11:11:11.111 Indian/Cocos' check ( extract(minute from value) <=30 ); +# +# create domain dom22_20 as blob default 'Ÿ' check( value in ('Ÿ', 'ÿ') ); +# create domain dom22_21 as blob sub_type text character set utf8 default 'Ätran' check (value is null or value NOT in (select river from rivers)) collate nm_coll; +# create domain dom22_22 as blob sub_type binary default 0x10 check (value > 0x01); +# +# create or alter view v_test as +# select +# ff.rdb$field_name as dm_name +# ,ff.rdb$field_type as dm_type +# ,ff.rdb$field_sub_type as dm_subtype +# ,ff.rdb$field_length as dm_flen +# ,ff.rdb$field_scale as dm_fscale +# ,ff.rdb$field_precision as dm_fprec +# ,ff.rdb$character_set_id as dm_fcset +# ,ff.rdb$collation_id as dm_fcoll +# ,ff.rdb$character_length dm_fchrlen +# ,ff.rdb$null_flag as dm_fnull +# ,ff.rdb$validation_source as dm_fvalid +# ,ff.rdb$default_source as dm_fdefault +# from rdb$fields ff +# where +# ff.rdb$system_flag is distinct from 1 +# and ff.rdb$field_name starting with upper( 'dom22_' ) +# order by dm_name +# ; +# commit; +# +# select * from v_test; +# -- ############################################################ +# +# set bail off; +# +# ------------------------------------------------ +# +# recreate table test(x_sml dom22_01); -- smallint default 0 check (value >= 0 and value < 100); +# +# insert into test default values returning x_sml; +# insert into test values(null); +# commit; +# +# alter domain dom22_01 set not null; -- must fail with SQLSTATE = 22006 / -Cannot make field ... NOT NULL because there are NULLs +# alter domain dom22_01 drop constraint add constraint check(value < 0); +# commit; +# +# insert into test default values returning x_sml; -- must fail with SQLSTATE = 23000 / validation error ... value "0" +# update test set x_sml = default where x_sml is null; -- must fail with SQLSTATE = 23000 / validation error ... 
value "0" +# commit; +# +# ------------------------------------------------ +# +# recreate table test(x_int dom22_02); -- integer default 500 check (value >= 500); +# +# insert into test default values returning x_int; +# insert into test values(null); +# commit; +# +# alter domain dom22_02 set not null; -- must fail with SQLSTATE = 22006 / -Cannot make field ... NOT NULL because there are NULLs +# alter domain dom22_02 drop constraint add constraint check(value < 0); +# commit; +# +# insert into test default values returning x_int; -- must fail with SQLSTATE = 23000 / validation error ... value "500" +# update test set x_int = default where x_int is null; -- must fail with SQLSTATE = 23000 / validation error ... value "500" +# commit; +# +# ------------------------------------------------ +# +# recreate table test(x_date dom22_03); -- date default 'TODAY' check (value >= 'today'); +# +# insert into test default values returning x_date; +# insert into test values(null); +# commit; +# +# alter domain dom22_03 set not null; -- must fail with SQLSTATE = 22006 / -Cannot make field ... NOT NULL because there are NULLs +# alter domain dom22_03 drop constraint add constraint check(value < 'today'); +# commit; +# +# insert into test default values returning x_date; -- must fail with SQLSTATE = 23000 / validation error ... value +# update test set x_date = default where x_date is null; -- must fail with SQLSTATE = 23000 / validation error ... value +# commit; +# +# ------------------------------------------------ +# +# recreate table test(x_char dom22_04); -- char(20) default 'Wisła' check (value in (select river from rivers)); +# +# insert into test default values returning x_char; +# insert into test values(null); +# commit; +# +# alter domain dom22_04 set not null; -- must fail with SQLSTATE = 22006 / -Cannot make field ... NOT NULL because there are NULLs +# alter domain dom22_04 drop constraint add constraint check(value NOT in (select river from rivers)); +# commit; +# +# insert into test default values returning x_char; -- must fail with SQLSTATE = 23000 / validation error ... value "Wisła " +# update test set x_char = default where x_char is null; -- must fail with SQLSTATE = 23000 / validation error ... value "Wisła " +# commit; +# +# ------------------------------------------------ +# +# recreate table test(x_vchr dom22_05); -- varchar(25) default 'Norrström' check (value NOT in (select river from rivers)); +# +# insert into test default values returning x_vchr; +# insert into test values(null); +# commit; +# +# alter domain dom22_05 set not null; -- must fail with SQLSTATE = 22006 / -Cannot make field ... NOT NULL because there are NULLs +# alter domain dom22_05 drop constraint add constraint check(value in (select river from rivers)); +# commit; +# +# insert into test default values returning x_vchr; -- must fail with SQLSTATE = 23000 / validation error ... value "Norrström" +# update test set x_vchr = default where x_vchr is null; -- must fail with SQLSTATE = 23000 / validation error ... value "Norrström" +# commit; +# +# ------------------------------------------------ +# +# recreate table test(x_num dom22_06); -- numeric(2,2) default -327.68 check (value < 0); +# +# insert into test default values returning x_num; +# insert into test values(null); +# commit; +# +# alter domain dom22_06 set not null; -- must fail with SQLSTATE = 22006 / -Cannot make field ... 
NOT NULL because there are NULLs +# alter domain dom22_06 drop constraint add constraint check(value >0); +# commit; +# +# insert into test default values returning x_num; -- must fail with SQLSTATE = 23000 / validation error ... value "-327.68" +# update test set x_num = default where x_num is null; -- must fail with SQLSTATE = 23000 / validation error ... value "-327.68" +# commit; +# +# ------------------------------------------------ +# +# recreate table test(x_dec dom22_07); -- decimal(6,2) default -999.99 check (value < 0); +# +# insert into test default values returning x_dec; +# insert into test values(null); +# commit; +# +# alter domain dom22_07 set not null; +# +# -- numeric(2,2) can hold values from the scope -327.68 to +327.68. +# -- cast of -999.99 to numeric(2,2) (being valid for decimal(2,2)) must fail with 'numeric value is out of range': +# alter domain dom22_07 drop constraint add constraint check(cast(value as numeric(2,2)) < 0 ); +# commit; +# +# insert into test default values returning x_dec; -- must fail with SQLSTATE = 23000 / numeric value is out of range +# update test set x_dec = default where x_dec is null returning x_dec; -- must fail with SQLSTATE = 23000 / numeric value is out of range +# commit; +# +# ------------------------------------------------ +# +# recreate table test(x_dp dom22_08); -- default 0 check (value is not distinct from exp(-745.1332192)) +# +# insert into test default values returning x_dp; +# insert into test values(null); +# commit; +# +# alter domain dom22_08 set not null; +# alter domain dom22_08 drop constraint add constraint check(value is not distinct from exp(-745.1332191) ); -- minimal DP value that can be distinguished from zero +# commit; +# +# insert into test default values returning x_dp; -- must fail with SQLSTATE = 23000 / validation error ... value "0.0000000000000000" +# update test set x_dp = default where x_dp is null returning x_dp; -- must fail with SQLSTATE = 23000 / validation error ... value "0.0000000000000000" +# commit; +# +# ------------------------------------------------ +# +# recreate table test(x_big dom22_09); -- default 9223372036854775807 check (value > 0); +# +# insert into test default values returning x_big; +# insert into test values(null); +# commit; +# +# alter domain dom22_09 set not null; +# alter domain dom22_09 drop constraint add constraint check(value between bin_shr(-9223372036854775808,63) and bin_shr(9223372036854775807,63)); -- -1...0 +# commit; +# +# insert into test default values returning x_big; -- must fail with SQLSTATE = 23000 / validation error ... value "9223372036854775807" +# update test set x_big = default where x_big is null returning x_big; -- must fail with SQLSTATE = 23000 / validation error ... 
value "9223372036854775807" +# commit; +# +# ------------------------------------------------ +# +# recreate table test(x_nc dom22_10); -- nchar(1) default 'Y' check( value in ('Y', 'y') ); +# insert into test default values returning x_nc; +# insert into test values(null); +# commit; +# +# alter domain dom22_10 set not null; +# alter domain dom22_10 drop constraint add constraint check( value similar to 'U'); +# commit; +# +# insert into test default values returning x_nc; +# update test set x_nc = default where x_nc is null returning x_nc; +# commit; +# +# ------------------------------------------------ +# +# recreate table test(x_bin dom22_11); -- binary(2) default 'Ÿ' check( value in ('Ÿ', 'ÿ') ) +# insert into test default values returning x_bin; +# insert into test values(null); +# commit; +# +# alter domain dom22_11 set not null; +# alter domain dom22_11 drop constraint add constraint check( value is not distinct from 'ł' or value is not distinct from 'ă' or value is not distinct from 'ô' ); +# commit; +# +# insert into test default values returning x_bin; -- must fail with SQLSTATE = 23000 / validation error ... value "Ÿ" +# update test set x_bin = default where x_bin is null returning x_bin; -- must fail with SQLSTATE = 23000 / validation error ... value "Ÿ" +# commit; +# +# ------------------------------------------------ +# +# recreate table test(x_vb dom22_12); -- varbinary(2) default 'Ÿ' check( value in ('Ÿ', 'ÿ') ) +# insert into test default values returning x_vb; +# insert into test values(null); +# commit; +# +# alter domain dom22_12 set not null; +# alter domain dom22_12 drop constraint add constraint check( value = any(select 'ł' from rdb$database union all select 'ă' from rdb$database) ); +# commit; +# +# insert into test default values returning x_vb; -- must fail with SQLSTATE = 23000 / validation error ... value "Ÿ" +# update test set x_vb = default where x_vb is null returning x_vb; -- must fail with SQLSTATE = 23000 / validation error ... 
value "Ÿ" +# commit; +# +# ------------------------------------------------ +# +# recreate table test(x_boo dom22_13); -- boolean default false check ( value is not true ); +# insert into test default values returning x_boo; +# insert into test values(null); +# commit; +# +# alter domain dom22_13 set not null; +# alter domain dom22_13 drop constraint add constraint check( value NOT in (false) ); +# commit; +# +# insert into test default values returning x_boo; +# update test set x_boo = default where x_boo is null returning x_boo; +# commit; +# +# ------------------------------------------------ +# +# recreate table test(x_df16 dom22_14); -- decfloat(16) default -9.999999999999999E+384 check( log10(abs(value)) >= 384 ); +# +# insert into test default values returning x_df16; +# insert into test values(null); +# commit; +# +# alter domain dom22_14 set not null; +# alter domain dom22_14 drop constraint add constraint check( value = 0 ); +# commit; +# +# insert into test default values returning x_df16; +# update test set x_df16 = default where x_df16 is null returning x_df16; +# commit; +# +# ------------------------------------------------ +# +# recreate table test(x_df34 dom22_15); -- default -9.999999999999999999999999999999999E6144 check( log10(abs(value)) >= 6144 ); +# +# insert into test default values returning x_df34; +# insert into test values(null); +# commit; +# +# alter domain dom22_15 set not null; +# alter domain dom22_15 set default 0; +# alter domain dom22_15 drop constraint add constraint check( log10(abs(value)) < 0 ); +# commit; +# +# insert into test default values returning x_df34; -- must fail with SQLSTATE = 42000 / -Argument for LOG10 must be positive +# update test set x_df34 = default where x_df34 is null returning x_df34; -- must fail with SQLSTATE = 42000 / -Argument for LOG10 must be positive +# commit; +# +# ------------------------------------------------ +# +# recreate table test(x_i128 dom22_16); -- int128 default 170141183460469231731687303715884105727 check( value in(-170141183460469231731687303715884105728, 170141183460469231731687303715884105727) ); +# +# insert into test default values returning x_i128; +# insert into test values(null); +# commit; +# +# alter domain dom22_16 set not null; +# alter domain dom22_16 drop constraint add constraint check(value between bin_shr(-170141183460469231731687303715884105727,127) and bin_shr(170141183460469231731687303715884105727,127)); -- -1...0 +# commit; +# +# insert into test default values returning x_i128; -- must fail with SQLSTATE = 23000 / validation error ... value "170141183460469231731687303715884105727" +# update test set x_i128 = default where x_i128 is null returning x_i128; -- must fail with SQLSTATE = 23000 / validation error ... value "170141183460469231731687303715884105727" +# commit; +# +# ------------------------------------------------ +# +# recreate table test(x_tmtz dom22_17); -- time with time zone default '11:11:11.111 Indian/Cocos' check ( extract(hour from value) <=12 ); +# +# insert into test default values returning x_tmtz; +# insert into test values(null); +# commit; +# +# alter domain dom22_17 set not null; +# alter domain dom22_17 drop constraint add constraint check( extract(minute from value) < 10 ); +# commit; +# +# insert into test default values returning x_tmtz; -- must fail with SQLSTATE = 23000 / validation error ... 
value "11:11:11.1110 Indian/Cocos" +# update test set x_tmtz = default where x_tmtz is null returning x_tmtz; -- must fail with SQLSTATE = 23000 / validation error ... value "11:11:11.1110 Indian/Cocos" +# commit; +# +# ------------------------------------------------ +# +# recreate table test(x_dts dom22_18); -- timestamp default '01.01.0001 00:00:01.001' check ( extract(minute from value) = 0 ); +# +# insert into test default values returning x_dts; +# insert into test values(null); +# commit; +# +# alter domain dom22_18 set not null; +# alter domain dom22_18 drop constraint add constraint check( extract(hour from value) > 7 ); +# commit; +# +# insert into test default values returning x_dts; -- must fail with SQLSTATE = 23000 / validation error ... value "01-JAN-0001 0:00:01.0010" +# update test set x_dts = default where x_dts is null returning x_dts; -- must fail with SQLSTATE = 23000 / validation error ... value "01-JAN-0001 0:00:01.0010" +# commit; +# +# ------------------------------------------------ +# +# recreate table test(x_tstz dom22_19); -- timestamp with time zone default '21.12.2013 11:11:11.111 Indian/Cocos' check ( extract(minute from value) <=30 ); +# insert into test default values returning x_tstz; +# insert into test values(null); +# commit; +# +# alter domain dom22_19 set not null; +# alter domain dom22_19 drop constraint add constraint check( value = '21.12.2013 11:11:11.111 Indian/Comoro' ); +# commit; +# +# insert into test default values returning x_tstz; -- must fail with SQLSTATE = 23000 / validation error ... value "01-JAN-0001 0:00:01.0010" +# update test set x_tstz = default where x_tstz is null returning x_tstz; -- must fail with SQLSTATE = 23000 / validation error ... value "01-JAN-0001 0:00:01.0010" +# commit; +# +# ------------------------------------------------ +# +# recreate table test(x_blob_20 dom22_20); -- default 'Ÿ' check( value in ('Ÿ', 'ÿ') ); +# +# insert into test default values returning x_blob_20; +# insert into test values(null); +# commit; +# +# alter domain dom22_20 set not null; +# alter domain dom22_20 drop constraint add constraint check( value in ('ă', 'ô') ); +# commit; +# +# insert into test default values returning x_blob_20; -- must fail with SQLSTATE = 22018 / conversion error ... value "BLOB" +# update test set x_blob_20 = default where x_blob_20 is null returning x_blob_20; -- must fail with SQLSTATE = 22018 / conversion error ... value "BLOB" +# commit; +# +# ------------------------------------------------ +# +# recreate table test(x_blob_21 dom22_21); -- blob sub_type text character set utf8 default 'Ätran' check (value is null or value NOT in (select river from rivers)) collate nm_coll; +# +# insert into test default values returning x_blob_21; +# insert into test values(null); +# commit; +# +# alter domain dom22_21 set not null; +# alter domain dom22_21 drop constraint add constraint check( value in (select river from rivers) ); +# commit; +# +# insert into test default values returning x_blob_21; -- must fail with SQLSTATE = 22018 / conversion error ... value "BLOB" +# update test set x_blob_21 = default where x_blob_21 is null returning x_blob_21; -- must fail with SQLSTATE = 22018 / conversion error ... 
value "BLOB" +# commit; +# +# ------------------------------------------------ +# +# recreate table test(x_blob_22 dom22_22); -- blob sub_type binary default 0x10 check (value > 0x01); +# +# insert into test default values returning x_blob_22; +# insert into test values(null); +# commit; +# +# alter domain dom22_22 set not null; +# alter domain dom22_22 drop constraint add constraint check( value < 0x01 ); +# commit; +# +# insert into test default values returning x_blob_22; -- must fail with SQLSTATE = 22018 / conversion error ... value "BLOB" +# update test set x_blob_22 = default where x_blob_22 is null returning x_blob_22; -- must fail with SQLSTATE = 22018 / conversion error ... value "BLOB" +# commit; +# ''' % dict(globals(), **locals()) +# +# f_run_sql=open( os.path.join(context['temp_directory'],'tmp_gtcs_domain_22.sql'), 'w') +# f_run_sql.write(sql_init) +# flush_and_close(f_run_sql) +# +# +# #f_run_log = open( os.path.join(context['temp_directory'],'tmp_gtcs_domain_22.log'), 'w') +# #f_run_err = open( os.path.join(context['temp_directory'],'tmp_gtcs_domain_22.err'), 'w') +# #subprocess.call( [context['isql_path'], dsn, '-q', '-i', f_run_sql.name, '-ch', 'utf8'], stdout = f_run_log, stderr = f_run_err ) +# #flush_and_close( f_run_log ) +# #flush_and_close( f_run_err ) +# +# runProgram( 'isql', ['-q', '-i', f_run_sql.name] ) +# +#--- diff --git a/tests/functional/gtcs/test_execute_statement_within_procedure.py b/tests/functional/gtcs/test_execute_statement_within_procedure.py index 239e6bde..726bd7ef 100644 --- a/tests/functional/gtcs/test_execute_statement_within_procedure.py +++ b/tests/functional/gtcs/test_execute_statement_within_procedure.py @@ -1,40 +1,29 @@ #coding:utf-8 -# -# id: functional.gtcs.execute_statement_within_procedure -# title: GTCS/tests/CF_ISQL_27. EXECUTE STATEMENT within a stored procedure could lead to a problems -# decription: -# ::: NB ::: -# ### Name of original test has no any relation with actual task of this test: ### -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_27.script -# -# Discuss in fb-devel (subj: "Vulcan, CF_ISQL_27.sql"): -# https://sourceforge.net/p/firebird/mailman/message/17631672/ -# -# Author said that example from this test did not return any error (and he expacted this) -# plus either did not return value into output parameter or even lead server to crash -# (when such SP was called twise). -# For current FB versions no error occurs and output value is issued w/o any problems. -# -# Checked on: 4.0.0.1803 SS; 3.0.6.33265 SS; 2.5.9.27149 SC. -# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: gtcs.execute-statement-within-procedure +FBTEST: functional.gtcs.execute_statement_within_procedure +TITLE: EXECUTE STATEMENT within a stored procedure could lead to a problems +DESCRIPTION: + ::: NB ::: + ### Name of original test has no any relation with actual task of this test: ### + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_27.script + + Discuss in fb-devel (subj: "Vulcan, CF_ISQL_27.sql"): + https://sourceforge.net/p/firebird/mailman/message/17631672/ + + Author said that example from this test did not return any error (and he expacted this) + plus either did not return value into output parameter or even lead server to crash + (when such SP was called twise). + For current FB versions no error occurs and output value is issued w/o any problems. 
+""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ create generator gen_test1; set generator gen_test1 to 1111111; create generator gen_test2; @@ -47,9 +36,9 @@ test_script_1 = """ ) as begin - for + for select rdb$generator_name - from rdb$generators + from rdb$generators where coalesce (rdb$system_flag, 0) = 0 order by 1 into :gen_name do @@ -68,9 +57,9 @@ test_script_1 = """ """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ MSG point-1 GEN_NAME GEN_TEST1 GEN_VALUE 1111111 @@ -88,9 +77,8 @@ expected_stdout_1 = """ GEN_VALUE 2222222 """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/gtcs/test_external_file_03_d.py b/tests/functional/gtcs/test_external_file_03_d.py index 6bca55b5..fb068b1e 100644 --- a/tests/functional/gtcs/test_external_file_03_d.py +++ b/tests/functional/gtcs/test_external_file_03_d.py @@ -1,43 +1,59 @@ #coding:utf-8 -# -# id: functional.gtcs.external_file_03_d -# title: GTCS/tests/external-file-03-d. Test for external table with field of SMALLINT datatype -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/EXT_REL_0_3_D.script -# Checked on: 4.0.0.2240; 3.0.7.33380 -# -# tracker_id: -# min_versions: ['3.0'] -# versions: 3.0 -# qmid: None + +""" +ID: gtcs.external-file-03 +FBTEST: functional.gtcs.external_file_03_d +TITLE: Test for external table with field of SMALLINT datatype +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/EXT_REL_0_3_D.script +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory() -substitutions_1 = [('[ \t]+', ' ')] +act = python_act('db', substitutions=[('[ \t]+', ' ')]) -init_script_1 = """""" +expected_stderr = """ + Statement failed, SQLSTATE = 22003 + arithmetic exception, numeric overflow, or string truncation + -numeric value is out of range -db_1 = db_factory(sql_dialect=3, init=init_script_1) + Statement failed, SQLSTATE = 22003 + arithmetic exception, numeric overflow, or string truncation + -numeric value is out of range +""" + +expected_stdout = """ + F01 -32768 + F01 -1 + F01 0 + F01 1 + F01 32767 + Records affected: 5 +""" + +@pytest.mark.skip('FIXME: Not IMPLEMENTED') +@pytest.mark.version('>=3.0') +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") # test_script_1 #--- -# +# # import os # import sys # import subprocess # import time -# +# # tmp_file = os.path.join(context['temp_directory'],'tmp_ext_03_d.tmp') # if os.path.isfile( tmp_file): # os.remove( tmp_file ) -# +# # this_fdb = db_conn.database_name -# +# # sql_cmd=''' # connect 'localhost:%(this_fdb)s' user '%(user_name)s' password '%(user_password)s'; # create table ext_table external file '%(tmp_file)s' (f01 smallint); @@ -54,43 +70,16 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # 
set count on; # select * from ext_table order by f01; # ''' % dict(globals(), **locals()) -# +# # runProgram('isql', [ '-q' ], sql_cmd) -# +# # f_sql_chk = open( os.path.join(context['temp_directory'],'tmp_ext_03_d.sql'), 'w') # f_sql_chk.write(sql_cmd) # f_sql_chk.close() -# +# # time.sleep(1) -# +# # os.remove(f_sql_chk.name) # os.remove( tmp_file ) -# +# #--- -act_1 = python_act('db_1', substitutions=substitutions_1) - -expected_stderr_1 = """ - Statement failed, SQLSTATE = 22003 - arithmetic exception, numeric overflow, or string truncation - -numeric value is out of range - - Statement failed, SQLSTATE = 22003 - arithmetic exception, numeric overflow, or string truncation - -numeric value is out of range -""" - -expected_stdout_1 = """ - F01 -32768 - F01 -1 - F01 0 - F01 1 - F01 32767 - Records affected: 5 -""" - -@pytest.mark.version('>=3.0') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - - diff --git a/tests/functional/gtcs/test_external_file_04_d.py b/tests/functional/gtcs/test_external_file_04_d.py index 0748b242..d8a27d61 100644 --- a/tests/functional/gtcs/test_external_file_04_d.py +++ b/tests/functional/gtcs/test_external_file_04_d.py @@ -1,43 +1,59 @@ #coding:utf-8 -# -# id: functional.gtcs.external_file_04_d -# title: GTCS/tests/external-file-04-d. Test for external table with field of INTEGER datatype -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/EXT_REL_0_4_D.script -# Checked on: 4.0.0.2240; 3.0.7.33380 -# -# tracker_id: -# min_versions: ['3.0'] -# versions: 3.0 -# qmid: None + +""" +ID: gtcs.external-file-04-A +FBTEST: functional.gtcs.external_file_04_d +TITLE: est for external table with field of INTEGER datatype +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/EXT_REL_0_4_D.script +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory() -substitutions_1 = [('[ \t]+', ' ')] +act = python_act('db', substitutions=[('[ \t]+', ' ')]) -init_script_1 = """""" +expected_stderr = """ + Statement failed, SQLSTATE = 22003 + arithmetic exception, numeric overflow, or string truncation + -numeric value is out of range -db_1 = db_factory(sql_dialect=3, init=init_script_1) + Statement failed, SQLSTATE = 22003 + arithmetic exception, numeric overflow, or string truncation + -numeric value is out of range +""" + +expected_stdout = """ + F01 -2147483648 + F01 -1 + F01 0 + F01 1 + F01 2147483647 + Records affected: 5 +""" + +@pytest.mark.skip('FIXME: Not IMPLEMENTED') +@pytest.mark.version('>=3.0') +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") # test_script_1 #--- -# +# # import os # import sys # import subprocess # import time -# +# # tmp_file = os.path.join(context['temp_directory'],'tmp_ext_04_d.tmp') # if os.path.isfile( tmp_file): # os.remove( tmp_file ) -# +# # this_fdb = db_conn.database_name -# +# # sql_cmd=''' # connect 'localhost:%(this_fdb)s' user '%(user_name)s' password '%(user_password)s'; # create table ext_table external file '%(tmp_file)s' (f01 int); @@ -54,43 +70,16 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # set count on; # select * from ext_table order by f01; # ''' % dict(globals(), **locals()) -# +# # runProgram('isql', [ '-q' ], sql_cmd) -# +# # f_sql_chk = open( os.path.join(context['temp_directory'],'tmp_ext_04_d.sql'), 'w') # f_sql_chk.write(sql_cmd) # f_sql_chk.close() -# +# # time.sleep(1) 
-# +# # os.remove(f_sql_chk.name) # os.remove( tmp_file ) -# +# #--- -act_1 = python_act('db_1', substitutions=substitutions_1) - -expected_stderr_1 = """ - Statement failed, SQLSTATE = 22003 - arithmetic exception, numeric overflow, or string truncation - -numeric value is out of range - - Statement failed, SQLSTATE = 22003 - arithmetic exception, numeric overflow, or string truncation - -numeric value is out of range -""" - -expected_stdout_1 = """ - F01 -2147483648 - F01 -1 - F01 0 - F01 1 - F01 2147483647 - Records affected: 5 -""" - -@pytest.mark.version('>=3.0') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - - diff --git a/tests/functional/gtcs/test_external_file_04_d_bigint.py b/tests/functional/gtcs/test_external_file_04_d_bigint.py index cd3a3d00..85c89dff 100644 --- a/tests/functional/gtcs/test_external_file_04_d_bigint.py +++ b/tests/functional/gtcs/test_external_file_04_d_bigint.py @@ -1,43 +1,49 @@ #coding:utf-8 -# -# id: functional.gtcs.external_file_04_d_bigint -# title: GTCS/tests/external-file-04-d-bigint. Test for external table with field of BIGINT datatype -# decription: -# There is no similar test in GTCS, but for INTEGER datatype see: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/EXT_REL_0_4_D.script -# Checked on: 4.0.0.2240; 3.0.7.33380 -# -# tracker_id: -# min_versions: ['3.0'] -# versions: 3.0 -# qmid: None + +""" +ID: gtcs.external-file-04-B +FBTEST: functional.gtcs.external_file_04_d_bigint +TITLE: Test for external table with field of BIGINT datatype +DESCRIPTION: + There is no similar test in GTCS, but for INTEGER datatype see: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/EXT_REL_0_4_D.script +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory() -substitutions_1 = [('[ \t]+', ' ')] +act = python_act('db', substitutions=[('[ \t]+', ' ')]) -init_script_1 = """""" +expected_stdout = """ + F01 -9223372036854775808 + F01 -1 + F01 0 + F01 1 + F01 9223372036854775807 + Records affected: 5 +""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +@pytest.mark.skip('FIXME: Not IMPLEMENTED') +@pytest.mark.version('>=3.0') +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") # test_script_1 #--- -# +# # import os # import sys # import subprocess # import time -# +# # tmp_file = os.path.join(context['temp_directory'],'tmp_ext_04_d_bigint.tmp') # if os.path.isfile( tmp_file): # os.remove( tmp_file ) -# +# # this_fdb = db_conn.database_name -# +# # sql_cmd=''' # connect 'localhost:%(this_fdb)s' user '%(user_name)s' password '%(user_password)s'; # create table ext_table external file '%(tmp_file)s' (f01 bigint); @@ -52,33 +58,16 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # set count on; # select * from ext_table order by f01; # ''' % dict(globals(), **locals()) -# +# # runProgram('isql', [ '-q' ], sql_cmd) -# +# # f_sql_chk = open( os.path.join(context['temp_directory'],'tmp_ext_04_d_bigint.sql'), 'w') # f_sql_chk.write(sql_cmd) # f_sql_chk.close() -# +# # time.sleep(1) -# +# # os.remove(f_sql_chk.name) # os.remove( tmp_file ) -# +# #--- -act_1 = python_act('db_1', substitutions=substitutions_1) - -expected_stdout_1 = """ - F01 -9223372036854775808 - F01 -1 - F01 0 - F01 1 - F01 9223372036854775807 - Records affected: 5 -""" - -@pytest.mark.version('>=3.0') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - - diff --git 
a/tests/functional/gtcs/test_external_file_04_d_int128.py b/tests/functional/gtcs/test_external_file_04_d_int128.py index a2df5c38..c7a35bfe 100644 --- a/tests/functional/gtcs/test_external_file_04_d_int128.py +++ b/tests/functional/gtcs/test_external_file_04_d_int128.py @@ -1,43 +1,49 @@ #coding:utf-8 -# -# id: functional.gtcs.external_file_04_d_int128 -# title: GTCS/tests/external-file-04-d-int128. Test for external table with field of INT128 datatype -# decription: -# There is no similar test in GTCS, but for INTEGER datatype see: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/EXT_REL_0_4_D.script -# Checked on: 4.0.0.2240 -# -# tracker_id: -# min_versions: ['4.0'] -# versions: 4.0 -# qmid: None + +""" +ID: gtcs.external-file-04-C +FBTEST: functional.gtcs.external_file_04_d_int128 +TITLE: Test for external table with field of INT128 datatype +DESCRIPTION: + There is no similar test in GTCS, but for INTEGER datatype see: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/EXT_REL_0_4_D.script +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() -substitutions_1 = [('[ \t]+', ' ')] +act = python_act('db', substitutions=[('[ \t]+', ' ')]) -init_script_1 = """""" +expected_stdout = """ + F01 -170141183460469231731687303715884105728 + F01 -1 + F01 0 + F01 1 + F01 170141183460469231731687303715884105727 + Records affected: 5 +""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +@pytest.mark.skip('FIXME: Not IMPLEMENTED') +@pytest.mark.version('>=4.0') +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") # test_script_1 #--- -# +# # import os # import sys # import subprocess # import time -# +# # tmp_file = os.path.join(context['temp_directory'],'tmp_ext_04_d_int128.tmp') # if os.path.isfile( tmp_file): # os.remove( tmp_file ) -# +# # this_fdb = db_conn.database_name -# +# # sql_cmd=''' # connect 'localhost:%(this_fdb)s' user '%(user_name)s' password '%(user_password)s'; # create table ext_table external file '%(tmp_file)s' (f01 int128); @@ -52,33 +58,16 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # set count on; # select * from ext_table order by f01; # ''' % dict(globals(), **locals()) -# +# # runProgram('isql', [ '-q' ], sql_cmd) -# +# # f_sql_chk = open( os.path.join(context['temp_directory'],'tmp_ext_04_d_int128.sql'), 'w') # f_sql_chk.write(sql_cmd) # f_sql_chk.close() -# +# # time.sleep(1) -# +# # os.remove(f_sql_chk.name) # os.remove( tmp_file ) -# +# #--- -act_1 = python_act('db_1', substitutions=substitutions_1) - -expected_stdout_1 = """ - F01 -170141183460469231731687303715884105728 - F01 -1 - F01 0 - F01 1 - F01 170141183460469231731687303715884105727 - Records affected: 5 -""" - -@pytest.mark.version('>=4.0') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - - diff --git a/tests/functional/gtcs/test_external_file_06_d.py b/tests/functional/gtcs/test_external_file_06_d.py index e89ef2b7..786df973 100644 --- a/tests/functional/gtcs/test_external_file_06_d.py +++ b/tests/functional/gtcs/test_external_file_06_d.py @@ -1,47 +1,51 @@ #coding:utf-8 -# -# id: functional.gtcs.external_file_06_d -# title: GTCS/tests/external-file-06-d. Test for external table with field of DOUBLE PRECISION datatype -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/EXT_REL_0_6_D.script -# Checked on: 4.0.0.2240; 3.0.7.33380 -# -# 03-mar-2021. 
-# Added substitution for zero value ('F02') as result of evaluating exp(-745.1332192): -# on Windows number of digits in decimal representation more than on Linux for 1. -# -# tracker_id: -# min_versions: ['3.0'] -# versions: 3.0 -# qmid: None + +""" +ID: gtcs.external-file-06 +FBTEST: functional.gtcs.external_file_06_d +TITLE: Test for external table with field of DOUBLE PRECISION datatype +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/EXT_REL_0_6_D.script +NOTES: +[03.03.2021] + Added substitution for zero value ('F02') as result of evaluating exp(-745.1332192): + on Windows number of digits in decimal representation more than on Linux for 1. +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory() -substitutions_1 = [('[ \t]+', ' '), ('0.0000000000000000', '0.000000000000000')] +act = python_act('db', substitutions=[('[ \t]+', ' '), ('0.0000000000000000', '0.000000000000000')]) -init_script_1 = """""" +expected_stdout = """ + F01 4.940656458412465e-324 + F02 0.0000000000000000 + F03 1.797693134862273e+308 + Records affected: 1 +""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +@pytest.mark.skip('FIXME: Not IMPLEMENTED') +@pytest.mark.version('>=3.0') +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") # test_script_1 #--- -# +# # import os # import sys # import subprocess # import time -# +# # tmp_file = os.path.join(context['temp_directory'],'tmp_ext_06_d.tmp') # if os.path.isfile( tmp_file): # os.remove( tmp_file ) -# +# # this_fdb = db_conn.database_name -# +# # sql_cmd=''' # connect 'localhost:%(this_fdb)s' user '%(user_name)s' password '%(user_password)s'; # create domain dm_dp double precision; @@ -54,31 +58,16 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # set count on; # select * from ext_table; # ''' % dict(globals(), **locals()) -# +# # runProgram('isql', [ '-q' ], sql_cmd) -# +# # f_sql_chk = open( os.path.join(context['temp_directory'],'tmp_ext_06_d.sql'), 'w') # f_sql_chk.write(sql_cmd) # f_sql_chk.close() -# +# # time.sleep(1) -# +# # os.remove(f_sql_chk.name) # os.remove( tmp_file ) -# +# #--- -act_1 = python_act('db_1', substitutions=substitutions_1) - -expected_stdout_1 = """ - F01 4.940656458412465e-324 - F02 0.0000000000000000 - F03 1.797693134862273e+308 - Records affected: 1 -""" - -@pytest.mark.version('>=3.0') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - - diff --git a/tests/functional/gtcs/test_external_file_09_d.py b/tests/functional/gtcs/test_external_file_09_d.py index 8ef09a33..c2af0af7 100644 --- a/tests/functional/gtcs/test_external_file_09_d.py +++ b/tests/functional/gtcs/test_external_file_09_d.py @@ -1,43 +1,56 @@ #coding:utf-8 -# -# id: functional.gtcs.external_file_09_d -# title: GTCS/tests/external-file-09-d. Test for external table with field of DATE datatype -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/EXT_REL_0_9_D.script -# Checked on: 4.0.0.2240 SS: 2.473s; 3.0.7.33380 SS: 1.924s. 
-# -# tracker_id: -# min_versions: ['3.0'] -# versions: 3.0 -# qmid: None + +""" +ID: gtcs.external-file-09 +FBTEST: functional.gtcs.external_file_09_d +TITLE: Test for external table with field of DATE datatype +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/EXT_REL_0_9_D.script +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory() -substitutions_1 = [('[ \t]+', ' ')] +act = python_act('db', substitutions=[('[ \t]+', ' ')]) -init_script_1 = """""" +expected_stderr = """ + Statement failed, SQLSTATE = 22018 + conversion error from string "29-feb-9999" +""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +expected_stdout = """ + F01 1994-06-28 + F01 2004-02-29 + F01 2001-09-01 + F01 0001-01-01 + F01 9999-12-31 + Records affected: 5 + + THIS_DAY_COUNT 2 +""" + +@pytest.mark.skip('FIXME: Not IMPLEMENTED') +@pytest.mark.version('>=3.0') +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") # test_script_1 #--- -# +# # import os # import sys # import subprocess # import time -# +# # tmp_file = os.path.join(context['temp_directory'],'tmp_ext_09_d.tmp') # if os.path.isfile( tmp_file): # os.remove( tmp_file ) -# +# # this_fdb = db_conn.database_name -# +# # sql_cmd=''' # connect 'localhost:%(this_fdb)s' user '%(user_name)s' password '%(user_password)s'; # create table ext_table external file '%(tmp_file)s' (f01 date); @@ -59,40 +72,16 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # select count(*) as this_day_count from ext_table where f01=current_date; # commit; # ''' % dict(globals(), **locals()) -# +# # runProgram('isql', [ '-q' ], sql_cmd) -# +# # f_sql_chk = open( os.path.join(context['temp_directory'],'tmp_ext_09_d.sql'), 'w') # f_sql_chk.write(sql_cmd) # f_sql_chk.close() -# +# # time.sleep(1) -# +# # os.remove(f_sql_chk.name) # os.remove( tmp_file ) -# +# #--- -act_1 = python_act('db_1', substitutions=substitutions_1) - -expected_stderr_1 = """ - Statement failed, SQLSTATE = 22018 - conversion error from string "29-feb-9999" -""" - -expected_stdout_1 = """ - F01 1994-06-28 - F01 2004-02-29 - F01 2001-09-01 - F01 0001-01-01 - F01 9999-12-31 - Records affected: 5 - - THIS_DAY_COUNT 2 -""" - -@pytest.mark.version('>=3.0') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - - diff --git a/tests/functional/gtcs/test_gtcs_proc_cast_isql.py b/tests/functional/gtcs/test_gtcs_proc_cast_isql.py index b130b3c9..173d1212 100644 --- a/tests/functional/gtcs/test_gtcs_proc_cast_isql.py +++ b/tests/functional/gtcs/test_gtcs_proc_cast_isql.py @@ -1,64 +1,322 @@ #coding:utf-8 -# -# id: functional.gtcs.gtcs_proc_cast_isql -# title: GTCS/tests/PROC_CAST1_ISQL.script ... PROC_CAST10_ISQL.script -# decription: -# Original tests see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROC_CAST1_ISQL.script -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROC_CAST2_ISQL.script -# ... -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROC_CAST10_ISQL.script -# -# Checked on WI-V3.0.6.33283; WI-T4.0.0.1881. -# -# tracker_id: -# min_versions: ['3.0.6'] -# versions: 3.0.6 -# qmid: None + +""" +ID: gtcs.proc_cast_isql +FBTEST: functional.gtcs.gtcs_proc_cast_isql +TITLE: PROC_CAST1_ISQL.script ... 
PROC_CAST10_ISQL.script +DESCRIPTION: + Original tests see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROC_CAST1_ISQL.script + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROC_CAST2_ISQL.script + ... + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROC_CAST10_ISQL.script +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 3.0.6 -# resources: None +db = db_factory() -substitutions_1 = [('BLOB_ID.*', ''), ('[ \t]+', ' ')] +act = python_act('db', substitutions=[('BLOB_ID.*', ''), ('[ \t]+', ' ')]) -init_script_1 = """""" +expected_stdout = """ + bigint_bigint 80 + BLOB_ID 0:1 + 80.4450 + bigint_char(10) 80.4450 + bigint_date 2003-04-22 + bigint_decimal( 4,2) 0.04 + bigint_decimal( 4,2) 0.05 + bigint_decimal(10,4) 80.4450 + bigint_double precision 80.44499999999999 + bigint_float 80.445 + bigint_nchar(10) 80.4450 + bigint_numeric( 4,2) 0.04 + bigint_numeric( 4,2) 0.05 + bigint_numeric(10,4) 80.4450 + bigint_smallint 80 + bigint_time 01:02:03.0000 + bigint_timestamp 2003-04-22 11:35:39.0000 + bigint_varchar(10) 80.4450 + blob_bigint 9223372036854775807 + blob_boolean + blob_char(30) 81985529216487135 + blob_date 2004-02-29 + blob_decimal(5,2) 80.45 + blob_double precision 80.44499999999999 + blob_float 80.445 + blob_int -2147483648 + blob_nchar(30) 81985529216487135 + blob_numeric(5,2) 80.45 + blob_smallint 32767 + blob_time 01:02:03.4560 + blob_varchar(30) 81985529216487135 + char(30)_bigint 9223372036854775807 + BLOB_ID 0:1 + 81985529216487135 + char(30)_boolean + char(30)_date 2004-02-29 + char(30)_decimal(5,2) 80.45 + char(30)_double precision 80.44499999999999 + char(30)_float 80.445 + char(30)_int -2147483648 + char(30)_nchar(30) 81985529216487135 + char(30)_numeric(5,2) 80.45 + char(30)_smallint 32767 + char(30)_time 01:02:03.4560 + char(30)_varchar(30) 81985529216487135 + date_bigint 147558 + BLOB_ID 0:1 + 2004-02-29 + date_char(10) 2004-02-29 + date_decimal(4,2) 2.00 + date_double precision 2.000000000000000 + date_float 2 + date_int 147558 + date_nchar(10) 2004-02-29 + date_numeric(4,2) 2.00 + date_smallint 1461 + date_time 01:02:05.0000 + date_timestamp 2003-02-03 01:02:03.0000 + date_varchar(10) 2004-02-29 + decimal(4,2)_bigint 80 + BLOB_ID 0:1 + 0.05 + BLOB_ID 0:3 + 0.06 + BLOB_ID 0:5 + 0.08 + decimal(4,2)_char(10) 0.05 + decimal(4,2)_char(10) 0.06 + decimal(4,2)_char(10) 0.08 + decimal(4,2)_date 2003-04-22 + decimal(4,2)_decimal(4,2) 0.05 + decimal(4,2)_decimal(4,2) 0.06 + decimal(4,2)_decimal(4,2) 0.08 + decimal(4,2)_double precision 80.45000000000000 + decimal(4,2)_double precision 0.05000000000000000 + decimal(4,2)_double precision 0.06000000000000000 + decimal(4,2)_double precision 0.08000000000000000 + decimal(4,2)_float 80.449997 + decimal(4,2)_float 0.050000001 + decimal(4,2)_float 0.059999999 + decimal(4,2)_float 0.079999998 + decimal(4,2)_int 80 + decimal(4,2)_nchar(10) 0.05 + decimal(4,2)_nchar(10) 0.06 + decimal(4,2)_nchar(10) 0.08 + decimal(4,2)_numeric(4,2) 0.05 + decimal(4,2)_numeric(4,2) 0.06 + decimal(4,2)_numeric(4,2) 0.08 + decimal(4,2)_smallint 80 + decimal(4,2)_time 01:03:23.4500 + decimal(4,2)_timestamp 2003-04-22 11:50:03.0000 + decimal(4,2)_varchar(10) 0.05 + decimal(4,2)_varchar(10) 0.06 + decimal(4,2)_varchar(10) 0.08 + double precision_bigint 80 + BLOB_ID 0:1 + 80.44499999999999 + double precision_char(10) 80.445000 + double precision_date 2003-04-22 + ouble precision_decimal(10,4) 80.4450 + double precision_decimal(4,2) 0.05 + double 
precision_decimal(4,2) 0.06 + double precision_decimal(4,2) 0.08 + double precision_float 80.445 + double precision_int 80 + double precision_nchar(10) 80.445000 + ouble precision_numeric(10,4) 80.4450 + double precision_numeric(4,2) 0.05 + double precision_numeric(4,2) 0.06 + double precision_numeric(4,2) 0.08 + double precision_smallint 80 + double precision_time 01:03:23.4450 + double precision_timestamp 2003-04-22 11:42:51.0000 + double precision_varchar(10) 80.445000 + float_bigint 80 + BLOB_ID 0:1 + 80.445000 + float_char(10) 80.445000 + float_date 2003-04-22 + float_decimal(10,4) 80.4450 + float_decimal(4,2) 0.05 + float_double precision 80.44499969482422 + float_int 80 + float_nchar(10) 80.445000 + float_numeric( 4,2) 0.05 + float_numeric(10,4) 80.4450 + float_smallint 80 + float_time 01:03:23.4450 + float_timestamp 2003-04-22 11:42:50.9736 + float_varchar(10) 80.445000 + int_bigint 80 + BLOB_ID 0:1 + 80.4450 + int_char(10) 80.4450 + int_date 2003-04-22 + int_decimal( 4,2) 0.04 + int_decimal( 4,2) 0.05 + int_decimal(10,4) 80.4450 + int_double precision 80.44499999999999 + int_float 80.445 + int_nchar(10) 80.4450 + int_numeric( 4,2) 0.04 + int_numeric( 4,2) 0.05 + int_numeric(10,4) 80.4450 + int_smallint 80 + int_time 01:02:03.0000 + int_timestamp 2003-04-22 11:35:39.0000 + int_varchar(10) 80.4450 + nchar(30)_bigint 9223372036854775807 + BLOB_ID 0:1 + 81985529216487135 + nchar(30)_boolean + nchar(30)_char(30) 81985529216487135 + nchar(30)_date 2004-02-29 + nchar(30)_decimal(5,2) 80.45 + nchar(30)_double precision 80.44499999999999 + nchar(30)_float 80.445 + nchar(30)_int -2147483648 + nchar(30)_numeric(5,2) 80.45 + nchar(30)_smallint 32767 + nchar(30)_time 01:02:03.4560 + nchar(30)_varchar(30) 81985529216487135 + numeric(4,2)_bigint 80 + BLOB_ID 0:1 + 0.05 + BLOB_ID 0:3 + 0.06 + BLOB_ID 0:5 + 0.08 + numeric(4,2)_char(10) 0.05 + numeric(4,2)_char(10) 0.06 + numeric(4,2)_char(10) 0.08 + numeric(4,2)_date 2003-04-22 + numeric(4,2)_decimal(4,2) 0.05 + numeric(4,2)_decimal(4,2) 0.06 + numeric(4,2)_decimal(4,2) 0.08 + numeric(4,2)_double precision 80.45000000000000 + numeric(4,2)_double precision 0.05000000000000000 + numeric(4,2)_double precision 0.06000000000000000 + numeric(4,2)_double precision 0.08000000000000000 + numeric(4,2)_float 80.449997 + numeric(4,2)_float 0.050000001 + numeric(4,2)_float 0.059999999 + numeric(4,2)_float 0.079999998 + numeric(4,2)_int 80 + numeric(4,2)_nchar(10) 0.05 + numeric(4,2)_nchar(10) 0.06 + numeric(4,2)_nchar(10) 0.08 + numeric(4,2)_numeric(4,2) 0.05 + numeric(4,2)_numeric(4,2) 0.06 + numeric(4,2)_numeric(4,2) 0.08 + numeric(4,2)_smallint 80 + numeric(4,2)_time 01:03:23.4500 + numeric(4,2)_timestamp 2003-04-22 11:50:03.0000 + numeric(4,2)_varchar(10) 0.05 + numeric(4,2)_varchar(10) 0.06 + numeric(4,2)_varchar(10) 0.08 + smallint_bigint 10922 + BLOB_ID 0:1 + 80.4450 + smallint_char(10) 80.4450 + smallint_date 2003-11-19 + smallint_decimal( 4,2) 80.45 + smallint_decimal(10,4) 80.4450 + smallint_double precision 80.44499999999999 + smallint_float 80.445 + smallint_int -10922 + smallint_int 10922 + smallint_nchar(10) 80.4450 + smallint_numeric( 4,2) 80.45 + smallint_numeric(10,4) 80.4450 + smallint_time 01:06:55.0000 + smallint_timestamp 2003-11-21 01:02:03.0000 + smallint_varchar(10) 80.4450 + time_bigint 82677 + BLOB_ID 0:1 + 01:02:03.0000 + time_char(13) 01:02:03.0000 + time_date 2003-02-01 + time_decimal(10,2) 82676.67 + time_double precision 82676.66600000000 + time_float 82676.664 + time_int 82677 + time_nchar(13) 01:02:03.0000 + time_numeric(10,2) 
82676.67 + time_smallint 3661 + time_timestamp 2003-02-01 01:02:03.0000 + time_varchar(13) 01:02:03.0000 + timestamp_bigint 1 + BLOB_ID 0:1 + 2004-02-29 01:02:03.4560 + timestamp_char(30) 2004-02-29 01:02:03.4560 + timestamp_date 2004-02-29 + timestamp_decimal(10,2) 0.58 + timestamp_double precision 0.5755401160000000 + timestamp_float 0.57554013 + timestamp_int 1 + timestamp_nchar(30) 2004-02-29 01:02:03.4560 + timestamp_numeric(10,2) 0.58 + timestamp_smallint 0 + timestamp_time 01:02:03.0000 + timestamp_varchar(30) 2004-02-29 01:02:03.4560 + varchar(30)_bigint -268435456 + varchar(30)_bigint 4026531840 + varchar(30)_bigint 9223372036854775807 + varchar(30)_bigint -1 + BLOB_ID 0:1 + 81985529216487135 + varchar(30)_boolean + varchar(30)_char(30) 81985529216487135 + varchar(30)_date 2004-02-29 + varchar(30)_decimal(5,2) 80.45 + varchar(30)_double precision 80.44499999999999 + varchar(30)_float 80.445 + varchar(30)_int -2147483648 + varchar(30)_nchar(30) 81985529216487135 + varchar(30)_numeric(5,2) 80.45 + varchar(30)_smallint 32767 + varchar(30)_time 01:02:03.4560 +""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +@pytest.mark.skip('FIXME: Not IMPLEMENTED') +@pytest.mark.version('>=3.0.6') +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") # test_script_1 #--- -# +# # import os # import sys # import time # import subprocess # from fdb import services -# +# # os.environ["ISC_USER"] = user_name # os.environ["ISC_PASSWORD"] = user_password -# +# # db_conn.close() -# +# # #-------------------------------------------- -# +# # def flush_and_close( file_handle ): # # https://docs.python.org/2/library/os.html#os.fsync -# # If you're starting with a Python file object f, -# # first do f.flush(), and +# # If you're starting with a Python file object f, +# # first do f.flush(), and # # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. # global os -# +# # file_handle.flush() # if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: # # otherwise: "OSError: [Errno 9] Bad file descriptor"! 
# os.fsync(file_handle.fileno()) # file_handle.close() -# +# # #-------------------------------------------- -# +# # def cleanup( f_names_list ): # global os # for f in f_names_list: @@ -69,27 +327,27 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # else: # print('Unrecognized type of element:', f, ' - can not be treated as file.') # del_name = None -# +# # if del_name and os.path.isfile( del_name ): # os.remove( del_name ) -# +# # #-------------------------------------------- -# +# # sql_gen_ddl = os.path.join(context['files_location'],'gtcs-cast-gen-ddl.sql') -# +# # f_init_run=open( os.path.join(context['temp_directory'],'tmp_gtcs_cast_ddl.sql'), 'w', buffering = 0) # f_init_err=open( os.path.join(context['temp_directory'],'tmp_gtcs_cast_ddl.err'), 'w', buffering = 0) # subprocess.call( [context['isql_path'], dsn, '-q', '-i', sql_gen_ddl], stdout=f_init_run, stderr=f_init_err ) # flush_and_close( f_init_run ) # flush_and_close( f_init_err ) -# -# +# +# # f_cast_log=open( os.path.join(context['temp_directory'],'tmp_gtcs_cast_run.log'), 'w', buffering = 0) # f_cast_err=open( os.path.join(context['temp_directory'],'tmp_gtcs_cast_run.err'), 'w', buffering = 0) # subprocess.call( [context['isql_path'], dsn, '-q', '-i', f_init_run.name], stdout=f_cast_log, stderr=f_cast_err ) # flush_and_close( f_cast_log ) # flush_and_close( f_cast_err ) -# +# # # CHECKS: # ######### # for g in (f_init_err, f_cast_err): @@ -97,286 +355,16 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # for line in f: # if line.split(): # print('UNEXPECTED OUTPUT in ' + os.path.split(g.name)[-1] + ': ' + line ) -# +# # with open(f_cast_log.name, 'r') as f: # for line in f: # if line.split(): # print( line.strip() ) -# +# # # CLEANUP: # ########## -# # do NOT remove this pause otherwise some of logs will not be enable for deletion and test will finish with +# # do NOT remove this pause otherwise some of logs will not be enable for deletion and test will finish with # # Exception raised while executing Python test script. 
exception: WindowsError: 32 # time.sleep(1) # cleanup( ( f_init_run, f_init_err, f_cast_log, f_cast_err ) ) #--- -act_1 = python_act('db_1', substitutions=substitutions_1) - -expected_stdout_1 = """ - bigint_bigint 80 - BLOB_ID 0:1 - 80.4450 - bigint_char(10) 80.4450 - bigint_date 2003-04-22 - bigint_decimal( 4,2) 0.04 - bigint_decimal( 4,2) 0.05 - bigint_decimal(10,4) 80.4450 - bigint_double precision 80.44499999999999 - bigint_float 80.445 - bigint_nchar(10) 80.4450 - bigint_numeric( 4,2) 0.04 - bigint_numeric( 4,2) 0.05 - bigint_numeric(10,4) 80.4450 - bigint_smallint 80 - bigint_time 01:02:03.0000 - bigint_timestamp 2003-04-22 11:35:39.0000 - bigint_varchar(10) 80.4450 - blob_bigint 9223372036854775807 - blob_boolean - blob_char(30) 81985529216487135 - blob_date 2004-02-29 - blob_decimal(5,2) 80.45 - blob_double precision 80.44499999999999 - blob_float 80.445 - blob_int -2147483648 - blob_nchar(30) 81985529216487135 - blob_numeric(5,2) 80.45 - blob_smallint 32767 - blob_time 01:02:03.4560 - blob_varchar(30) 81985529216487135 - char(30)_bigint 9223372036854775807 - BLOB_ID 0:1 - 81985529216487135 - char(30)_boolean - char(30)_date 2004-02-29 - char(30)_decimal(5,2) 80.45 - char(30)_double precision 80.44499999999999 - char(30)_float 80.445 - char(30)_int -2147483648 - char(30)_nchar(30) 81985529216487135 - char(30)_numeric(5,2) 80.45 - char(30)_smallint 32767 - char(30)_time 01:02:03.4560 - char(30)_varchar(30) 81985529216487135 - date_bigint 147558 - BLOB_ID 0:1 - 2004-02-29 - date_char(10) 2004-02-29 - date_decimal(4,2) 2.00 - date_double precision 2.000000000000000 - date_float 2 - date_int 147558 - date_nchar(10) 2004-02-29 - date_numeric(4,2) 2.00 - date_smallint 1461 - date_time 01:02:05.0000 - date_timestamp 2003-02-03 01:02:03.0000 - date_varchar(10) 2004-02-29 - decimal(4,2)_bigint 80 - BLOB_ID 0:1 - 0.05 - BLOB_ID 0:3 - 0.06 - BLOB_ID 0:5 - 0.08 - decimal(4,2)_char(10) 0.05 - decimal(4,2)_char(10) 0.06 - decimal(4,2)_char(10) 0.08 - decimal(4,2)_date 2003-04-22 - decimal(4,2)_decimal(4,2) 0.05 - decimal(4,2)_decimal(4,2) 0.06 - decimal(4,2)_decimal(4,2) 0.08 - decimal(4,2)_double precision 80.45000000000000 - decimal(4,2)_double precision 0.05000000000000000 - decimal(4,2)_double precision 0.06000000000000000 - decimal(4,2)_double precision 0.08000000000000000 - decimal(4,2)_float 80.449997 - decimal(4,2)_float 0.050000001 - decimal(4,2)_float 0.059999999 - decimal(4,2)_float 0.079999998 - decimal(4,2)_int 80 - decimal(4,2)_nchar(10) 0.05 - decimal(4,2)_nchar(10) 0.06 - decimal(4,2)_nchar(10) 0.08 - decimal(4,2)_numeric(4,2) 0.05 - decimal(4,2)_numeric(4,2) 0.06 - decimal(4,2)_numeric(4,2) 0.08 - decimal(4,2)_smallint 80 - decimal(4,2)_time 01:03:23.4500 - decimal(4,2)_timestamp 2003-04-22 11:50:03.0000 - decimal(4,2)_varchar(10) 0.05 - decimal(4,2)_varchar(10) 0.06 - decimal(4,2)_varchar(10) 0.08 - double precision_bigint 80 - BLOB_ID 0:1 - 80.44499999999999 - double precision_char(10) 80.445000 - double precision_date 2003-04-22 - ouble precision_decimal(10,4) 80.4450 - double precision_decimal(4,2) 0.05 - double precision_decimal(4,2) 0.06 - double precision_decimal(4,2) 0.08 - double precision_float 80.445 - double precision_int 80 - double precision_nchar(10) 80.445000 - ouble precision_numeric(10,4) 80.4450 - double precision_numeric(4,2) 0.05 - double precision_numeric(4,2) 0.06 - double precision_numeric(4,2) 0.08 - double precision_smallint 80 - double precision_time 01:03:23.4450 - double precision_timestamp 2003-04-22 11:42:51.0000 - double precision_varchar(10) 
80.445000 - float_bigint 80 - BLOB_ID 0:1 - 80.445000 - float_char(10) 80.445000 - float_date 2003-04-22 - float_decimal(10,4) 80.4450 - float_decimal(4,2) 0.05 - float_double precision 80.44499969482422 - float_int 80 - float_nchar(10) 80.445000 - float_numeric( 4,2) 0.05 - float_numeric(10,4) 80.4450 - float_smallint 80 - float_time 01:03:23.4450 - float_timestamp 2003-04-22 11:42:50.9736 - float_varchar(10) 80.445000 - int_bigint 80 - BLOB_ID 0:1 - 80.4450 - int_char(10) 80.4450 - int_date 2003-04-22 - int_decimal( 4,2) 0.04 - int_decimal( 4,2) 0.05 - int_decimal(10,4) 80.4450 - int_double precision 80.44499999999999 - int_float 80.445 - int_nchar(10) 80.4450 - int_numeric( 4,2) 0.04 - int_numeric( 4,2) 0.05 - int_numeric(10,4) 80.4450 - int_smallint 80 - int_time 01:02:03.0000 - int_timestamp 2003-04-22 11:35:39.0000 - int_varchar(10) 80.4450 - nchar(30)_bigint 9223372036854775807 - BLOB_ID 0:1 - 81985529216487135 - nchar(30)_boolean - nchar(30)_char(30) 81985529216487135 - nchar(30)_date 2004-02-29 - nchar(30)_decimal(5,2) 80.45 - nchar(30)_double precision 80.44499999999999 - nchar(30)_float 80.445 - nchar(30)_int -2147483648 - nchar(30)_numeric(5,2) 80.45 - nchar(30)_smallint 32767 - nchar(30)_time 01:02:03.4560 - nchar(30)_varchar(30) 81985529216487135 - numeric(4,2)_bigint 80 - BLOB_ID 0:1 - 0.05 - BLOB_ID 0:3 - 0.06 - BLOB_ID 0:5 - 0.08 - numeric(4,2)_char(10) 0.05 - numeric(4,2)_char(10) 0.06 - numeric(4,2)_char(10) 0.08 - numeric(4,2)_date 2003-04-22 - numeric(4,2)_decimal(4,2) 0.05 - numeric(4,2)_decimal(4,2) 0.06 - numeric(4,2)_decimal(4,2) 0.08 - numeric(4,2)_double precision 80.45000000000000 - numeric(4,2)_double precision 0.05000000000000000 - numeric(4,2)_double precision 0.06000000000000000 - numeric(4,2)_double precision 0.08000000000000000 - numeric(4,2)_float 80.449997 - numeric(4,2)_float 0.050000001 - numeric(4,2)_float 0.059999999 - numeric(4,2)_float 0.079999998 - numeric(4,2)_int 80 - numeric(4,2)_nchar(10) 0.05 - numeric(4,2)_nchar(10) 0.06 - numeric(4,2)_nchar(10) 0.08 - numeric(4,2)_numeric(4,2) 0.05 - numeric(4,2)_numeric(4,2) 0.06 - numeric(4,2)_numeric(4,2) 0.08 - numeric(4,2)_smallint 80 - numeric(4,2)_time 01:03:23.4500 - numeric(4,2)_timestamp 2003-04-22 11:50:03.0000 - numeric(4,2)_varchar(10) 0.05 - numeric(4,2)_varchar(10) 0.06 - numeric(4,2)_varchar(10) 0.08 - smallint_bigint 10922 - BLOB_ID 0:1 - 80.4450 - smallint_char(10) 80.4450 - smallint_date 2003-11-19 - smallint_decimal( 4,2) 80.45 - smallint_decimal(10,4) 80.4450 - smallint_double precision 80.44499999999999 - smallint_float 80.445 - smallint_int -10922 - smallint_int 10922 - smallint_nchar(10) 80.4450 - smallint_numeric( 4,2) 80.45 - smallint_numeric(10,4) 80.4450 - smallint_time 01:06:55.0000 - smallint_timestamp 2003-11-21 01:02:03.0000 - smallint_varchar(10) 80.4450 - time_bigint 82677 - BLOB_ID 0:1 - 01:02:03.0000 - time_char(13) 01:02:03.0000 - time_date 2003-02-01 - time_decimal(10,2) 82676.67 - time_double precision 82676.66600000000 - time_float 82676.664 - time_int 82677 - time_nchar(13) 01:02:03.0000 - time_numeric(10,2) 82676.67 - time_smallint 3661 - time_timestamp 2003-02-01 01:02:03.0000 - time_varchar(13) 01:02:03.0000 - timestamp_bigint 1 - BLOB_ID 0:1 - 2004-02-29 01:02:03.4560 - timestamp_char(30) 2004-02-29 01:02:03.4560 - timestamp_date 2004-02-29 - timestamp_decimal(10,2) 0.58 - timestamp_double precision 0.5755401160000000 - timestamp_float 0.57554013 - timestamp_int 1 - timestamp_nchar(30) 2004-02-29 01:02:03.4560 - timestamp_numeric(10,2) 0.58 - timestamp_smallint 0 
- timestamp_time 01:02:03.0000 - timestamp_varchar(30) 2004-02-29 01:02:03.4560 - varchar(30)_bigint -268435456 - varchar(30)_bigint 4026531840 - varchar(30)_bigint 9223372036854775807 - varchar(30)_bigint -1 - BLOB_ID 0:1 - 81985529216487135 - varchar(30)_boolean - varchar(30)_char(30) 81985529216487135 - varchar(30)_date 2004-02-29 - varchar(30)_decimal(5,2) 80.45 - varchar(30)_double precision 80.44499999999999 - varchar(30)_float 80.445 - varchar(30)_int -2147483648 - varchar(30)_nchar(30) 81985529216487135 - varchar(30)_numeric(5,2) 80.45 - varchar(30)_smallint 32767 - varchar(30)_time 01:02:03.4560 -""" - -@pytest.mark.version('>=3.0.6') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - - diff --git a/tests/functional/gtcs/test_gtcs_proc_isql_01.py b/tests/functional/gtcs/test_gtcs_proc_isql_01.py index 83fc0c68..df1cc37a 100644 --- a/tests/functional/gtcs/test_gtcs_proc_isql_01.py +++ b/tests/functional/gtcs/test_gtcs_proc_isql_01.py @@ -1,32 +1,22 @@ #coding:utf-8 -# -# id: functional.gtcs.gtcs_proc_isql_01 -# title: gtcs-proc-isql-01 -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROC_ISQL_01.script -# SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script -# Checked on: 4.0.0.1803 SS; 3.0.6.33265 SS; 2.5.9.27149 SC. -# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: gtcs.proc-isql-01 +TITLE: gtcs-proc-isql-01 +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROC_ISQL_01.script + SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script +FBTEST: functional.gtcs.gtcs_proc_isql_01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory(from_backup='gtcs_sp1.fbk') -substitutions_1 = [('=', ''), ('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(from_backup='gtcs_sp1.fbk', init=init_script_1) - -test_script_1 = """ +test_script = """ set bail on; set term ^; @@ -54,9 +44,9 @@ test_script_1 = """ select 'point-7' msg, p.a from proc1 p where p.a = (select avg(x.a) from proc1 x); """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('=', ''), ('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ A 400 @@ -89,9 +79,8 @@ expected_stdout_1 = """ Records affected: 1 """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/gtcs/test_gtcs_proc_isql_02.py b/tests/functional/gtcs/test_gtcs_proc_isql_02.py index d3d4c63e..4624fb41 100644 --- a/tests/functional/gtcs/test_gtcs_proc_isql_02.py +++ b/tests/functional/gtcs/test_gtcs_proc_isql_02.py @@ -1,32 +1,22 @@ #coding:utf-8 -# -# id: functional.gtcs.gtcs_proc_isql_02 -# title: gtcs-proc-isql-02 -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROC_ISQL_02.script -# SQL script for 
creating test database ('gtcs_sp1.fbk') and fill it with some data: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script -# Checked on: 4.0.0.1803 SS; 3.0.6.33265 SS; 2.5.9.27149 SC. -# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: gtcs.proc-isql-02 +TITLE: gtcs-proc-isql-02 +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROC_ISQL_02.script + SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script +FBTEST: functional.gtcs.gtcs_proc_isql_02 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory(from_backup='gtcs_sp1.fbk') -substitutions_1 = [('=', ''), ('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(from_backup='gtcs_sp1.fbk', init=init_script_1) - -test_script_1 = """ +test_script = """ set bail on; set term ^; create procedure proc2 returns (a varchar(5), b varchar(5),c integer) as @@ -53,9 +43,9 @@ test_script_1 = """ """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('=', ''), ('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ A B C S4 P5 400 @@ -88,9 +78,8 @@ expected_stdout_1 = """ Records affected: 1 """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/gtcs/test_gtcs_proc_isql_03.py b/tests/functional/gtcs/test_gtcs_proc_isql_03.py index 7b0c1798..0a318ad5 100644 --- a/tests/functional/gtcs/test_gtcs_proc_isql_03.py +++ b/tests/functional/gtcs/test_gtcs_proc_isql_03.py @@ -1,32 +1,22 @@ #coding:utf-8 -# -# id: functional.gtcs.gtcs_proc_isql_03 -# title: gtcs-proc-isql-03 -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROC_ISQL_03.script -# SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script -# Checked on: 4.0.0.1803 SS; 3.0.6.33265 SS; 2.5.9.27149 SC. 
-# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: gtcs.proc-isql-03 +TITLE: gtcs-proc-isql-03 +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROC_ISQL_03.script + SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script +FBTEST: functional.gtcs.gtcs_proc_isql_03 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory(from_backup='gtcs_sp1.fbk') -substitutions_1 = [('=', ''), ('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(from_backup='gtcs_sp1.fbk', init=init_script_1) - -test_script_1 = """ +test_script = """ set bail on; set term ^; create procedure proc3 returns (a varchar(5), c integer) as @@ -42,7 +32,7 @@ test_script_1 = """ ^ set term ;^ - execute procedure proc3; + execute procedure proc3; set count on; @@ -55,9 +45,9 @@ test_script_1 = """ select 'point-7' msg, p.a, p.c from proc3 p where p.c = (select avg(x.c) from proc3 x); """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('=', ''), ('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ A C P5 400 @@ -90,9 +80,8 @@ expected_stdout_1 = """ Records affected: 1 """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/gtcs/test_gtcs_proc_isql_04.py b/tests/functional/gtcs/test_gtcs_proc_isql_04.py index f0b56f24..9f5f4ad4 100644 --- a/tests/functional/gtcs/test_gtcs_proc_isql_04.py +++ b/tests/functional/gtcs/test_gtcs_proc_isql_04.py @@ -1,32 +1,22 @@ #coding:utf-8 -# -# id: functional.gtcs.gtcs_proc_isql_04 -# title: gtcs-proc-isql-04 -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROC_ISQL_04.script -# SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script -# Checked on: 4.0.0.1803 SS; 3.0.6.33265 SS; 2.5.9.27149 SC. 
-# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: gtcs.proc-isql-04 +TITLE: gtcs-proc-isql-04 +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROC_ISQL_04.script + SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script +FBTEST: functional.gtcs.gtcs_proc_isql_04 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory(from_backup='gtcs_sp1.fbk') -substitutions_1 = [('=', ''), ('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(from_backup='gtcs_sp1.fbk', init=init_script_1) - -test_script_1 = """ +test_script = """ set bail on; set term ^; create procedure proc4 returns (a varchar(6), b varchar(5),c integer) as @@ -55,9 +45,9 @@ test_script_1 = """ select 'point-7' msg, p.a, p.c from proc4 p where p.c = (select avg(x.c) from proc4 x); """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('=', ''), ('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ A B C Red P6 19 @@ -88,9 +78,8 @@ expected_stdout_1 = """ Records affected: 1 """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/gtcs/test_gtcs_proc_isql_05.py b/tests/functional/gtcs/test_gtcs_proc_isql_05.py index a34ae9b6..987080d3 100644 --- a/tests/functional/gtcs/test_gtcs_proc_isql_05.py +++ b/tests/functional/gtcs/test_gtcs_proc_isql_05.py @@ -1,32 +1,22 @@ #coding:utf-8 -# -# id: functional.gtcs.gtcs_proc_isql_05 -# title: gtcs-proc-isql-05 -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROC_ISQL_05.script -# SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script -# Checked on: 4.0.0.1803 SS; 3.0.6.33265 SS; 2.5.9.27149 SC. 
-# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: gtcs.proc-isql-05 +TITLE: gtcs-proc-isql-05 +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROC_ISQL_05.script + SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script +FBTEST: functional.gtcs.gtcs_proc_isql_05 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory(from_backup='gtcs_sp1.fbk') -substitutions_1 = [('=', ''), ('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(from_backup='gtcs_sp1.fbk', init=init_script_1) - -test_script_1 = """ +test_script = """ set bail on; set term ^; create procedure proc5 returns (a varchar(20), b integer) as @@ -56,9 +46,9 @@ test_script_1 = """ """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('=', ''), ('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ A B Cog 19 @@ -89,9 +79,8 @@ expected_stdout_1 = """ Records affected: 1 """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/gtcs/test_gtcs_proc_isql_06.py b/tests/functional/gtcs/test_gtcs_proc_isql_06.py index 21ff34ac..e7a397e5 100644 --- a/tests/functional/gtcs/test_gtcs_proc_isql_06.py +++ b/tests/functional/gtcs/test_gtcs_proc_isql_06.py @@ -1,32 +1,22 @@ #coding:utf-8 -# -# id: functional.gtcs.gtcs_proc_isql_06 -# title: gtcs-proc-isql-06 -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROC_ISQL_06.script -# SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script -# Checked on: 4.0.0.1803 SS; 3.0.6.33265 SS; 2.5.9.27149 SC. 
-# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: gtcs.proc-isql-06 +TITLE: gtcs-proc-isql-06 +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROC_ISQL_06.script + SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script +FBTEST: functional.gtcs.gtcs_proc_isql_06 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory(from_backup='gtcs_sp1.fbk') -substitutions_1 = [('=', ''), ('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(from_backup='gtcs_sp1.fbk', init=init_script_1) - -test_script_1 = """ +test_script = """ set bail on; set term ^; create procedure proc6 returns (a varchar(20), b integer) as @@ -54,9 +44,9 @@ test_script_1 = """ select 'point-7' msg, p.a , b from proc6 p where b = (select avg(x.b) from proc6 x); """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('=', ''), ('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ A B Cog 19 @@ -67,29 +57,28 @@ expected_stdout_1 = """ MSG MAX point-2 Cog Records affected: 1 - + MSG B point-3 19 Records affected: 1 - + MSG A B point-4 Cog 19 Records affected: 1 - + Records affected: 0 - + MSG A AVG point-6 Cog 19 Records affected: 1 - + MSG A B point-7 Cog 19 Records affected: 1 """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/gtcs/test_gtcs_proc_isql_07.py b/tests/functional/gtcs/test_gtcs_proc_isql_07.py index b4f186e0..b85f0dd6 100644 --- a/tests/functional/gtcs/test_gtcs_proc_isql_07.py +++ b/tests/functional/gtcs/test_gtcs_proc_isql_07.py @@ -1,32 +1,22 @@ #coding:utf-8 -# -# id: functional.gtcs.gtcs_proc_isql_07 -# title: gtcs-proc-isql-07 -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROC_ISQL_07.script -# SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script -# Checked on: 4.0.0.1803 SS; 3.0.6.33265 SS; 2.5.9.27149 SC. 
-# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: gtcs.proc-isql-07 +TITLE: gtcs-proc-isql-07 +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROC_ISQL_07.script + SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script +FBTEST: functional.gtcs.gtcs_proc_isql_07 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory(from_backup='gtcs_sp1.fbk') -substitutions_1 = [('=', ''), ('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(from_backup='gtcs_sp1.fbk', init=init_script_1) - -test_script_1 = """ +test_script = """ set bail on; set term ^; create procedure proc7 returns (a integer) as @@ -55,9 +45,9 @@ test_script_1 = """ select 'point-7' msg, p.a from proc7 p where p.a = (select avg(x.a) from proc7 x); """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('=', ''), ('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ A 400 @@ -91,9 +81,8 @@ expected_stdout_1 = """ Records affected: 1 """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/gtcs/test_gtcs_proc_isql_08.py b/tests/functional/gtcs/test_gtcs_proc_isql_08.py index 574efe3f..2ed4f167 100644 --- a/tests/functional/gtcs/test_gtcs_proc_isql_08.py +++ b/tests/functional/gtcs/test_gtcs_proc_isql_08.py @@ -1,35 +1,22 @@ #coding:utf-8 -# -# id: functional.gtcs.gtcs_proc_isql_08 -# title: gtcs-proc-isql-08 -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROC_ISQL_08.script -# SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script -# Checked on: 4.0.0.1803 SS; 3.0.6.33265 SS; 2.5.9.27149 SC. 
-# 'd_atabase': 'Restore', -# 'b_ackup_file': 'gtcs_sp1.fbk', -# -# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: gtcs.proc-isql-08 +TITLE: gtcs-proc-isql-08 +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROC_ISQL_08.script + SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script +FBTEST: functional.gtcs.gtcs_proc_isql_08 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory() -substitutions_1 = [('=', ''), ('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set bail on; recreate table sp ( @@ -63,11 +50,11 @@ test_script_1 = """ set count on; execute procedure proc08; - + select 'point-1' msg, p.a, p.b, p.c from proc08 p order by 2,3,4; select 'point-2' msg, max(p.c) from proc08 p; - + select 'point-3' msg, p.a from proc08 p order by 2; select 'point-4' msg, p.* from proc08 p order by p.c, p.a, p.b; @@ -78,9 +65,9 @@ test_script_1 = """ select 'point-7' msg, p.a, p.c from proc08 p where p.c < (select avg(x.c) from proc08 x) order by p.a, p.c; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('=', ''), ('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ A B C ====== ====== ============ S1 P1 300 @@ -142,9 +129,8 @@ expected_stdout_1 = """ Records affected: 3 """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/gtcs/test_gtcs_proc_isql_10.py b/tests/functional/gtcs/test_gtcs_proc_isql_10.py index f7fae5b2..0ae2a931 100644 --- a/tests/functional/gtcs/test_gtcs_proc_isql_10.py +++ b/tests/functional/gtcs/test_gtcs_proc_isql_10.py @@ -1,32 +1,22 @@ #coding:utf-8 -# -# id: functional.gtcs.gtcs_proc_isql_10 -# title: gtcs-proc-isql-10 -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROC_ISQL_10.script -# SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script -# Checked on: 4.0.0.1803 SS; 3.0.6.33265 SS; 2.5.9.27149 SC. 
-# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: gtcs.proc-isql-10 +TITLE: gtcs-proc-isql-10 +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROC_ISQL_10.script + SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script +FBTEST: functional.gtcs.gtcs_proc_isql_10 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory(from_backup='gtcs_sp1.fbk') -substitutions_1 = [('=', ''), ('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(from_backup='gtcs_sp1.fbk', init=init_script_1) - -test_script_1 = """ +test_script = """ set bail on; set term ^; create procedure proc10 returns( a varchar(20), b varchar(5), c integer) as @@ -54,9 +44,9 @@ test_script_1 = """ select 'point-7' msg, p.a, p.c from proc10 p where p.c > (select avg(x.c) from proc10 x); """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('=', ''), ('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ A B C Nut Red 12 @@ -97,9 +87,8 @@ expected_stdout_1 = """ Records affected: 1 """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/gtcs/test_gtcs_proc_isql_11.py b/tests/functional/gtcs/test_gtcs_proc_isql_11.py index 00ee1970..8b3db899 100644 --- a/tests/functional/gtcs/test_gtcs_proc_isql_11.py +++ b/tests/functional/gtcs/test_gtcs_proc_isql_11.py @@ -1,32 +1,22 @@ #coding:utf-8 -# -# id: functional.gtcs.gtcs_proc_isql_11 -# title: gtcs-proc-isql-11 -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROC_ISQL_11.script -# SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script -# Checked on: 4.0.0.1803 SS; 3.0.6.33265 SS; 2.5.9.27149 SC. 
-# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: gtcs.proc-isql-11 +TITLE: gtcs-proc-isql-11 +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROC_ISQL_11.script + SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script +FBTEST: functional.gtcs.gtcs_proc_isql_11 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory(from_backup='gtcs_sp1.fbk') -substitutions_1 = [('=', ''), ('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(from_backup='gtcs_sp1.fbk', init=init_script_1) - -test_script_1 = """ +test_script = """ set bail on; set term ^; create procedure proc11 returns (a varchar(5), b varchar(21), c integer) as @@ -50,9 +40,9 @@ test_script_1 = """ select 'point-7' msg, p.a, p.c from proc11 p where p.c > (select avg(x.c) from proc11 x); """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('=', ''), ('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ A B C P1 Nut 2 @@ -101,9 +91,8 @@ expected_stdout_1 = """ Records affected: 1 """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/gtcs/test_gtcs_proc_isql_12.py b/tests/functional/gtcs/test_gtcs_proc_isql_12.py index 0ae77de4..f13da56f 100644 --- a/tests/functional/gtcs/test_gtcs_proc_isql_12.py +++ b/tests/functional/gtcs/test_gtcs_proc_isql_12.py @@ -1,32 +1,22 @@ #coding:utf-8 -# -# id: functional.gtcs.gtcs_proc_isql_12 -# title: gtcs-proc-isql-12 -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROC_ISQL_12.script -# SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script -# Checked on: 4.0.0.1803 SS; 3.0.6.33265 SS; 2.5.9.27149 SC. 
-# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: gtcs.proc-isql-12 +TITLE: gtcs-proc-isql-12 +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROC_ISQL_12.script + SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script +FBTEST: functional.gtcs.gtcs_proc_isql_12 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory(from_backup='gtcs_sp1.fbk') -substitutions_1 = [('=', ''), ('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(from_backup='gtcs_sp1.fbk', init=init_script_1) - -test_script_1 = """ +test_script = """ set bail on; set term ^; create procedure proc12 returns (a varchar(21), b integer) as @@ -55,9 +45,9 @@ test_script_1 = """ select 'point-7' msg, p.a, p.b from proc12 p where b > (select avg(x.b) from proc12 x); """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('=', ''), ('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ A B Bolt 17 @@ -92,9 +82,8 @@ expected_stdout_1 = """ Records affected: 1 """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/gtcs/test_gtcs_proc_isql_13.py b/tests/functional/gtcs/test_gtcs_proc_isql_13.py index e77a85ef..f0eaf89f 100644 --- a/tests/functional/gtcs/test_gtcs_proc_isql_13.py +++ b/tests/functional/gtcs/test_gtcs_proc_isql_13.py @@ -1,35 +1,22 @@ #coding:utf-8 -# -# id: functional.gtcs.gtcs_proc_isql_13 -# title: gtcs-proc-isql-13 -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROC_ISQL_13.script -# SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script -# Checked on: -# 4.0.0.1803 SS: 1.822s. -# 3.0.6.33265 SS: 0.849s. -# 2.5.9.27149 SC: 0.313s. 
-# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: gtcs.proc-isql-13 +TITLE: gtcs-proc-isql-13 +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROC_ISQL_13.script + SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script +FBTEST: functional.gtcs.gtcs_proc_isql_13 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory(from_backup='gtcs_sp1.fbk') -substitutions_1 = [('=', ''), ('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(from_backup='gtcs_sp1.fbk', init=init_script_1) - -test_script_1 = """ +test_script = """ set bail on; set term ^; create procedure proc13 returns (a integer) as @@ -58,9 +45,9 @@ test_script_1 = """ """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('=', ''), ('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ A 10 @@ -113,9 +100,8 @@ expected_stdout_1 = """ point-7 30 """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/gtcs/test_gtcs_proc_isql_15.py b/tests/functional/gtcs/test_gtcs_proc_isql_15.py index 685acebf..6b8e1d36 100644 --- a/tests/functional/gtcs/test_gtcs_proc_isql_15.py +++ b/tests/functional/gtcs/test_gtcs_proc_isql_15.py @@ -1,35 +1,22 @@ #coding:utf-8 -# -# id: functional.gtcs.gtcs_proc_isql_15 -# title: gtcs-proc-isql-15 -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROC_ISQL_15.script -# SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script -# Checked on: -# 4.0.0.1803 SS: 1.822s. -# 3.0.6.33265 SS: 0.849s. -# 2.5.9.27149 SC: 0.313s. 
-# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: gtcs.proc-isql-15 +TITLE: gtcs-proc-isql-15 +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROC_ISQL_15.script + SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script +FBTEST: functional.gtcs.gtcs_proc_isql_15 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory(from_backup='gtcs_sp1.fbk') -substitutions_1 = [('={3,}', ''), ('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(from_backup='gtcs_sp1.fbk', init=init_script_1) - -test_script_1 = """ +test_script = """ set term ^; create procedure proc_insert (a char(5), b char(20), c char(6), d smallint, e char(15)) as begin @@ -42,9 +29,9 @@ test_script_1 = """ select 'point-2' msg, p.* from p; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('={3,}', ''), ('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ MSG PNO PNAME COLOR WEIGHT CITY point-1 P1 Nut Red 12 London point-1 P2 Bolt Green 17 Paris @@ -63,9 +50,8 @@ expected_stdout_1 = """ point-2 P7 Widget Pink 23 Hoboken """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/gtcs/test_gtcs_proc_isql_16.py b/tests/functional/gtcs/test_gtcs_proc_isql_16.py index c8400435..bc6106d9 100644 --- a/tests/functional/gtcs/test_gtcs_proc_isql_16.py +++ b/tests/functional/gtcs/test_gtcs_proc_isql_16.py @@ -1,35 +1,22 @@ #coding:utf-8 -# -# id: functional.gtcs.gtcs_proc_isql_16 -# title: gtcs-proc-isql-16 -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROC_ISQL_16.script -# SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script -# Checked on: -# 4.0.0.1803 SS: 1.822s. -# 3.0.6.33265 SS: 0.849s. -# 2.5.9.27149 SC: 0.313s. 
-# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: gtcs.proc-isql-16 +TITLE: gtcs-proc-isql-16 +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROC_ISQL_16.script + SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script +FBTEST: functional.gtcs.gtcs_proc_isql_16 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory(from_backup='gtcs_sp1.fbk') -substitutions_1 = [('={3,}', ''), ('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(from_backup='gtcs_sp1.fbk', init=init_script_1) - -test_script_1 = """ +test_script = """ set term ^; create procedure proc_insert (a char(5), b char(20), c char(6), d smallint, e char(15)) as begin @@ -43,9 +30,9 @@ test_script_1 = """ select 'point-2' as msg, p.* from p; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('={3,}', ''), ('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ MSG PNO PNAME COLOR WEIGHT CITY point-1 P1 Nut Red 12 London point-1 P2 Bolt Green 17 Paris @@ -65,9 +52,8 @@ expected_stdout_1 = """ point-2 P16 Varanasi """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/gtcs/test_gtcs_proc_isql_17.py b/tests/functional/gtcs/test_gtcs_proc_isql_17.py index fe584633..943751b6 100644 --- a/tests/functional/gtcs/test_gtcs_proc_isql_17.py +++ b/tests/functional/gtcs/test_gtcs_proc_isql_17.py @@ -1,35 +1,22 @@ #coding:utf-8 -# -# id: functional.gtcs.gtcs_proc_isql_17 -# title: gtcs-proc-isql-17 -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROC_ISQL_17.script -# SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script -# Checked on: -# 4.0.0.1803 SS: 1.822s. -# 3.0.6.33265 SS: 0.849s. -# 2.5.9.27149 SC: 0.313s. 
-# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: gtcs.proc-isql-17 +TITLE: gtcs-proc-isql-17 +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROC_ISQL_17.script + SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script +FBTEST: functional.gtcs.gtcs_proc_isql_17 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory(from_backup='gtcs_sp1.fbk') -substitutions_1 = [('={3,}', ''), ('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(from_backup='gtcs_sp1.fbk', init=init_script_1) - -test_script_1 = """ +test_script = """ set term ^; create procedure insert_sno (sno varchar(5)) as declare c int; @@ -44,9 +31,9 @@ test_script_1 = """ select p.* from sp p; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('={3,}', ''), ('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ SNO PNO QTY S1 P1 300 S1 P3 400 @@ -57,9 +44,8 @@ expected_stdout_1 = """ S10 """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/gtcs/test_gtcs_proc_isql_18.py b/tests/functional/gtcs/test_gtcs_proc_isql_18.py index 4f7db7a2..d1cbbdcf 100644 --- a/tests/functional/gtcs/test_gtcs_proc_isql_18.py +++ b/tests/functional/gtcs/test_gtcs_proc_isql_18.py @@ -1,35 +1,22 @@ #coding:utf-8 -# -# id: functional.gtcs.gtcs_proc_isql_18 -# title: gtcs-proc-isql-18 -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROC_ISQL_18.script -# SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script -# Checked on: -# 4.0.0.1803 SS: 1.822s. -# 3.0.6.33265 SS: 0.849s. -# 2.5.9.27149 SC: 0.313s. 
-# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: gtcs.proc-isql-18 +TITLE: gtcs-proc-isql-18 +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROC_ISQL_18.script + SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script +FBTEST: functional.gtcs.gtcs_proc_isql_18 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory(from_backup='gtcs_sp1.fbk') -substitutions_1 = [('={3,}', ''), ('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(from_backup='gtcs_sp1.fbk', init=init_script_1) - -test_script_1 = """ +test_script = """ set term ^; create procedure proc_select_insert as begin @@ -46,9 +33,9 @@ test_script_1 = """ select 'point-4' as msg, p.* from sp p; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('={3,}', ''), ('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ MSG SNO PNO QTY point-1 S1 P1 300 point-1 S1 P3 400 @@ -86,9 +73,8 @@ expected_stdout_1 = """ point-4 S5 """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/gtcs/test_gtcs_proc_isql_19.py b/tests/functional/gtcs/test_gtcs_proc_isql_19.py index 709b2c3f..fc50a2a8 100644 --- a/tests/functional/gtcs/test_gtcs_proc_isql_19.py +++ b/tests/functional/gtcs/test_gtcs_proc_isql_19.py @@ -1,38 +1,25 @@ #coding:utf-8 -# -# id: functional.gtcs.gtcs_proc_isql_19 -# title: gtcs-proc-isql-19 -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROC_ISQL_19.script -# SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script -# Checked on: -# 4.0.0.1803 SS: 1.822s. -# 3.0.6.33265 SS: 0.849s. -# 2.5.9.27149 SC: 0.313s. 
-# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: gtcs.proc-isql-19 +TITLE: gtcs-proc-isql-19 +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROC_ISQL_19.script + SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script +FBTEST: functional.gtcs.gtcs_proc_isql_19 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory(from_backup='gtcs_sp1.fbk') -substitutions_1 = [('={3,}', ''), ('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(from_backup='gtcs_sp1.fbk', init=init_script_1) - -test_script_1 = """ +test_script = """ set term ^; create procedure proc_select_insert2 as - declare variable t varchar(5); + declare variable t varchar(5); begin for select sno from s where sno not in (select sno from sp) into :t do @@ -47,9 +34,9 @@ test_script_1 = """ select 'result-2' as msg, p.* from sp p; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('={3,}', ''), ('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ MSG SNO PNO QTY result-1 S1 P1 300 result-1 S1 P3 400 @@ -69,9 +56,8 @@ expected_stdout_1 = """ result-2 S5 """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/gtcs/test_gtcs_proc_isql_20.py b/tests/functional/gtcs/test_gtcs_proc_isql_20.py index de2fd237..c8dd79c1 100644 --- a/tests/functional/gtcs/test_gtcs_proc_isql_20.py +++ b/tests/functional/gtcs/test_gtcs_proc_isql_20.py @@ -1,44 +1,31 @@ #coding:utf-8 -# -# id: functional.gtcs.gtcs_proc_isql_20 -# title: gtcs-proc-isql-20 -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROC_ISQL_20.script -# SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script -# Checked on: -# 4.0.0.1803 SS: 1.822s. -# 3.0.6.33265 SS: 0.849s. -# 2.5.9.27149 SC: 0.313s. 
-# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: gtcs.proc-isql-20 +TITLE: gtcs-proc-isql-20 +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROC_ISQL_20.script + SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script +FBTEST: functional.gtcs.gtcs_proc_isql_20 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory(from_backup='gtcs_sp1.fbk') -substitutions_1 = [('={3,}', ''), ('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(from_backup='gtcs_sp1.fbk', init=init_script_1) - -test_script_1 = """ +test_script = """ set term ^; create procedure proc_select_insert3 as declare t varchar(5); begin for - select sno - from s - where sno NOT IN (select sno from sp) - into :t + select sno + from s + where sno NOT IN (select sno from sp) + into :t do begin insert into sp(sno) values (:t); exit; @@ -51,9 +38,9 @@ test_script_1 = """ select 'result-2' as msg, p.* from sp p; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('={3,}', ''), ('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ MSG SNO PNO QTY result-1 S1 P1 300 result-1 S1 P3 400 @@ -72,9 +59,8 @@ expected_stdout_1 = """ result-2 S3 """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/gtcs/test_gtcs_select_delete_isql.py b/tests/functional/gtcs/test_gtcs_select_delete_isql.py index 50c8f04e..0314b2e5 100644 --- a/tests/functional/gtcs/test_gtcs_select_delete_isql.py +++ b/tests/functional/gtcs/test_gtcs_select_delete_isql.py @@ -1,32 +1,22 @@ #coding:utf-8 -# -# id: functional.gtcs.gtcs_select_delete_isql -# title: GTCS/tests/SELECT_DELETE_ISQL. Test for select from SP that deletes record after its output. -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/SELECT_DELETE_ISQL.script -# SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script -# Checked on: 4.0.0.1803 SS; 3.0.6.33265 SS; 2.5.9.27149 SC. 
-# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: gtcs.select-delete-isql +TITLE: Test for select from SP that deletes record after its output +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/SELECT_DELETE_ISQL.script + SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script +FBTEST: functional.gtcs.gtcs_select_delete_isql +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory(from_backup='gtcs_sp1.fbk') -substitutions_1 = [('=', ''), ('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(from_backup='gtcs_sp1.fbk', init=init_script_1) - -test_script_1 = """ +test_script = """ set bail on; set term ^; create procedure proc returns (x varchar(5)) as @@ -39,23 +29,23 @@ test_script_1 = """ end ^ create procedure proc1 returns (x varchar(5)) as - begin - for select sno from sp into :x + begin + for select sno from sp into :x do begin - suspend; - end + suspend; + end delete from s where sno = :x; end - ^ + ^ create procedure proc2 returns (x varchar(5)) as - begin - for select sno from sp into :x + begin + for select sno from sp into :x do begin delete from s where sno = :x; - suspend; - end - end - ^ + suspend; + end + end + ^ set term ;^ set count on; @@ -68,12 +58,11 @@ test_script_1 = """ rollback; select 'point-5' msg, p.* from proc2 p; rollback; - """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('=', ''), ('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ MSG X point-1 S1 point-1 S1 @@ -114,9 +103,8 @@ expected_stdout_1 = """ Records affected: 6 """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/gtcs/test_invalid_stream_when_use_trim.py b/tests/functional/gtcs/test_invalid_stream_when_use_trim.py index d6be98f5..332cf2e6 100644 --- a/tests/functional/gtcs/test_invalid_stream_when_use_trim.py +++ b/tests/functional/gtcs/test_invalid_stream_when_use_trim.py @@ -1,51 +1,39 @@ #coding:utf-8 -# -# id: functional.gtcs.invalid_stream_when_use_trim -# title: GTCS/tests/CF_ISQL_32. Statement with TRIM raises "bad BLR -- invalid stream" -# decription: -# ::: NB ::: -# ### Name of original test has no any relation with actual task of this test: ### -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_32.script -# -# Source description (Rudo Mihal, message of 2004-05-06 11:32:10; FB 1.5.1.4443): -# https://sourceforge.net/p/firebird/mailman/message/17016190/ -# -# Example for reproducing (by N. Samofatov, with UDF usage): -# https://sourceforge.net/p/firebird/mailman/message/17017012/ -# -# Checked on: 4.0.0.1804 SS; 3.0.6.33271 SS; 2.5.9.27149 SC. 
-# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: gtcs.invalid-stream-when-use-trim +TITLE: Statement with TRIM raises "bad BLR -- invalid stream" +DESCRIPTION: + ::: NB ::: + ### Name of original test has no any relation with actual task of this test: ### + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_32.script + + Source description (Rudo Mihal, message of 2004-05-06 11:32:10; FB 1.5.1.4443): + https://sourceforge.net/p/firebird/mailman/message/17016190/ + + Example for reproducing (by N. Samofatov, with UDF usage): + https://sourceforge.net/p/firebird/mailman/message/17017012/ +FBTEST: functional.gtcs.invalid_stream_when_use_trim +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory() -substitutions_1 = [('[ \t]+', ' '), ('TRIM_RESULT.*', 'TRIM_RESULT')] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set list on; select trim(TRAILING FROM (select max(rdb$relation_id) from rdb$database)) trim_result from rdb$database; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('[ \t]+', ' '), ('TRIM_RESULT.*', 'TRIM_RESULT')]) -expected_stdout_1 = """ +expected_stdout = """ TRIM_RESULT 128 """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/gtcs/test_isql_show_command_ambiguity.py b/tests/functional/gtcs/test_isql_show_command_ambiguity.py index e853bd44..eb40a124 100644 --- a/tests/functional/gtcs/test_isql_show_command_ambiguity.py +++ b/tests/functional/gtcs/test_isql_show_command_ambiguity.py @@ -1,34 +1,23 @@ #coding:utf-8 -# -# id: functional.gtcs.isql_show_command_ambiguity -# title: GTCS/tests/CF_ISQL_22. SHOW TABLE / VIEW: ambiguity between tables and views -# decription: -# ::: NB ::: -# ### Name of original test has no any relation with actual task of this test: ### -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_22.script -# -# bug #223513 ambiguity between tables and views -# -# Checked on: 4.0.0.1803 SS; 3.0.6.33265 SS; 2.5.9.27149 SC. 
-# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: gtcs.isql-show-command-ambiguity +TITLE: SHOW TABLE / VIEW: ambiguity between tables and views +DESCRIPTION: + ::: NB ::: + ### Name of original test has no any relation with actual task of this test: ### + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_22.script + + bug #223513 ambiguity between tables and views +FBTEST: functional.gtcs.isql_show_command_ambiguity +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory() -substitutions_1 = [('=', ''), ('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ create table t(a int); create view v as select a from t; show tables; @@ -39,9 +28,9 @@ test_script_1 = """ show view t; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('=', ''), ('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ T V A INTEGER Nullable @@ -50,17 +39,16 @@ expected_stdout_1 = """ View Source: select a from t """ -expected_stderr_1 = """ + +expected_stderr = """ There is no table V in this database There is no view T in this database """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.expected_stderr = expected_stderr + act.execute() + assert (act.clean_stderr == act.clean_expected_stderr and + act.clean_stdout == act.clean_expected_stdout) diff --git a/tests/functional/gtcs/test_isql_show_command_collation.py b/tests/functional/gtcs/test_isql_show_command_collation.py index 50d0320b..b3cdbea8 100644 --- a/tests/functional/gtcs/test_isql_show_command_collation.py +++ b/tests/functional/gtcs/test_isql_show_command_collation.py @@ -1,34 +1,23 @@ #coding:utf-8 -# -# id: functional.gtcs.isql_show_command_collation -# title: GTCS/tests/CF_ISQL_20. Misplaced collation when extracting metadata with isql -# decription: -# ::: NB ::: -# ### Name of original test has no any relation with actual task of this test: ### -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_20.script -# -# bug #223126 Misplaced collation when extracting metadata with isql -# -# Checked on: 4.0.0.1803 SS; 3.0.6.33265 SS; 2.5.9.27149 SC. 
-# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: gtcs.isql-show-command-collation +TITLE: Misplaced collation when extracting metadata with isql +DESCRIPTION: + ::: NB ::: + ### Name of original test has no any relation with actual task of this test: ### + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_20.script + + bug #223126 Misplaced collation when extracting metadata with isql +FBTEST: functional.gtcs.isql_show_command_collation +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory() -substitutions_1 = [('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ create domain domain_with_collate_clause as char(1) character set iso8859_1 default 'v' @@ -46,9 +35,9 @@ test_script_1 = """ show table table_with_collated_field; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ DOMAIN_WITH_COLLATE_CLAUSE CHAR(1) CHARACTER SET ISO8859_1 Nullable default 'v' check(value >='a' and value <='z') @@ -60,9 +49,8 @@ expected_stdout_1 = """ check( field_01 >= 'c' ) """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/gtcs/test_minimum_grant_test.py b/tests/functional/gtcs/test_minimum_grant_test.py index 214a1401..6dd780e5 100644 --- a/tests/functional/gtcs/test_minimum_grant_test.py +++ b/tests/functional/gtcs/test_minimum_grant_test.py @@ -1,32 +1,25 @@ #coding:utf-8 -# -# id: functional.gtcs.minimum_grant_test -# title: GTCS/tests/CF_ISQL_34. minimum-grant-test -# decription: -# ::: NB ::: -# ### Name of original test has no any relation with actual task of this test: ### -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_34.script -# -# Checked on: 4.0.0.1804 SS; 3.0.6.33271 SS; 2.5.9.27149 SC. 
-# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: gtcs.minimum-grant +TITLE: Minimum grant test +DESCRIPTION: + ::: NB ::: + ### Name of original test has no any relation with actual task of this test: ### + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_34.script +FBTEST: functional.gtcs.minimum_grant_test +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +substitutions = [('no permission for (read/select|SELECT) access.*', 'no permission for read access'), + ('no permission for (insert/write|INSERT) access.*', 'no permission for write access'), + ('[ \t]+', ' '), ('-{0,1}[ ]{0,1}Effective user is.*', '')] -substitutions_1 = [('[ \t]+', ' '), ('no permission for (read/select|SELECT) access.*', 'no permission for read access'), ('no permission for (insert/write|INSERT) access.*', 'no permission for write access'), ('-{0,1}[ ]{0,1}Effective user is.*', '')] +db = db_factory() -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set list on; set term ^; @@ -41,11 +34,11 @@ test_script_1 = """ execute statement 'drop user tmp$qa_user2' with autonomous transaction; when any do begin end end - + end^ set term ;^ commit; - + create user tmp$qa_user1 password '123'; create user tmp$qa_user2 password '456'; commit; @@ -72,14 +65,15 @@ test_script_1 = """ commit; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=substitutions) -expected_stdout_1 = """ +expected_stdout = """ WHOAMI TMP$QA_USER1 WHOAMI TMP$QA_USER2 C1 1 """ -expected_stderr_1 = """ + +expected_stderr = """ Statement failed, SQLSTATE = 28000 no permission for read/select access to TABLE TEST @@ -87,12 +81,10 @@ expected_stderr_1 = """ no permission for insert/write access to TABLE TEST """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.expected_stderr = expected_stderr + act.execute() + assert (act.clean_stderr == act.clean_expected_stderr and + act.clean_stdout == act.clean_expected_stdout) diff --git a/tests/functional/gtcs/test_parser_comments_in_sql.py b/tests/functional/gtcs/test_parser_comments_in_sql.py index 012668cc..99954d16 100644 --- a/tests/functional/gtcs/test_parser_comments_in_sql.py +++ b/tests/functional/gtcs/test_parser_comments_in_sql.py @@ -1,34 +1,23 @@ #coding:utf-8 -# -# id: functional.gtcs.parser_comments_in_sql -# title: GTCS/tests/CF_ISQL_19. Check for problems with comments (single-line and multi-line) -# decription: -# ::: NB ::: -# ### Name of original test has no any relation with actual task of this test: ### -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_19.script -# -# bug #781610 problems with one line comments (--) -# -# Checked on: 4.0.0.1803 SS; 3.0.6.33265 SS; 2.5.9.27149 SC. 
-# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: gtcs.parser-comments-in-sql +TITLE: Check for problems with comments (single-line and multi-line) +DESCRIPTION: + ::: NB ::: + ### Name of original test has no any relation with actual task of this test: ### + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_19.script + + bug #781610 problems with one line comments (--) +FBTEST: functional.gtcs.parser_comments_in_sql +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory() -substitutions_1 = [('=', ''), ('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set heading off; create table test (n integer); @@ -66,11 +55,11 @@ test_script_1 = """ */ from test; - select * + select * /* comment */ from test; - select * + select * -- comment from test; @@ -88,7 +77,7 @@ test_script_1 = """ -- single-line comment --*/ select * from test; - /* * / / * q'{ + /* * / / * q'{ BEGIN multi-line comment-1 '*/ select * from test; @@ -101,9 +90,9 @@ test_script_1 = """ """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('=', ''), ('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ 1 1 1 @@ -119,9 +108,8 @@ expected_stdout_1 = """ 1 """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/gtcs/test_ref_integ_drop_fk_index.py b/tests/functional/gtcs/test_ref_integ_drop_fk_index.py index 9e04de61..9991eda3 100644 --- a/tests/functional/gtcs/test_ref_integ_drop_fk_index.py +++ b/tests/functional/gtcs/test_ref_integ_drop_fk_index.py @@ -1,57 +1,22 @@ #coding:utf-8 -# -# id: functional.gtcs.ref_integ_drop_fk_index -# title: GTCS/tests/REF_INT.4.ISQL ; ref-integ-drop-fk-index. Index that is used for FK should not be avail for DROP. -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/REF_INT.4.ISQL.script -# Checked on: 4.0.0.1806 SS; 3.0.6.33272 CS; 2.5.9.27149 SC. 
-# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 3.0 -# qmid: None + +""" +ID: gtcs.ref-integ-drop-fk-index +TITLE: Index that is used for FK should not be avail for DROP +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/REF_INT.4.ISQL.script +FBTEST: functional.gtcs.ref_integ_drop_fk_index +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory() -substitutions_1 = [] +act = python_act('db') -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -# test_script_1 -#--- -# -# import os -# import sys -# import subprocess -# -# os.environ["ISC_USER"] = user_name -# os.environ["ISC_PASSWORD"] = user_password -# -# db_conn.close() -# -# with open( os.path.join(context['files_location'],'gtcs-ref-integ.sql'), 'r') as f: -# sql_init = f.read() -# -# sql_addi=''' -# drop index ref_key; -# commit; -# insert into employee( emp_no, last_name, dept_no) values (12, 'e12', -1); -- should FAIL -# set count on; -# select * from employee e where e.dept_no < 0; -# ''' -# -# runProgram('isql', [ dsn], os.linesep.join( (sql_init, sql_addi) ) ) -#--- -act_1 = python_act('db_1', substitutions=substitutions_1) - -expected_stderr_1 = """ +expected_stderr = """ Statement failed, SQLSTATE = 27000 unsuccessful metadata update -DROP INDEX REF_KEY failed @@ -64,13 +29,37 @@ expected_stderr_1 = """ -Problematic key value is ("DEPT_NO" = '-1') """ -expected_stdout_1 = """ +expected_stdout = """ Records affected: 0 """ +@pytest.mark.skip('FIXME: Not IMPLEMENTED') @pytest.mark.version('>=3.0') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") +# test_script_1 +#--- +# +# import os +# import sys +# import subprocess +# +# os.environ["ISC_USER"] = user_name +# os.environ["ISC_PASSWORD"] = user_password +# +# db_conn.close() +# +# with open( os.path.join(context['files_location'],'gtcs-ref-integ.sql'), 'r') as f: +# sql_init = f.read() +# +# sql_addi=''' +# drop index ref_key; +# commit; +# insert into employee( emp_no, last_name, dept_no) values (12, 'e12', -1); -- should FAIL +# set count on; +# select * from employee e where e.dept_no < 0; +# ''' +# +# runProgram('isql', [ dsn], os.linesep.join( (sql_init, sql_addi) ) ) +#--- diff --git a/tests/functional/gtcs/test_ref_integ_drop_fk_then_pk.py b/tests/functional/gtcs/test_ref_integ_drop_fk_then_pk.py index eea348b1..117dd243 100644 --- a/tests/functional/gtcs/test_ref_integ_drop_fk_then_pk.py +++ b/tests/functional/gtcs/test_ref_integ_drop_fk_then_pk.py @@ -1,44 +1,46 @@ #coding:utf-8 -# -# id: functional.gtcs.ref_integ_drop_fk_then_pk -# title: GTCS/tests/REF_INT.1.ISQL ; ref-integ-drop-fk-then-pk. Outcome must be SUCCESS if first we drop FK and after this PK constraint. -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/REF_INT.1.ISQL.script -# Checked on: 4.0.0.1806 SS; 3.0.6.33272 CS; 2.5.9.27149 SC. 
-# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 3.0 -# qmid: None + +""" +ID: gtcs.ref-integ-drop-fk-then-pk +TITLE: Outcome must be SUCCESS if first we drop FK and after this PK constraint +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/REF_INT.1.ISQL.script +FBTEST: functional.gtcs.ref_integ_drop_fk_then_pk +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory() -substitutions_1 = [] +act = python_act('db') -init_script_1 = """""" +expected_stdout = """ + Records affected: 1 + Records affected: 1 +""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +@pytest.mark.skip('FIXME: Not IMPLEMENTED') +@pytest.mark.version('>=3.0') +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") # test_script_1 #--- -# +# # import os # import sys # import subprocess -# +# # os.environ["ISC_USER"] = user_name # os.environ["ISC_PASSWORD"] = user_password -# +# # db_conn.close() -# +# # with open( os.path.join(context['files_location'],'gtcs-ref-integ.sql'), 'r') as f: # sql_init = f.read() -# +# # sql_addi=''' # alter table employee drop constraint ref_key; # alter table department drop constraint dept_key; @@ -47,19 +49,6 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # insert into department( dept_no, dept_name) values (1, 'k1'); # insert into employee( emp_no, last_name, dept_no) values (12, 'e12', -1); -- should FAIL # ''' -# +# # runProgram('isql', [ dsn], os.linesep.join( (sql_init, sql_addi) ) ) #--- -act_1 = python_act('db_1', substitutions=substitutions_1) - -expected_stdout_1 = """ - Records affected: 1 - Records affected: 1 -""" - -@pytest.mark.version('>=3.0') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - - diff --git a/tests/functional/gtcs/test_ref_integ_drop_pk_constraint.py b/tests/functional/gtcs/test_ref_integ_drop_pk_constraint.py index a01f57d7..12d717ed 100644 --- a/tests/functional/gtcs/test_ref_integ_drop_pk_constraint.py +++ b/tests/functional/gtcs/test_ref_integ_drop_pk_constraint.py @@ -1,56 +1,22 @@ #coding:utf-8 -# -# id: functional.gtcs.ref_integ_drop_pk_constraint -# title: GTCS/tests/REF_INT.2.ISQL ; ref-integ-drop-pk-constraint. Constraint of PRIMARY KEY should not be avail for DROP if there is FK that depends on it. -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/REF_INT.2.ISQL.script -# Checked on: 4.0.0.1806 SS; 3.0.6.33272 CS; 2.5.9.27149 SC. 
-# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 3.0 -# qmid: None + +""" +ID: gtcs.ref_integ_drop_pk_constraint +TITLE: Constraint of PRIMARY KEY should not be avail for DROP if there is FK that depends on it +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/REF_INT.2.ISQL.script +FBTEST: functional.gtcs.ref_integ_drop_pk_constraint +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory() -substitutions_1 = [] +act = python_act('db') -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -# test_script_1 -#--- -# -# import os -# import sys -# import subprocess -# -# os.environ["ISC_USER"] = user_name -# os.environ["ISC_PASSWORD"] = user_password -# -# db_conn.close() -# -# with open( os.path.join(context['files_location'],'gtcs-ref-integ.sql'), 'r') as f: -# sql_init = f.read() -# -# sql_addi=''' -# drop index dept_key; -# -- Check that PK index still in use: following must FAIL: -# set count on; -# insert into department( dept_no, dept_name) values (1, 'k1'); -# ''' -# -# runProgram('isql', [ dsn], os.linesep.join( (sql_init, sql_addi) ) ) -#--- -act_1 = python_act('db_1', substitutions=substitutions_1) - -expected_stderr_1 = """ +expected_stderr = """ Statement failed, SQLSTATE = 27000 unsuccessful metadata update -DROP INDEX DEPT_KEY failed @@ -62,13 +28,36 @@ expected_stderr_1 = """ -Problematic key value is ("DEPT_NO" = '1') """ -expected_stdout_1 = """ +expected_stdout = """ Records affected: 0 """ +@pytest.mark.skip('FIXME: Not IMPLEMENTED') @pytest.mark.version('>=3.0') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") +# test_script_1 +#--- +# +# import os +# import sys +# import subprocess +# +# os.environ["ISC_USER"] = user_name +# os.environ["ISC_PASSWORD"] = user_password +# +# db_conn.close() +# +# with open( os.path.join(context['files_location'],'gtcs-ref-integ.sql'), 'r') as f: +# sql_init = f.read() +# +# sql_addi=''' +# drop index dept_key; +# -- Check that PK index still in use: following must FAIL: +# set count on; +# insert into department( dept_no, dept_name) values (1, 'k1'); +# ''' +# +# runProgram('isql', [ dsn], os.linesep.join( (sql_init, sql_addi) ) ) +#--- diff --git a/tests/functional/gtcs/test_ref_integ_drop_pk_index.py b/tests/functional/gtcs/test_ref_integ_drop_pk_index.py index 62069f8f..901bf394 100644 --- a/tests/functional/gtcs/test_ref_integ_drop_pk_index.py +++ b/tests/functional/gtcs/test_ref_integ_drop_pk_index.py @@ -1,56 +1,22 @@ #coding:utf-8 -# -# id: functional.gtcs.ref_integ_drop_pk_index -# title: GTCS/tests/REF_INT.3.ISQL ; ref-integ-drop-pk-index. Index that is used for PRIMARY KEY should not be avail for DROP. -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/REF_INT.3.ISQL.script -# Checked on: 4.0.0.1806 SS; 3.0.6.33272 CS; 2.5.9.27149 SC. 
-# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 3.0 -# qmid: None + +""" +ID: gtcs.ref_integ_drop_pk_index +TITLE: Index that is used for PRIMARY KEY should not be avail for DROP +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/REF_INT.3.ISQL.script +FBTEST: functional.gtcs.ref_integ_drop_pk_index +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory() -substitutions_1 = [] +act = python_act('db') -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -# test_script_1 -#--- -# -# import os -# import sys -# import subprocess -# -# os.environ["ISC_USER"] = user_name -# os.environ["ISC_PASSWORD"] = user_password -# -# db_conn.close() -# -# with open( os.path.join(context['files_location'],'gtcs-ref-integ.sql'), 'r') as f: -# sql_init = f.read() -# -# sql_addi=''' -# drop index dept_key; -# -- Check that PK index still in use: following must FAIL: -# set count on; -# insert into department( dept_no, dept_name) values (1, 'k1'); -# ''' -# -# runProgram('isql', [ dsn], os.linesep.join( (sql_init, sql_addi) ) ) -#--- -act_1 = python_act('db_1', substitutions=substitutions_1) - -expected_stderr_1 = """ +expected_stderr = """ Statement failed, SQLSTATE = 27000 unsuccessful metadata update -DROP INDEX DEPT_KEY failed @@ -62,13 +28,36 @@ expected_stderr_1 = """ -Problematic key value is ("DEPT_NO" = '1') """ -expected_stdout_1 = """ +expected_stdout = """ Records affected: 0 """ +@pytest.mark.skip('FIXME: Not IMPLEMENTED') @pytest.mark.version('>=3.0') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") +# test_script_1 +#--- +# +# import os +# import sys +# import subprocess +# +# os.environ["ISC_USER"] = user_name +# os.environ["ISC_PASSWORD"] = user_password +# +# db_conn.close() +# +# with open( os.path.join(context['files_location'],'gtcs-ref-integ.sql'), 'r') as f: +# sql_init = f.read() +# +# sql_addi=''' +# drop index dept_key; +# -- Check that PK index still in use: following must FAIL: +# set count on; +# insert into department( dept_no, dept_name) values (1, 'k1'); +# ''' +# +# runProgram('isql', [ dsn], os.linesep.join( (sql_init, sql_addi) ) ) +#--- diff --git a/tests/functional/gtcs/test_ref_integ_inactive_fk_index.py b/tests/functional/gtcs/test_ref_integ_inactive_fk_index.py index 4f2f4b22..62fb3bcb 100644 --- a/tests/functional/gtcs/test_ref_integ_inactive_fk_index.py +++ b/tests/functional/gtcs/test_ref_integ_inactive_fk_index.py @@ -1,60 +1,22 @@ #coding:utf-8 -# -# id: functional.gtcs.ref_integ_inactive_fk_index -# title: GTCS/tests/REF_INT.8.ISQL ; ref-integ-inactive-fk-index. Index that is used for FK should not be avail for INACTIVE. -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/REF_INT.8.ISQL.script -# Checked on: 4.0.0.1806 SS; 3.0.6.33272 CS; 2.5.9.27149 SC. 
-# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 3.0 -# qmid: None + +""" +ID: gtcs.ref_integ_inactive_fk_index +TITLE: Index that is used for FK should not be avail for INACTIVE +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/REF_INT.8.ISQL.script +FBTEST: functional.gtcs.ref_integ_inactive_fk_index +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db_1 = db_factory() -substitutions_1 = [] +act_1 = python_act('db_1') -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -# test_script_1 -#--- -# -# import os -# import sys -# import subprocess -# -# os.environ["ISC_USER"] = user_name -# os.environ["ISC_PASSWORD"] = user_password -# -# db_conn.close() -# -# with open( os.path.join(context['files_location'],'gtcs-ref-integ.sql'), 'r') as f: -# sql_init = f.read() -# -# sql_addi=''' -# alter index ref_key inactive; -- should FAIL -# commit; -# -# insert into employee( emp_no, last_name, dept_no) values (11, 'e11', 1); -# insert into employee( emp_no, last_name, dept_no) values (12, 'e12', -1); -# -# set count on; -# select * from employee e where e.dept_no < 0; -# ''' -# -# runProgram('isql', [ dsn], os.linesep.join( (sql_init, sql_addi) ) ) -#--- -act_1 = python_act('db_1', substitutions=substitutions_1) - -expected_stderr_1 = """ +expected_stderr = """ Statement failed, SQLSTATE = 27000 unsuccessful metadata update -ALTER INDEX REF_KEY failed @@ -67,13 +29,40 @@ expected_stderr_1 = """ -Problematic key value is ("DEPT_NO" = '-1') """ -expected_stdout_1 = """ +expected_stdout = """ Records affected: 0 """ +@pytest.mark.skip('FIXME: Not IMPLEMENTED') @pytest.mark.version('>=3.0') -@pytest.mark.xfail def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - + pytest.fail("Not IMPLEMENTED") +# test_script_1 +#--- +# +# import os +# import sys +# import subprocess +# +# os.environ["ISC_USER"] = user_name +# os.environ["ISC_PASSWORD"] = user_password +# +# db_conn.close() +# +# with open( os.path.join(context['files_location'],'gtcs-ref-integ.sql'), 'r') as f: +# sql_init = f.read() +# +# sql_addi=''' +# alter index ref_key inactive; -- should FAIL +# commit; +# +# insert into employee( emp_no, last_name, dept_no) values (11, 'e11', 1); +# insert into employee( emp_no, last_name, dept_no) values (12, 'e12', -1); +# +# set count on; +# select * from employee e where e.dept_no < 0; +# ''' +# +# runProgram('isql', [ dsn], os.linesep.join( (sql_init, sql_addi) ) ) +#--- diff --git a/tests/functional/gtcs/test_ref_integ_inactive_pk_index.py b/tests/functional/gtcs/test_ref_integ_inactive_pk_index.py index 5d8b8206..85f1f75d 100644 --- a/tests/functional/gtcs/test_ref_integ_inactive_pk_index.py +++ b/tests/functional/gtcs/test_ref_integ_inactive_pk_index.py @@ -1,66 +1,22 @@ #coding:utf-8 -# -# id: functional.gtcs.ref_integ_inactive_pk_index -# title: GTCS/tests/REF_INT.7.ISQL ; ref-integ-inactive-pk-index. Index that is used for PRIMARY KEY should not be avail for INACTIVE. -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/REF_INT.7.ISQL.script -# Checked on: 4.0.0.1806 SS; 3.0.6.33272 CS; 2.5.9.27149 SC. 
-# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 3.0 -# qmid: None + +""" +ID: gtcs.ref_integ_inactive_pk_index +TITLE: Index that is used for PRIMARY KEY should not be avail for INACTIVE +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/REF_INT.7.ISQL.script +FBTEST: functional.gtcs.ref_integ_inactive_pk_index +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory() -substitutions_1 = [] +act = python_act('db') -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -# test_script_1 -#--- -# -# import os -# import sys -# import subprocess -# -# os.environ["ISC_USER"] = user_name -# os.environ["ISC_PASSWORD"] = user_password -# -# db_conn.close() -# -# with open( os.path.join(context['files_location'],'gtcs-ref-integ.sql'), 'r') as f: -# sql_init = f.read() -# -# sql_addi=''' -# alter index dept_key inactive; -# commit; -# -- Check that PK index still in use: following must FAIL: -# insert into department( dept_no, dept_name) values (1, 'k1'); -# -# -- Check that it is ALLOWED to insert record into child table (employee) -# -- if value of dept_no exists in the parent table (department) -# -- QUOTE FROM SOURCE TEST: -# -- "... attempts to insert valid records into another table connected -# -- to this table by foreign key constraint. The current behaviour is -# -- that the insertion of valid records fails because of the index being -# -- inactivated in the other connected table (bug 7517)" -# set count on; -# insert into employee values (11, 'e11', 1); -- ==> Records affected: 1 -# ''' -# -# runProgram('isql', [ dsn], os.linesep.join( (sql_init, sql_addi) ) ) -#--- -act_1 = python_act('db_1', substitutions=substitutions_1) - -expected_stderr_1 = """ +expected_stderr = """ Statement failed, SQLSTATE = 27000 unsuccessful metadata update -ALTER INDEX DEPT_KEY failed @@ -72,13 +28,46 @@ expected_stderr_1 = """ -Problematic key value is ("DEPT_NO" = '1') """ -expected_stdout_1 = """ +expected_stdout = """ Records affected: 1 """ +@pytest.mark.skip('FIXME: Not IMPLEMENTED') @pytest.mark.version('>=3.0') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") +# test_script_1 +#--- +# +# import os +# import sys +# import subprocess +# +# os.environ["ISC_USER"] = user_name +# os.environ["ISC_PASSWORD"] = user_password +# +# db_conn.close() +# +# with open( os.path.join(context['files_location'],'gtcs-ref-integ.sql'), 'r') as f: +# sql_init = f.read() +# +# sql_addi=''' +# alter index dept_key inactive; +# commit; +# -- Check that PK index still in use: following must FAIL: +# insert into department( dept_no, dept_name) values (1, 'k1'); +# +# -- Check that it is ALLOWED to insert record into child table (employee) +# -- if value of dept_no exists in the parent table (department) +# -- QUOTE FROM SOURCE TEST: +# -- "... attempts to insert valid records into another table connected +# -- to this table by foreign key constraint. 
The current behaviour is
+# -- that the insertion of valid records fails because of the index being
+# -- inactivated in the other connected table (bug 7517)"
+# set count on;
+# insert into employee values (11, 'e11', 1); -- ==> Records affected: 1
+# '''
+#
+# runProgram('isql', [ dsn], os.linesep.join( (sql_init, sql_addi) ) )
+#---
diff --git a/tests/functional/gtcs/test_ref_integ_inactive_pk_index_2.py b/tests/functional/gtcs/test_ref_integ_inactive_pk_index_2.py
index b44b68d4..b984ef7b 100644
--- a/tests/functional/gtcs/test_ref_integ_inactive_pk_index_2.py
+++ b/tests/functional/gtcs/test_ref_integ_inactive_pk_index_2.py
@@ -1,66 +1,30 @@
 #coding:utf-8
-#
-# id: functional.gtcs.ref_integ_inactive_pk_index_2
-# title: GTCS/tests/REF_INT.6.ISQL ; ref-integ-inactive-pk-index-2. Index that is used for PRIMARY KEY should not be avail for INACTIVE.
-# decription:
-# Original test see in:
-# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/REF_INT.7.ISQL.script
-# Checked on: 4.0.0.1806 SS; 3.0.6.33272 CS; 2.5.9.27149 SC.
-#
-# NOTE on difference from GTCS/tests/REF_INT.7.ISQL:
-# we attampt to insert into child table (employee) record which VIOLATES ref. integrity.
-# See quote from source test:
-# ====
-# attempts to insert records into another table in violation of the referential
-# integrity constraint. The current behaviour is that even though the
-# unique index has been inactivated, the insertion fails because of referential
-# integrity violation.. (bug 7517)
-# ====
-#
-# tracker_id:
-# min_versions: ['2.5.0']
-# versions: 3.0
-# qmid: None
+
+"""
+ID: gtcs.ref_integ_inactive_pk_index_2
+TITLE: Index that is used for PRIMARY KEY should not be avail for INACTIVE
+DESCRIPTION:
+ Original test see in:
+ https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/REF_INT.7.ISQL.script
+
+ NOTE on difference from GTCS/tests/REF_INT.7.ISQL:
+ we attempt to insert into the child table (employee) a record which VIOLATES ref. integrity.
+ See quote from source test:
+ attempts to insert records into another table in violation of the referential
+ integrity constraint. The current behaviour is that even though the
+ unique index has been inactivated, the insertion fails because of referential
+ integrity violation..
(bug 7517) +FBTEST: functional.gtcs.ref_integ_inactive_pk_index_2 +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory() -substitutions_1 = [] +act = python_act('db') -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -# test_script_1 -#--- -# -# import os -# import sys -# import subprocess -# -# os.environ["ISC_USER"] = user_name -# os.environ["ISC_PASSWORD"] = user_password -# -# db_conn.close() -# -# with open( os.path.join(context['files_location'],'gtcs-ref-integ.sql'), 'r') as f: -# sql_init = f.read() -# -# sql_addi=''' -# alter index dept_key inactive; -# commit; -# set count on; -# insert into employee values (11, 'e11', -1); -- ==> Records affected: 0 -# ''' -# -# runProgram('isql', [ dsn], os.linesep.join( (sql_init, sql_addi) ) ) -#--- -act_1 = python_act('db_1', substitutions=substitutions_1) - -expected_stderr_1 = """ +expected_stderr = """ Statement failed, SQLSTATE = 27000 unsuccessful metadata update -ALTER INDEX DEPT_KEY failed @@ -73,13 +37,36 @@ expected_stderr_1 = """ -Problematic key value is ("DEPT_NO" = '-1') """ -expected_stdout_1 = """ +expected_stdout = """ Records affected: 0 """ +@pytest.mark.skip('FIXME: Not IMPLEMENTED') @pytest.mark.version('>=3.0') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") +# test_script_1 +#--- +# +# import os +# import sys +# import subprocess +# +# os.environ["ISC_USER"] = user_name +# os.environ["ISC_PASSWORD"] = user_password +# +# db_conn.close() +# +# with open( os.path.join(context['files_location'],'gtcs-ref-integ.sql'), 'r') as f: +# sql_init = f.read() +# +# sql_addi=''' +# alter index dept_key inactive; +# commit; +# set count on; +# insert into employee values (11, 'e11', -1); -- ==> Records affected: 0 +# ''' +# +# runProgram('isql', [ dsn], os.linesep.join( (sql_init, sql_addi) ) ) +#--- diff --git a/tests/functional/gtcs/test_regexp_similar_to.py b/tests/functional/gtcs/test_regexp_similar_to.py index 7014a60b..3521a44c 100644 --- a/tests/functional/gtcs/test_regexp_similar_to.py +++ b/tests/functional/gtcs/test_regexp_similar_to.py @@ -1,50 +1,40 @@ #coding:utf-8 -# -# id: functional.gtcs.regexp_similar_to -# title: GTCS/tests/FB_SQL_REGEX_1, statements with SIMILAR TO. Miscelaneous tests. -# decription: -# Test creates table and fills it with unicode data to be checked (field 'str'), -# pattern for right part of SIMILAR TO expression and expected result. -# -# Then data will be verified against pattern twise: -# * without casting them to UTF8 charset; -# * with such casting. -# Checked on: 4.0.0.1789 -# -# ::: NOTE ::: -# Test parameter 'database_character_set' must be SKIPPED here! -# Comparison of non-ascii diacritical character can bring surprising result if we skip preliminary -# casting of characters to UTF8. -# For example consider character 'á' (small A with Acute, https://www.compart.com/en/unicode/U+00E1). -# 1) If we do not specify charset: -# select 'á' similar to '_' from rdb$database -- then this expression returns FALSE; -# (this is because here SIMILAR_TO works using BYTE-basis, and 'á' has 2 bytes and this don't match '_'). -# 2) but if we do this: -# select _utf8 'á' similar to '_' as test_1 from rdb$database -- then result will be TRUE. 
-# 3) all tests here do NOT check results of substring( similar to , -# see separate test for this: regexp-substring-similar_to.fbt -# -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/FB_SQL_REGEX_1.output -# -# tracker_id: -# min_versions: ['4.0.0'] -# versions: 4.0 -# qmid: None + +""" +ID: gtcs.regexp-similar-to +TITLE: Statements with SIMILAR TO - miscelaneous tests. +DESCRIPTION: + Test creates table and fills it with unicode data to be checked (field 'str'), + pattern for right part of SIMILAR TO expression and expected result. + + Then data will be verified against pattern twise: + * without casting them to UTF8 charset; + * with such casting. + Checked on: 4.0.0.1789 + + Test parameter 'database_character_set' must be SKIPPED here! + Comparison of non-ascii diacritical character can bring surprising result if we skip preliminary + casting of characters to UTF8. + For example consider character 'á' (small A with Acute, https://www.compart.com/en/unicode/U+00E1). + 1) If we do not specify charset: + select 'á' similar to '_' from rdb$database -- then this expression returns FALSE; + (this is because here SIMILAR_TO works using BYTE-basis, and 'á' has 2 bytes and this don't match '_'). + 2) but if we do this: + select _utf8 'á' similar to '_' as test_1 from rdb$database -- then result will be TRUE. + 3) all tests here do NOT check results of substring( similar to , + see separate test for this: regexp-substring-similar_to.fbt + + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/FB_SQL_REGEX_1.output +FBTEST: functional.gtcs.regexp_similar_to +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() -substitutions_1 = [('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ recreate table tests ( id integer generated by default as identity, str varchar(20), @@ -120,139 +110,138 @@ test_script_1 = """ set heading off; select 'without_cast' as msg, str, pattern, iif(str similar to pattern escape '\\', 1, 0) from tests order by id; - + select 'with_utf8_cast' as msg, str, pattern, iif(cast(str as varchar(20) character set utf8) similar to cast(pattern as varchar(20) character set utf8) escape '\\', 1, 0) from tests order by id; - + """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('[ \t]+', ' ')]) -expected_stdout_1 = """ -without_cast aa (aa){1} 1 -without_cast aa (a){1} 0 -without_cast a (a){1} 1 -without_cast A (a){1} 0 -without_cast á (a){1} 0 -without_cast Á (ã){1} 0 -without_cast aa a{1} 0 -without_cast (1|2){0,} 1 -without_cast (1|2){1,} 0 -without_cast 1 (1|2){0,} 1 -without_cast 1 (1|2){0,1} 1 -without_cast 1 (1|2){1} 1 -without_cast 12 (1|1[2]){1} 1 -without_cast 1212 (1|1[2]){3,5} 0 -without_cast 121212 (1|1[2]){3,5} 1 -without_cast 12121212 (1|1[2]){3,5} 1 -without_cast 1212121212 (1|1[2]){3,5} 1 -without_cast 121212121212 (1|1[2]){3,5} 0 -without_cast á _ 0 -without_cast 1 [1-53-7] 1 +expected_stdout = """ +without_cast aa (aa){1} 1 +without_cast aa (a){1} 0 +without_cast a (a){1} 1 +without_cast A (a){1} 0 +without_cast á (a){1} 0 +without_cast Á (ã){1} 0 +without_cast aa a{1} 0 +without_cast (1|2){0,} 1 +without_cast (1|2){1,} 0 +without_cast 1 (1|2){0,} 1 +without_cast 1 (1|2){0,1} 1 +without_cast 1 (1|2){1} 1 +without_cast 12 (1|1[2]){1} 1 
+without_cast 1212 (1|1[2]){3,5} 0 +without_cast 121212 (1|1[2]){3,5} 1 +without_cast 12121212 (1|1[2]){3,5} 1 +without_cast 1212121212 (1|1[2]){3,5} 1 +without_cast 121212121212 (1|1[2]){3,5} 0 +without_cast á _ 0 +without_cast 1 [1-53-7] 1 -without_cast 2 [1-53-7] 1 -without_cast 4 [1-53-7] 1 -without_cast 6 [1-53-7] 1 -without_cast 8 [1-53-7] 0 -without_cast 1 [1-53-78-0] 1 -without_cast 2 [1-53-78-0] 1 -without_cast 4 [1-53-78-0] 1 -without_cast 6 [1-53-78-0] 1 -without_cast 8 [1-53-78-0] 0 -without_cast 0 [8-0] 0 -without_cast 1 [8-0] 0 -without_cast 8 [8-0] 0 -without_cast 9 [8-0] 0 -without_cast 0 [8-09-0] 0 -without_cast 1 [8-09-0] 0 -without_cast 8 [8-09-0] 0 -without_cast 9 [8-09-0] 0 -without_cast 1 [1-53-7^4] 1 -without_cast 2 [1-53-7^4] 1 -without_cast 4 [1-53-7^4] 0 +without_cast 2 [1-53-7] 1 +without_cast 4 [1-53-7] 1 +without_cast 6 [1-53-7] 1 +without_cast 8 [1-53-7] 0 +without_cast 1 [1-53-78-0] 1 +without_cast 2 [1-53-78-0] 1 +without_cast 4 [1-53-78-0] 1 +without_cast 6 [1-53-78-0] 1 +without_cast 8 [1-53-78-0] 0 +without_cast 0 [8-0] 0 +without_cast 1 [8-0] 0 +without_cast 8 [8-0] 0 +without_cast 9 [8-0] 0 +without_cast 0 [8-09-0] 0 +without_cast 1 [8-09-0] 0 +without_cast 8 [8-09-0] 0 +without_cast 9 [8-09-0] 0 +without_cast 1 [1-53-7^4] 1 +without_cast 2 [1-53-7^4] 1 +without_cast 4 [1-53-7^4] 0 -without_cast 6 [1-53-7^4] 1 -without_cast 8 [1-53-7^4] 0 -without_cast 1 [1-53-7^2-5] 1 -without_cast 2 [1-53-7^2-5] 0 -without_cast 4 [1-53-7^2-5] 0 -without_cast 6 [1-53-7^2-5] 1 -without_cast 8 [1-53-7^2-5] 0 -without_cast 1 [1-53-7^2-53-6] 1 -without_cast 2 [1-53-7^2-53-6] 0 -without_cast 4 [1-53-7^2-53-6] 0 -without_cast 6 [1-53-7^2-53-6] 0 -without_cast 8 [1-53-7^2-53-6] 0 -without_cast 1 [1-53-7^5-2] 1 -without_cast 2 [1-53-7^5-2] 1 -without_cast 4 [1-53-7^5-2] 1 -without_cast 6 [1-53-7^5-2] 1 -without_cast 8 [1-53-7^5-2] 0 +without_cast 6 [1-53-7^4] 1 +without_cast 8 [1-53-7^4] 0 +without_cast 1 [1-53-7^2-5] 1 +without_cast 2 [1-53-7^2-5] 0 +without_cast 4 [1-53-7^2-5] 0 +without_cast 6 [1-53-7^2-5] 1 +without_cast 8 [1-53-7^2-5] 0 +without_cast 1 [1-53-7^2-53-6] 1 +without_cast 2 [1-53-7^2-53-6] 0 +without_cast 4 [1-53-7^2-53-6] 0 +without_cast 6 [1-53-7^2-53-6] 0 +without_cast 8 [1-53-7^2-53-6] 0 +without_cast 1 [1-53-7^5-2] 1 +without_cast 2 [1-53-7^5-2] 1 +without_cast 4 [1-53-7^5-2] 1 +without_cast 6 [1-53-7^5-2] 1 +without_cast 8 [1-53-7^5-2] 0 -with_utf8_cast aa (aa){1} 1 -with_utf8_cast aa (a){1} 0 -with_utf8_cast a (a){1} 1 -with_utf8_cast A (a){1} 0 -with_utf8_cast á (a){1} 0 -with_utf8_cast Á (ã){1} 0 -with_utf8_cast aa a{1} 0 -with_utf8_cast (1|2){0,} 1 -with_utf8_cast (1|2){1,} 0 -with_utf8_cast 1 (1|2){0,} 1 -with_utf8_cast 1 (1|2){0,1} 1 -with_utf8_cast 1 (1|2){1} 1 -with_utf8_cast 12 (1|1[2]){1} 1 -with_utf8_cast 1212 (1|1[2]){3,5} 0 -with_utf8_cast 121212 (1|1[2]){3,5} 1 -with_utf8_cast 12121212 (1|1[2]){3,5} 1 -with_utf8_cast 1212121212 (1|1[2]){3,5} 1 -with_utf8_cast 121212121212 (1|1[2]){3,5} 0 -with_utf8_cast á _ 1 -with_utf8_cast 1 [1-53-7] 1 +with_utf8_cast aa (aa){1} 1 +with_utf8_cast aa (a){1} 0 +with_utf8_cast a (a){1} 1 +with_utf8_cast A (a){1} 0 +with_utf8_cast á (a){1} 0 +with_utf8_cast Á (ã){1} 0 +with_utf8_cast aa a{1} 0 +with_utf8_cast (1|2){0,} 1 +with_utf8_cast (1|2){1,} 0 +with_utf8_cast 1 (1|2){0,} 1 +with_utf8_cast 1 (1|2){0,1} 1 +with_utf8_cast 1 (1|2){1} 1 +with_utf8_cast 12 (1|1[2]){1} 1 +with_utf8_cast 1212 (1|1[2]){3,5} 0 +with_utf8_cast 121212 (1|1[2]){3,5} 1 +with_utf8_cast 12121212 (1|1[2]){3,5} 1 +with_utf8_cast 
1212121212 (1|1[2]){3,5} 1 +with_utf8_cast 121212121212 (1|1[2]){3,5} 0 +with_utf8_cast á _ 1 +with_utf8_cast 1 [1-53-7] 1 -with_utf8_cast 2 [1-53-7] 1 -with_utf8_cast 4 [1-53-7] 1 -with_utf8_cast 6 [1-53-7] 1 -with_utf8_cast 8 [1-53-7] 0 -with_utf8_cast 1 [1-53-78-0] 1 -with_utf8_cast 2 [1-53-78-0] 1 -with_utf8_cast 4 [1-53-78-0] 1 -with_utf8_cast 6 [1-53-78-0] 1 -with_utf8_cast 8 [1-53-78-0] 0 -with_utf8_cast 0 [8-0] 0 -with_utf8_cast 1 [8-0] 0 -with_utf8_cast 8 [8-0] 0 -with_utf8_cast 9 [8-0] 0 -with_utf8_cast 0 [8-09-0] 0 -with_utf8_cast 1 [8-09-0] 0 -with_utf8_cast 8 [8-09-0] 0 -with_utf8_cast 9 [8-09-0] 0 -with_utf8_cast 1 [1-53-7^4] 1 -with_utf8_cast 2 [1-53-7^4] 1 -with_utf8_cast 4 [1-53-7^4] 0 +with_utf8_cast 2 [1-53-7] 1 +with_utf8_cast 4 [1-53-7] 1 +with_utf8_cast 6 [1-53-7] 1 +with_utf8_cast 8 [1-53-7] 0 +with_utf8_cast 1 [1-53-78-0] 1 +with_utf8_cast 2 [1-53-78-0] 1 +with_utf8_cast 4 [1-53-78-0] 1 +with_utf8_cast 6 [1-53-78-0] 1 +with_utf8_cast 8 [1-53-78-0] 0 +with_utf8_cast 0 [8-0] 0 +with_utf8_cast 1 [8-0] 0 +with_utf8_cast 8 [8-0] 0 +with_utf8_cast 9 [8-0] 0 +with_utf8_cast 0 [8-09-0] 0 +with_utf8_cast 1 [8-09-0] 0 +with_utf8_cast 8 [8-09-0] 0 +with_utf8_cast 9 [8-09-0] 0 +with_utf8_cast 1 [1-53-7^4] 1 +with_utf8_cast 2 [1-53-7^4] 1 +with_utf8_cast 4 [1-53-7^4] 0 -with_utf8_cast 6 [1-53-7^4] 1 -with_utf8_cast 8 [1-53-7^4] 0 -with_utf8_cast 1 [1-53-7^2-5] 1 -with_utf8_cast 2 [1-53-7^2-5] 0 -with_utf8_cast 4 [1-53-7^2-5] 0 -with_utf8_cast 6 [1-53-7^2-5] 1 -with_utf8_cast 8 [1-53-7^2-5] 0 -with_utf8_cast 1 [1-53-7^2-53-6] 1 -with_utf8_cast 2 [1-53-7^2-53-6] 0 -with_utf8_cast 4 [1-53-7^2-53-6] 0 -with_utf8_cast 6 [1-53-7^2-53-6] 0 -with_utf8_cast 8 [1-53-7^2-53-6] 0 -with_utf8_cast 1 [1-53-7^5-2] 1 -with_utf8_cast 2 [1-53-7^5-2] 1 -with_utf8_cast 4 [1-53-7^5-2] 1 -with_utf8_cast 6 [1-53-7^5-2] 1 -with_utf8_cast 8 [1-53-7^5-2] 0 +with_utf8_cast 6 [1-53-7^4] 1 +with_utf8_cast 8 [1-53-7^4] 0 +with_utf8_cast 1 [1-53-7^2-5] 1 +with_utf8_cast 2 [1-53-7^2-5] 0 +with_utf8_cast 4 [1-53-7^2-5] 0 +with_utf8_cast 6 [1-53-7^2-5] 1 +with_utf8_cast 8 [1-53-7^2-5] 0 +with_utf8_cast 1 [1-53-7^2-53-6] 1 +with_utf8_cast 2 [1-53-7^2-53-6] 0 +with_utf8_cast 4 [1-53-7^2-53-6] 0 +with_utf8_cast 6 [1-53-7^2-53-6] 0 +with_utf8_cast 8 [1-53-7^2-53-6] 0 +with_utf8_cast 1 [1-53-7^5-2] 1 +with_utf8_cast 2 [1-53-7^5-2] 1 +with_utf8_cast 4 [1-53-7^5-2] 1 +with_utf8_cast 6 [1-53-7^5-2] 1 +with_utf8_cast 8 [1-53-7^5-2] 0 """ @pytest.mark.version('>=4.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/gtcs/test_regexp_substring_similar_to.py b/tests/functional/gtcs/test_regexp_substring_similar_to.py index 7997b0d2..01ef9280 100644 --- a/tests/functional/gtcs/test_regexp_substring_similar_to.py +++ b/tests/functional/gtcs/test_regexp_substring_similar_to.py @@ -1,35 +1,24 @@ #coding:utf-8 -# -# id: functional.gtcs.regexp_substring_similar_to -# title: GTCS/tests/FB_SQL_REGEX_1 SUBSTRING ; Miscelaneous tests of SIMILAR TO . -# decription: -# Test creates table and fills it with unicode data to be checked (field 'str'), pattern and -# performs output of SUBSTRING( SIMILAR ). -# Also, some additional examples presents for other checks. 
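A minimal standalone sketch of the charset point made in the gtcs.regexp-similar-to DESCRIPTION above, using the same firebird-qa fixtures (db_factory / isql_act) as the rest of this patch. Module and alias names are illustrative, not part of the original change set; the two results (0 and 1) are the ones the DESCRIPTION itself states for its setup, and, as noted there, the first one depends on the connection/database character set:

import pytest
from firebird.qa import *

db = db_factory()

test_script = """
    set list on;
    -- 'á' is two bytes in UTF-8; without a character set the comparison is done
    -- byte-wise, so the single-character wildcard '_' does not match:
    select iif('á' similar to '_', 1, 0) as no_introducer from rdb$database;
    -- with the _utf8 introducer the literal is one character and '_' matches:
    select iif(_utf8 'á' similar to '_', 1, 0) as utf8_introducer from rdb$database;
"""

act = isql_act('db', test_script, substitutions=[('[ \t]+', ' ')])

expected_stdout = """
    NO_INTRODUCER 0
    UTF8_INTRODUCER 1
"""

@pytest.mark.version('>=4.0')
def test_1(act: Action):
    act.expected_stdout = expected_stdout
    act.execute()
    assert act.clean_stdout == act.clean_expected_stdout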
-# Checked on: 4.0.0.1789 -# -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/FB_SQL_REGEX_1.script -# -# -# tracker_id: -# min_versions: ['4.0.0'] -# versions: 4.0 -# qmid: None + +""" +ID: gtcs.regexp-substring-similar-to +TITLE: SUBSTRING - miscelaneous tests of SIMILAR TO +DESCRIPTION: + Test creates table and fills it with unicode data to be checked (field 'str'), pattern and + performs output of SUBSTRING( SIMILAR ). + Also, some additional examples presents for other checks. + + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/FB_SQL_REGEX_1.script +FBTEST: functional.gtcs.regexp_substring_similar_to +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() -substitutions_1 = [('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ recreate table tests ( id integer generated by default as identity, str varchar(20), @@ -46,7 +35,7 @@ test_script_1 = """ insert into tests(id, str, pattern, expected) values ( gen_id(g,1), 'abc123abc456', '\\"%\\"abc%7', null); commit; select id, substring(str similar pattern escape '\\'), expected from tests; - + select gen_id(g,1), substring('(12) 3456-7890' similar '\\(__\\) \\"%\\-%\\"' escape '\\') from rdb$database; -- 3.0.6: invalid pattern select gen_id(g,1), substring('abc123abc456' similar '\\"%\\"abc%6' escape '\\') from rdb$database; select gen_id(g,1), substring('abc123abc456' similar '\\"%\\"abc%7' escape '\\') from rdb$database; @@ -60,30 +49,29 @@ test_script_1 = """ select gen_id(g,1), cast(substring(cast(_utf8 'aaaЫxЫЫccc' as varchar(10) character set win1251) similar '%aaa#"%#"ccc%' escape '#') as varchar(10) character set utf8) from rdb$database; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('[ \t]+', ' ')]) + +expected_stdout = """ + 1 3456-7890 3456-7890 + 2 + 3 abc123 abc123 + 4 + + 5 3456-7890 + 6 abc123 + 7 + + 8 12.34 + 9 5 + 10 + 11 ЫxЫЫ + 12 ЫxЫЫ + 13 ЫxЫЫ -expected_stdout_1 = """ - 1 3456-7890 3456-7890 - 2 - 3 abc123 abc123 - 4 - - 5 3456-7890 - 6 abc123 - 7 - - 8 12.34 - 9 5 - 10 - 11 ЫxЫЫ - 12 ЫxЫЫ - 13 ЫxЫЫ - """ @pytest.mark.version('>=4.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/gtcs/test_sql_join_03.py b/tests/functional/gtcs/test_sql_join_03.py index edfffb34..cfb0ab50 100644 --- a/tests/functional/gtcs/test_sql_join_03.py +++ b/tests/functional/gtcs/test_sql_join_03.py @@ -1,32 +1,22 @@ #coding:utf-8 -# -# id: functional.gtcs.sql_join_03 -# title: GTCS/tests/C_SQL_JOIN_3. Ability to run query: ( A LEFT JOIN B ) INER JOIN C, plus ORDER BY with fields not from SELECT list. 
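In the gtcs.regexp-substring-similar-to script above, the escaped double quotes in the patterns delimit the part of the match that SUBSTRING(... SIMILAR ...) returns. A small sketch in the same firebird-qa style (names illustrative), isolating two statements and results that already appear in that test's script and expected output:

import pytest
from firebird.qa import *

db = db_factory()

# Backslashes are doubled only because the script is embedded in a Python string.
test_script = """
    set list on;
    select substring('(12) 3456-7890' similar '\\(__\\) \\"%\\-%\\"' escape '\\') as phone_tail
    from rdb$database;
    select substring('abc123abc456' similar '\\"%\\"abc%6' escape '\\') as leading_part
    from rdb$database;
"""

act = isql_act('db', test_script, substitutions=[('[ \t]+', ' ')])

expected_stdout = """
    PHONE_TAIL 3456-7890
    LEADING_PART abc123
"""

@pytest.mark.version('>=4.0')
def test_1(act: Action):
    act.expected_stdout = expected_stdout
    act.execute()
    assert act.clean_stdout == act.clean_expected_stdout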
-# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/C_SQL_JOIN_3.script -# Original backup file that is used for this test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/test-files/atlas.gbk -# Checked on 4.0.0.1896; 3.0.6.33288; 2.5.9.27149 -# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: gtcs.sql-join-03 +TITLE: Ability to run query: ( A LEFT JOIN B ) INER JOIN C, plus ORDER BY with fields not from SELECT list +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/C_SQL_JOIN_3.script + Original backup file that is used for this test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/test-files/atlas.gbk +FBTEST: functional.gtcs.sql_join_03 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory(from_backup='gtcs_atlas.fbk') -substitutions_1 = [('=', ''), ('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(from_backup='gtcs_atlas.fbk', init=init_script_1) - -test_script_1 = """ +test_script = """ select 'DSQL-test' as msg, b.team_name, c.city, s.state_name from ( @@ -68,9 +58,9 @@ test_script_1 = """ """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('=', ''), ('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ MSG TEAM_NAME CITY STATE_NAME DSQL-test Astros Houston Texas DSQL-test Braves Atlanta Georgia @@ -112,9 +102,8 @@ expected_stdout_1 = """ PSQL-test Yankees New York New York """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/gtcs/test_sql_join_04.py b/tests/functional/gtcs/test_sql_join_04.py index e663ae5f..8b04570f 100644 --- a/tests/functional/gtcs/test_sql_join_04.py +++ b/tests/functional/gtcs/test_sql_join_04.py @@ -1,37 +1,26 @@ #coding:utf-8 -# -# id: functional.gtcs.sql_join_04 -# title: GTCS/tests/C_SQL_JOIN_4. Ability to run query: ( A LEFT JOIN B ) , C. Then add ORDER BY with fields not from SELECT list. -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/C_SQL_JOIN_3.script -# Original backup file that is used for this test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/test-files/atlas.gbk -# -# NOTE: cross join here can be specified in IMPLICIT FORM, using ",' instead of "cross join" clause. -# Though this leads to very poor readability, this form is still allowed. -# Because of WHERE-clause, resultset must be the same as it was INNER join here. -# -# Checked on 4.0.0.1896; 3.0.6.33288; 2.5.9.27149 -# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: gtcs.sql-join-04 +TITLE: Ability to run query: ( A LEFT JOIN B ) , C. 
Then add ORDER BY with fields not from SELECT list +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/C_SQL_JOIN_3.script + Original backup file that is used for this test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/test-files/atlas.gbk + + NOTE: cross join here can be specified in IMPLICIT FORM, using ",' instead of "cross join" clause. + Though this leads to very poor readability, this form is still allowed. + Because of WHERE-clause, resultset must be the same as it was INNER join here. +FBTEST: functional.gtcs.sql_join_04 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory(from_backup='gtcs_atlas.fbk') -substitutions_1 = [('=', ''), ('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(from_backup='gtcs_atlas.fbk', init=init_script_1) - -test_script_1 = """ +test_script = """ select 'DSQL-test' as msg, b.team_name, c.city, s.state_name from ( @@ -73,9 +62,9 @@ test_script_1 = """ """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('=', ''), ('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ MSG TEAM_NAME CITY STATE_NAME DSQL-test Astros Houston Texas DSQL-test Braves Atlanta Georgia @@ -117,9 +106,8 @@ expected_stdout_1 = """ PSQL-test Yankees New York New York """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/gtcs/test_time_zone.py b/tests/functional/gtcs/test_time_zone.py index e5f3979d..c5736d0c 100644 --- a/tests/functional/gtcs/test_time_zone.py +++ b/tests/functional/gtcs/test_time_zone.py @@ -1,48 +1,38 @@ #coding:utf-8 -# -# id: functional.gtcs.time_zone -# title: GTCS/tests/FB_SQL_TIME_ZONE. Miscelaneous tests. -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/FB_SQL_TIME_ZONE.script -# Checked on 4.0.0.1931. -# 05.05.2020: added block for CORE-6271 (from GTCS). Checked on 4.0.0.1954. -# -# 28.10.2020 -# Old code was completely replaced by source from GTCS. -# It was changed to meet new requirements to format of timezone offset: -# 1) it must include SIGN, i.e. + or -; -# 2) in must contain BOTH hours and minutes delimited by colon. -# -# This means that following (old) statements will fail with SQLSTATE = 22009: -# * set time zone '00:00' -# Invalid time zone region: 00:00 -# (because of missed sign "+") -# -# * ... datediff(hour from timestamp '... -03' to timestamp '... -03') -# Invalid time zone offset: -03 - must use format +/-hours:minutes and be between -14:00 and +14:00 -# -# See: https://github.com/FirebirdSQL/firebird/commit/ff37d445ce844f991242b1e2c1f96b80a5d1636d -# Checked on 4.0.0.2238 -# -# tracker_id: -# min_versions: ['4.0.0'] -# versions: 4.0 -# qmid: None + +""" +ID: gtcs.time-zone +TITLE: Miscelaneous time zone tests +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/FB_SQL_TIME_ZONE.script + Checked on 4.0.0.1931. +NOTES: +[05.05.2020] added block for CORE-6271 (from GTCS). Checked on 4.0.0.1954. +[28.10.2020] + Old code was completely replaced by source from GTCS. 
+ It was changed to meet new requirements to format of timezone offset: + 1) it must include SIGN, i.e. + or -; + 2) in must contain BOTH hours and minutes delimited by colon. + + This means that following (old) statements will fail with SQLSTATE = 22009: + * set time zone '00:00' + Invalid time zone region: 00:00 + (because of missed sign "+") + + * ... datediff(hour from timestamp '... -03' to timestamp '... -03') + Invalid time zone offset: -03 - must use format +/-hours:minutes and be between -14:00 and +14:00 + + See: https://github.com/FirebirdSQL/firebird/commit/ff37d445ce844f991242b1e2c1f96b80a5d1636d +FBTEST: functional.gtcs.time_zone +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() -substitutions_1 = [('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set list on; set time zone '+00:00'; @@ -173,7 +163,7 @@ test_script_1 = """ --- - /* + /* 28.10.2020: ... datediff(hour from timestamp '... -03' to timestamp '... -03') Statement failed, SQLSTATE = 22009 @@ -687,9 +677,9 @@ test_script_1 = """ set term ;^ """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ CAST 01:23:45.0000 +00:00 CAST 2018-01-01 01:23:45.0000 +00:00 EXTRACT 0 @@ -1292,7 +1282,8 @@ expected_stdout_1 = """ END_TZH -8 END_TZM 0 """ -expected_stderr_1 = """ + +expected_stderr = """ Statement failed, SQLSTATE = 22018 conversion error from string "01:23:45.0000 -03:00" Statement failed, SQLSTATE = 22018 @@ -1321,11 +1312,9 @@ expected_stderr_1 = """ """ @pytest.mark.version('>=4.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.expected_stderr = expected_stderr + act.execute() + assert (act.clean_stderr == act.clean_expected_stderr and + act.clean_stdout == act.clean_expected_stdout) diff --git a/tests/functional/gtcs/test_transactions_autocommit_1.py b/tests/functional/gtcs/test_transactions_autocommit_1.py index 20ae0610..a05ebe1b 100644 --- a/tests/functional/gtcs/test_transactions_autocommit_1.py +++ b/tests/functional/gtcs/test_transactions_autocommit_1.py @@ -1,77 +1,72 @@ #coding:utf-8 -# -# id: functional.gtcs.transactions_autocommit_1 -# title: GTCS/tests/AUTO_COMMIT.1.ESQL. AUTO COMMIT must preserve changes that were made by all DML even if ROLLBACK is issued. -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/AUTO_COMMIT.1.ESQL.script -# -# Test creates three tables (test_1, test_2 and test_3) and AI-trigger for one of them (test_1). -# This trigger does INSERTs into test_2 and test_3. -# -# Then we add record into test_1 and after this INSERT its trigger add records into test_2 and test_3. -# After this we make transaction ROLLED BACK and check how many records are preserved. -# Expected result: each of three tables must have one record in itself, i.e. result looks like we did COMMIT rather than ROLLBACK. -# -# NB: we use custom TPB with fdb.isc_tpb_autocommit in order to start DML transactions in AUTOCOMMIT=1 mode. 
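A focused sketch of the time zone offset-format rule described in the gtcs.time-zone NOTES above, again in firebird-qa style with illustrative names; the SQLSTATE and error text are the ones quoted in those NOTES:

import pytest
from firebird.qa import *

db = db_factory()

test_script = """
    set list on;
    set time zone '+00:00'; -- accepted: sign plus hours and minutes
    set time zone '00:00';  -- rejected: sign is missing, so it is parsed as a region name
"""

act = isql_act('db', test_script, substitutions=[('[ \t]+', ' ')])

expected_stderr = """
    Statement failed, SQLSTATE = 22009
    Invalid time zone region: 00:00
"""

@pytest.mark.version('>=4.0')
def test_1(act: Action):
    act.expected_stderr = expected_stderr
    act.execute()
    assert act.clean_stderr == act.clean_expected_stderr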
-# Checked on: -# 4.0.0.1812 SS: 2.344s. -# 4.0.0.1767 SC: 2.339s. -# 4.0.0.1810 SS: 2.151s. -# 3.0.6.33273 SS: 1.023s. -# 3.0.6.33240 SC: 1.301s. -# 3.0.6.33247 CS: 2.598s. -# 2.5.6.27020 SS: 3.156s. -# 2.5.9.27149 SC: 0.323s. -# 2.5.9.27143 CS: 1.159s. -# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: gtcs.transactions-autocommit-01 +TITLE: AUTO COMMIT must preserve changes that were made by all DML even if ROLLBACK is issued +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/AUTO_COMMIT.1.ESQL.script + + Test creates three tables (test_1, test_2 and test_3) and AI-trigger for one of them (test_1). + This trigger does INSERTs into test_2 and test_3. + + Then we add record into test_1 and after this INSERT its trigger add records into test_2 and test_3. + After this we make transaction ROLLED BACK and check how many records are preserved. + Expected result: each of three tables must have one record in itself, i.e. result looks like we did COMMIT rather than ROLLBACK. + + NB: we use custom TPB with fdb.isc_tpb_autocommit in order to start DML transactions in AUTOCOMMIT=1 mode. +FBTEST: functional.gtcs.transactions_autocommit_1 +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory() -substitutions_1 = [('[ \t]+', ' ')] +act = python_act('db', substitutions=[('[ \t]+', ' ')]) -init_script_1 = """""" +expected_stdout = """ + mon$auto_commit: 1 + test_1 777 + test_2 5439 + test_3 603729 +""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +@pytest.mark.skip('FIXME: Not IMPLEMENTED') +@pytest.mark.version('>=3') +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") # test_script_1 #--- -# +# # import os # import sys # import subprocess # import inspect # import time -# +# # os.environ["ISC_USER"] = user_name # os.environ["ISC_PASSWORD"] = user_password # db_conn.close() -# +# # #-------------------------------------------- -# +# # def flush_and_close( file_handle ): # # https://docs.python.org/2/library/os.html#os.fsync -# # If you're starting with a Python file object f, -# # first do f.flush(), and +# # If you're starting with a Python file object f, +# # first do f.flush(), and # # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. # global os -# +# # file_handle.flush() # if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: # # otherwise: "OSError: [Errno 9] Bad file descriptor"! 
# os.fsync(file_handle.fileno()) # file_handle.close() -# +# # #-------------------------------------------- -# +# # def cleanup( f_names_list ): # global os # for f in f_names_list: @@ -82,12 +77,12 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # else: # print('Unrecognized type of element:', f, ' - can not be treated as file.') # del_name = None -# +# # if del_name and os.path.isfile( del_name ): # os.remove( del_name ) -# +# # #-------------------------------------------- -# +# # sql_init=''' # set bail on; # recreate table test_1 (x integer); @@ -103,69 +98,54 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # set term ;^ # commit; # ''' -# +# # f_init_sql = open( os.path.join(context['temp_directory'],'tmp_gtcs_tx_ac2.sql'), 'w', buffering = 0) # f_init_sql.write( sql_init ) # flush_and_close( f_init_sql ) -# +# # f_init_log = open( '.'.join( (os.path.splitext( f_init_sql.name )[0], 'log') ), 'w', buffering = 0) # f_init_err = open( '.'.join( (os.path.splitext( f_init_sql.name )[0], 'err') ), 'w', buffering = 0) -# +# # subprocess.call( [ context['isql_path'], dsn, '-q', '-i', f_init_sql.name ], stdout = f_init_log, stderr = f_init_err) -# +# # flush_and_close( f_init_log ) # flush_and_close( f_init_err ) -# +# # CUSTOM_TX_PARAMS = ( [ fdb.isc_tpb_nowait, fdb.isc_tpb_autocommit ] ) -# +# # con1 = fdb.connect( dsn = dsn ) # tra1 = con1.trans( default_tpb = CUSTOM_TX_PARAMS ) -# +# # tra1.begin() # cur1=tra1.cursor() -# +# # cur1.execute('select mon$auto_commit from mon$transactions where mon$transaction_id = current_transaction') # for r in cur1: # print( 'mon$auto_commit:', r[0] ) -# +# # cur1.execute( 'insert into test_1 values(?)', ( 777,) ) -# +# # #---------------------- R O L L B A C K ----------------------- -# +# # tra1.rollback() # cur1.close() # con1.close() -# +# # #---------------------- R E C O N N E C T ----------------------- -# +# # con2 = fdb.connect( dsn = dsn ) # cur2=con2.cursor() # cur2.execute("select 'test_1' tab_name, x from test_1 union all select 'test_2', x from test_2 union all select 'test_3', x from test_3") # # Here we must see records from ALL THREE tables. # for r in cur2: # print( r[0], r[1] ) -# +# # cur2.close() # con2.close() -# +# # # cleanup: # ########## # time.sleep(1) # cleanup( ( f_init_sql, f_init_log, f_init_err) ) -# +# #--- -act_1 = python_act('db_1', substitutions=substitutions_1) - -expected_stdout_1 = """ - mon$auto_commit: 1 - test_1 777 - test_2 5439 - test_3 603729 -""" - -@pytest.mark.version('>=2.5') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - - diff --git a/tests/functional/gtcs/test_transactions_autocommit_2.py b/tests/functional/gtcs/test_transactions_autocommit_2.py index 6687219b..094b649f 100644 --- a/tests/functional/gtcs/test_transactions_autocommit_2.py +++ b/tests/functional/gtcs/test_transactions_autocommit_2.py @@ -1,80 +1,74 @@ #coding:utf-8 -# -# id: functional.gtcs.transactions_autocommit_2 -# title: GTCS/tests/AUTO_COMMIT.2.ESQL. Changes within AUTO COMMIT must be cancelled when exception raises in some TRIGGER. -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/AUTO_COMMIT.2.ESQL.script -# -# Test creates three tables (test_1, test_2 and test_3) and AI-trigger for one of them (test_1). -# This trigger does INSERTs into test_2 and test_3. -# For test_3 we create UNIQUE index that will prevent from insertion of duplicates. -# Then we add one record into test_3 with value = 1000. 
-# Finally, we try to add record into test_1 and after this INSERT its trigger attempts to add records, -# into test_2 and test_3. The latter will fail because of UK violation (we try to insert apropriate value -# into test-1 in order this exception be raised). -# Expected result: NONE of just performed INSERTS must be saved in DB. The only existing record must be -# in the table test_3 that we added there on initial phase. -# -# NB: we use custom TPB with fdb.isc_tpb_autocommit in order to start DML transactions in AUTOCOMMIT=1 mode. -# Checked on: -# 4.0.0.1812 SS: 2.054s. -# 4.0.0.1767 SC: 1.893s. -# 4.0.0.1810 SS: 1.922s. -# 3.0.6.33273 SS: 0.973s. -# 3.0.6.33240 SC: 1.082s. -# 3.0.6.33247 CS: 2.120s. -# 2.5.6.27020 SS: 2.612s. -# 2.5.9.27149 SC: 0.453s. -# 2.5.9.27143 CS: 0.963s. -# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: gtcs.transactions-autocommit-02 +TITLE: Changes within AUTO COMMIT must be cancelled when exception raises in some TRIGGER +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/AUTO_COMMIT.2.ESQL.script + + Test creates three tables (test_1, test_2 and test_3) and AI-trigger for one of them (test_1). + This trigger does INSERTs into test_2 and test_3. + For test_3 we create UNIQUE index that will prevent from insertion of duplicates. + Then we add one record into test_3 with value = 1000. + Finally, we try to add record into test_1 and after this INSERT its trigger attempts to add records, + into test_2 and test_3. The latter will fail because of UK violation (we try to insert apropriate value + into test-1 in order this exception be raised). + Expected result: NONE of just performed INSERTS must be saved in DB. The only existing record must be + in the table test_3 that we added there on initial phase. + + NB: we use custom TPB with fdb.isc_tpb_autocommit in order to start DML transactions in AUTOCOMMIT=1 mode. +FBTEST: functional.gtcs.transactions_autocommit_2 +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory() -substitutions_1 = [('[ \t]+', ' ')] +act = python_act('db', substitutions=[('[ \t]+', ' ')]) -init_script_1 = """""" +expected_stdout = """ + mon$auto_commit: 1 + exception occured, gdscode: 335544349 + test_3 1000 +""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +@pytest.mark.skip('FIXME: Not IMPLEMENTED') +@pytest.mark.version('>=3') +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") # test_script_1 #--- -# +# # import os # import sys # import subprocess # import inspect # import time -# +# # os.environ["ISC_USER"] = user_name # os.environ["ISC_PASSWORD"] = user_password # db_conn.close() -# +# # #-------------------------------------------- -# +# # def flush_and_close( file_handle ): # # https://docs.python.org/2/library/os.html#os.fsync -# # If you're starting with a Python file object f, -# # first do f.flush(), and +# # If you're starting with a Python file object f, +# # first do f.flush(), and # # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. # global os -# +# # file_handle.flush() # if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: # # otherwise: "OSError: [Errno 9] Bad file descriptor"! 
# os.fsync(file_handle.fileno()) # file_handle.close() -# +# # #-------------------------------------------- -# +# # def cleanup( f_names_list ): # global os # for f in f_names_list: @@ -85,12 +79,12 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # else: # print('Unrecognized type of element:', f, ' - can not be treated as file.') # del_name = None -# +# # if del_name and os.path.isfile( del_name ): # os.remove( del_name ) -# +# # #-------------------------------------------- -# +# # sql_init=''' # set bail on; # recreate table test_1 (x integer); @@ -105,71 +99,57 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # insert into test_3 values (new.x * 100); # end ^ # set term ;^ -# +# # insert into test_3 values (1000); # commit; # ''' -# +# # f_init_sql = open( os.path.join(context['temp_directory'],'tmp_gtcs_tx_ac2.sql'), 'w', buffering = 0) # f_init_sql.write( sql_init ) # flush_and_close( f_init_sql ) -# +# # f_init_log = open( '.'.join( (os.path.splitext( f_init_sql.name )[0], 'log') ), 'w', buffering = 0) # f_init_err = open( '.'.join( (os.path.splitext( f_init_sql.name )[0], 'err') ), 'w', buffering = 0) -# +# # # This can take about 25-30 seconds: # #################################### # subprocess.call( [ context['isql_path'], dsn, '-q', '-i', f_init_sql.name ], stdout = f_init_log, stderr = f_init_err) -# +# # flush_and_close( f_init_log ) # flush_and_close( f_init_err ) -# +# # #CUSTOM_TX_PARAMS = ( [ fdb.isc_tpb_read_committed, fdb.isc_tpb_no_rec_version, fdb.isc_tpb_nowait, fdb.isc_tpb_autocommit ] ) # CUSTOM_TX_PARAMS = ( [ fdb.isc_tpb_nowait, fdb.isc_tpb_autocommit ] ) -# +# # con = fdb.connect( dsn = dsn ) # tx = con.trans( default_tpb = CUSTOM_TX_PARAMS ) -# +# # tx.begin() # cx=tx.cursor() -# +# # cx.execute('select mon$auto_commit from mon$transactions where mon$transaction_id = current_transaction') # for r in cx: # print( 'mon$auto_commit:', r[0] ) -# +# # try: # cx.execute( 'insert into test_1 values(?)', (10,) ) # this leads to PK/UK violation in the table 'test_3' # except Exception as e: # #print('exception in ', inspect.stack()[0][3], ': ', sys.exc_info()[0]) # print('exception occured, gdscode:', e[2]) -# +# # tx.commit() -# +# # cx.execute("select 'test_1' tab_name, x from test_1 union all select 'test_2', x from test_2 union all select 'test_3', x from test_3") # for r in cx: # print( r[0], r[1] ) -# +# # cx.close() # tx.close() # con.close() -# +# # # cleanup # ######### # time.sleep(1) # cleanup( ( f_init_sql, f_init_log, f_init_err) ) -# +# #--- -act_1 = python_act('db_1', substitutions=substitutions_1) - -expected_stdout_1 = """ - mon$auto_commit: 1 - exception occured, gdscode: 335544349 - test_3 1000 -""" - -@pytest.mark.version('>=2.5') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - - diff --git a/tests/functional/gtcs/test_transactions_autocommit_3.py b/tests/functional/gtcs/test_transactions_autocommit_3.py index 631a97af..05b5d61f 100644 --- a/tests/functional/gtcs/test_transactions_autocommit_3.py +++ b/tests/functional/gtcs/test_transactions_autocommit_3.py @@ -1,69 +1,63 @@ #coding:utf-8 -# -# id: functional.gtcs.transactions_autocommit_3 -# title: GTCS/tests/AUTO_COMMIT.3.ESQL. Changes within AUTO COMMIT must be cancelled when exception raises in some PROCEDURE. 
-# decription: -# Test does the same actions as described in GTCS/tests/AUTO_COMMIT.3.ESQL.script, see: -# https://github.com/FirebirdSQL/fbtcs/commit/166cb8b72a0aad18ef8ece34977d6d87d803616e#diff-69d4c7d7661d57fdf94aaf32a3377c82 -# -# It creates three tables, each with single SMALLINT column (thus max value that we can put in it is 32767). -# Then it creates three procedures which do insert values in 'their' table plus call "next level" SP -# with passing there input value multiplied by 100. -# When sp_ins_1 is called with argument = 3 then sp_ins_2 will insert into test_2 table value = 300 -# and sp_ins_3 will insert into test_3 value = 30000. -# This mean that we can can NOT call sp_ins_1 with values equal or more than 4 because of numeric overflow exception -# that will be raised in sp_ins_3. -# -# Test calls sp_ins1 two times: with arg=3 and arg=4. Second time must fail and we check that all three tables contain only -# values which are from 1st call: 3, 300 and 30000. -# -# NB: we use custom TPB with fdb.isc_tpb_autocommit in order to start DML transactions in AUTOCOMMIT=1 mode. -# -# Checked on: -# 4.0.0.1767 SS: 1.219s. -# 4.0.0.1712 SC: 1.942s. -# 4.0.0.1763 CS: 1.835s. -# 3.0.6.33246 SS: 0.642s. -# 3.0.5.33084 SC: 1.352s. -# 3.0.6.33246 CS: 1.178s. -# 2.5.9.27119 SS: 0.531s. -# 2.5.9.27149 SC: 0.422s. -# 2.5.9.27143 CS: 0.781s. -# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: gtcs.transactions-autocommit-03 +TITLE: Changes within AUTO COMMIT must be cancelled when exception raises in some PROCEDURE +DESCRIPTION: + Test does the same actions as described in GTCS/tests/AUTO_COMMIT.3.ESQL.script, see: + https://github.com/FirebirdSQL/fbtcs/commit/166cb8b72a0aad18ef8ece34977d6d87d803616e#diff-69d4c7d7661d57fdf94aaf32a3377c82 + + It creates three tables, each with single SMALLINT column (thus max value that we can put in it is 32767). + Then it creates three procedures which do insert values in 'their' table plus call "next level" SP + with passing there input value multiplied by 100. + When sp_ins_1 is called with argument = 3 then sp_ins_2 will insert into test_2 table value = 300 + and sp_ins_3 will insert into test_3 value = 30000. + This mean that we can can NOT call sp_ins_1 with values equal or more than 4 because of numeric overflow exception + that will be raised in sp_ins_3. + + Test calls sp_ins1 two times: with arg=3 and arg=4. Second time must fail and we check that all three tables contain only + values which are from 1st call: 3, 300 and 30000. + + NB: we use custom TPB with fdb.isc_tpb_autocommit in order to start DML transactions in AUTOCOMMIT=1 mode. 
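The custom TPB referred to in the NB above is the one the commented-out legacy scripts build with fdb. Condensed for readability below; the dsn value is a placeholder, and this is the legacy fdb API used by those scripts rather than a port to the current driver:

import fdb

dsn = 'localhost:/path/to/test.fdb'   # placeholder connection string (assumption)

# isc_tpb_autocommit asks the engine to commit every statement implicitly;
# mon$transactions then shows mon$auto_commit = 1 for such a transaction.
CUSTOM_TX_PARAMS = [fdb.isc_tpb_nowait, fdb.isc_tpb_autocommit]

con = fdb.connect(dsn=dsn)
tx = con.trans(default_tpb=CUSTOM_TX_PARAMS)
tx.begin()
cur = tx.cursor()
cur.execute('select mon$auto_commit from mon$transactions '
            'where mon$transaction_id = current_transaction')
print('mon$auto_commit:', cur.fetchone()[0])   # expected: 1
tx.rollback()   # with autocommit, rows already written by DML stay committed
cur.close()
tx.close()
con.close()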
+FBTEST: functional.gtcs.transactions_autocommit_3 +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory() -substitutions_1 = [] +act = python_act('db') -init_script_1 = """""" +expected_stdout = """ + mon$auto_commit: 1 + x: 3 + x: 300 + x: 30000 +""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +@pytest.mark.skip('FIXME: Not IMPLEMENTED') +@pytest.mark.version('>=3') +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") # test_script_1 #--- -# +# # import os # os.environ["ISC_USER"] = user_name # os.environ["ISC_PASSWORD"] = user_password -# +# # import os # import sys # import inspect # import fdb -# +# # N_MAX=3 -# +# # #CUSTOM_TX_PARAMS = ( [ fdb.isc_tpb_read_committed, fdb.isc_tpb_no_rec_version, fdb.isc_tpb_nowait, fdb.isc_tpb_autocommit ] ) # CUSTOM_TX_PARAMS = ( [ fdb.isc_tpb_nowait, fdb.isc_tpb_autocommit ] ) -# +# # db_conn.begin() # cx=db_conn.cursor() # sql_proc=''' @@ -74,56 +68,41 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # execute statement ( 'execute procedure sp_ins_%(k)s (?)' ) (:a_x * 100); # end # ''' -# +# # for i in range(N_MAX,0,-1): # k = i+1 # cx.execute( 'create table test_%(i)s(x smallint)' % locals() ) # cx.execute( sql_proc % locals() ) -# +# # db_conn.commit() -# +# # tx = db_conn.trans( default_tpb = CUSTOM_TX_PARAMS ) -# +# # tx.begin() # cx=tx.cursor() -# +# # cx.execute('select mon$auto_commit from mon$transactions where mon$transaction_id = current_transaction') # for r in cx: # print( 'mon$auto_commit:', r[0] ) -# +# # cx.callproc( 'sp_ins_1', (3,) ) -# +# # try: # cx.callproc( 'sp_ins_1', (4,) ) # except Exception as e: # pass # #print('Unexpected exception in ', inspect.stack()[0][3], ': ', sys.exc_info()[0]) # #print(e) -# +# # tx.commit() -# +# # cx = db_conn.cursor() # cx.execute('select x from test_1 union all select x from test_2 union all select x from test_3') # for r in cx: # print( 'x:', r[0]) -# +# # cx.close() # tx.close() # db_conn.close() -# +# #--- -act_1 = python_act('db_1', substitutions=substitutions_1) - -expected_stdout_1 = """ - mon$auto_commit: 1 - x: 3 - x: 300 - x: 30000 -""" - -@pytest.mark.version('>=2.5') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - - diff --git a/tests/functional/gtcs/test_trigger_variable_assignment.py b/tests/functional/gtcs/test_trigger_variable_assignment.py index 8d6174a3..2b0c2dfe 100644 --- a/tests/functional/gtcs/test_trigger_variable_assignment.py +++ b/tests/functional/gtcs/test_trigger_variable_assignment.py @@ -1,48 +1,35 @@ #coding:utf-8 -# -# id: functional.gtcs.trigger_variable_assignment -# title: GTCS/tests/CF_ISQL_21. Variable in the AFTER-trigger must be allowed for assignment OLD value in it. -# decription: -# ::: NB ::: -# ### Name of original test has no any relation with actual task of this test: ### -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_21.script -# -# AP,2005 - can't assign old.* fields in triggers -# -# Checked on: 4.0.0.1803 SS; 3.0.6.33265 SS; 2.5.9.27149 SC. 
-# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: gtcs.trigger-variable-assignment +TITLE: Variable in the AFTER-trigger must be allowed for assignment OLD value in it +DESCRIPTION: + ::: NB ::: + ### Name of original test has no any relation with actual task of this test: ### + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/CF_ISQL_21.script + + AP,2005 - can't assign old.* fields in triggers +FBTEST: functional.gtcs.trigger_variable_assignment +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory() -substitutions_1 = [('=', ''), ('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ create table u(a int); set term ^; - create trigger trg_u_aid for u after insert or update or delete as + create trigger trg_u_aid for u after insert or update or delete as declare i int; begin - i = old.a; + i = old.a; end^ commit^ """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act_1 = isql_act('db', test_script) - -@pytest.mark.version('>=2.5') +@pytest.mark.version('>=3') def test_1(act_1: Action): act_1.execute() - diff --git a/tests/functional/gtcs/test_window_func_01.py b/tests/functional/gtcs/test_window_func_01.py index 61607c21..49642a03 100644 --- a/tests/functional/gtcs/test_window_func_01.py +++ b/tests/functional/gtcs/test_window_func_01.py @@ -1,461 +1,23 @@ #coding:utf-8 -# -# id: functional.gtcs.window_func_01 -# title: GTCS/tests/FB_SQL_WINDOW_FUNC_01 - set of miscelaneous tests for verification of windowed functions. -# decription: -# Statements from this test are added to initial SQL which is stored in: ... 
-# bt-repo -# iles\\gtcs-window-func.sql -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/FB_SQL_WINDOW_FUNC_01.script -# -# Checked on 4.0.0.1854; 3.0.6.33277 -# -# tracker_id: -# min_versions: ['3.0'] -# versions: 3.0 -# qmid: None + +""" +ID: gtcs.window-func-01 +TITLE: Set of miscelaneous tests for verification of windowed functions +DESCRIPTION: + Statements from this test are added to initial SQL which is stored in: /files/gtcs-window-func.sql + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/FB_SQL_WINDOW_FUNC_01.script +FBTEST: functional.gtcs.window_func_01 +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory() -substitutions_1 = [('[ \t]+', ' ')] +act = python_act('db', substitutions=[('[ \t]+', ' ')]) -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -# test_script_1 -#--- -# -# import os -# import sys -# import subprocess -# -# os.environ["ISC_USER"] = user_name -# os.environ["ISC_PASSWORD"] = user_password -# -# db_conn.close() -# -# with open( os.path.join(context['files_location'],'gtcs-window-func.sql'), 'r') as f: -# sql_init = f.read() -# -# sql_addi=''' -# set list on; -# select -# 'point-01' as msg, -# count(*), count(val), min(val), max(val), -# count(distinct val), min(distinct val), max(distinct val) -# from entries; -# -# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -# -# select -# 'point-02' as msg, -# count(*) over (), count(val) over (), min(val) over (), max(val) over (), -# count(distinct val) over (), min(distinct val) over (), max(distinct val) over (), -# id -# from entries -# order by id; -# -# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -# select -# 'point-03' as msg, -# count(*) over (), count(val) over (), min(val) over (), max(val) over (), -# count(distinct val) over (), min(distinct val) over (), max(distinct val) over (), -# id -# from entries -# where 1 = 0 -# order by id; -# -# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -# select -# 'point-04' as msg, -# count(*), count(val), min(val), max(val), -# count(distinct val), min(distinct val), max(distinct val), -# person -# from entries -# group by person -# order by person; -# -# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -# select -# 'point-05' as msg, -# count(*) over (partition by person), -# count(val) over (partition by person), -# min(val) over (partition by person), -# max(val) over (partition by person), -# count(distinct val) over (partition by person), -# min(distinct val) over (partition by person), -# max(distinct val) over (partition by person), -# person -# from entries -# order by 1, 2, 3, 4, 5, 6, 7, 8, 9; -# -# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -# select -# 'point-06' as msg, -# count(*), -# count(e.val), -# min(e.val), -# max(e.val), -# count(distinct e.val), -# min(distinct e.val), -# max(distinct e.val), -# p.name -# from entries e -# join persons p on p.id = e.person -# group by p.name -# order by p.name; -# -# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -# select -# 'point-07' as msg, -# count(*) over (partition by p.id), -# count(e.val) over (partition by p.id), -# min(e.val) over (partition by p.id), -# max(e.val) over (partition by p.id), -# count(distinct e.val) over (partition by p.id), -# min(distinct e.val) over (partition by p.id), -# 
max(distinct e.val) over (partition by p.id), -# p.name -# from entries e -# join persons p on p.id = e.person -# order by e.id; -# -# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -# select -# 'point-08' as msg, -# person, -# count(person) over (partition by person) -# from entries -# group by person -# order by person; -# -# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -# select -# 'point-09' as msg, -# person, -# count(*) over (partition by person) -# from entries -# group by person -# order by person; -# -# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -# select -# 'point-10' as msg, -# v1.*, p.id -# from persons p -# join v1 on v1.x8 = p.name; -# -# select -# 'point-11' as msg, -# v1.*, p.id -# from persons p -# full join v1 on right(v1.x8, 1) = p.id; -# -# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -# select -# 'point-12' as msg, -# v1.*, p.id -# from persons p -# left join v1 on right(v1.x8, 1) = p.id -# where p.id in (1, 3); -# -# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -# select -# 'point-13' as msg, -# x3, sum(x4) -# from v1 -# group by x3; -# -# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -# select -# 'point-14' as msg, -# x3, sum(x4), count(*)over() -# from v1 -# group by x3; -# -# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -# select -# 'point-15' as msg, -# x3, sum(x4), sum(sum(x4))over() -# from v1 -# group by x3; -# -# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -# select -# 'point-17' as msg, -# v2.person, sum(v2.val), count (*) over () -# from v2 -# join persons p -# on p.id = v2.person -# group by person; -# -# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -# select -# 'point-18' as msg, -# v3.person, v3.name, sum(v3.val), count (*) over (), sum(sum(v3.val)) over () -# from v3 -# join persons p -# on p.id = v3.person -# group by person, name; -# -# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -# select -# 'point-19' as msg, -# person, -# count(*) over (), -# count(*) over (partition by person) -# from entries -# order by 1, 2, 3, 4; -# -# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -# select -# 'point-20' as msg, -# person, -# count(*) over (), -# count(*) over (partition by person) -# from entries -# -# UNION ALL -# -# select -# 'point-20' as msg, -# person, -# count(*) over (), -# count(*) over (partition by person) -# from entries -# order by 1, 2, 3, 4; -# -# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -# select -# 'point-21' as msg, -# entries.*, -# count(*) over (partition by person || person) -# from entries -# order by 2; -# -# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -# select -# 'point-22' as msg, -# entries.*, -# count(*) over (), -# count(val) over (), -# count(*) over (partition by person), -# count(val) over (partition by person), -# count(*) over (partition by dat), -# count(val) over (partition by dat) -# from entries -# order by 2; -# -# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -# select -# 'point-23' as msg, -# entries.*, -# count(*) over (), -# count(val) over (), -# count(*) over (partition by person), -# count(val) over (partition by person), -# count(*) over (partition by extract(month from dat)), -# count(val) over (partition by extract(month from dat)) -# from entries -# order by 2; -# -# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -# 
select -# 'point-24' as msg, -# entries.*, -# min(dat) over (partition by person), -# max(dat) over (partition by person) -# from entries -# order by 2; -# -# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -# select distinct -# 'point-25' as msg, -# person, -# min(dat) over (partition by person), -# max(dat) over (partition by person) -# from entries; -# -# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -# select distinct -# 'point-26' as msg, -# person, -# count(*) over (), -# count(*) over (partition by person) -# from entries -# order by 2; -# -# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -# select -# 'point-27' as msg, -# person, -# count(*), -# count(*) over (), -# count(*) over (partition by person), -# count(*) over (partition by 1, 2, 3) -# from entries -# group by person -# order by 2; -# -# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -# select -# 'point-28' as msg, -# person, -# count(*), -# count(*) over (), -# count(*) over (partition by person), -# count(*) over (partition by 1, 2, 3) -# from entries -# group by person -# order by 2 desc; -# -# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -# select * -# from ( -# select -# 'point-29' as msg, -# person, -# count(*) c1, -# count(*) over () c2, -# count(*) over (partition by person) c3, -# count(*) over (partition by 1, 2, 3) c4 -# from entries -# group by person -# ) -# order by 2 desc; -# -# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -# select -# 'point-30' as msg, -# person, -# count(*), -# count(*) over (), -# count(*) over (partition by person), -# count(*) over (partition by 1, 2, 3) -# from entries -# group by person -# order by 2 desc; -# -# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -# select -# 'point-31' as msg, -# person, -# count(*), -# count(*) over (), -# count(*) over (partition by person), -# count(*) over (partition by 1, 2, 3), -# count(count(*)) over () -# from entries -# group by person -# order by 4 desc, 2 desc; -# -# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -# select -# 'point-32' as msg, -# person, -# count(*), -# count(*) over (), -# count(*) over (partition by person), -# count(*) over (partition by 1, 2, 3), -# count(count(*)) over () -# from entries -# group by person -# having count(*) = 3 -# order by 4 desc, 2 desc; -# -# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -# select -# 'point-33' as msg, -# person, -# sum(val), -# count(*) over (), -# count(*) over (partition by person), -# count(*) over (partition by 1, 2, 3), -# count(count(*)) over () -# from entries -# group by person -# order by 4 desc, 2 desc; -# -# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -# select -# 'point-34' as msg, -# person, -# sum(val), -# count(*) over (), -# count(*) over (partition by person), -# count(*) over (partition by 1, 2, 3), -# count(count(*)) over () -# from entries -# group by person -# having sum(val) between 16 and 26 -# order by 4 desc, 2 desc; -# -# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -- Test invalid usages. 
Following statements must raise error: -# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -# select -# 'point-35' as msg, -# person, -# sum(val) over (partition by count(*)) -# from entries; -# -# select -# 'point-36' as msg, -# person -# from entries -# where count(*) over () = 1; -# -# select -# 'point-37' as msg, -# person -# from entries -# group by person -# having count(*) over () = 1; -# ''' -# -# runProgram('isql', [ dsn], os.linesep.join( (sql_init, sql_addi) ) ) -#--- -act_1 = python_act('db_1', substitutions=substitutions_1) - -expected_stderr_1 = """ +expected_stderr = """ Statement failed, SQLSTATE = 42000 Dynamic SQL Error -SQL error code = -104 @@ -472,2091 +34,2515 @@ expected_stderr_1 = """ -Cannot use an aggregate or window function in a WHERE clause, use HAVING (for aggregate only) instead """ -expected_stdout_1 = """ - MSG point-01 - COUNT 16 - COUNT 15 - MIN 2.30 - MAX 15.40 - COUNT 10 - MIN 2.30 - MAX 15.40 - MSG point-02 - COUNT 16 - COUNT 15 - MIN 2.30 - MAX 15.40 - COUNT 10 - MIN 2.30 - MAX 15.40 - ID 1 - MSG point-02 - COUNT 16 - COUNT 15 - MIN 2.30 - MAX 15.40 - COUNT 10 - MIN 2.30 - MAX 15.40 - ID 2 - MSG point-02 - COUNT 16 - COUNT 15 - MIN 2.30 - MAX 15.40 - COUNT 10 - MIN 2.30 - MAX 15.40 - ID 3 - MSG point-02 - COUNT 16 - COUNT 15 - MIN 2.30 - MAX 15.40 - COUNT 10 - MIN 2.30 - MAX 15.40 - ID 4 - MSG point-02 - COUNT 16 - COUNT 15 - MIN 2.30 - MAX 15.40 - COUNT 10 - MIN 2.30 - MAX 15.40 - ID 5 - MSG point-02 - COUNT 16 - COUNT 15 - MIN 2.30 - MAX 15.40 - COUNT 10 - MIN 2.30 - MAX 15.40 - ID 6 - MSG point-02 - COUNT 16 - COUNT 15 - MIN 2.30 - MAX 15.40 - COUNT 10 - MIN 2.30 - MAX 15.40 - ID 7 - MSG point-02 - COUNT 16 - COUNT 15 - MIN 2.30 - MAX 15.40 - COUNT 10 - MIN 2.30 - MAX 15.40 - ID 8 - MSG point-02 - COUNT 16 - COUNT 15 - MIN 2.30 - MAX 15.40 - COUNT 10 - MIN 2.30 - MAX 15.40 - ID 9 - MSG point-02 - COUNT 16 - COUNT 15 - MIN 2.30 - MAX 15.40 - COUNT 10 - MIN 2.30 - MAX 15.40 - ID 10 - MSG point-02 - COUNT 16 - COUNT 15 - MIN 2.30 - MAX 15.40 - COUNT 10 - MIN 2.30 - MAX 15.40 - ID 11 - MSG point-02 - COUNT 16 - COUNT 15 - MIN 2.30 - MAX 15.40 - COUNT 10 - MIN 2.30 - MAX 15.40 - ID 12 - MSG point-02 - COUNT 16 - COUNT 15 - MIN 2.30 - MAX 15.40 - COUNT 10 - MIN 2.30 - MAX 15.40 - ID 13 - MSG point-02 - COUNT 16 - COUNT 15 - MIN 2.30 - MAX 15.40 - COUNT 10 - MIN 2.30 - MAX 15.40 - ID 14 - MSG point-02 - COUNT 16 - COUNT 15 - MIN 2.30 - MAX 15.40 - COUNT 10 - MIN 2.30 - MAX 15.40 - ID 15 - MSG point-02 - COUNT 16 - COUNT 15 - MIN 2.30 - MAX 15.40 - COUNT 10 - MIN 2.30 - MAX 15.40 - ID 16 - MSG point-04 - COUNT 4 - COUNT 3 - MIN 2.30 - MAX 3.40 - COUNT 2 - MIN 2.30 - MAX 3.40 - PERSON 1 - MSG point-04 - COUNT 3 - COUNT 3 - MIN 4.30 - MAX 6.40 - COUNT 2 - MIN 4.30 - MAX 6.40 - PERSON 2 - MSG point-04 - COUNT 3 - COUNT 3 - MIN 6.30 - MAX 9.40 - COUNT 2 - MIN 6.30 - MAX 9.40 - PERSON 3 - MSG point-04 - COUNT 3 - COUNT 3 - MIN 8.30 - MAX 12.40 - COUNT 2 - MIN 8.30 - MAX 12.40 - PERSON 4 - MSG point-04 - COUNT 3 - COUNT 3 - MIN 10.30 - MAX 15.40 - COUNT 2 - MIN 10.30 - MAX 15.40 - PERSON 5 - MSG point-05 - COUNT 3 - COUNT 3 - MIN 4.30 - MAX 6.40 - COUNT 2 - MIN 4.30 - MAX 6.40 - PERSON 2 - MSG point-05 - COUNT 3 - COUNT 3 - MIN 4.30 - MAX 6.40 - COUNT 2 - MIN 4.30 - MAX 6.40 - PERSON 2 - MSG point-05 - COUNT 3 - COUNT 3 - MIN 4.30 - MAX 6.40 - COUNT 2 - MIN 4.30 - MAX 6.40 - PERSON 2 - MSG point-05 - COUNT 3 - COUNT 3 - MIN 6.30 - MAX 9.40 - COUNT 2 - MIN 6.30 - MAX 9.40 - PERSON 3 - MSG point-05 - COUNT 3 - COUNT 3 - MIN 6.30 - MAX 9.40 - COUNT 
2 - MIN 6.30 - MAX 9.40 - PERSON 3 - MSG point-05 - COUNT 3 - COUNT 3 - MIN 6.30 - MAX 9.40 - COUNT 2 - MIN 6.30 - MAX 9.40 - PERSON 3 - MSG point-05 - COUNT 3 - COUNT 3 - MIN 8.30 - MAX 12.40 - COUNT 2 - MIN 8.30 - MAX 12.40 - PERSON 4 - MSG point-05 - COUNT 3 - COUNT 3 - MIN 8.30 - MAX 12.40 - COUNT 2 - MIN 8.30 - MAX 12.40 - PERSON 4 - MSG point-05 - COUNT 3 - COUNT 3 - MIN 8.30 - MAX 12.40 - COUNT 2 - MIN 8.30 - MAX 12.40 - PERSON 4 - MSG point-05 - COUNT 3 - COUNT 3 - MIN 10.30 - MAX 15.40 - COUNT 2 - MIN 10.30 - MAX 15.40 - PERSON 5 - MSG point-05 - COUNT 3 - COUNT 3 - MIN 10.30 - MAX 15.40 - COUNT 2 - MIN 10.30 - MAX 15.40 - PERSON 5 - MSG point-05 - COUNT 3 - COUNT 3 - MIN 10.30 - MAX 15.40 - COUNT 2 - MIN 10.30 - MAX 15.40 - PERSON 5 - MSG point-05 - COUNT 4 - COUNT 3 - MIN 2.30 - MAX 3.40 - COUNT 2 - MIN 2.30 - MAX 3.40 - PERSON 1 - MSG point-05 - COUNT 4 - COUNT 3 - MIN 2.30 - MAX 3.40 - COUNT 2 - MIN 2.30 - MAX 3.40 - PERSON 1 - MSG point-05 - COUNT 4 - COUNT 3 - MIN 2.30 - MAX 3.40 - COUNT 2 - MIN 2.30 - MAX 3.40 - PERSON 1 - MSG point-05 - COUNT 4 - COUNT 3 - MIN 2.30 - MAX 3.40 - COUNT 2 - MIN 2.30 - MAX 3.40 - PERSON 1 - MSG point-06 - COUNT 4 - COUNT 3 - MIN 2.30 - MAX 3.40 - COUNT 2 - MIN 2.30 - MAX 3.40 - NAME Person 1 - MSG point-06 - COUNT 3 - COUNT 3 - MIN 4.30 - MAX 6.40 - COUNT 2 - MIN 4.30 - MAX 6.40 - NAME Person 2 - MSG point-06 - COUNT 3 - COUNT 3 - MIN 6.30 - MAX 9.40 - COUNT 2 - MIN 6.30 - MAX 9.40 - NAME Person 3 - MSG point-06 - COUNT 3 - COUNT 3 - MIN 8.30 - MAX 12.40 - COUNT 2 - MIN 8.30 - MAX 12.40 - NAME Person 4 - MSG point-06 - COUNT 3 - COUNT 3 - MIN 10.30 - MAX 15.40 - COUNT 2 - MIN 10.30 - MAX 15.40 - NAME Person 5 - MSG point-07 - COUNT 4 - COUNT 3 - MIN 2.30 - MAX 3.40 - COUNT 2 - MIN 2.30 - MAX 3.40 - NAME Person 1 - MSG point-07 - COUNT 3 - COUNT 3 - MIN 4.30 - MAX 6.40 - COUNT 2 - MIN 4.30 - MAX 6.40 - NAME Person 2 - MSG point-07 - COUNT 3 - COUNT 3 - MIN 6.30 - MAX 9.40 - COUNT 2 - MIN 6.30 - MAX 9.40 - NAME Person 3 - MSG point-07 - COUNT 3 - COUNT 3 - MIN 8.30 - MAX 12.40 - COUNT 2 - MIN 8.30 - MAX 12.40 - NAME Person 4 - MSG point-07 - COUNT 3 - COUNT 3 - MIN 10.30 - MAX 15.40 - COUNT 2 - MIN 10.30 - MAX 15.40 - NAME Person 5 - MSG point-07 - COUNT 4 - COUNT 3 - MIN 2.30 - MAX 3.40 - COUNT 2 - MIN 2.30 - MAX 3.40 - NAME Person 1 - MSG point-07 - COUNT 3 - COUNT 3 - MIN 4.30 - MAX 6.40 - COUNT 2 - MIN 4.30 - MAX 6.40 - NAME Person 2 - MSG point-07 - COUNT 3 - COUNT 3 - MIN 6.30 - MAX 9.40 - COUNT 2 - MIN 6.30 - MAX 9.40 - NAME Person 3 - MSG point-07 - COUNT 3 - COUNT 3 - MIN 8.30 - MAX 12.40 - COUNT 2 - MIN 8.30 - MAX 12.40 - NAME Person 4 - MSG point-07 - COUNT 3 - COUNT 3 - MIN 10.30 - MAX 15.40 - COUNT 2 - MIN 10.30 - MAX 15.40 - NAME Person 5 - MSG point-07 - COUNT 4 - COUNT 3 - MIN 2.30 - MAX 3.40 - COUNT 2 - MIN 2.30 - MAX 3.40 - NAME Person 1 - MSG point-07 - COUNT 3 - COUNT 3 - MIN 4.30 - MAX 6.40 - COUNT 2 - MIN 4.30 - MAX 6.40 - NAME Person 2 - MSG point-07 - COUNT 3 - COUNT 3 - MIN 6.30 - MAX 9.40 - COUNT 2 - MIN 6.30 - MAX 9.40 - NAME Person 3 - MSG point-07 - COUNT 3 - COUNT 3 - MIN 8.30 - MAX 12.40 - COUNT 2 - MIN 8.30 - MAX 12.40 - NAME Person 4 - MSG point-07 - COUNT 3 - COUNT 3 - MIN 10.30 - MAX 15.40 - COUNT 2 - MIN 10.30 - MAX 15.40 - NAME Person 5 - MSG point-07 - COUNT 4 - COUNT 3 - MIN 2.30 - MAX 3.40 - COUNT 2 - MIN 2.30 - MAX 3.40 - NAME Person 1 - MSG point-08 - PERSON 1 - COUNT 1 - MSG point-08 - PERSON 2 - COUNT 1 - MSG point-08 - PERSON 3 - COUNT 1 - MSG point-08 - PERSON 4 - COUNT 1 - MSG point-08 - PERSON 5 - 
COUNT 1 - MSG point-09 - PERSON 1 - COUNT 1 - MSG point-09 - PERSON 2 - COUNT 1 - MSG point-09 - PERSON 3 - COUNT 1 - MSG point-09 - PERSON 4 - COUNT 1 - MSG point-09 - PERSON 5 - COUNT 1 - MSG point-10 - X1 4 - X2 3 - X3 2.30 - X4 3.40 - X5 2 - X6 2.30 - X7 3.40 - X8 Person 1 - ID 1 - MSG point-10 - X1 4 - X2 3 - X3 2.30 - X4 3.40 - X5 2 - X6 2.30 - X7 3.40 - X8 Person 1 - ID 1 - MSG point-10 - X1 4 - X2 3 - X3 2.30 - X4 3.40 - X5 2 - X6 2.30 - X7 3.40 - X8 Person 1 - ID 1 - MSG point-10 - X1 4 - X2 3 - X3 2.30 - X4 3.40 - X5 2 - X6 2.30 - X7 3.40 - X8 Person 1 - ID 1 - MSG point-10 - X1 3 - X2 3 - X3 4.30 - X4 6.40 - X5 2 - X6 4.30 - X7 6.40 - X8 Person 2 - ID 2 - MSG point-10 - X1 3 - X2 3 - X3 4.30 - X4 6.40 - X5 2 - X6 4.30 - X7 6.40 - X8 Person 2 - ID 2 - MSG point-10 - X1 3 - X2 3 - X3 4.30 - X4 6.40 - X5 2 - X6 4.30 - X7 6.40 - X8 Person 2 - ID 2 - MSG point-10 - X1 3 - X2 3 - X3 6.30 - X4 9.40 - X5 2 - X6 6.30 - X7 9.40 - X8 Person 3 - ID 3 - MSG point-10 - X1 3 - X2 3 - X3 6.30 - X4 9.40 - X5 2 - X6 6.30 - X7 9.40 - X8 Person 3 - ID 3 - MSG point-10 - X1 3 - X2 3 - X3 6.30 - X4 9.40 - X5 2 - X6 6.30 - X7 9.40 - X8 Person 3 - ID 3 - MSG point-10 - X1 3 - X2 3 - X3 8.30 - X4 12.40 - X5 2 - X6 8.30 - X7 12.40 - X8 Person 4 - ID 4 - MSG point-10 - X1 3 - X2 3 - X3 8.30 - X4 12.40 - X5 2 - X6 8.30 - X7 12.40 - X8 Person 4 - ID 4 - MSG point-10 - X1 3 - X2 3 - X3 8.30 - X4 12.40 - X5 2 - X6 8.30 - X7 12.40 - X8 Person 4 - ID 4 - MSG point-10 - X1 3 - X2 3 - X3 10.30 - X4 15.40 - X5 2 - X6 10.30 - X7 15.40 - X8 Person 5 - ID 5 - MSG point-10 - X1 3 - X2 3 - X3 10.30 - X4 15.40 - X5 2 - X6 10.30 - X7 15.40 - X8 Person 5 - ID 5 - MSG point-10 - X1 3 - X2 3 - X3 10.30 - X4 15.40 - X5 2 - X6 10.30 - X7 15.40 - X8 Person 5 - ID 5 - MSG point-11 - X1 4 - X2 3 - X3 2.30 - X4 3.40 - X5 2 - X6 2.30 - X7 3.40 - X8 Person 1 - ID 1 - MSG point-11 - X1 4 - X2 3 - X3 2.30 - X4 3.40 - X5 2 - X6 2.30 - X7 3.40 - X8 Person 1 - ID 1 - MSG point-11 - X1 4 - X2 3 - X3 2.30 - X4 3.40 - X5 2 - X6 2.30 - X7 3.40 - X8 Person 1 - ID 1 - MSG point-11 - X1 4 - X2 3 - X3 2.30 - X4 3.40 - X5 2 - X6 2.30 - X7 3.40 - X8 Person 1 - ID 1 - MSG point-11 - X1 3 - X2 3 - X3 4.30 - X4 6.40 - X5 2 - X6 4.30 - X7 6.40 - X8 Person 2 - ID 2 - MSG point-11 - X1 3 - X2 3 - X3 4.30 - X4 6.40 - X5 2 - X6 4.30 - X7 6.40 - X8 Person 2 - ID 2 - MSG point-11 - X1 3 - X2 3 - X3 4.30 - X4 6.40 - X5 2 - X6 4.30 - X7 6.40 - X8 Person 2 - ID 2 - MSG point-11 - X1 3 - X2 3 - X3 6.30 - X4 9.40 - X5 2 - X6 6.30 - X7 9.40 - X8 Person 3 - ID 3 - MSG point-11 - X1 3 - X2 3 - X3 6.30 - X4 9.40 - X5 2 - X6 6.30 - X7 9.40 - X8 Person 3 - ID 3 - MSG point-11 - X1 3 - X2 3 - X3 6.30 - X4 9.40 - X5 2 - X6 6.30 - X7 9.40 - X8 Person 3 - ID 3 - MSG point-11 - X1 3 - X2 3 - X3 8.30 - X4 12.40 - X5 2 - X6 8.30 - X7 12.40 - X8 Person 4 - ID 4 - MSG point-11 - X1 3 - X2 3 - X3 8.30 - X4 12.40 - X5 2 - X6 8.30 - X7 12.40 - X8 Person 4 - ID 4 - MSG point-11 - X1 3 - X2 3 - X3 8.30 - X4 12.40 - X5 2 - X6 8.30 - X7 12.40 - X8 Person 4 - ID 4 - MSG point-11 - X1 3 - X2 3 - X3 10.30 - X4 15.40 - X5 2 - X6 10.30 - X7 15.40 - X8 Person 5 - ID 5 - MSG point-11 - X1 3 - X2 3 - X3 10.30 - X4 15.40 - X5 2 - X6 10.30 - X7 15.40 - X8 Person 5 - ID 5 - MSG point-11 - X1 3 - X2 3 - X3 10.30 - X4 15.40 - X5 2 - X6 10.30 - X7 15.40 - X8 Person 5 - ID 5 - MSG point-12 - X1 4 - X2 3 - X3 2.30 - X4 3.40 - X5 2 - X6 2.30 - X7 3.40 - X8 Person 1 - ID 1 - MSG point-12 - X1 4 - X2 3 - X3 2.30 - X4 3.40 - X5 2 - X6 2.30 - X7 3.40 - X8 Person 1 - ID 1 - MSG point-12 - X1 4 - X2 3 - 
X3 2.30 - X4 3.40 - X5 2 - X6 2.30 - X7 3.40 - X8 Person 1 - ID 1 - MSG point-12 - X1 4 - X2 3 - X3 2.30 - X4 3.40 - X5 2 - X6 2.30 - X7 3.40 - X8 Person 1 - ID 1 - MSG point-12 - X1 3 - X2 3 - X3 6.30 - X4 9.40 - X5 2 - X6 6.30 - X7 9.40 - X8 Person 3 - ID 3 - MSG point-12 - X1 3 - X2 3 - X3 6.30 - X4 9.40 - X5 2 - X6 6.30 - X7 9.40 - X8 Person 3 - ID 3 - MSG point-12 - X1 3 - X2 3 - X3 6.30 - X4 9.40 - X5 2 - X6 6.30 - X7 9.40 - X8 Person 3 - ID 3 - MSG point-13 - X3 2.30 - SUM 13.60 - MSG point-13 - X3 4.30 - SUM 19.20 - MSG point-13 - X3 6.30 - SUM 28.20 - MSG point-13 - X3 8.30 - SUM 37.20 - MSG point-13 - X3 10.30 - SUM 46.20 - MSG point-14 - X3 2.30 - SUM 13.60 - COUNT 5 - MSG point-14 - X3 4.30 - SUM 19.20 - COUNT 5 - MSG point-14 - X3 6.30 - SUM 28.20 - COUNT 5 - MSG point-14 - X3 8.30 - SUM 37.20 - COUNT 5 - MSG point-14 - X3 10.30 - SUM 46.20 - COUNT 5 - MSG point-15 - X3 2.30 - SUM 13.60 - SUM 144.40 - MSG point-15 - X3 4.30 - SUM 19.20 - SUM 144.40 - MSG point-15 - X3 6.30 - SUM 28.20 - SUM 144.40 - MSG point-15 - X3 8.30 - SUM 37.20 - SUM 144.40 - MSG point-15 - X3 10.30 - SUM 46.20 - SUM 144.40 - MSG point-17 - PERSON 1 - SUM 9.10 - COUNT 5 - MSG point-17 - PERSON 2 - SUM 17.10 - COUNT 5 - MSG point-17 - PERSON 3 - SUM 25.10 - COUNT 5 - MSG point-17 - PERSON 4 - SUM 33.10 - COUNT 5 - MSG point-17 - PERSON 5 - SUM 41.10 - COUNT 5 - MSG point-18 - PERSON 1 - NAME Person 1 - SUM 9.10 - COUNT 5 - SUM 125.50 - MSG point-18 - PERSON 2 - NAME Person 2 - SUM 17.10 - COUNT 5 - SUM 125.50 - MSG point-18 - PERSON 3 - NAME Person 3 - SUM 25.10 - COUNT 5 - SUM 125.50 - MSG point-18 - PERSON 4 - NAME Person 4 - SUM 33.10 - COUNT 5 - SUM 125.50 - MSG point-18 - PERSON 5 - NAME Person 5 - SUM 41.10 - COUNT 5 - SUM 125.50 - MSG point-19 - PERSON 1 - COUNT 16 - COUNT 4 - MSG point-19 - PERSON 1 - COUNT 16 - COUNT 4 - MSG point-19 - PERSON 1 - COUNT 16 - COUNT 4 - MSG point-19 - PERSON 1 - COUNT 16 - COUNT 4 - MSG point-19 - PERSON 2 - COUNT 16 - COUNT 3 - MSG point-19 - PERSON 2 - COUNT 16 - COUNT 3 - MSG point-19 - PERSON 2 - COUNT 16 - COUNT 3 - MSG point-19 - PERSON 3 - COUNT 16 - COUNT 3 - MSG point-19 - PERSON 3 - COUNT 16 - COUNT 3 - MSG point-19 - PERSON 3 - COUNT 16 - COUNT 3 - MSG point-19 - PERSON 4 - COUNT 16 - COUNT 3 - MSG point-19 - PERSON 4 - COUNT 16 - COUNT 3 - MSG point-19 - PERSON 4 - COUNT 16 - COUNT 3 - MSG point-19 - PERSON 5 - COUNT 16 - COUNT 3 - MSG point-19 - PERSON 5 - COUNT 16 - COUNT 3 - MSG point-19 - PERSON 5 - COUNT 16 - COUNT 3 - MSG point-20 - PERSON 1 - COUNT 16 - COUNT 4 - MSG point-20 - PERSON 1 - COUNT 16 - COUNT 4 - MSG point-20 - PERSON 1 - COUNT 16 - COUNT 4 - MSG point-20 - PERSON 1 - COUNT 16 - COUNT 4 - MSG point-20 - PERSON 1 - COUNT 16 - COUNT 4 - MSG point-20 - PERSON 1 - COUNT 16 - COUNT 4 - MSG point-20 - PERSON 1 - COUNT 16 - COUNT 4 - MSG point-20 - PERSON 1 - COUNT 16 - COUNT 4 - MSG point-20 - PERSON 2 - COUNT 16 - COUNT 3 - MSG point-20 - PERSON 2 - COUNT 16 - COUNT 3 - MSG point-20 - PERSON 2 - COUNT 16 - COUNT 3 - MSG point-20 - PERSON 2 - COUNT 16 - COUNT 3 - MSG point-20 - PERSON 2 - COUNT 16 - COUNT 3 - MSG point-20 - PERSON 2 - COUNT 16 - COUNT 3 - MSG point-20 - PERSON 3 - COUNT 16 - COUNT 3 - MSG point-20 - PERSON 3 - COUNT 16 - COUNT 3 - MSG point-20 - PERSON 3 - COUNT 16 - COUNT 3 - MSG point-20 - PERSON 3 - COUNT 16 - COUNT 3 - MSG point-20 - PERSON 3 - COUNT 16 - COUNT 3 - MSG point-20 - PERSON 3 - COUNT 16 - COUNT 3 - MSG point-20 - PERSON 4 - COUNT 16 - COUNT 3 - MSG point-20 - PERSON 4 - COUNT 16 - COUNT 3 - MSG point-20 - 
PERSON 4 - COUNT 16 - COUNT 3 - MSG point-20 - PERSON 4 - COUNT 16 - COUNT 3 - MSG point-20 - PERSON 4 - COUNT 16 - COUNT 3 - MSG point-20 - PERSON 4 - COUNT 16 - COUNT 3 - MSG point-20 - PERSON 5 - COUNT 16 - COUNT 3 - MSG point-20 - PERSON 5 - COUNT 16 - COUNT 3 - MSG point-20 - PERSON 5 - COUNT 16 - COUNT 3 - MSG point-20 - PERSON 5 - COUNT 16 - COUNT 3 - MSG point-20 - PERSON 5 - COUNT 16 - COUNT 3 - MSG point-20 - PERSON 5 - COUNT 16 - COUNT 3 - MSG point-21 - ID 1 - PERSON 1 - DAT 2010-01-03 - VAL 2.30 - COUNT 4 - MSG point-21 - ID 2 - PERSON 2 - DAT 2010-01-04 - VAL 4.30 - COUNT 3 - MSG point-21 - ID 3 - PERSON 3 - DAT 2010-01-05 - VAL 6.30 - COUNT 3 - MSG point-21 - ID 4 - PERSON 4 - DAT 2010-01-06 - VAL 8.30 - COUNT 3 - MSG point-21 - ID 5 - PERSON 5 - DAT 2010-01-07 - VAL 10.30 - COUNT 3 - MSG point-21 - ID 6 - PERSON 1 - DAT 2010-02-02 - VAL 3.40 - COUNT 4 - MSG point-21 - ID 7 - PERSON 2 - DAT 2010-02-03 - VAL 6.40 - COUNT 3 - MSG point-21 - ID 8 - PERSON 3 - DAT 2010-02-04 - VAL 9.40 - COUNT 3 - MSG point-21 - ID 9 - PERSON 4 - DAT 2010-02-05 - VAL 12.40 - COUNT 3 - MSG point-21 - ID 10 - PERSON 5 - DAT 2010-02-06 - VAL 15.40 - COUNT 3 - MSG point-21 - ID 11 - PERSON 1 - DAT 2010-03-02 - VAL 3.40 - COUNT 4 - MSG point-21 - ID 12 - PERSON 2 - DAT 2010-03-03 - VAL 6.40 - COUNT 3 - MSG point-21 - ID 13 - PERSON 3 - DAT 2010-03-04 - VAL 9.40 - COUNT 3 - MSG point-21 - ID 14 - PERSON 4 - DAT 2010-03-05 - VAL 12.40 - COUNT 3 - MSG point-21 - ID 15 - PERSON 5 - DAT 2010-03-06 - VAL 15.40 - COUNT 3 - MSG point-21 - ID 16 - PERSON 1 - DAT - VAL - COUNT 4 - MSG point-22 - ID 1 - PERSON 1 - DAT 2010-01-03 - VAL 2.30 - COUNT 16 - COUNT 15 - COUNT 4 - COUNT 3 - COUNT 1 - COUNT 1 - MSG point-22 - ID 2 - PERSON 2 - DAT 2010-01-04 - VAL 4.30 - COUNT 16 - COUNT 15 - COUNT 3 - COUNT 3 - COUNT 1 - COUNT 1 - MSG point-22 - ID 3 - PERSON 3 - DAT 2010-01-05 - VAL 6.30 - COUNT 16 - COUNT 15 - COUNT 3 - COUNT 3 - COUNT 1 - COUNT 1 - MSG point-22 - ID 4 - PERSON 4 - DAT 2010-01-06 - VAL 8.30 - COUNT 16 - COUNT 15 - COUNT 3 - COUNT 3 - COUNT 1 - COUNT 1 - MSG point-22 - ID 5 - PERSON 5 - DAT 2010-01-07 - VAL 10.30 - COUNT 16 - COUNT 15 - COUNT 3 - COUNT 3 - COUNT 1 - COUNT 1 - MSG point-22 - ID 6 - PERSON 1 - DAT 2010-02-02 - VAL 3.40 - COUNT 16 - COUNT 15 - COUNT 4 - COUNT 3 - COUNT 1 - COUNT 1 - MSG point-22 - ID 7 - PERSON 2 - DAT 2010-02-03 - VAL 6.40 - COUNT 16 - COUNT 15 - COUNT 3 - COUNT 3 - COUNT 1 - COUNT 1 - MSG point-22 - ID 8 - PERSON 3 - DAT 2010-02-04 - VAL 9.40 - COUNT 16 - COUNT 15 - COUNT 3 - COUNT 3 - COUNT 1 - COUNT 1 - MSG point-22 - ID 9 - PERSON 4 - DAT 2010-02-05 - VAL 12.40 - COUNT 16 - COUNT 15 - COUNT 3 - COUNT 3 - COUNT 1 - COUNT 1 - MSG point-22 - ID 10 - PERSON 5 - DAT 2010-02-06 - VAL 15.40 - COUNT 16 - COUNT 15 - COUNT 3 - COUNT 3 - COUNT 1 - COUNT 1 - MSG point-22 - ID 11 - PERSON 1 - DAT 2010-03-02 - VAL 3.40 - COUNT 16 - COUNT 15 - COUNT 4 - COUNT 3 - COUNT 1 - COUNT 1 - MSG point-22 - ID 12 - PERSON 2 - DAT 2010-03-03 - VAL 6.40 - COUNT 16 - COUNT 15 - COUNT 3 - COUNT 3 - COUNT 1 - COUNT 1 - MSG point-22 - ID 13 - PERSON 3 - DAT 2010-03-04 - VAL 9.40 - COUNT 16 - COUNT 15 - COUNT 3 - COUNT 3 - COUNT 1 - COUNT 1 - MSG point-22 - ID 14 - PERSON 4 - DAT 2010-03-05 - VAL 12.40 - COUNT 16 - COUNT 15 - COUNT 3 - COUNT 3 - COUNT 1 - COUNT 1 - MSG point-22 - ID 15 - PERSON 5 - DAT 2010-03-06 - VAL 15.40 - COUNT 16 - COUNT 15 - COUNT 3 - COUNT 3 - COUNT 1 - COUNT 1 - MSG point-22 - ID 16 - PERSON 1 - DAT - VAL - COUNT 16 - COUNT 15 - COUNT 4 - COUNT 3 - COUNT 1 - COUNT 0 - MSG 
point-23 - ID 1 - PERSON 1 - DAT 2010-01-03 - VAL 2.30 - COUNT 16 - COUNT 15 - COUNT 4 - COUNT 3 - COUNT 5 - COUNT 5 - MSG point-23 - ID 2 - PERSON 2 - DAT 2010-01-04 - VAL 4.30 - COUNT 16 - COUNT 15 - COUNT 3 - COUNT 3 - COUNT 5 - COUNT 5 - MSG point-23 - ID 3 - PERSON 3 - DAT 2010-01-05 - VAL 6.30 - COUNT 16 - COUNT 15 - COUNT 3 - COUNT 3 - COUNT 5 - COUNT 5 - MSG point-23 - ID 4 - PERSON 4 - DAT 2010-01-06 - VAL 8.30 - COUNT 16 - COUNT 15 - COUNT 3 - COUNT 3 - COUNT 5 - COUNT 5 - MSG point-23 - ID 5 - PERSON 5 - DAT 2010-01-07 - VAL 10.30 - COUNT 16 - COUNT 15 - COUNT 3 - COUNT 3 - COUNT 5 - COUNT 5 - MSG point-23 - ID 6 - PERSON 1 - DAT 2010-02-02 - VAL 3.40 - COUNT 16 - COUNT 15 - COUNT 4 - COUNT 3 - COUNT 5 - COUNT 5 - MSG point-23 - ID 7 - PERSON 2 - DAT 2010-02-03 - VAL 6.40 - COUNT 16 - COUNT 15 - COUNT 3 - COUNT 3 - COUNT 5 - COUNT 5 - MSG point-23 - ID 8 - PERSON 3 - DAT 2010-02-04 - VAL 9.40 - COUNT 16 - COUNT 15 - COUNT 3 - COUNT 3 - COUNT 5 - COUNT 5 - MSG point-23 - ID 9 - PERSON 4 - DAT 2010-02-05 - VAL 12.40 - COUNT 16 - COUNT 15 - COUNT 3 - COUNT 3 - COUNT 5 - COUNT 5 - MSG point-23 - ID 10 - PERSON 5 - DAT 2010-02-06 - VAL 15.40 - COUNT 16 - COUNT 15 - COUNT 3 - COUNT 3 - COUNT 5 - COUNT 5 - MSG point-23 - ID 11 - PERSON 1 - DAT 2010-03-02 - VAL 3.40 - COUNT 16 - COUNT 15 - COUNT 4 - COUNT 3 - COUNT 5 - COUNT 5 - MSG point-23 - ID 12 - PERSON 2 - DAT 2010-03-03 - VAL 6.40 - COUNT 16 - COUNT 15 - COUNT 3 - COUNT 3 - COUNT 5 - COUNT 5 - MSG point-23 - ID 13 - PERSON 3 - DAT 2010-03-04 - VAL 9.40 - COUNT 16 - COUNT 15 - COUNT 3 - COUNT 3 - COUNT 5 - COUNT 5 - MSG point-23 - ID 14 - PERSON 4 - DAT 2010-03-05 - VAL 12.40 - COUNT 16 - COUNT 15 - COUNT 3 - COUNT 3 - COUNT 5 - COUNT 5 - MSG point-23 - ID 15 - PERSON 5 - DAT 2010-03-06 - VAL 15.40 - COUNT 16 - COUNT 15 - COUNT 3 - COUNT 3 - COUNT 5 - COUNT 5 - MSG point-23 - ID 16 - PERSON 1 - DAT - VAL - COUNT 16 - COUNT 15 - COUNT 4 - COUNT 3 - COUNT 1 - COUNT 0 - MSG point-24 - ID 1 - PERSON 1 - DAT 2010-01-03 - VAL 2.30 - MIN 2010-01-03 - MAX 2010-03-02 - MSG point-24 - ID 2 - PERSON 2 - DAT 2010-01-04 - VAL 4.30 - MIN 2010-01-04 - MAX 2010-03-03 - MSG point-24 - ID 3 - PERSON 3 - DAT 2010-01-05 - VAL 6.30 - MIN 2010-01-05 - MAX 2010-03-04 - MSG point-24 - ID 4 - PERSON 4 - DAT 2010-01-06 - VAL 8.30 - MIN 2010-01-06 - MAX 2010-03-05 - MSG point-24 - ID 5 - PERSON 5 - DAT 2010-01-07 - VAL 10.30 - MIN 2010-01-07 - MAX 2010-03-06 - MSG point-24 - ID 6 - PERSON 1 - DAT 2010-02-02 - VAL 3.40 - MIN 2010-01-03 - MAX 2010-03-02 - MSG point-24 - ID 7 - PERSON 2 - DAT 2010-02-03 - VAL 6.40 - MIN 2010-01-04 - MAX 2010-03-03 - MSG point-24 - ID 8 - PERSON 3 - DAT 2010-02-04 - VAL 9.40 - MIN 2010-01-05 - MAX 2010-03-04 - MSG point-24 - ID 9 - PERSON 4 - DAT 2010-02-05 - VAL 12.40 - MIN 2010-01-06 - MAX 2010-03-05 - MSG point-24 - ID 10 - PERSON 5 - DAT 2010-02-06 - VAL 15.40 - MIN 2010-01-07 - MAX 2010-03-06 - MSG point-24 - ID 11 - PERSON 1 - DAT 2010-03-02 - VAL 3.40 - MIN 2010-01-03 - MAX 2010-03-02 - MSG point-24 - ID 12 - PERSON 2 - DAT 2010-03-03 - VAL 6.40 - MIN 2010-01-04 - MAX 2010-03-03 - MSG point-24 - ID 13 - PERSON 3 - DAT 2010-03-04 - VAL 9.40 - MIN 2010-01-05 - MAX 2010-03-04 - MSG point-24 - ID 14 - PERSON 4 - DAT 2010-03-05 - VAL 12.40 - MIN 2010-01-06 - MAX 2010-03-05 - MSG point-24 - ID 15 - PERSON 5 - DAT 2010-03-06 - VAL 15.40 - MIN 2010-01-07 - MAX 2010-03-06 - MSG point-24 - ID 16 - PERSON 1 - DAT - VAL - MIN 2010-01-03 - MAX 2010-03-02 - MSG point-25 - PERSON 1 - MIN 2010-01-03 - MAX 2010-03-02 - MSG point-25 - 
PERSON 2 - MIN 2010-01-04 - MAX 2010-03-03 - MSG point-25 - PERSON 3 - MIN 2010-01-05 - MAX 2010-03-04 - MSG point-25 - PERSON 4 - MIN 2010-01-06 - MAX 2010-03-05 - MSG point-25 - PERSON 5 - MIN 2010-01-07 - MAX 2010-03-06 - MSG point-26 - PERSON 1 - COUNT 16 - COUNT 4 - MSG point-26 - PERSON 2 - COUNT 16 - COUNT 3 - MSG point-26 - PERSON 3 - COUNT 16 - COUNT 3 - MSG point-26 - PERSON 4 - COUNT 16 - COUNT 3 - MSG point-26 - PERSON 5 - COUNT 16 - COUNT 3 - MSG point-27 - PERSON 1 - COUNT 4 - COUNT 5 - COUNT 1 - COUNT 5 - MSG point-27 - PERSON 2 - COUNT 3 - COUNT 5 - COUNT 1 - COUNT 5 - MSG point-27 - PERSON 3 - COUNT 3 - COUNT 5 - COUNT 1 - COUNT 5 - MSG point-27 - PERSON 4 - COUNT 3 - COUNT 5 - COUNT 1 - COUNT 5 - MSG point-27 - PERSON 5 - COUNT 3 - COUNT 5 - COUNT 1 - COUNT 5 - MSG point-28 - PERSON 5 - COUNT 3 - COUNT 5 - COUNT 1 - COUNT 5 - MSG point-28 - PERSON 4 - COUNT 3 - COUNT 5 - COUNT 1 - COUNT 5 - MSG point-28 - PERSON 3 - COUNT 3 - COUNT 5 - COUNT 1 - COUNT 5 - MSG point-28 - PERSON 2 - COUNT 3 - COUNT 5 - COUNT 1 - COUNT 5 - MSG point-28 - PERSON 1 - COUNT 4 - COUNT 5 - COUNT 1 - COUNT 5 - MSG point-29 - PERSON 5 - C1 3 - C2 5 - C3 1 - C4 5 - MSG point-29 - PERSON 4 - C1 3 - C2 5 - C3 1 - C4 5 - MSG point-29 - PERSON 3 - C1 3 - C2 5 - C3 1 - C4 5 - MSG point-29 - PERSON 2 - C1 3 - C2 5 - C3 1 - C4 5 - MSG point-29 - PERSON 1 - C1 4 - C2 5 - C3 1 - C4 5 - MSG point-30 - PERSON 5 - COUNT 3 - COUNT 5 - COUNT 1 - COUNT 5 - MSG point-30 - PERSON 4 - COUNT 3 - COUNT 5 - COUNT 1 - COUNT 5 - MSG point-30 - PERSON 3 - COUNT 3 - COUNT 5 - COUNT 1 - COUNT 5 - MSG point-30 - PERSON 2 - COUNT 3 - COUNT 5 - COUNT 1 - COUNT 5 - MSG point-30 - PERSON 1 - COUNT 4 - COUNT 5 - COUNT 1 - COUNT 5 - MSG point-31 - PERSON 5 - COUNT 3 - COUNT 5 - COUNT 1 - COUNT 5 - COUNT 5 - MSG point-31 - PERSON 4 - COUNT 3 - COUNT 5 - COUNT 1 - COUNT 5 - COUNT 5 - MSG point-31 - PERSON 3 - COUNT 3 - COUNT 5 - COUNT 1 - COUNT 5 - COUNT 5 - MSG point-31 - PERSON 2 - COUNT 3 - COUNT 5 - COUNT 1 - COUNT 5 - COUNT 5 - MSG point-31 - PERSON 1 - COUNT 4 - COUNT 5 - COUNT 1 - COUNT 5 - COUNT 5 - MSG point-32 - PERSON 5 - COUNT 3 - COUNT 4 - COUNT 1 - COUNT 4 - COUNT 4 - MSG point-32 - PERSON 4 - COUNT 3 - COUNT 4 - COUNT 1 - COUNT 4 - COUNT 4 - MSG point-32 - PERSON 3 - COUNT 3 - COUNT 4 - COUNT 1 - COUNT 4 - COUNT 4 - MSG point-32 - PERSON 2 - COUNT 3 - COUNT 4 - COUNT 1 - COUNT 4 - COUNT 4 - MSG point-33 - PERSON 5 - SUM 41.10 - COUNT 5 - COUNT 1 - COUNT 5 - COUNT 5 - MSG point-33 - PERSON 4 - SUM 33.10 - COUNT 5 - COUNT 1 - COUNT 5 - COUNT 5 - MSG point-33 - PERSON 3 - SUM 25.10 - COUNT 5 - COUNT 1 - COUNT 5 - COUNT 5 - MSG point-33 - PERSON 2 - SUM 17.10 - COUNT 5 - COUNT 1 - COUNT 5 - COUNT 5 - MSG point-33 - PERSON 1 - SUM 9.10 - COUNT 5 - COUNT 1 - COUNT 5 - COUNT 5 - MSG point-34 - PERSON 3 - SUM 25.10 - COUNT 2 - COUNT 1 - COUNT 2 - COUNT 2 - MSG point-34 - PERSON 2 - SUM 17.10 - COUNT 2 - COUNT 1 - COUNT 2 - COUNT 2 +expected_stdout = """ + MSG point-01 + COUNT 16 + COUNT 15 + MIN 2.30 + MAX 15.40 + COUNT 10 + MIN 2.30 + MAX 15.40 + MSG point-02 + COUNT 16 + COUNT 15 + MIN 2.30 + MAX 15.40 + COUNT 10 + MIN 2.30 + MAX 15.40 + ID 1 + MSG point-02 + COUNT 16 + COUNT 15 + MIN 2.30 + MAX 15.40 + COUNT 10 + MIN 2.30 + MAX 15.40 + ID 2 + MSG point-02 + COUNT 16 + COUNT 15 + MIN 2.30 + MAX 15.40 + COUNT 10 + MIN 2.30 + MAX 15.40 + ID 3 + MSG point-02 + COUNT 16 + COUNT 15 + MIN 2.30 + MAX 15.40 + COUNT 10 + MIN 2.30 + MAX 15.40 + ID 4 + MSG point-02 + COUNT 16 + COUNT 15 + MIN 2.30 + MAX 15.40 + COUNT 10 + MIN 2.30 + 
MAX 15.40 + ID 5 + MSG point-02 + COUNT 16 + COUNT 15 + MIN 2.30 + MAX 15.40 + COUNT 10 + MIN 2.30 + MAX 15.40 + ID 6 + MSG point-02 + COUNT 16 + COUNT 15 + MIN 2.30 + MAX 15.40 + COUNT 10 + MIN 2.30 + MAX 15.40 + ID 7 + MSG point-02 + COUNT 16 + COUNT 15 + MIN 2.30 + MAX 15.40 + COUNT 10 + MIN 2.30 + MAX 15.40 + ID 8 + MSG point-02 + COUNT 16 + COUNT 15 + MIN 2.30 + MAX 15.40 + COUNT 10 + MIN 2.30 + MAX 15.40 + ID 9 + MSG point-02 + COUNT 16 + COUNT 15 + MIN 2.30 + MAX 15.40 + COUNT 10 + MIN 2.30 + MAX 15.40 + ID 10 + MSG point-02 + COUNT 16 + COUNT 15 + MIN 2.30 + MAX 15.40 + COUNT 10 + MIN 2.30 + MAX 15.40 + ID 11 + MSG point-02 + COUNT 16 + COUNT 15 + MIN 2.30 + MAX 15.40 + COUNT 10 + MIN 2.30 + MAX 15.40 + ID 12 + MSG point-02 + COUNT 16 + COUNT 15 + MIN 2.30 + MAX 15.40 + COUNT 10 + MIN 2.30 + MAX 15.40 + ID 13 + MSG point-02 + COUNT 16 + COUNT 15 + MIN 2.30 + MAX 15.40 + COUNT 10 + MIN 2.30 + MAX 15.40 + ID 14 + MSG point-02 + COUNT 16 + COUNT 15 + MIN 2.30 + MAX 15.40 + COUNT 10 + MIN 2.30 + MAX 15.40 + ID 15 + MSG point-02 + COUNT 16 + COUNT 15 + MIN 2.30 + MAX 15.40 + COUNT 10 + MIN 2.30 + MAX 15.40 + ID 16 + MSG point-04 + COUNT 4 + COUNT 3 + MIN 2.30 + MAX 3.40 + COUNT 2 + MIN 2.30 + MAX 3.40 + PERSON 1 + MSG point-04 + COUNT 3 + COUNT 3 + MIN 4.30 + MAX 6.40 + COUNT 2 + MIN 4.30 + MAX 6.40 + PERSON 2 + MSG point-04 + COUNT 3 + COUNT 3 + MIN 6.30 + MAX 9.40 + COUNT 2 + MIN 6.30 + MAX 9.40 + PERSON 3 + MSG point-04 + COUNT 3 + COUNT 3 + MIN 8.30 + MAX 12.40 + COUNT 2 + MIN 8.30 + MAX 12.40 + PERSON 4 + MSG point-04 + COUNT 3 + COUNT 3 + MIN 10.30 + MAX 15.40 + COUNT 2 + MIN 10.30 + MAX 15.40 + PERSON 5 + MSG point-05 + COUNT 3 + COUNT 3 + MIN 4.30 + MAX 6.40 + COUNT 2 + MIN 4.30 + MAX 6.40 + PERSON 2 + MSG point-05 + COUNT 3 + COUNT 3 + MIN 4.30 + MAX 6.40 + COUNT 2 + MIN 4.30 + MAX 6.40 + PERSON 2 + MSG point-05 + COUNT 3 + COUNT 3 + MIN 4.30 + MAX 6.40 + COUNT 2 + MIN 4.30 + MAX 6.40 + PERSON 2 + MSG point-05 + COUNT 3 + COUNT 3 + MIN 6.30 + MAX 9.40 + COUNT 2 + MIN 6.30 + MAX 9.40 + PERSON 3 + MSG point-05 + COUNT 3 + COUNT 3 + MIN 6.30 + MAX 9.40 + COUNT 2 + MIN 6.30 + MAX 9.40 + PERSON 3 + MSG point-05 + COUNT 3 + COUNT 3 + MIN 6.30 + MAX 9.40 + COUNT 2 + MIN 6.30 + MAX 9.40 + PERSON 3 + MSG point-05 + COUNT 3 + COUNT 3 + MIN 8.30 + MAX 12.40 + COUNT 2 + MIN 8.30 + MAX 12.40 + PERSON 4 + MSG point-05 + COUNT 3 + COUNT 3 + MIN 8.30 + MAX 12.40 + COUNT 2 + MIN 8.30 + MAX 12.40 + PERSON 4 + MSG point-05 + COUNT 3 + COUNT 3 + MIN 8.30 + MAX 12.40 + COUNT 2 + MIN 8.30 + MAX 12.40 + PERSON 4 + MSG point-05 + COUNT 3 + COUNT 3 + MIN 10.30 + MAX 15.40 + COUNT 2 + MIN 10.30 + MAX 15.40 + PERSON 5 + MSG point-05 + COUNT 3 + COUNT 3 + MIN 10.30 + MAX 15.40 + COUNT 2 + MIN 10.30 + MAX 15.40 + PERSON 5 + MSG point-05 + COUNT 3 + COUNT 3 + MIN 10.30 + MAX 15.40 + COUNT 2 + MIN 10.30 + MAX 15.40 + PERSON 5 + MSG point-05 + COUNT 4 + COUNT 3 + MIN 2.30 + MAX 3.40 + COUNT 2 + MIN 2.30 + MAX 3.40 + PERSON 1 + MSG point-05 + COUNT 4 + COUNT 3 + MIN 2.30 + MAX 3.40 + COUNT 2 + MIN 2.30 + MAX 3.40 + PERSON 1 + MSG point-05 + COUNT 4 + COUNT 3 + MIN 2.30 + MAX 3.40 + COUNT 2 + MIN 2.30 + MAX 3.40 + PERSON 1 + MSG point-05 + COUNT 4 + COUNT 3 + MIN 2.30 + MAX 3.40 + COUNT 2 + MIN 2.30 + MAX 3.40 + PERSON 1 + MSG point-06 + COUNT 4 + COUNT 3 + MIN 2.30 + MAX 3.40 + COUNT 2 + MIN 2.30 + MAX 3.40 + NAME Person 1 + MSG point-06 + COUNT 3 + COUNT 3 + MIN 4.30 + MAX 6.40 + COUNT 2 + MIN 4.30 + MAX 6.40 + NAME Person 2 + MSG point-06 + COUNT 3 + COUNT 3 + MIN 6.30 + MAX 9.40 + COUNT 2 + MIN 6.30 + MAX 
9.40 + NAME Person 3 + MSG point-06 + COUNT 3 + COUNT 3 + MIN 8.30 + MAX 12.40 + COUNT 2 + MIN 8.30 + MAX 12.40 + NAME Person 4 + MSG point-06 + COUNT 3 + COUNT 3 + MIN 10.30 + MAX 15.40 + COUNT 2 + MIN 10.30 + MAX 15.40 + NAME Person 5 + MSG point-07 + COUNT 4 + COUNT 3 + MIN 2.30 + MAX 3.40 + COUNT 2 + MIN 2.30 + MAX 3.40 + NAME Person 1 + MSG point-07 + COUNT 3 + COUNT 3 + MIN 4.30 + MAX 6.40 + COUNT 2 + MIN 4.30 + MAX 6.40 + NAME Person 2 + MSG point-07 + COUNT 3 + COUNT 3 + MIN 6.30 + MAX 9.40 + COUNT 2 + MIN 6.30 + MAX 9.40 + NAME Person 3 + MSG point-07 + COUNT 3 + COUNT 3 + MIN 8.30 + MAX 12.40 + COUNT 2 + MIN 8.30 + MAX 12.40 + NAME Person 4 + MSG point-07 + COUNT 3 + COUNT 3 + MIN 10.30 + MAX 15.40 + COUNT 2 + MIN 10.30 + MAX 15.40 + NAME Person 5 + MSG point-07 + COUNT 4 + COUNT 3 + MIN 2.30 + MAX 3.40 + COUNT 2 + MIN 2.30 + MAX 3.40 + NAME Person 1 + MSG point-07 + COUNT 3 + COUNT 3 + MIN 4.30 + MAX 6.40 + COUNT 2 + MIN 4.30 + MAX 6.40 + NAME Person 2 + MSG point-07 + COUNT 3 + COUNT 3 + MIN 6.30 + MAX 9.40 + COUNT 2 + MIN 6.30 + MAX 9.40 + NAME Person 3 + MSG point-07 + COUNT 3 + COUNT 3 + MIN 8.30 + MAX 12.40 + COUNT 2 + MIN 8.30 + MAX 12.40 + NAME Person 4 + MSG point-07 + COUNT 3 + COUNT 3 + MIN 10.30 + MAX 15.40 + COUNT 2 + MIN 10.30 + MAX 15.40 + NAME Person 5 + MSG point-07 + COUNT 4 + COUNT 3 + MIN 2.30 + MAX 3.40 + COUNT 2 + MIN 2.30 + MAX 3.40 + NAME Person 1 + MSG point-07 + COUNT 3 + COUNT 3 + MIN 4.30 + MAX 6.40 + COUNT 2 + MIN 4.30 + MAX 6.40 + NAME Person 2 + MSG point-07 + COUNT 3 + COUNT 3 + MIN 6.30 + MAX 9.40 + COUNT 2 + MIN 6.30 + MAX 9.40 + NAME Person 3 + MSG point-07 + COUNT 3 + COUNT 3 + MIN 8.30 + MAX 12.40 + COUNT 2 + MIN 8.30 + MAX 12.40 + NAME Person 4 + MSG point-07 + COUNT 3 + COUNT 3 + MIN 10.30 + MAX 15.40 + COUNT 2 + MIN 10.30 + MAX 15.40 + NAME Person 5 + MSG point-07 + COUNT 4 + COUNT 3 + MIN 2.30 + MAX 3.40 + COUNT 2 + MIN 2.30 + MAX 3.40 + NAME Person 1 + MSG point-08 + PERSON 1 + COUNT 1 + MSG point-08 + PERSON 2 + COUNT 1 + MSG point-08 + PERSON 3 + COUNT 1 + MSG point-08 + PERSON 4 + COUNT 1 + MSG point-08 + PERSON 5 + COUNT 1 + MSG point-09 + PERSON 1 + COUNT 1 + MSG point-09 + PERSON 2 + COUNT 1 + MSG point-09 + PERSON 3 + COUNT 1 + MSG point-09 + PERSON 4 + COUNT 1 + MSG point-09 + PERSON 5 + COUNT 1 + MSG point-10 + X1 4 + X2 3 + X3 2.30 + X4 3.40 + X5 2 + X6 2.30 + X7 3.40 + X8 Person 1 + ID 1 + MSG point-10 + X1 4 + X2 3 + X3 2.30 + X4 3.40 + X5 2 + X6 2.30 + X7 3.40 + X8 Person 1 + ID 1 + MSG point-10 + X1 4 + X2 3 + X3 2.30 + X4 3.40 + X5 2 + X6 2.30 + X7 3.40 + X8 Person 1 + ID 1 + MSG point-10 + X1 4 + X2 3 + X3 2.30 + X4 3.40 + X5 2 + X6 2.30 + X7 3.40 + X8 Person 1 + ID 1 + MSG point-10 + X1 3 + X2 3 + X3 4.30 + X4 6.40 + X5 2 + X6 4.30 + X7 6.40 + X8 Person 2 + ID 2 + MSG point-10 + X1 3 + X2 3 + X3 4.30 + X4 6.40 + X5 2 + X6 4.30 + X7 6.40 + X8 Person 2 + ID 2 + MSG point-10 + X1 3 + X2 3 + X3 4.30 + X4 6.40 + X5 2 + X6 4.30 + X7 6.40 + X8 Person 2 + ID 2 + MSG point-10 + X1 3 + X2 3 + X3 6.30 + X4 9.40 + X5 2 + X6 6.30 + X7 9.40 + X8 Person 3 + ID 3 + MSG point-10 + X1 3 + X2 3 + X3 6.30 + X4 9.40 + X5 2 + X6 6.30 + X7 9.40 + X8 Person 3 + ID 3 + MSG point-10 + X1 3 + X2 3 + X3 6.30 + X4 9.40 + X5 2 + X6 6.30 + X7 9.40 + X8 Person 3 + ID 3 + MSG point-10 + X1 3 + X2 3 + X3 8.30 + X4 12.40 + X5 2 + X6 8.30 + X7 12.40 + X8 Person 4 + ID 4 + MSG point-10 + X1 3 + X2 3 + X3 8.30 + X4 12.40 + X5 2 + X6 8.30 + X7 12.40 + X8 Person 4 + ID 4 + MSG point-10 + X1 3 + X2 3 + X3 8.30 + X4 12.40 + X5 2 + X6 8.30 + X7 12.40 + X8 Person 4 
+ ID 4 + MSG point-10 + X1 3 + X2 3 + X3 10.30 + X4 15.40 + X5 2 + X6 10.30 + X7 15.40 + X8 Person 5 + ID 5 + MSG point-10 + X1 3 + X2 3 + X3 10.30 + X4 15.40 + X5 2 + X6 10.30 + X7 15.40 + X8 Person 5 + ID 5 + MSG point-10 + X1 3 + X2 3 + X3 10.30 + X4 15.40 + X5 2 + X6 10.30 + X7 15.40 + X8 Person 5 + ID 5 + MSG point-11 + X1 4 + X2 3 + X3 2.30 + X4 3.40 + X5 2 + X6 2.30 + X7 3.40 + X8 Person 1 + ID 1 + MSG point-11 + X1 4 + X2 3 + X3 2.30 + X4 3.40 + X5 2 + X6 2.30 + X7 3.40 + X8 Person 1 + ID 1 + MSG point-11 + X1 4 + X2 3 + X3 2.30 + X4 3.40 + X5 2 + X6 2.30 + X7 3.40 + X8 Person 1 + ID 1 + MSG point-11 + X1 4 + X2 3 + X3 2.30 + X4 3.40 + X5 2 + X6 2.30 + X7 3.40 + X8 Person 1 + ID 1 + MSG point-11 + X1 3 + X2 3 + X3 4.30 + X4 6.40 + X5 2 + X6 4.30 + X7 6.40 + X8 Person 2 + ID 2 + MSG point-11 + X1 3 + X2 3 + X3 4.30 + X4 6.40 + X5 2 + X6 4.30 + X7 6.40 + X8 Person 2 + ID 2 + MSG point-11 + X1 3 + X2 3 + X3 4.30 + X4 6.40 + X5 2 + X6 4.30 + X7 6.40 + X8 Person 2 + ID 2 + MSG point-11 + X1 3 + X2 3 + X3 6.30 + X4 9.40 + X5 2 + X6 6.30 + X7 9.40 + X8 Person 3 + ID 3 + MSG point-11 + X1 3 + X2 3 + X3 6.30 + X4 9.40 + X5 2 + X6 6.30 + X7 9.40 + X8 Person 3 + ID 3 + MSG point-11 + X1 3 + X2 3 + X3 6.30 + X4 9.40 + X5 2 + X6 6.30 + X7 9.40 + X8 Person 3 + ID 3 + MSG point-11 + X1 3 + X2 3 + X3 8.30 + X4 12.40 + X5 2 + X6 8.30 + X7 12.40 + X8 Person 4 + ID 4 + MSG point-11 + X1 3 + X2 3 + X3 8.30 + X4 12.40 + X5 2 + X6 8.30 + X7 12.40 + X8 Person 4 + ID 4 + MSG point-11 + X1 3 + X2 3 + X3 8.30 + X4 12.40 + X5 2 + X6 8.30 + X7 12.40 + X8 Person 4 + ID 4 + MSG point-11 + X1 3 + X2 3 + X3 10.30 + X4 15.40 + X5 2 + X6 10.30 + X7 15.40 + X8 Person 5 + ID 5 + MSG point-11 + X1 3 + X2 3 + X3 10.30 + X4 15.40 + X5 2 + X6 10.30 + X7 15.40 + X8 Person 5 + ID 5 + MSG point-11 + X1 3 + X2 3 + X3 10.30 + X4 15.40 + X5 2 + X6 10.30 + X7 15.40 + X8 Person 5 + ID 5 + MSG point-12 + X1 4 + X2 3 + X3 2.30 + X4 3.40 + X5 2 + X6 2.30 + X7 3.40 + X8 Person 1 + ID 1 + MSG point-12 + X1 4 + X2 3 + X3 2.30 + X4 3.40 + X5 2 + X6 2.30 + X7 3.40 + X8 Person 1 + ID 1 + MSG point-12 + X1 4 + X2 3 + X3 2.30 + X4 3.40 + X5 2 + X6 2.30 + X7 3.40 + X8 Person 1 + ID 1 + MSG point-12 + X1 4 + X2 3 + X3 2.30 + X4 3.40 + X5 2 + X6 2.30 + X7 3.40 + X8 Person 1 + ID 1 + MSG point-12 + X1 3 + X2 3 + X3 6.30 + X4 9.40 + X5 2 + X6 6.30 + X7 9.40 + X8 Person 3 + ID 3 + MSG point-12 + X1 3 + X2 3 + X3 6.30 + X4 9.40 + X5 2 + X6 6.30 + X7 9.40 + X8 Person 3 + ID 3 + MSG point-12 + X1 3 + X2 3 + X3 6.30 + X4 9.40 + X5 2 + X6 6.30 + X7 9.40 + X8 Person 3 + ID 3 + MSG point-13 + X3 2.30 + SUM 13.60 + MSG point-13 + X3 4.30 + SUM 19.20 + MSG point-13 + X3 6.30 + SUM 28.20 + MSG point-13 + X3 8.30 + SUM 37.20 + MSG point-13 + X3 10.30 + SUM 46.20 + MSG point-14 + X3 2.30 + SUM 13.60 + COUNT 5 + MSG point-14 + X3 4.30 + SUM 19.20 + COUNT 5 + MSG point-14 + X3 6.30 + SUM 28.20 + COUNT 5 + MSG point-14 + X3 8.30 + SUM 37.20 + COUNT 5 + MSG point-14 + X3 10.30 + SUM 46.20 + COUNT 5 + MSG point-15 + X3 2.30 + SUM 13.60 + SUM 144.40 + MSG point-15 + X3 4.30 + SUM 19.20 + SUM 144.40 + MSG point-15 + X3 6.30 + SUM 28.20 + SUM 144.40 + MSG point-15 + X3 8.30 + SUM 37.20 + SUM 144.40 + MSG point-15 + X3 10.30 + SUM 46.20 + SUM 144.40 + MSG point-17 + PERSON 1 + SUM 9.10 + COUNT 5 + MSG point-17 + PERSON 2 + SUM 17.10 + COUNT 5 + MSG point-17 + PERSON 3 + SUM 25.10 + COUNT 5 + MSG point-17 + PERSON 4 + SUM 33.10 + COUNT 5 + MSG point-17 + PERSON 5 + SUM 41.10 + COUNT 5 + MSG point-18 + PERSON 1 + NAME Person 1 + SUM 9.10 + COUNT 5 + SUM 125.50 + MSG 
point-18 + PERSON 2 + NAME Person 2 + SUM 17.10 + COUNT 5 + SUM 125.50 + MSG point-18 + PERSON 3 + NAME Person 3 + SUM 25.10 + COUNT 5 + SUM 125.50 + MSG point-18 + PERSON 4 + NAME Person 4 + SUM 33.10 + COUNT 5 + SUM 125.50 + MSG point-18 + PERSON 5 + NAME Person 5 + SUM 41.10 + COUNT 5 + SUM 125.50 + MSG point-19 + PERSON 1 + COUNT 16 + COUNT 4 + MSG point-19 + PERSON 1 + COUNT 16 + COUNT 4 + MSG point-19 + PERSON 1 + COUNT 16 + COUNT 4 + MSG point-19 + PERSON 1 + COUNT 16 + COUNT 4 + MSG point-19 + PERSON 2 + COUNT 16 + COUNT 3 + MSG point-19 + PERSON 2 + COUNT 16 + COUNT 3 + MSG point-19 + PERSON 2 + COUNT 16 + COUNT 3 + MSG point-19 + PERSON 3 + COUNT 16 + COUNT 3 + MSG point-19 + PERSON 3 + COUNT 16 + COUNT 3 + MSG point-19 + PERSON 3 + COUNT 16 + COUNT 3 + MSG point-19 + PERSON 4 + COUNT 16 + COUNT 3 + MSG point-19 + PERSON 4 + COUNT 16 + COUNT 3 + MSG point-19 + PERSON 4 + COUNT 16 + COUNT 3 + MSG point-19 + PERSON 5 + COUNT 16 + COUNT 3 + MSG point-19 + PERSON 5 + COUNT 16 + COUNT 3 + MSG point-19 + PERSON 5 + COUNT 16 + COUNT 3 + MSG point-20 + PERSON 1 + COUNT 16 + COUNT 4 + MSG point-20 + PERSON 1 + COUNT 16 + COUNT 4 + MSG point-20 + PERSON 1 + COUNT 16 + COUNT 4 + MSG point-20 + PERSON 1 + COUNT 16 + COUNT 4 + MSG point-20 + PERSON 1 + COUNT 16 + COUNT 4 + MSG point-20 + PERSON 1 + COUNT 16 + COUNT 4 + MSG point-20 + PERSON 1 + COUNT 16 + COUNT 4 + MSG point-20 + PERSON 1 + COUNT 16 + COUNT 4 + MSG point-20 + PERSON 2 + COUNT 16 + COUNT 3 + MSG point-20 + PERSON 2 + COUNT 16 + COUNT 3 + MSG point-20 + PERSON 2 + COUNT 16 + COUNT 3 + MSG point-20 + PERSON 2 + COUNT 16 + COUNT 3 + MSG point-20 + PERSON 2 + COUNT 16 + COUNT 3 + MSG point-20 + PERSON 2 + COUNT 16 + COUNT 3 + MSG point-20 + PERSON 3 + COUNT 16 + COUNT 3 + MSG point-20 + PERSON 3 + COUNT 16 + COUNT 3 + MSG point-20 + PERSON 3 + COUNT 16 + COUNT 3 + MSG point-20 + PERSON 3 + COUNT 16 + COUNT 3 + MSG point-20 + PERSON 3 + COUNT 16 + COUNT 3 + MSG point-20 + PERSON 3 + COUNT 16 + COUNT 3 + MSG point-20 + PERSON 4 + COUNT 16 + COUNT 3 + MSG point-20 + PERSON 4 + COUNT 16 + COUNT 3 + MSG point-20 + PERSON 4 + COUNT 16 + COUNT 3 + MSG point-20 + PERSON 4 + COUNT 16 + COUNT 3 + MSG point-20 + PERSON 4 + COUNT 16 + COUNT 3 + MSG point-20 + PERSON 4 + COUNT 16 + COUNT 3 + MSG point-20 + PERSON 5 + COUNT 16 + COUNT 3 + MSG point-20 + PERSON 5 + COUNT 16 + COUNT 3 + MSG point-20 + PERSON 5 + COUNT 16 + COUNT 3 + MSG point-20 + PERSON 5 + COUNT 16 + COUNT 3 + MSG point-20 + PERSON 5 + COUNT 16 + COUNT 3 + MSG point-20 + PERSON 5 + COUNT 16 + COUNT 3 + MSG point-21 + ID 1 + PERSON 1 + DAT 2010-01-03 + VAL 2.30 + COUNT 4 + MSG point-21 + ID 2 + PERSON 2 + DAT 2010-01-04 + VAL 4.30 + COUNT 3 + MSG point-21 + ID 3 + PERSON 3 + DAT 2010-01-05 + VAL 6.30 + COUNT 3 + MSG point-21 + ID 4 + PERSON 4 + DAT 2010-01-06 + VAL 8.30 + COUNT 3 + MSG point-21 + ID 5 + PERSON 5 + DAT 2010-01-07 + VAL 10.30 + COUNT 3 + MSG point-21 + ID 6 + PERSON 1 + DAT 2010-02-02 + VAL 3.40 + COUNT 4 + MSG point-21 + ID 7 + PERSON 2 + DAT 2010-02-03 + VAL 6.40 + COUNT 3 + MSG point-21 + ID 8 + PERSON 3 + DAT 2010-02-04 + VAL 9.40 + COUNT 3 + MSG point-21 + ID 9 + PERSON 4 + DAT 2010-02-05 + VAL 12.40 + COUNT 3 + MSG point-21 + ID 10 + PERSON 5 + DAT 2010-02-06 + VAL 15.40 + COUNT 3 + MSG point-21 + ID 11 + PERSON 1 + DAT 2010-03-02 + VAL 3.40 + COUNT 4 + MSG point-21 + ID 12 + PERSON 2 + DAT 2010-03-03 + VAL 6.40 + COUNT 3 + MSG point-21 + ID 13 + PERSON 3 + DAT 2010-03-04 + VAL 9.40 + COUNT 3 + MSG point-21 + ID 14 + PERSON 4 + DAT 2010-03-05 + VAL 12.40 + 
COUNT 3 + MSG point-21 + ID 15 + PERSON 5 + DAT 2010-03-06 + VAL 15.40 + COUNT 3 + MSG point-21 + ID 16 + PERSON 1 + DAT + VAL + COUNT 4 + MSG point-22 + ID 1 + PERSON 1 + DAT 2010-01-03 + VAL 2.30 + COUNT 16 + COUNT 15 + COUNT 4 + COUNT 3 + COUNT 1 + COUNT 1 + MSG point-22 + ID 2 + PERSON 2 + DAT 2010-01-04 + VAL 4.30 + COUNT 16 + COUNT 15 + COUNT 3 + COUNT 3 + COUNT 1 + COUNT 1 + MSG point-22 + ID 3 + PERSON 3 + DAT 2010-01-05 + VAL 6.30 + COUNT 16 + COUNT 15 + COUNT 3 + COUNT 3 + COUNT 1 + COUNT 1 + MSG point-22 + ID 4 + PERSON 4 + DAT 2010-01-06 + VAL 8.30 + COUNT 16 + COUNT 15 + COUNT 3 + COUNT 3 + COUNT 1 + COUNT 1 + MSG point-22 + ID 5 + PERSON 5 + DAT 2010-01-07 + VAL 10.30 + COUNT 16 + COUNT 15 + COUNT 3 + COUNT 3 + COUNT 1 + COUNT 1 + MSG point-22 + ID 6 + PERSON 1 + DAT 2010-02-02 + VAL 3.40 + COUNT 16 + COUNT 15 + COUNT 4 + COUNT 3 + COUNT 1 + COUNT 1 + MSG point-22 + ID 7 + PERSON 2 + DAT 2010-02-03 + VAL 6.40 + COUNT 16 + COUNT 15 + COUNT 3 + COUNT 3 + COUNT 1 + COUNT 1 + MSG point-22 + ID 8 + PERSON 3 + DAT 2010-02-04 + VAL 9.40 + COUNT 16 + COUNT 15 + COUNT 3 + COUNT 3 + COUNT 1 + COUNT 1 + MSG point-22 + ID 9 + PERSON 4 + DAT 2010-02-05 + VAL 12.40 + COUNT 16 + COUNT 15 + COUNT 3 + COUNT 3 + COUNT 1 + COUNT 1 + MSG point-22 + ID 10 + PERSON 5 + DAT 2010-02-06 + VAL 15.40 + COUNT 16 + COUNT 15 + COUNT 3 + COUNT 3 + COUNT 1 + COUNT 1 + MSG point-22 + ID 11 + PERSON 1 + DAT 2010-03-02 + VAL 3.40 + COUNT 16 + COUNT 15 + COUNT 4 + COUNT 3 + COUNT 1 + COUNT 1 + MSG point-22 + ID 12 + PERSON 2 + DAT 2010-03-03 + VAL 6.40 + COUNT 16 + COUNT 15 + COUNT 3 + COUNT 3 + COUNT 1 + COUNT 1 + MSG point-22 + ID 13 + PERSON 3 + DAT 2010-03-04 + VAL 9.40 + COUNT 16 + COUNT 15 + COUNT 3 + COUNT 3 + COUNT 1 + COUNT 1 + MSG point-22 + ID 14 + PERSON 4 + DAT 2010-03-05 + VAL 12.40 + COUNT 16 + COUNT 15 + COUNT 3 + COUNT 3 + COUNT 1 + COUNT 1 + MSG point-22 + ID 15 + PERSON 5 + DAT 2010-03-06 + VAL 15.40 + COUNT 16 + COUNT 15 + COUNT 3 + COUNT 3 + COUNT 1 + COUNT 1 + MSG point-22 + ID 16 + PERSON 1 + DAT + VAL + COUNT 16 + COUNT 15 + COUNT 4 + COUNT 3 + COUNT 1 + COUNT 0 + MSG point-23 + ID 1 + PERSON 1 + DAT 2010-01-03 + VAL 2.30 + COUNT 16 + COUNT 15 + COUNT 4 + COUNT 3 + COUNT 5 + COUNT 5 + MSG point-23 + ID 2 + PERSON 2 + DAT 2010-01-04 + VAL 4.30 + COUNT 16 + COUNT 15 + COUNT 3 + COUNT 3 + COUNT 5 + COUNT 5 + MSG point-23 + ID 3 + PERSON 3 + DAT 2010-01-05 + VAL 6.30 + COUNT 16 + COUNT 15 + COUNT 3 + COUNT 3 + COUNT 5 + COUNT 5 + MSG point-23 + ID 4 + PERSON 4 + DAT 2010-01-06 + VAL 8.30 + COUNT 16 + COUNT 15 + COUNT 3 + COUNT 3 + COUNT 5 + COUNT 5 + MSG point-23 + ID 5 + PERSON 5 + DAT 2010-01-07 + VAL 10.30 + COUNT 16 + COUNT 15 + COUNT 3 + COUNT 3 + COUNT 5 + COUNT 5 + MSG point-23 + ID 6 + PERSON 1 + DAT 2010-02-02 + VAL 3.40 + COUNT 16 + COUNT 15 + COUNT 4 + COUNT 3 + COUNT 5 + COUNT 5 + MSG point-23 + ID 7 + PERSON 2 + DAT 2010-02-03 + VAL 6.40 + COUNT 16 + COUNT 15 + COUNT 3 + COUNT 3 + COUNT 5 + COUNT 5 + MSG point-23 + ID 8 + PERSON 3 + DAT 2010-02-04 + VAL 9.40 + COUNT 16 + COUNT 15 + COUNT 3 + COUNT 3 + COUNT 5 + COUNT 5 + MSG point-23 + ID 9 + PERSON 4 + DAT 2010-02-05 + VAL 12.40 + COUNT 16 + COUNT 15 + COUNT 3 + COUNT 3 + COUNT 5 + COUNT 5 + MSG point-23 + ID 10 + PERSON 5 + DAT 2010-02-06 + VAL 15.40 + COUNT 16 + COUNT 15 + COUNT 3 + COUNT 3 + COUNT 5 + COUNT 5 + MSG point-23 + ID 11 + PERSON 1 + DAT 2010-03-02 + VAL 3.40 + COUNT 16 + COUNT 15 + COUNT 4 + COUNT 3 + COUNT 5 + COUNT 5 + MSG point-23 + ID 12 + PERSON 2 + DAT 2010-03-03 + VAL 6.40 + COUNT 16 + COUNT 15 + COUNT 3 + 
COUNT 3 + COUNT 5 + COUNT 5 + MSG point-23 + ID 13 + PERSON 3 + DAT 2010-03-04 + VAL 9.40 + COUNT 16 + COUNT 15 + COUNT 3 + COUNT 3 + COUNT 5 + COUNT 5 + MSG point-23 + ID 14 + PERSON 4 + DAT 2010-03-05 + VAL 12.40 + COUNT 16 + COUNT 15 + COUNT 3 + COUNT 3 + COUNT 5 + COUNT 5 + MSG point-23 + ID 15 + PERSON 5 + DAT 2010-03-06 + VAL 15.40 + COUNT 16 + COUNT 15 + COUNT 3 + COUNT 3 + COUNT 5 + COUNT 5 + MSG point-23 + ID 16 + PERSON 1 + DAT + VAL + COUNT 16 + COUNT 15 + COUNT 4 + COUNT 3 + COUNT 1 + COUNT 0 + MSG point-24 + ID 1 + PERSON 1 + DAT 2010-01-03 + VAL 2.30 + MIN 2010-01-03 + MAX 2010-03-02 + MSG point-24 + ID 2 + PERSON 2 + DAT 2010-01-04 + VAL 4.30 + MIN 2010-01-04 + MAX 2010-03-03 + MSG point-24 + ID 3 + PERSON 3 + DAT 2010-01-05 + VAL 6.30 + MIN 2010-01-05 + MAX 2010-03-04 + MSG point-24 + ID 4 + PERSON 4 + DAT 2010-01-06 + VAL 8.30 + MIN 2010-01-06 + MAX 2010-03-05 + MSG point-24 + ID 5 + PERSON 5 + DAT 2010-01-07 + VAL 10.30 + MIN 2010-01-07 + MAX 2010-03-06 + MSG point-24 + ID 6 + PERSON 1 + DAT 2010-02-02 + VAL 3.40 + MIN 2010-01-03 + MAX 2010-03-02 + MSG point-24 + ID 7 + PERSON 2 + DAT 2010-02-03 + VAL 6.40 + MIN 2010-01-04 + MAX 2010-03-03 + MSG point-24 + ID 8 + PERSON 3 + DAT 2010-02-04 + VAL 9.40 + MIN 2010-01-05 + MAX 2010-03-04 + MSG point-24 + ID 9 + PERSON 4 + DAT 2010-02-05 + VAL 12.40 + MIN 2010-01-06 + MAX 2010-03-05 + MSG point-24 + ID 10 + PERSON 5 + DAT 2010-02-06 + VAL 15.40 + MIN 2010-01-07 + MAX 2010-03-06 + MSG point-24 + ID 11 + PERSON 1 + DAT 2010-03-02 + VAL 3.40 + MIN 2010-01-03 + MAX 2010-03-02 + MSG point-24 + ID 12 + PERSON 2 + DAT 2010-03-03 + VAL 6.40 + MIN 2010-01-04 + MAX 2010-03-03 + MSG point-24 + ID 13 + PERSON 3 + DAT 2010-03-04 + VAL 9.40 + MIN 2010-01-05 + MAX 2010-03-04 + MSG point-24 + ID 14 + PERSON 4 + DAT 2010-03-05 + VAL 12.40 + MIN 2010-01-06 + MAX 2010-03-05 + MSG point-24 + ID 15 + PERSON 5 + DAT 2010-03-06 + VAL 15.40 + MIN 2010-01-07 + MAX 2010-03-06 + MSG point-24 + ID 16 + PERSON 1 + DAT + VAL + MIN 2010-01-03 + MAX 2010-03-02 + MSG point-25 + PERSON 1 + MIN 2010-01-03 + MAX 2010-03-02 + MSG point-25 + PERSON 2 + MIN 2010-01-04 + MAX 2010-03-03 + MSG point-25 + PERSON 3 + MIN 2010-01-05 + MAX 2010-03-04 + MSG point-25 + PERSON 4 + MIN 2010-01-06 + MAX 2010-03-05 + MSG point-25 + PERSON 5 + MIN 2010-01-07 + MAX 2010-03-06 + MSG point-26 + PERSON 1 + COUNT 16 + COUNT 4 + MSG point-26 + PERSON 2 + COUNT 16 + COUNT 3 + MSG point-26 + PERSON 3 + COUNT 16 + COUNT 3 + MSG point-26 + PERSON 4 + COUNT 16 + COUNT 3 + MSG point-26 + PERSON 5 + COUNT 16 + COUNT 3 + MSG point-27 + PERSON 1 + COUNT 4 + COUNT 5 + COUNT 1 + COUNT 5 + MSG point-27 + PERSON 2 + COUNT 3 + COUNT 5 + COUNT 1 + COUNT 5 + MSG point-27 + PERSON 3 + COUNT 3 + COUNT 5 + COUNT 1 + COUNT 5 + MSG point-27 + PERSON 4 + COUNT 3 + COUNT 5 + COUNT 1 + COUNT 5 + MSG point-27 + PERSON 5 + COUNT 3 + COUNT 5 + COUNT 1 + COUNT 5 + MSG point-28 + PERSON 5 + COUNT 3 + COUNT 5 + COUNT 1 + COUNT 5 + MSG point-28 + PERSON 4 + COUNT 3 + COUNT 5 + COUNT 1 + COUNT 5 + MSG point-28 + PERSON 3 + COUNT 3 + COUNT 5 + COUNT 1 + COUNT 5 + MSG point-28 + PERSON 2 + COUNT 3 + COUNT 5 + COUNT 1 + COUNT 5 + MSG point-28 + PERSON 1 + COUNT 4 + COUNT 5 + COUNT 1 + COUNT 5 + MSG point-29 + PERSON 5 + C1 3 + C2 5 + C3 1 + C4 5 + MSG point-29 + PERSON 4 + C1 3 + C2 5 + C3 1 + C4 5 + MSG point-29 + PERSON 3 + C1 3 + C2 5 + C3 1 + C4 5 + MSG point-29 + PERSON 2 + C1 3 + C2 5 + C3 1 + C4 5 + MSG point-29 + PERSON 1 + C1 4 + C2 5 + C3 1 + C4 5 + MSG point-30 + PERSON 5 + COUNT 3 + COUNT 5 + COUNT 1 + COUNT 
5 + MSG point-30 + PERSON 4 + COUNT 3 + COUNT 5 + COUNT 1 + COUNT 5 + MSG point-30 + PERSON 3 + COUNT 3 + COUNT 5 + COUNT 1 + COUNT 5 + MSG point-30 + PERSON 2 + COUNT 3 + COUNT 5 + COUNT 1 + COUNT 5 + MSG point-30 + PERSON 1 + COUNT 4 + COUNT 5 + COUNT 1 + COUNT 5 + MSG point-31 + PERSON 5 + COUNT 3 + COUNT 5 + COUNT 1 + COUNT 5 + COUNT 5 + MSG point-31 + PERSON 4 + COUNT 3 + COUNT 5 + COUNT 1 + COUNT 5 + COUNT 5 + MSG point-31 + PERSON 3 + COUNT 3 + COUNT 5 + COUNT 1 + COUNT 5 + COUNT 5 + MSG point-31 + PERSON 2 + COUNT 3 + COUNT 5 + COUNT 1 + COUNT 5 + COUNT 5 + MSG point-31 + PERSON 1 + COUNT 4 + COUNT 5 + COUNT 1 + COUNT 5 + COUNT 5 + MSG point-32 + PERSON 5 + COUNT 3 + COUNT 4 + COUNT 1 + COUNT 4 + COUNT 4 + MSG point-32 + PERSON 4 + COUNT 3 + COUNT 4 + COUNT 1 + COUNT 4 + COUNT 4 + MSG point-32 + PERSON 3 + COUNT 3 + COUNT 4 + COUNT 1 + COUNT 4 + COUNT 4 + MSG point-32 + PERSON 2 + COUNT 3 + COUNT 4 + COUNT 1 + COUNT 4 + COUNT 4 + MSG point-33 + PERSON 5 + SUM 41.10 + COUNT 5 + COUNT 1 + COUNT 5 + COUNT 5 + MSG point-33 + PERSON 4 + SUM 33.10 + COUNT 5 + COUNT 1 + COUNT 5 + COUNT 5 + MSG point-33 + PERSON 3 + SUM 25.10 + COUNT 5 + COUNT 1 + COUNT 5 + COUNT 5 + MSG point-33 + PERSON 2 + SUM 17.10 + COUNT 5 + COUNT 1 + COUNT 5 + COUNT 5 + MSG point-33 + PERSON 1 + SUM 9.10 + COUNT 5 + COUNT 1 + COUNT 5 + COUNT 5 + MSG point-34 + PERSON 3 + SUM 25.10 + COUNT 2 + COUNT 1 + COUNT 2 + COUNT 2 + MSG point-34 + PERSON 2 + SUM 17.10 + COUNT 2 + COUNT 1 + COUNT 2 + COUNT 2 """ +@pytest.mark.skip('FIXME: Not IMPLEMENTED') @pytest.mark.version('>=3.0') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") +# test_script_1 +#--- +# +# import os +# import sys +# import subprocess +# +# os.environ["ISC_USER"] = user_name +# os.environ["ISC_PASSWORD"] = user_password +# +# db_conn.close() +# +# with open( os.path.join(context['files_location'],'gtcs-window-func.sql'), 'r') as f: +# sql_init = f.read() +# +# sql_addi=''' +# set list on; +# select +# 'point-01' as msg, +# count(*), count(val), min(val), max(val), +# count(distinct val), min(distinct val), max(distinct val) +# from entries; +# +# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# +# +# select +# 'point-02' as msg, +# count(*) over (), count(val) over (), min(val) over (), max(val) over (), +# count(distinct val) over (), min(distinct val) over (), max(distinct val) over (), +# id +# from entries +# order by id; +# +# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# +# select +# 'point-03' as msg, +# count(*) over (), count(val) over (), min(val) over (), max(val) over (), +# count(distinct val) over (), min(distinct val) over (), max(distinct val) over (), +# id +# from entries +# where 1 = 0 +# order by id; +# +# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# +# select +# 'point-04' as msg, +# count(*), count(val), min(val), max(val), +# count(distinct val), min(distinct val), max(distinct val), +# person +# from entries +# group by person +# order by person; +# +# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# +# select +# 'point-05' as msg, +# count(*) over (partition by person), +# count(val) over (partition by person), +# min(val) over (partition by person), +# max(val) over (partition by person), +# count(distinct val) over (partition by person), +# min(distinct val) over (partition by person), +# max(distinct val) over (partition by person), +# person +# from entries +# 
order by 1, 2, 3, 4, 5, 6, 7, 8, 9; +# +# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# +# select +# 'point-06' as msg, +# count(*), +# count(e.val), +# min(e.val), +# max(e.val), +# count(distinct e.val), +# min(distinct e.val), +# max(distinct e.val), +# p.name +# from entries e +# join persons p on p.id = e.person +# group by p.name +# order by p.name; +# +# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# +# select +# 'point-07' as msg, +# count(*) over (partition by p.id), +# count(e.val) over (partition by p.id), +# min(e.val) over (partition by p.id), +# max(e.val) over (partition by p.id), +# count(distinct e.val) over (partition by p.id), +# min(distinct e.val) over (partition by p.id), +# max(distinct e.val) over (partition by p.id), +# p.name +# from entries e +# join persons p on p.id = e.person +# order by e.id; +# +# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# +# select +# 'point-08' as msg, +# person, +# count(person) over (partition by person) +# from entries +# group by person +# order by person; +# +# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# +# select +# 'point-09' as msg, +# person, +# count(*) over (partition by person) +# from entries +# group by person +# order by person; +# +# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# +# select +# 'point-10' as msg, +# v1.*, p.id +# from persons p +# join v1 on v1.x8 = p.name; +# +# select +# 'point-11' as msg, +# v1.*, p.id +# from persons p +# full join v1 on right(v1.x8, 1) = p.id; +# +# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# +# select +# 'point-12' as msg, +# v1.*, p.id +# from persons p +# left join v1 on right(v1.x8, 1) = p.id +# where p.id in (1, 3); +# +# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# +# select +# 'point-13' as msg, +# x3, sum(x4) +# from v1 +# group by x3; +# +# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# +# select +# 'point-14' as msg, +# x3, sum(x4), count(*)over() +# from v1 +# group by x3; +# +# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# +# select +# 'point-15' as msg, +# x3, sum(x4), sum(sum(x4))over() +# from v1 +# group by x3; +# +# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# +# select +# 'point-17' as msg, +# v2.person, sum(v2.val), count (*) over () +# from v2 +# join persons p +# on p.id = v2.person +# group by person; +# +# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# +# select +# 'point-18' as msg, +# v3.person, v3.name, sum(v3.val), count (*) over (), sum(sum(v3.val)) over () +# from v3 +# join persons p +# on p.id = v3.person +# group by person, name; +# +# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# +# select +# 'point-19' as msg, +# person, +# count(*) over (), +# count(*) over (partition by person) +# from entries +# order by 1, 2, 3, 4; +# +# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# +# select +# 'point-20' as msg, +# person, +# count(*) over (), +# count(*) over (partition by person) +# from entries +# +# UNION ALL +# +# select +# 'point-20' as msg, +# person, +# count(*) over (), +# count(*) over (partition by person) +# from entries +# order by 1, 2, 3, 4; +# +# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# +# select +# 'point-21' as msg, +# entries.*, +# count(*) over (partition by person || person) +# from entries +# order by 2; +# +# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 
+# +# select +# 'point-22' as msg, +# entries.*, +# count(*) over (), +# count(val) over (), +# count(*) over (partition by person), +# count(val) over (partition by person), +# count(*) over (partition by dat), +# count(val) over (partition by dat) +# from entries +# order by 2; +# +# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# +# select +# 'point-23' as msg, +# entries.*, +# count(*) over (), +# count(val) over (), +# count(*) over (partition by person), +# count(val) over (partition by person), +# count(*) over (partition by extract(month from dat)), +# count(val) over (partition by extract(month from dat)) +# from entries +# order by 2; +# +# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# +# select +# 'point-24' as msg, +# entries.*, +# min(dat) over (partition by person), +# max(dat) over (partition by person) +# from entries +# order by 2; +# +# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# +# select distinct +# 'point-25' as msg, +# person, +# min(dat) over (partition by person), +# max(dat) over (partition by person) +# from entries; +# +# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# +# select distinct +# 'point-26' as msg, +# person, +# count(*) over (), +# count(*) over (partition by person) +# from entries +# order by 2; +# +# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# +# select +# 'point-27' as msg, +# person, +# count(*), +# count(*) over (), +# count(*) over (partition by person), +# count(*) over (partition by 1, 2, 3) +# from entries +# group by person +# order by 2; +# +# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# +# select +# 'point-28' as msg, +# person, +# count(*), +# count(*) over (), +# count(*) over (partition by person), +# count(*) over (partition by 1, 2, 3) +# from entries +# group by person +# order by 2 desc; +# +# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# +# select * +# from ( +# select +# 'point-29' as msg, +# person, +# count(*) c1, +# count(*) over () c2, +# count(*) over (partition by person) c3, +# count(*) over (partition by 1, 2, 3) c4 +# from entries +# group by person +# ) +# order by 2 desc; +# +# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# +# select +# 'point-30' as msg, +# person, +# count(*), +# count(*) over (), +# count(*) over (partition by person), +# count(*) over (partition by 1, 2, 3) +# from entries +# group by person +# order by 2 desc; +# +# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# +# select +# 'point-31' as msg, +# person, +# count(*), +# count(*) over (), +# count(*) over (partition by person), +# count(*) over (partition by 1, 2, 3), +# count(count(*)) over () +# from entries +# group by person +# order by 4 desc, 2 desc; +# +# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# +# select +# 'point-32' as msg, +# person, +# count(*), +# count(*) over (), +# count(*) over (partition by person), +# count(*) over (partition by 1, 2, 3), +# count(count(*)) over () +# from entries +# group by person +# having count(*) = 3 +# order by 4 desc, 2 desc; +# +# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# +# select +# 'point-33' as msg, +# person, +# sum(val), +# count(*) over (), +# count(*) over (partition by person), +# count(*) over (partition by 1, 2, 3), +# count(count(*)) over () +# from entries +# group by person +# order by 4 desc, 2 desc; +# +# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# +# 
select +# 'point-34' as msg, +# person, +# sum(val), +# count(*) over (), +# count(*) over (partition by person), +# count(*) over (partition by 1, 2, 3), +# count(count(*)) over () +# from entries +# group by person +# having sum(val) between 16 and 26 +# order by 4 desc, 2 desc; +# +# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# -- Test invalid usages. Following statements must raise error: +# --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# +# select +# 'point-35' as msg, +# person, +# sum(val) over (partition by count(*)) +# from entries; +# +# select +# 'point-36' as msg, +# person +# from entries +# where count(*) over () = 1; +# +# select +# 'point-37' as msg, +# person +# from entries +# group by person +# having count(*) over () = 1; +# ''' +# +# runProgram('isql', [ dsn], os.linesep.join( (sql_init, sql_addi) ) ) +#--- diff --git a/tests/functional/gtcs/test_window_func_02.py b/tests/functional/gtcs/test_window_func_02.py index 199eb3c0..af2e03df 100644 --- a/tests/functional/gtcs/test_window_func_02.py +++ b/tests/functional/gtcs/test_window_func_02.py @@ -1,121 +1,1302 @@ #coding:utf-8 -# -# id: functional.gtcs.window_func_02 -# title: GTCS/tests/FB_SQL_WINDOW_FUNC_02 - set of miscelaneous tests for verification of windowed functions. -# decription: -# Statements from this test are added to initial SQL which is stored in: ... -# bt-repo -# iles\\gtcs-window-func.sql -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/FB_SQL_WINDOW_FUNC_02.script -# -# Checked on 4.0.0.1854; 3.0.6.33277 -# -# tracker_id: -# min_versions: ['3.0'] -# versions: 3.0 -# qmid: None + +""" +ID: gtcs.window-func-02 +TITLE: Set of miscellaneous tests for verification of window functions +DESCRIPTION: + Statements from this test are added to the initial SQL which is stored in /files/gtcs-window-func.sql + Original test: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/FB_SQL_WINDOW_FUNC_02.script +FBTEST: functional.gtcs.window_func_02 +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory() -substitutions_1 = [('[ \t]+', ' ')] +act = python_act('db', substitutions=[('[ \t]+', ' ')]) -init_script_1 = """""" +expected_stdout = """ + MSG point-01 + ID 1 + NAME Person 1 + SUM 1 + MSG point-01 + ID 2 + NAME Person 2 + SUM 2 + MSG point-01 + ID 3 + NAME Person 3 + SUM 3 + MSG point-01 + ID 4 + NAME Person 4 + SUM 4 + MSG point-01 + ID 5 + NAME Person 5 + SUM 5 + MSG point-02 + ID 1 + NAME Person 1 + SUM 5 + MSG point-02 + ID 2 + NAME Person 2 + SUM 4 + MSG point-02 + ID 3 + NAME Person 3 + SUM 3 + MSG point-02 + ID 4 + NAME Person 4 + SUM 2 + MSG point-02 + ID 5 + NAME Person 5 + SUM 1 + MSG point-03 + ID 1 + NAME Person 1 + SUM 1 + MSG point-03 + ID 2 + NAME Person 2 + SUM 2 + MSG point-03 + ID 3 + NAME Person 3 + SUM 3 + MSG point-03 + ID 4 + NAME Person 4 + SUM 4 + MSG point-03 + ID 5 + NAME Person 5 + SUM 5 + MSG point-04 + ID 5 + NAME Person 5 + SUM 5 + MSG point-04 + ID 4 + NAME Person 4 + SUM 4 + MSG point-04 + ID 3 + NAME Person 3 + SUM 3 + MSG point-04 + ID 2 + NAME Person 2 + SUM 2 + MSG point-04 + ID 1 + NAME Person 1 + SUM 1 + MSG point-05 + ID 5 + NAME Person 5 + SUM 1 + MSG point-05 + ID 4 + NAME Person 4 + SUM 2 + MSG point-05 + ID 3 + NAME Person 3 + SUM 3 + MSG point-05 + ID 2 + NAME Person 2 + SUM 4 + MSG point-05 + ID 1 + NAME Person 1 + SUM 5 + MSG point-06 + ID 5 + NAME Person 5 + S 1 + MSG point-06 + ID
4 + NAME Person 4 + S 2 + MSG point-06 + ID 3 + NAME Person 3 + S 3 + MSG point-06 + ID 2 + NAME Person 2 + S 4 + MSG point-06 + ID 1 + NAME Person 1 + S 5 + MSG point-07 + ID 1 + NAME Person 1 + SUM 1 + MSG point-07 + ID 2 + NAME Person 2 + SUM 3 + MSG point-07 + ID 3 + NAME Person 3 + SUM 6 + MSG point-07 + ID 4 + NAME Person 4 + SUM 10 + MSG point-07 + ID 5 + NAME Person 5 + SUM 15 + MSG point-08 + ID 1 + NAME Person 1 + SUM 1 + MSG point-08 + ID 2 + NAME Person 2 + SUM 1 + MSG point-08 + ID 3 + NAME Person 3 + SUM 2 + MSG point-08 + ID 4 + NAME Person 4 + SUM 2 + MSG point-08 + ID 5 + NAME Person 5 + SUM 3 + MSG point-09 + ID 1 + PERSON 1 + DAT 2010-01-03 + VAL 2.30 + AVG 3.03 + AVG 2.30 + MSG point-09 + ID 2 + PERSON 2 + DAT 2010-01-04 + VAL 4.30 + AVG 4.36 + AVG 3.30 + MSG point-09 + ID 3 + PERSON 3 + DAT 2010-01-05 + VAL 6.30 + AVG 5.70 + AVG 4.30 + MSG point-09 + ID 4 + PERSON 4 + DAT 2010-01-06 + VAL 8.30 + AVG 7.03 + AVG 5.30 + MSG point-09 + ID 5 + PERSON 5 + DAT 2010-01-07 + VAL 10.30 + AVG 8.36 + AVG 6.30 + MSG point-09 + ID 6 + PERSON 1 + DAT 2010-02-02 + VAL 3.40 + AVG 3.03 + AVG 5.81 + MSG point-09 + ID 7 + PERSON 2 + DAT 2010-02-03 + VAL 6.40 + AVG 4.36 + AVG 5.90 + MSG point-09 + ID 8 + PERSON 3 + DAT 2010-02-04 + VAL 9.40 + AVG 5.70 + AVG 6.33 + MSG point-09 + ID 9 + PERSON 4 + DAT 2010-02-05 + VAL 12.40 + AVG 7.03 + AVG 7.01 + MSG point-09 + ID 10 + PERSON 5 + DAT 2010-02-06 + VAL 15.40 + AVG 8.36 + AVG 7.85 + MSG point-09 + ID 11 + PERSON 1 + DAT 2010-03-02 + VAL 3.40 + AVG 3.03 + AVG 7.44 + MSG point-09 + ID 12 + PERSON 2 + DAT 2010-03-03 + VAL 6.40 + AVG 4.36 + AVG 7.35 + MSG point-09 + ID 13 + PERSON 3 + DAT 2010-03-04 + VAL 9.40 + AVG 5.70 + AVG 7.51 + MSG point-09 + ID 14 + PERSON 4 + DAT 2010-03-05 + VAL 12.40 + AVG 7.03 + AVG 7.86 + MSG point-09 + ID 15 + PERSON 5 + DAT 2010-03-06 + VAL 15.40 + AVG 8.36 + AVG 8.36 + MSG point-09 + ID 16 + PERSON 1 + DAT + VAL + AVG 3.03 + AVG + MSG point-10 + ID 1 + PERSON 1 + DAT 2010-01-03 + VAL 2.30 + AVG 3.03 + AVG 2.30 + MSG point-10 + ID 2 + PERSON 2 + DAT 2010-01-04 + VAL 4.30 + AVG 4.36 + AVG 3.30 + MSG point-10 + ID 3 + PERSON 3 + DAT 2010-01-05 + VAL 6.30 + AVG 5.70 + AVG 4.30 + MSG point-10 + ID 4 + PERSON 4 + DAT 2010-01-06 + VAL 8.30 + AVG 7.03 + AVG 5.30 + MSG point-10 + ID 5 + PERSON 5 + DAT 2010-01-07 + VAL 10.30 + AVG 8.36 + AVG 6.30 + MSG point-10 + ID 6 + PERSON 1 + DAT 2010-02-02 + VAL 3.40 + AVG 3.03 + AVG 5.81 + MSG point-10 + ID 7 + PERSON 2 + DAT 2010-02-03 + VAL 6.40 + AVG 4.36 + AVG 5.90 + MSG point-10 + ID 8 + PERSON 3 + DAT 2010-02-04 + VAL 9.40 + AVG 5.70 + AVG 6.33 + MSG point-10 + ID 9 + PERSON 4 + DAT 2010-02-05 + VAL 12.40 + AVG 7.03 + AVG 7.01 + MSG point-10 + ID 10 + PERSON 5 + DAT 2010-02-06 + VAL 15.40 + AVG 8.36 + AVG 7.85 + MSG point-10 + ID 11 + PERSON 1 + DAT 2010-03-02 + VAL 3.40 + AVG 3.03 + AVG 7.44 + MSG point-10 + ID 12 + PERSON 2 + DAT 2010-03-03 + VAL 6.40 + AVG 4.36 + AVG 7.35 + MSG point-10 + ID 13 + PERSON 3 + DAT 2010-03-04 + VAL 9.40 + AVG 5.70 + AVG 7.51 + MSG point-10 + ID 14 + PERSON 4 + DAT 2010-03-05 + VAL 12.40 + AVG 7.03 + AVG 7.86 + MSG point-10 + ID 15 + PERSON 5 + DAT 2010-03-06 + VAL 15.40 + AVG 8.36 + AVG 8.36 + MSG point-10 + ID 16 + PERSON 1 + DAT + VAL + AVG 3.03 + AVG 8.36 + MSG point-11 + ID 1 + PERSON 1 + DAT 2010-01-03 + VAL 2.30 + COUNT 3 + COUNT 4 + COUNT 0 + MSG point-11 + ID 2 + PERSON 2 + DAT 2010-01-04 + VAL 4.30 + COUNT 6 + COUNT 7 + COUNT 0 + MSG point-11 + ID 3 + PERSON 3 + DAT 2010-01-05 + VAL 6.30 + COUNT 9 + COUNT 10 + COUNT 0 + MSG point-11 + ID 
4 + PERSON 4 + DAT 2010-01-06 + VAL 8.30 + COUNT 12 + COUNT 13 + COUNT 0 + MSG point-11 + ID 5 + PERSON 5 + DAT 2010-01-07 + VAL 10.30 + COUNT 15 + COUNT 16 + COUNT 0 + MSG point-11 + ID 6 + PERSON 1 + DAT 2010-02-02 + VAL 3.40 + COUNT 3 + COUNT 4 + COUNT 0 + MSG point-11 + ID 7 + PERSON 2 + DAT 2010-02-03 + VAL 6.40 + COUNT 6 + COUNT 7 + COUNT 0 + MSG point-11 + ID 8 + PERSON 3 + DAT 2010-02-04 + VAL 9.40 + COUNT 9 + COUNT 10 + COUNT 0 + MSG point-11 + ID 9 + PERSON 4 + DAT 2010-02-05 + VAL 12.40 + COUNT 12 + COUNT 13 + COUNT 0 + MSG point-11 + ID 10 + PERSON 5 + DAT 2010-02-06 + VAL 15.40 + COUNT 15 + COUNT 16 + COUNT 0 + MSG point-11 + ID 11 + PERSON 1 + DAT 2010-03-02 + VAL 3.40 + COUNT 3 + COUNT 4 + COUNT 0 + MSG point-11 + ID 12 + PERSON 2 + DAT 2010-03-03 + VAL 6.40 + COUNT 6 + COUNT 7 + COUNT 0 + MSG point-11 + ID 13 + PERSON 3 + DAT 2010-03-04 + VAL 9.40 + COUNT 9 + COUNT 10 + COUNT 0 + MSG point-11 + ID 14 + PERSON 4 + DAT 2010-03-05 + VAL 12.40 + COUNT 12 + COUNT 13 + COUNT 0 + MSG point-11 + ID 15 + PERSON 5 + DAT 2010-03-06 + VAL 15.40 + COUNT 15 + COUNT 16 + COUNT 0 + MSG point-11 + ID 16 + PERSON 1 + DAT + VAL + COUNT 3 + COUNT 4 + COUNT 0 + MSG point-12 + ID 1 + PERSON 1 + DAT 2010-01-03 + VAL 2.30 + COUNT 3 + COUNT 1 + COUNT 4 + COUNT 1 + COUNT 0 + COUNT 0 + MSG point-12 + ID 2 + PERSON 2 + DAT 2010-01-04 + VAL 4.30 + COUNT 6 + COUNT 2 + COUNT 7 + COUNT 2 + COUNT 0 + COUNT 0 + MSG point-12 + ID 3 + PERSON 3 + DAT 2010-01-05 + VAL 6.30 + COUNT 9 + COUNT 3 + COUNT 10 + COUNT 3 + COUNT 0 + COUNT 0 + MSG point-12 + ID 4 + PERSON 4 + DAT 2010-01-06 + VAL 8.30 + COUNT 12 + COUNT 4 + COUNT 13 + COUNT 4 + COUNT 0 + COUNT 0 + MSG point-12 + ID 5 + PERSON 5 + DAT 2010-01-07 + VAL 10.30 + COUNT 15 + COUNT 5 + COUNT 16 + COUNT 5 + COUNT 0 + COUNT 0 + MSG point-12 + ID 6 + PERSON 1 + DAT 2010-02-02 + VAL 3.40 + COUNT 3 + COUNT 6 + COUNT 4 + COUNT 6 + COUNT 0 + COUNT 0 + MSG point-12 + ID 7 + PERSON 2 + DAT 2010-02-03 + VAL 6.40 + COUNT 6 + COUNT 7 + COUNT 7 + COUNT 7 + COUNT 0 + COUNT 0 + MSG point-12 + ID 8 + PERSON 3 + DAT 2010-02-04 + VAL 9.40 + COUNT 9 + COUNT 8 + COUNT 10 + COUNT 8 + COUNT 0 + COUNT 0 + MSG point-12 + ID 9 + PERSON 4 + DAT 2010-02-05 + VAL 12.40 + COUNT 12 + COUNT 9 + COUNT 13 + COUNT 9 + COUNT 0 + COUNT 0 + MSG point-12 + ID 10 + PERSON 5 + DAT 2010-02-06 + VAL 15.40 + COUNT 15 + COUNT 10 + COUNT 16 + COUNT 10 + COUNT 0 + COUNT 0 + MSG point-12 + ID 11 + PERSON 1 + DAT 2010-03-02 + VAL 3.40 + COUNT 3 + COUNT 11 + COUNT 4 + COUNT 11 + COUNT 0 + COUNT 0 + MSG point-12 + ID 12 + PERSON 2 + DAT 2010-03-03 + VAL 6.40 + COUNT 6 + COUNT 12 + COUNT 7 + COUNT 12 + COUNT 0 + COUNT 0 + MSG point-12 + ID 13 + PERSON 3 + DAT 2010-03-04 + VAL 9.40 + COUNT 9 + COUNT 13 + COUNT 10 + COUNT 13 + COUNT 0 + COUNT 0 + MSG point-12 + ID 14 + PERSON 4 + DAT 2010-03-05 + VAL 12.40 + COUNT 12 + COUNT 14 + COUNT 13 + COUNT 14 + COUNT 0 + COUNT 0 + MSG point-12 + ID 15 + PERSON 5 + DAT 2010-03-06 + VAL 15.40 + COUNT 15 + COUNT 15 + COUNT 16 + COUNT 15 + COUNT 0 + COUNT 0 + MSG point-12 + ID 16 + PERSON 1 + DAT + VAL + COUNT 3 + COUNT 15 + COUNT 4 + COUNT 16 + COUNT 0 + COUNT 0 + MSG point-13 + ID 1 + PERSON 1 + DAT 2010-01-03 + VAL 2.30 + SUM 2.30 + SUM 2.30 + COUNT 4 + COUNT 1 + SUM 1 + SUM 1 + MSG point-13 + ID 2 + PERSON 2 + DAT 2010-01-04 + VAL 4.30 + SUM 4.30 + SUM 4.30 + COUNT 3 + COUNT 1 + SUM 2 + SUM 2 + MSG point-13 + ID 3 + PERSON 3 + DAT 2010-01-05 + VAL 6.30 + SUM 6.30 + SUM 6.30 + COUNT 3 + COUNT 1 + SUM 3 + SUM 3 + MSG point-13 + ID 4 + PERSON 4 + DAT 2010-01-06 + VAL 8.30 + 
SUM 8.30 + SUM 8.30 + COUNT 3 + COUNT 1 + SUM 4 + SUM 4 + MSG point-13 + ID 5 + PERSON 5 + DAT 2010-01-07 + VAL 10.30 + SUM 10.30 + SUM 10.30 + COUNT 3 + COUNT 1 + SUM 5 + SUM 5 + MSG point-13 + ID 6 + PERSON 1 + DAT 2010-02-02 + VAL 3.40 + SUM 3.40 + SUM 3.40 + COUNT 4 + COUNT 2 + SUM 6 + SUM 6 + MSG point-13 + ID 7 + PERSON 2 + DAT 2010-02-03 + VAL 6.40 + SUM 6.40 + SUM 6.40 + COUNT 3 + COUNT 2 + SUM 7 + SUM 7 + MSG point-13 + ID 8 + PERSON 3 + DAT 2010-02-04 + VAL 9.40 + SUM 9.40 + SUM 9.40 + COUNT 3 + COUNT 2 + SUM 8 + SUM 8 + MSG point-13 + ID 9 + PERSON 4 + DAT 2010-02-05 + VAL 12.40 + SUM 12.40 + SUM 12.40 + COUNT 3 + COUNT 2 + SUM 9 + SUM 9 + MSG point-13 + ID 10 + PERSON 5 + DAT 2010-02-06 + VAL 15.40 + SUM 15.40 + SUM 15.40 + COUNT 3 + COUNT 2 + SUM 10 + SUM 10 + MSG point-13 + ID 11 + PERSON 1 + DAT 2010-03-02 + VAL 3.40 + SUM 3.40 + SUM 3.40 + COUNT 4 + COUNT 3 + SUM 11 + SUM 11 + MSG point-13 + ID 12 + PERSON 2 + DAT 2010-03-03 + VAL 6.40 + SUM 6.40 + SUM 6.40 + COUNT 3 + COUNT 3 + SUM 12 + SUM 12 + MSG point-13 + ID 13 + PERSON 3 + DAT 2010-03-04 + VAL 9.40 + SUM 9.40 + SUM 9.40 + COUNT 3 + COUNT 3 + SUM 13 + SUM 13 + MSG point-13 + ID 14 + PERSON 4 + DAT 2010-03-05 + VAL 12.40 + SUM 12.40 + SUM 12.40 + COUNT 3 + COUNT 3 + SUM 14 + SUM 14 + MSG point-13 + ID 15 + PERSON 5 + DAT 2010-03-06 + VAL 15.40 + SUM 15.40 + SUM 15.40 + COUNT 3 + COUNT 3 + SUM 15 + SUM 15 + MSG point-13 + ID 16 + PERSON 1 + DAT + VAL + SUM + SUM + COUNT 4 + COUNT 4 + SUM 16 + SUM 16 + MSG point-14 + ID 1 + PERSON 1 + DAT 2010-01-03 + VAL 2.30 + SUM 31.50 + SUM 15 + SUM 125.50 + SUM 120 + SUM 15.10 + SUM 20 + MSG point-14 + ID 2 + PERSON 2 + DAT 2010-01-04 + VAL 4.30 + SUM 31.50 + SUM 15 + SUM 125.50 + SUM 120 + SUM 23.10 + SUM 23 + MSG point-14 + ID 3 + PERSON 3 + DAT 2010-01-05 + VAL 6.30 + SUM 31.50 + SUM 15 + SUM 125.50 + SUM 120 + SUM 31.10 + SUM 26 + MSG point-14 + ID 4 + PERSON 4 + DAT 2010-01-06 + VAL 8.30 + SUM 31.50 + SUM 15 + SUM 125.50 + SUM 120 + SUM 39.10 + SUM 29 + MSG point-14 + ID 5 + PERSON 5 + DAT 2010-01-07 + VAL 10.30 + SUM 31.50 + SUM 15 + SUM 125.50 + SUM 120 + SUM 10.30 + SUM 5 + MSG point-14 + ID 6 + PERSON 1 + DAT 2010-02-02 + VAL 3.40 + SUM 47.00 + SUM 40 + SUM 125.50 + SUM 120 + SUM 6.80 + SUM 17 + MSG point-14 + ID 7 + PERSON 2 + DAT 2010-02-03 + VAL 6.40 + SUM 47.00 + SUM 40 + SUM 125.50 + SUM 120 + SUM 15.10 + SUM 20 + MSG point-14 + ID 8 + PERSON 3 + DAT 2010-02-04 + VAL 9.40 + SUM 47.00 + SUM 40 + SUM 125.50 + SUM 120 + SUM 23.10 + SUM 23 + MSG point-14 + ID 9 + PERSON 4 + DAT 2010-02-05 + VAL 12.40 + SUM 47.00 + SUM 40 + SUM 125.50 + SUM 120 + SUM 31.10 + SUM 26 + MSG point-14 + ID 10 + PERSON 5 + DAT 2010-02-06 + VAL 15.40 + SUM 47.00 + SUM 40 + SUM 125.50 + SUM 120 + SUM 39.10 + SUM 29 + MSG point-14 + ID 11 + PERSON 1 + DAT 2010-03-02 + VAL 3.40 + SUM 47.00 + SUM 65 + SUM 125.50 + SUM 120 + SUM 6.80 + SUM 17 + MSG point-14 + ID 12 + PERSON 2 + DAT 2010-03-03 + VAL 6.40 + SUM 47.00 + SUM 65 + SUM 125.50 + SUM 120 + SUM 15.10 + SUM 20 + MSG point-14 + ID 13 + PERSON 3 + DAT 2010-03-04 + VAL 9.40 + SUM 47.00 + SUM 65 + SUM 125.50 + SUM 120 + SUM 23.10 + SUM 23 + MSG point-14 + ID 14 + PERSON 4 + DAT 2010-03-05 + VAL 12.40 + SUM 47.00 + SUM 65 + SUM 125.50 + SUM 120 + SUM 31.10 + SUM 26 + MSG point-14 + ID 15 + PERSON 5 + DAT 2010-03-06 + VAL 15.40 + SUM 47.00 + SUM 65 + SUM 125.50 + SUM 120 + SUM 39.10 + SUM 29 + MSG point-14 + ID 16 + PERSON 1 + DAT + VAL + SUM + SUM 16 + SUM + SUM 16 + SUM + SUM 16 + MSG point-15 + ID 1 + PERSON 1 + DAT 2010-01-03 + VAL 2.30 + MIN 1 + 
MAX 5 + MIN 2.30 + MAX 10.30 + MSG point-15 + ID 2 + PERSON 2 + DAT 2010-01-04 + VAL 4.30 + MIN 1 + MAX 5 + MIN 2.30 + MAX 10.30 + MSG point-15 + ID 3 + PERSON 3 + DAT 2010-01-05 + VAL 6.30 + MIN 1 + MAX 5 + MIN 2.30 + MAX 10.30 + MSG point-15 + ID 4 + PERSON 4 + DAT 2010-01-06 + VAL 8.30 + MIN 1 + MAX 5 + MIN 2.30 + MAX 10.30 + MSG point-15 + ID 5 + PERSON 5 + DAT 2010-01-07 + VAL 10.30 + MIN 1 + MAX 5 + MIN 2.30 + MAX 10.30 + MSG point-15 + ID 6 + PERSON 1 + DAT 2010-02-02 + VAL 3.40 + MIN 6 + MAX 10 + MIN 3.40 + MAX 15.40 + MSG point-15 + ID 7 + PERSON 2 + DAT 2010-02-03 + VAL 6.40 + MIN 6 + MAX 10 + MIN 3.40 + MAX 15.40 + MSG point-15 + ID 8 + PERSON 3 + DAT 2010-02-04 + VAL 9.40 + MIN 6 + MAX 10 + MIN 3.40 + MAX 15.40 + MSG point-15 + ID 9 + PERSON 4 + DAT 2010-02-05 + VAL 12.40 + MIN 6 + MAX 10 + MIN 3.40 + MAX 15.40 + MSG point-15 + ID 10 + PERSON 5 + DAT 2010-02-06 + VAL 15.40 + MIN 6 + MAX 10 + MIN 3.40 + MAX 15.40 + MSG point-15 + ID 11 + PERSON 1 + DAT 2010-03-02 + VAL 3.40 + MIN 11 + MAX 15 + MIN 3.40 + MAX 15.40 + MSG point-15 + ID 12 + PERSON 2 + DAT 2010-03-03 + VAL 6.40 + MIN 11 + MAX 15 + MIN 3.40 + MAX 15.40 + MSG point-15 + ID 13 + PERSON 3 + DAT 2010-03-04 + VAL 9.40 + MIN 11 + MAX 15 + MIN 3.40 + MAX 15.40 + MSG point-15 + ID 14 + PERSON 4 + DAT 2010-03-05 + VAL 12.40 + MIN 11 + MAX 15 + MIN 3.40 + MAX 15.40 + MSG point-15 + ID 15 + PERSON 5 + DAT 2010-03-06 + VAL 15.40 + MIN 11 + MAX 15 + MIN 3.40 + MAX 15.40 + MSG point-15 + ID 16 + PERSON 1 + DAT + VAL + MIN 16 + MAX 16 + MIN + MAX +""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +@pytest.mark.skip('FIXME: Not IMPLEMENTED') +@pytest.mark.version('>=3.0') +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") # test_script_1 #--- -# +# # import os # import sys # import subprocess -# +# # os.environ["ISC_USER"] = user_name # os.environ["ISC_PASSWORD"] = user_password -# +# # db_conn.close() -# +# # with open( os.path.join(context['files_location'],'gtcs-window-func.sql'), 'r') as f: # sql_init = f.read() -# +# # sql_addi=''' # set list on; -# +# # select # 'point-01' as msg, # p.*, # sum(1) over (order by id) # from persons p # order by id; -# +# # --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# +# # select # 'point-02' as msg, # p.*, # sum(1) over (order by id desc) # from persons p # order by id; -# +# # --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# +# # select # 'point-03' as msg, # p.*, # sum(1) over (order by id) # from persons p # order by id; -# +# # --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# +# # select # 'point-04' as msg, # p.*, # sum(1) over (order by id) # from persons p # order by id desc; -# +# # --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# +# # select # 'point-05' as msg, # p.*, # sum(1) over (order by id desc) # from persons p # order by id desc; -# +# # --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# +# # select # 'point-06' as msg, # p.*, # sum(1) over (order by id desc) s # from persons p # order by s; -# +# # --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# +# # select # 'point-07' as msg, # p.*, # sum(id) over (order by id) # from persons p; -# +# # --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# +# # select # 'point-08' as msg, # p.*, # sum(mod(id, 2)) over (order by id) # from persons p; -# +# # --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# +# # select # 'point-09' as msg, # e.*, @@ -123,9 +1304,9 @@ db_1 = db_factory(sql_dialect=3, 
init=init_script_1) # avg(val) over (order by dat nulls first) # from entries e # order by id; -# +# # --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# +# # select # 'point-10' as msg, # e.*, @@ -133,9 +1314,9 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # avg(val) over (order by dat nulls last) # from entries e # order by id; -# +# # --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# +# # select # 'point-11' as msg, # e.*, @@ -144,9 +1325,9 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # count(null) over (order by person) # from entries e # order by id; -# +# # --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# +# # select # 'point-12' as msg, # e.*, @@ -158,9 +1339,9 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # count(null) over (order by id) # from entries e # order by id; -# +# # --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# +# # select # 'point-13' as msg, # e.*, @@ -172,9 +1353,9 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # sum(id) over (partition by dat order by id) # from entries e # order by id; -# +# # --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# +# # select # 'point-14' as msg, # e.*, @@ -186,9 +1367,9 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # sum(id) over (partition by extract(day from dat)) # from entries e # order by id; -# +# # --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# +# # select # 'point-15' as msg, # e.*, @@ -199,1202 +1380,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # from entries e # order by id; # ''' -# +# # runProgram('isql', [ dsn], os.linesep.join( (sql_init, sql_addi) ) ) -# +# #--- -act_1 = python_act('db_1', substitutions=substitutions_1) - -expected_stdout_1 = """ - MSG point-01 - ID 1 - NAME Person 1 - SUM 1 - MSG point-01 - ID 2 - NAME Person 2 - SUM 2 - MSG point-01 - ID 3 - NAME Person 3 - SUM 3 - MSG point-01 - ID 4 - NAME Person 4 - SUM 4 - MSG point-01 - ID 5 - NAME Person 5 - SUM 5 - MSG point-02 - ID 1 - NAME Person 1 - SUM 5 - MSG point-02 - ID 2 - NAME Person 2 - SUM 4 - MSG point-02 - ID 3 - NAME Person 3 - SUM 3 - MSG point-02 - ID 4 - NAME Person 4 - SUM 2 - MSG point-02 - ID 5 - NAME Person 5 - SUM 1 - MSG point-03 - ID 1 - NAME Person 1 - SUM 1 - MSG point-03 - ID 2 - NAME Person 2 - SUM 2 - MSG point-03 - ID 3 - NAME Person 3 - SUM 3 - MSG point-03 - ID 4 - NAME Person 4 - SUM 4 - MSG point-03 - ID 5 - NAME Person 5 - SUM 5 - MSG point-04 - ID 5 - NAME Person 5 - SUM 5 - MSG point-04 - ID 4 - NAME Person 4 - SUM 4 - MSG point-04 - ID 3 - NAME Person 3 - SUM 3 - MSG point-04 - ID 2 - NAME Person 2 - SUM 2 - MSG point-04 - ID 1 - NAME Person 1 - SUM 1 - MSG point-05 - ID 5 - NAME Person 5 - SUM 1 - MSG point-05 - ID 4 - NAME Person 4 - SUM 2 - MSG point-05 - ID 3 - NAME Person 3 - SUM 3 - MSG point-05 - ID 2 - NAME Person 2 - SUM 4 - MSG point-05 - ID 1 - NAME Person 1 - SUM 5 - MSG point-06 - ID 5 - NAME Person 5 - S 1 - MSG point-06 - ID 4 - NAME Person 4 - S 2 - MSG point-06 - ID 3 - NAME Person 3 - S 3 - MSG point-06 - ID 2 - NAME Person 2 - S 4 - MSG point-06 - ID 1 - NAME Person 1 - S 5 - MSG point-07 - ID 1 - NAME Person 1 - SUM 1 - MSG point-07 - ID 2 - NAME Person 2 - SUM 3 - MSG point-07 - ID 3 - NAME Person 3 - SUM 6 - MSG point-07 - ID 4 - NAME Person 4 - SUM 10 - MSG point-07 - ID 5 - NAME Person 5 - SUM 15 - MSG point-08 - ID 1 - NAME Person 1 - SUM 1 - MSG point-08 - ID 2 - NAME Person 2 - SUM 1 - MSG point-08 - ID 3 - NAME Person 3 - SUM 2 - MSG 
point-08 - ID 4 - NAME Person 4 - SUM 2 - MSG point-08 - ID 5 - NAME Person 5 - SUM 3 - MSG point-09 - ID 1 - PERSON 1 - DAT 2010-01-03 - VAL 2.30 - AVG 3.03 - AVG 2.30 - MSG point-09 - ID 2 - PERSON 2 - DAT 2010-01-04 - VAL 4.30 - AVG 4.36 - AVG 3.30 - MSG point-09 - ID 3 - PERSON 3 - DAT 2010-01-05 - VAL 6.30 - AVG 5.70 - AVG 4.30 - MSG point-09 - ID 4 - PERSON 4 - DAT 2010-01-06 - VAL 8.30 - AVG 7.03 - AVG 5.30 - MSG point-09 - ID 5 - PERSON 5 - DAT 2010-01-07 - VAL 10.30 - AVG 8.36 - AVG 6.30 - MSG point-09 - ID 6 - PERSON 1 - DAT 2010-02-02 - VAL 3.40 - AVG 3.03 - AVG 5.81 - MSG point-09 - ID 7 - PERSON 2 - DAT 2010-02-03 - VAL 6.40 - AVG 4.36 - AVG 5.90 - MSG point-09 - ID 8 - PERSON 3 - DAT 2010-02-04 - VAL 9.40 - AVG 5.70 - AVG 6.33 - MSG point-09 - ID 9 - PERSON 4 - DAT 2010-02-05 - VAL 12.40 - AVG 7.03 - AVG 7.01 - MSG point-09 - ID 10 - PERSON 5 - DAT 2010-02-06 - VAL 15.40 - AVG 8.36 - AVG 7.85 - MSG point-09 - ID 11 - PERSON 1 - DAT 2010-03-02 - VAL 3.40 - AVG 3.03 - AVG 7.44 - MSG point-09 - ID 12 - PERSON 2 - DAT 2010-03-03 - VAL 6.40 - AVG 4.36 - AVG 7.35 - MSG point-09 - ID 13 - PERSON 3 - DAT 2010-03-04 - VAL 9.40 - AVG 5.70 - AVG 7.51 - MSG point-09 - ID 14 - PERSON 4 - DAT 2010-03-05 - VAL 12.40 - AVG 7.03 - AVG 7.86 - MSG point-09 - ID 15 - PERSON 5 - DAT 2010-03-06 - VAL 15.40 - AVG 8.36 - AVG 8.36 - MSG point-09 - ID 16 - PERSON 1 - DAT - VAL - AVG 3.03 - AVG - MSG point-10 - ID 1 - PERSON 1 - DAT 2010-01-03 - VAL 2.30 - AVG 3.03 - AVG 2.30 - MSG point-10 - ID 2 - PERSON 2 - DAT 2010-01-04 - VAL 4.30 - AVG 4.36 - AVG 3.30 - MSG point-10 - ID 3 - PERSON 3 - DAT 2010-01-05 - VAL 6.30 - AVG 5.70 - AVG 4.30 - MSG point-10 - ID 4 - PERSON 4 - DAT 2010-01-06 - VAL 8.30 - AVG 7.03 - AVG 5.30 - MSG point-10 - ID 5 - PERSON 5 - DAT 2010-01-07 - VAL 10.30 - AVG 8.36 - AVG 6.30 - MSG point-10 - ID 6 - PERSON 1 - DAT 2010-02-02 - VAL 3.40 - AVG 3.03 - AVG 5.81 - MSG point-10 - ID 7 - PERSON 2 - DAT 2010-02-03 - VAL 6.40 - AVG 4.36 - AVG 5.90 - MSG point-10 - ID 8 - PERSON 3 - DAT 2010-02-04 - VAL 9.40 - AVG 5.70 - AVG 6.33 - MSG point-10 - ID 9 - PERSON 4 - DAT 2010-02-05 - VAL 12.40 - AVG 7.03 - AVG 7.01 - MSG point-10 - ID 10 - PERSON 5 - DAT 2010-02-06 - VAL 15.40 - AVG 8.36 - AVG 7.85 - MSG point-10 - ID 11 - PERSON 1 - DAT 2010-03-02 - VAL 3.40 - AVG 3.03 - AVG 7.44 - MSG point-10 - ID 12 - PERSON 2 - DAT 2010-03-03 - VAL 6.40 - AVG 4.36 - AVG 7.35 - MSG point-10 - ID 13 - PERSON 3 - DAT 2010-03-04 - VAL 9.40 - AVG 5.70 - AVG 7.51 - MSG point-10 - ID 14 - PERSON 4 - DAT 2010-03-05 - VAL 12.40 - AVG 7.03 - AVG 7.86 - MSG point-10 - ID 15 - PERSON 5 - DAT 2010-03-06 - VAL 15.40 - AVG 8.36 - AVG 8.36 - MSG point-10 - ID 16 - PERSON 1 - DAT - VAL - AVG 3.03 - AVG 8.36 - MSG point-11 - ID 1 - PERSON 1 - DAT 2010-01-03 - VAL 2.30 - COUNT 3 - COUNT 4 - COUNT 0 - MSG point-11 - ID 2 - PERSON 2 - DAT 2010-01-04 - VAL 4.30 - COUNT 6 - COUNT 7 - COUNT 0 - MSG point-11 - ID 3 - PERSON 3 - DAT 2010-01-05 - VAL 6.30 - COUNT 9 - COUNT 10 - COUNT 0 - MSG point-11 - ID 4 - PERSON 4 - DAT 2010-01-06 - VAL 8.30 - COUNT 12 - COUNT 13 - COUNT 0 - MSG point-11 - ID 5 - PERSON 5 - DAT 2010-01-07 - VAL 10.30 - COUNT 15 - COUNT 16 - COUNT 0 - MSG point-11 - ID 6 - PERSON 1 - DAT 2010-02-02 - VAL 3.40 - COUNT 3 - COUNT 4 - COUNT 0 - MSG point-11 - ID 7 - PERSON 2 - DAT 2010-02-03 - VAL 6.40 - COUNT 6 - COUNT 7 - COUNT 0 - MSG point-11 - ID 8 - PERSON 3 - DAT 2010-02-04 - VAL 9.40 - COUNT 9 - COUNT 10 - COUNT 0 - MSG point-11 - ID 9 - PERSON 4 - DAT 2010-02-05 - VAL 12.40 - COUNT 12 - COUNT 13 - 
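The COUNT triples of point-11 and point-12 in this listing contrast count(*), count(val) and count(null) inside a window: count(*) counts every row of the frame, count(val) skips the id 16 entry whose VAL is NULL, and count(null) is always 0. A standalone sketch of that behaviour follows; the query and the alias names are illustrative assumptions rather than the test's literal statement.

count_flavours_demo = """
    set list on;
    select id, person, val,
           count(val)  over (order by person) as cnt_val,
           count(*)    over (order by person) as cnt_all,
           count(null) over (order by person) as cnt_null
    from entries
    order by id;
"""
# With ORDER BY person the default frame takes in all peer rows of the current person,
# so every person 1 row (ids 1, 6, 11 and 16) reports CNT_VAL 3, CNT_ALL 4 and
# CNT_NULL 0, which is the 3 / 4 / 0 triple repeated in the point-11 rows here.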
COUNT 0 - MSG point-11 - ID 10 - PERSON 5 - DAT 2010-02-06 - VAL 15.40 - COUNT 15 - COUNT 16 - COUNT 0 - MSG point-11 - ID 11 - PERSON 1 - DAT 2010-03-02 - VAL 3.40 - COUNT 3 - COUNT 4 - COUNT 0 - MSG point-11 - ID 12 - PERSON 2 - DAT 2010-03-03 - VAL 6.40 - COUNT 6 - COUNT 7 - COUNT 0 - MSG point-11 - ID 13 - PERSON 3 - DAT 2010-03-04 - VAL 9.40 - COUNT 9 - COUNT 10 - COUNT 0 - MSG point-11 - ID 14 - PERSON 4 - DAT 2010-03-05 - VAL 12.40 - COUNT 12 - COUNT 13 - COUNT 0 - MSG point-11 - ID 15 - PERSON 5 - DAT 2010-03-06 - VAL 15.40 - COUNT 15 - COUNT 16 - COUNT 0 - MSG point-11 - ID 16 - PERSON 1 - DAT - VAL - COUNT 3 - COUNT 4 - COUNT 0 - MSG point-12 - ID 1 - PERSON 1 - DAT 2010-01-03 - VAL 2.30 - COUNT 3 - COUNT 1 - COUNT 4 - COUNT 1 - COUNT 0 - COUNT 0 - MSG point-12 - ID 2 - PERSON 2 - DAT 2010-01-04 - VAL 4.30 - COUNT 6 - COUNT 2 - COUNT 7 - COUNT 2 - COUNT 0 - COUNT 0 - MSG point-12 - ID 3 - PERSON 3 - DAT 2010-01-05 - VAL 6.30 - COUNT 9 - COUNT 3 - COUNT 10 - COUNT 3 - COUNT 0 - COUNT 0 - MSG point-12 - ID 4 - PERSON 4 - DAT 2010-01-06 - VAL 8.30 - COUNT 12 - COUNT 4 - COUNT 13 - COUNT 4 - COUNT 0 - COUNT 0 - MSG point-12 - ID 5 - PERSON 5 - DAT 2010-01-07 - VAL 10.30 - COUNT 15 - COUNT 5 - COUNT 16 - COUNT 5 - COUNT 0 - COUNT 0 - MSG point-12 - ID 6 - PERSON 1 - DAT 2010-02-02 - VAL 3.40 - COUNT 3 - COUNT 6 - COUNT 4 - COUNT 6 - COUNT 0 - COUNT 0 - MSG point-12 - ID 7 - PERSON 2 - DAT 2010-02-03 - VAL 6.40 - COUNT 6 - COUNT 7 - COUNT 7 - COUNT 7 - COUNT 0 - COUNT 0 - MSG point-12 - ID 8 - PERSON 3 - DAT 2010-02-04 - VAL 9.40 - COUNT 9 - COUNT 8 - COUNT 10 - COUNT 8 - COUNT 0 - COUNT 0 - MSG point-12 - ID 9 - PERSON 4 - DAT 2010-02-05 - VAL 12.40 - COUNT 12 - COUNT 9 - COUNT 13 - COUNT 9 - COUNT 0 - COUNT 0 - MSG point-12 - ID 10 - PERSON 5 - DAT 2010-02-06 - VAL 15.40 - COUNT 15 - COUNT 10 - COUNT 16 - COUNT 10 - COUNT 0 - COUNT 0 - MSG point-12 - ID 11 - PERSON 1 - DAT 2010-03-02 - VAL 3.40 - COUNT 3 - COUNT 11 - COUNT 4 - COUNT 11 - COUNT 0 - COUNT 0 - MSG point-12 - ID 12 - PERSON 2 - DAT 2010-03-03 - VAL 6.40 - COUNT 6 - COUNT 12 - COUNT 7 - COUNT 12 - COUNT 0 - COUNT 0 - MSG point-12 - ID 13 - PERSON 3 - DAT 2010-03-04 - VAL 9.40 - COUNT 9 - COUNT 13 - COUNT 10 - COUNT 13 - COUNT 0 - COUNT 0 - MSG point-12 - ID 14 - PERSON 4 - DAT 2010-03-05 - VAL 12.40 - COUNT 12 - COUNT 14 - COUNT 13 - COUNT 14 - COUNT 0 - COUNT 0 - MSG point-12 - ID 15 - PERSON 5 - DAT 2010-03-06 - VAL 15.40 - COUNT 15 - COUNT 15 - COUNT 16 - COUNT 15 - COUNT 0 - COUNT 0 - MSG point-12 - ID 16 - PERSON 1 - DAT - VAL - COUNT 3 - COUNT 15 - COUNT 4 - COUNT 16 - COUNT 0 - COUNT 0 - MSG point-13 - ID 1 - PERSON 1 - DAT 2010-01-03 - VAL 2.30 - SUM 2.30 - SUM 2.30 - COUNT 4 - COUNT 1 - SUM 1 - SUM 1 - MSG point-13 - ID 2 - PERSON 2 - DAT 2010-01-04 - VAL 4.30 - SUM 4.30 - SUM 4.30 - COUNT 3 - COUNT 1 - SUM 2 - SUM 2 - MSG point-13 - ID 3 - PERSON 3 - DAT 2010-01-05 - VAL 6.30 - SUM 6.30 - SUM 6.30 - COUNT 3 - COUNT 1 - SUM 3 - SUM 3 - MSG point-13 - ID 4 - PERSON 4 - DAT 2010-01-06 - VAL 8.30 - SUM 8.30 - SUM 8.30 - COUNT 3 - COUNT 1 - SUM 4 - SUM 4 - MSG point-13 - ID 5 - PERSON 5 - DAT 2010-01-07 - VAL 10.30 - SUM 10.30 - SUM 10.30 - COUNT 3 - COUNT 1 - SUM 5 - SUM 5 - MSG point-13 - ID 6 - PERSON 1 - DAT 2010-02-02 - VAL 3.40 - SUM 3.40 - SUM 3.40 - COUNT 4 - COUNT 2 - SUM 6 - SUM 6 - MSG point-13 - ID 7 - PERSON 2 - DAT 2010-02-03 - VAL 6.40 - SUM 6.40 - SUM 6.40 - COUNT 3 - COUNT 2 - SUM 7 - SUM 7 - MSG point-13 - ID 8 - PERSON 3 - DAT 2010-02-04 - VAL 9.40 - SUM 9.40 - SUM 9.40 - COUNT 3 - COUNT 2 - SUM 8 
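In the point-13 rows the COUNT pair shows the same aggregate with and without ORDER BY inside the OVER clause: a bare PARTITION BY yields the per-person total, while adding ORDER BY turns it into a running count inside the partition. A standalone sketch follows; the query and names are illustrative, not the point-13 statement itself.

running_count_demo = """
    set list on;
    select id, person,
           count(*) over (partition by person)             as person_total,
           count(*) over (partition by person order by id) as person_running
    from entries
    order by id;
"""
# Person 1 owns entries 1, 6, 11 and 16, so its rows show PERSON_TOTAL 4 with
# PERSON_RUNNING 1, 2, 3, 4; the other persons show 3 with 1, 2, 3. These are the
# COUNT pairs (4/1 .. 4/4 and 3/1 .. 3/3) of point-13.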
- SUM 8 - MSG point-13 - ID 9 - PERSON 4 - DAT 2010-02-05 - VAL 12.40 - SUM 12.40 - SUM 12.40 - COUNT 3 - COUNT 2 - SUM 9 - SUM 9 - MSG point-13 - ID 10 - PERSON 5 - DAT 2010-02-06 - VAL 15.40 - SUM 15.40 - SUM 15.40 - COUNT 3 - COUNT 2 - SUM 10 - SUM 10 - MSG point-13 - ID 11 - PERSON 1 - DAT 2010-03-02 - VAL 3.40 - SUM 3.40 - SUM 3.40 - COUNT 4 - COUNT 3 - SUM 11 - SUM 11 - MSG point-13 - ID 12 - PERSON 2 - DAT 2010-03-03 - VAL 6.40 - SUM 6.40 - SUM 6.40 - COUNT 3 - COUNT 3 - SUM 12 - SUM 12 - MSG point-13 - ID 13 - PERSON 3 - DAT 2010-03-04 - VAL 9.40 - SUM 9.40 - SUM 9.40 - COUNT 3 - COUNT 3 - SUM 13 - SUM 13 - MSG point-13 - ID 14 - PERSON 4 - DAT 2010-03-05 - VAL 12.40 - SUM 12.40 - SUM 12.40 - COUNT 3 - COUNT 3 - SUM 14 - SUM 14 - MSG point-13 - ID 15 - PERSON 5 - DAT 2010-03-06 - VAL 15.40 - SUM 15.40 - SUM 15.40 - COUNT 3 - COUNT 3 - SUM 15 - SUM 15 - MSG point-13 - ID 16 - PERSON 1 - DAT - VAL - SUM - SUM - COUNT 4 - COUNT 4 - SUM 16 - SUM 16 - MSG point-14 - ID 1 - PERSON 1 - DAT 2010-01-03 - VAL 2.30 - SUM 31.50 - SUM 15 - SUM 125.50 - SUM 120 - SUM 15.10 - SUM 20 - MSG point-14 - ID 2 - PERSON 2 - DAT 2010-01-04 - VAL 4.30 - SUM 31.50 - SUM 15 - SUM 125.50 - SUM 120 - SUM 23.10 - SUM 23 - MSG point-14 - ID 3 - PERSON 3 - DAT 2010-01-05 - VAL 6.30 - SUM 31.50 - SUM 15 - SUM 125.50 - SUM 120 - SUM 31.10 - SUM 26 - MSG point-14 - ID 4 - PERSON 4 - DAT 2010-01-06 - VAL 8.30 - SUM 31.50 - SUM 15 - SUM 125.50 - SUM 120 - SUM 39.10 - SUM 29 - MSG point-14 - ID 5 - PERSON 5 - DAT 2010-01-07 - VAL 10.30 - SUM 31.50 - SUM 15 - SUM 125.50 - SUM 120 - SUM 10.30 - SUM 5 - MSG point-14 - ID 6 - PERSON 1 - DAT 2010-02-02 - VAL 3.40 - SUM 47.00 - SUM 40 - SUM 125.50 - SUM 120 - SUM 6.80 - SUM 17 - MSG point-14 - ID 7 - PERSON 2 - DAT 2010-02-03 - VAL 6.40 - SUM 47.00 - SUM 40 - SUM 125.50 - SUM 120 - SUM 15.10 - SUM 20 - MSG point-14 - ID 8 - PERSON 3 - DAT 2010-02-04 - VAL 9.40 - SUM 47.00 - SUM 40 - SUM 125.50 - SUM 120 - SUM 23.10 - SUM 23 - MSG point-14 - ID 9 - PERSON 4 - DAT 2010-02-05 - VAL 12.40 - SUM 47.00 - SUM 40 - SUM 125.50 - SUM 120 - SUM 31.10 - SUM 26 - MSG point-14 - ID 10 - PERSON 5 - DAT 2010-02-06 - VAL 15.40 - SUM 47.00 - SUM 40 - SUM 125.50 - SUM 120 - SUM 39.10 - SUM 29 - MSG point-14 - ID 11 - PERSON 1 - DAT 2010-03-02 - VAL 3.40 - SUM 47.00 - SUM 65 - SUM 125.50 - SUM 120 - SUM 6.80 - SUM 17 - MSG point-14 - ID 12 - PERSON 2 - DAT 2010-03-03 - VAL 6.40 - SUM 47.00 - SUM 65 - SUM 125.50 - SUM 120 - SUM 15.10 - SUM 20 - MSG point-14 - ID 13 - PERSON 3 - DAT 2010-03-04 - VAL 9.40 - SUM 47.00 - SUM 65 - SUM 125.50 - SUM 120 - SUM 23.10 - SUM 23 - MSG point-14 - ID 14 - PERSON 4 - DAT 2010-03-05 - VAL 12.40 - SUM 47.00 - SUM 65 - SUM 125.50 - SUM 120 - SUM 31.10 - SUM 26 - MSG point-14 - ID 15 - PERSON 5 - DAT 2010-03-06 - VAL 15.40 - SUM 47.00 - SUM 65 - SUM 125.50 - SUM 120 - SUM 39.10 - SUM 29 - MSG point-14 - ID 16 - PERSON 1 - DAT - VAL - SUM - SUM 16 - SUM - SUM 16 - SUM - SUM 16 - MSG point-15 - ID 1 - PERSON 1 - DAT 2010-01-03 - VAL 2.30 - MIN 1 - MAX 5 - MIN 2.30 - MAX 10.30 - MSG point-15 - ID 2 - PERSON 2 - DAT 2010-01-04 - VAL 4.30 - MIN 1 - MAX 5 - MIN 2.30 - MAX 10.30 - MSG point-15 - ID 3 - PERSON 3 - DAT 2010-01-05 - VAL 6.30 - MIN 1 - MAX 5 - MIN 2.30 - MAX 10.30 - MSG point-15 - ID 4 - PERSON 4 - DAT 2010-01-06 - VAL 8.30 - MIN 1 - MAX 5 - MIN 2.30 - MAX 10.30 - MSG point-15 - ID 5 - PERSON 5 - DAT 2010-01-07 - VAL 10.30 - MIN 1 - MAX 5 - MIN 2.30 - MAX 10.30 - MSG point-15 - ID 6 - PERSON 1 - DAT 2010-02-02 - VAL 3.40 - MIN 6 - MAX 10 - MIN 3.40 - MAX 
15.40 - MSG point-15 - ID 7 - PERSON 2 - DAT 2010-02-03 - VAL 6.40 - MIN 6 - MAX 10 - MIN 3.40 - MAX 15.40 - MSG point-15 - ID 8 - PERSON 3 - DAT 2010-02-04 - VAL 9.40 - MIN 6 - MAX 10 - MIN 3.40 - MAX 15.40 - MSG point-15 - ID 9 - PERSON 4 - DAT 2010-02-05 - VAL 12.40 - MIN 6 - MAX 10 - MIN 3.40 - MAX 15.40 - MSG point-15 - ID 10 - PERSON 5 - DAT 2010-02-06 - VAL 15.40 - MIN 6 - MAX 10 - MIN 3.40 - MAX 15.40 - MSG point-15 - ID 11 - PERSON 1 - DAT 2010-03-02 - VAL 3.40 - MIN 11 - MAX 15 - MIN 3.40 - MAX 15.40 - MSG point-15 - ID 12 - PERSON 2 - DAT 2010-03-03 - VAL 6.40 - MIN 11 - MAX 15 - MIN 3.40 - MAX 15.40 - MSG point-15 - ID 13 - PERSON 3 - DAT 2010-03-04 - VAL 9.40 - MIN 11 - MAX 15 - MIN 3.40 - MAX 15.40 - MSG point-15 - ID 14 - PERSON 4 - DAT 2010-03-05 - VAL 12.40 - MIN 11 - MAX 15 - MIN 3.40 - MAX 15.40 - MSG point-15 - ID 15 - PERSON 5 - DAT 2010-03-06 - VAL 15.40 - MIN 11 - MAX 15 - MIN 3.40 - MAX 15.40 - MSG point-15 - ID 16 - PERSON 1 - DAT - VAL - MIN 16 - MAX 16 - MIN - MAX -""" - -@pytest.mark.version('>=3.0') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - - diff --git a/tests/functional/gtcs/test_window_func_03.py b/tests/functional/gtcs/test_window_func_03.py index a8551014..fccc6c74 100644 --- a/tests/functional/gtcs/test_window_func_03.py +++ b/tests/functional/gtcs/test_window_func_03.py @@ -1,51 +1,490 @@ #coding:utf-8 -# -# id: functional.gtcs.window_func_03 -# title: GTCS/tests/FB_SQL_WINDOW_FUNC_03 - set of miscelaneous tests for verification of windowed functions. -# decription: -# Statements from this test are added to initial SQL which is stored in: ... -# bt-repo -# iles\\gtcs-window-func.sql -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/FB_SQL_WINDOW_FUNC_03.script -# -# Checked on 4.0.0.1854; 3.0.6.33277 -# -# tracker_id: -# min_versions: ['3.0'] -# versions: 3.0 -# qmid: None + +""" +ID: gtcs.window-func-03 +TITLE: Set of miscelaneous tests for verification of windowed functions +DESCRIPTION: + Statements from this test are added to initial SQL which is stored in: /files/gtcs-window-func.sql + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/FB_SQL_WINDOW_FUNC_03.script +FBTEST: functional.gtcs.window_func_03 +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory() -substitutions_1 = [('[ \t]+', ' ')] +act = python_act('db', substitutions=[('[ \t]+', ' ')]) -init_script_1 = """""" +expected_stdout = """ + MSG point-01 + ID 1 + NAME Person 1 + DENSE_RANK 1 + DENSE_RANK 5 + RANK 1 + RANK 5 + ROW_NUMBER 1 + ROW_NUMBER 5 + MSG point-01 + ID 2 + NAME Person 2 + DENSE_RANK 2 + DENSE_RANK 4 + RANK 2 + RANK 4 + ROW_NUMBER 2 + ROW_NUMBER 4 + MSG point-01 + ID 3 + NAME Person 3 + DENSE_RANK 3 + DENSE_RANK 3 + RANK 3 + RANK 3 + ROW_NUMBER 3 + ROW_NUMBER 3 + MSG point-01 + ID 4 + NAME Person 4 + DENSE_RANK 4 + DENSE_RANK 2 + RANK 4 + RANK 2 + ROW_NUMBER 4 + ROW_NUMBER 2 + MSG point-01 + ID 5 + NAME Person 5 + DENSE_RANK 5 + DENSE_RANK 1 + RANK 5 + RANK 1 + ROW_NUMBER 5 + ROW_NUMBER 1 + MSG point-02 + ID 1 + PERSON 1 + DAT 2010-01-03 + VAL 2.30 + DENSE_RANK 2 + RANK 2 + ROW_NUMBER 2 + MSG point-02 + ID 2 + PERSON 2 + DAT 2010-01-04 + VAL 4.30 + DENSE_RANK 4 + RANK 5 + ROW_NUMBER 5 + MSG point-02 + ID 3 + PERSON 3 + DAT 2010-01-05 + VAL 6.30 + DENSE_RANK 5 + RANK 6 + ROW_NUMBER 6 + MSG point-02 + ID 4 + PERSON 4 + DAT 2010-01-06 + VAL 8.30 + DENSE_RANK 7 + 
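The point-02 rows being listed here rank the entries by VAL with NULLs first (the id 16 row gets 1/1/1), and the duplicated values from the second and third insert batches are where the three ranking functions diverge: DENSE_RANK leaves no gaps, RANK skips positions after a tie, ROW_NUMBER is always unique. A standalone sketch of that difference; the query and aliases are illustrative, not the module's full point-02 select list.

ranking_demo = """
    set list on;
    select id, val,
           dense_rank() over (order by val nulls first) as dns_rank,
           rank()       over (order by val nulls first) as rnk,
           row_number() over (order by val nulls first) as rn
    from entries
    order by id;
"""
# The NULL val (id 16) sorts first and gets 1/1/1. The two 3.40 rows (id 6 and 11)
# share DNS_RANK 3 and RNK 3 but receive RN 3 and 4, and the next value 4.30 (id 2)
# resumes with DNS_RANK 4 while RNK jumps to 5, exactly as in the point-02 listing.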
RANK 9 + ROW_NUMBER 9 + MSG point-02 + ID 5 + PERSON 5 + DAT 2010-01-07 + VAL 10.30 + DENSE_RANK 9 + RANK 12 + ROW_NUMBER 12 + MSG point-02 + ID 6 + PERSON 1 + DAT 2010-02-02 + VAL 3.40 + DENSE_RANK 3 + RANK 3 + ROW_NUMBER 3 + MSG point-02 + ID 7 + PERSON 2 + DAT 2010-02-03 + VAL 6.40 + DENSE_RANK 6 + RANK 7 + ROW_NUMBER 7 + MSG point-02 + ID 8 + PERSON 3 + DAT 2010-02-04 + VAL 9.40 + DENSE_RANK 8 + RANK 10 + ROW_NUMBER 10 + MSG point-02 + ID 9 + PERSON 4 + DAT 2010-02-05 + VAL 12.40 + DENSE_RANK 10 + RANK 13 + ROW_NUMBER 13 + MSG point-02 + ID 10 + PERSON 5 + DAT 2010-02-06 + VAL 15.40 + DENSE_RANK 11 + RANK 15 + ROW_NUMBER 15 + MSG point-02 + ID 11 + PERSON 1 + DAT 2010-03-02 + VAL 3.40 + DENSE_RANK 3 + RANK 3 + ROW_NUMBER 4 + MSG point-02 + ID 12 + PERSON 2 + DAT 2010-03-03 + VAL 6.40 + DENSE_RANK 6 + RANK 7 + ROW_NUMBER 8 + MSG point-02 + ID 13 + PERSON 3 + DAT 2010-03-04 + VAL 9.40 + DENSE_RANK 8 + RANK 10 + ROW_NUMBER 11 + MSG point-02 + ID 14 + PERSON 4 + DAT 2010-03-05 + VAL 12.40 + DENSE_RANK 10 + RANK 13 + ROW_NUMBER 14 + MSG point-02 + ID 15 + PERSON 5 + DAT 2010-03-06 + VAL 15.40 + DENSE_RANK 11 + RANK 15 + ROW_NUMBER 16 + MSG point-02 + ID 16 + PERSON 1 + DAT + VAL + DENSE_RANK 1 + RANK 1 + ROW_NUMBER 1 + MSG point-03 + ID 1 + PERSON 1 + DAT 2010-01-03 + VAL 2.30 + DENSE_RANK 2 + RANK 2 + ROW_NUMBER 2 + MSG point-03 + ID 2 + PERSON 2 + DAT 2010-01-04 + VAL 4.30 + DENSE_RANK 1 + RANK 1 + ROW_NUMBER 1 + MSG point-03 + ID 3 + PERSON 3 + DAT 2010-01-05 + VAL 6.30 + DENSE_RANK 1 + RANK 1 + ROW_NUMBER 1 + MSG point-03 + ID 4 + PERSON 4 + DAT 2010-01-06 + VAL 8.30 + DENSE_RANK 1 + RANK 1 + ROW_NUMBER 1 + MSG point-03 + ID 5 + PERSON 5 + DAT 2010-01-07 + VAL 10.30 + DENSE_RANK 1 + RANK 1 + ROW_NUMBER 1 + MSG point-03 + ID 6 + PERSON 1 + DAT 2010-02-02 + VAL 3.40 + DENSE_RANK 3 + RANK 3 + ROW_NUMBER 3 + MSG point-03 + ID 7 + PERSON 2 + DAT 2010-02-03 + VAL 6.40 + DENSE_RANK 2 + RANK 2 + ROW_NUMBER 2 + MSG point-03 + ID 8 + PERSON 3 + DAT 2010-02-04 + VAL 9.40 + DENSE_RANK 2 + RANK 2 + ROW_NUMBER 2 + MSG point-03 + ID 9 + PERSON 4 + DAT 2010-02-05 + VAL 12.40 + DENSE_RANK 2 + RANK 2 + ROW_NUMBER 2 + MSG point-03 + ID 10 + PERSON 5 + DAT 2010-02-06 + VAL 15.40 + DENSE_RANK 2 + RANK 2 + ROW_NUMBER 2 + MSG point-03 + ID 11 + PERSON 1 + DAT 2010-03-02 + VAL 3.40 + DENSE_RANK 4 + RANK 4 + ROW_NUMBER 4 + MSG point-03 + ID 12 + PERSON 2 + DAT 2010-03-03 + VAL 6.40 + DENSE_RANK 3 + RANK 3 + ROW_NUMBER 3 + MSG point-03 + ID 13 + PERSON 3 + DAT 2010-03-04 + VAL 9.40 + DENSE_RANK 3 + RANK 3 + ROW_NUMBER 3 + MSG point-03 + ID 14 + PERSON 4 + DAT 2010-03-05 + VAL 12.40 + DENSE_RANK 3 + RANK 3 + ROW_NUMBER 3 + MSG point-03 + ID 15 + PERSON 5 + DAT 2010-03-06 + VAL 15.40 + DENSE_RANK 3 + RANK 3 + ROW_NUMBER 3 + MSG point-03 + ID 16 + PERSON 1 + DAT + VAL + DENSE_RANK 1 + RANK 1 + ROW_NUMBER 1 + MSG point-04 + PERSON 1 + SUM 4 + SUM 4 + SUM 10 + MSG point-04 + PERSON 2 + SUM 3 + SUM 3 + SUM 6 + MSG point-04 + PERSON 3 + SUM 3 + SUM 3 + SUM 6 + MSG point-04 + PERSON 4 + SUM 3 + SUM 3 + SUM 6 + MSG point-04 + PERSON 5 + SUM 3 + SUM 3 + SUM 6 + MSG point-05 + SUM 16 + SUM 16 + SUM 34 + MSG point-06 + ID 1 + PERSON 1 + DAT 2010-01-03 + VAL 2.30 + SRN 1 + ROW_NUMBER 1 + MSG point-06 + ID 2 + PERSON 2 + DAT 2010-01-04 + VAL 4.30 + SRN 1 + ROW_NUMBER 2 + MSG point-06 + ID 3 + PERSON 3 + DAT 2010-01-05 + VAL 6.30 + SRN 1 + ROW_NUMBER 3 + MSG point-06 + ID 4 + PERSON 4 + DAT 2010-01-06 + VAL 8.30 + SRN 1 + ROW_NUMBER 4 + MSG point-06 + ID 5 + PERSON 5 + DAT 2010-01-07 + VAL 10.30 + SRN 1 + ROW_NUMBER 5 + MSG 
point-06 + ID 6 + PERSON 1 + DAT 2010-02-02 + VAL 3.40 + SRN 2 + ROW_NUMBER 6 + MSG point-06 + ID 7 + PERSON 2 + DAT 2010-02-03 + VAL 6.40 + SRN 2 + ROW_NUMBER 7 + MSG point-06 + ID 8 + PERSON 3 + DAT 2010-02-04 + VAL 9.40 + SRN 2 + ROW_NUMBER 8 + MSG point-06 + ID 9 + PERSON 4 + DAT 2010-02-05 + VAL 12.40 + SRN 2 + ROW_NUMBER 9 + MSG point-06 + ID 10 + PERSON 5 + DAT 2010-02-06 + VAL 15.40 + SRN 2 + ROW_NUMBER 10 + MSG point-06 + ID 11 + PERSON 1 + DAT 2010-03-02 + VAL 3.40 + SRN 3 + ROW_NUMBER 11 + MSG point-06 + ID 12 + PERSON 2 + DAT 2010-03-03 + VAL 6.40 + SRN 3 + ROW_NUMBER 12 + MSG point-06 + ID 13 + PERSON 3 + DAT 2010-03-04 + VAL 9.40 + SRN 3 + ROW_NUMBER 13 + MSG point-06 + ID 14 + PERSON 4 + DAT 2010-03-05 + VAL 12.40 + SRN 3 + ROW_NUMBER 14 + MSG point-06 + ID 15 + PERSON 5 + DAT 2010-03-06 + VAL 15.40 + SRN 3 + ROW_NUMBER 15 + MSG point-06 + ID 16 + PERSON 1 + DAT + VAL + SRN 4 + ROW_NUMBER 16 +""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +@pytest.mark.skip('FIXME: Not IMPLEMENTED') +@pytest.mark.version('>=3.0') +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") # test_script_1 #--- -# +# # import os # import sys # import subprocess -# +# # os.environ["ISC_USER"] = user_name # os.environ["ISC_PASSWORD"] = user_password -# +# # db_conn.close() -# +# # with open( os.path.join(context['files_location'],'gtcs-window-func.sql'), 'r') as f: # sql_init = f.read() -# +# # sql_addi=''' # set list on; -# +# # select # 'point-01' as msg, # p.*, @@ -57,9 +496,9 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # row_number() over (order by id desc) # from persons p # order by id; -# +# # --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# +# # select # 'point-02' as msg, # e.*, @@ -68,9 +507,9 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # row_number() over (order by val nulls first) # from entries e # order by id; -# +# # --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# +# # select # 'point-03' as msg, # e.*, @@ -79,9 +518,9 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # row_number() over (partition by person order by val nulls first, dat nulls first) # from entries e # order by id; -# +# # --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# +# # select # 'point-04' as msg, # person, @@ -99,9 +538,9 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # group by # person # order by person; -# +# # --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# +# # select # 'point-05' as msg, # sum(dr), @@ -115,9 +554,9 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # row_number() over (partition by person) rn # from entries e # ) x; -# +# # --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# +# # select # 'point-06' as msg, # id, @@ -139,460 +578,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # val # order by id; # ''' -# +# # runProgram('isql', [ dsn], os.linesep.join( (sql_init, sql_addi) ) ) -# +# #--- -act_1 = python_act('db_1', substitutions=substitutions_1) - -expected_stdout_1 = """ - MSG point-01 - ID 1 - NAME Person 1 - DENSE_RANK 1 - DENSE_RANK 5 - RANK 1 - RANK 5 - ROW_NUMBER 1 - ROW_NUMBER 5 - MSG point-01 - ID 2 - NAME Person 2 - DENSE_RANK 2 - DENSE_RANK 4 - RANK 2 - RANK 4 - ROW_NUMBER 2 - ROW_NUMBER 4 - MSG point-01 - ID 3 - NAME Person 3 - DENSE_RANK 3 - DENSE_RANK 3 - RANK 3 - RANK 3 - ROW_NUMBER 3 - ROW_NUMBER 3 - MSG point-01 - ID 4 - NAME Person 4 - DENSE_RANK 4 - DENSE_RANK 2 - RANK 4 - RANK 2 - ROW_NUMBER 4 - ROW_NUMBER 2 - MSG point-01 
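Each of these gtcs window-func modules is converted the same way: the expected listing is kept, the test body is a skipped stub, and the old fbtest implementation (read files/gtcs-window-func.sql, append the point-NN queries, pipe everything through isql) survives only as a comment. Below is a minimal, hedged sketch of how such a stub could later be filled in with the new plugin API; act.files_dir as the location of the shared script is an assumption, and the scenario is reduced to the point-01 query with its five expected records rather than the full listing kept in the module.

import pytest
from firebird.qa import *

db = db_factory()
act = python_act('db', substitutions=[('[ \t]+', ' ')])

# The per-test queries that the old fbtest version appended to the init script.
test_script = """
    set list on;
    select 'point-01' as msg, p.*, sum(1) over (order by id) from persons p order by id;
"""

expected_stdout = """
    MSG point-01
    ID 1
    NAME Person 1
    SUM 1
    MSG point-01
    ID 2
    NAME Person 2
    SUM 2
    MSG point-01
    ID 3
    NAME Person 3
    SUM 3
    MSG point-01
    ID 4
    NAME Person 4
    SUM 4
    MSG point-01
    ID 5
    NAME Person 5
    SUM 5
"""

@pytest.mark.version('>=3.0')
def test_1(act: Action):
    # Shared DDL/DML for all gtcs window-func tests (location is an assumption).
    sql_init = (act.files_dir / 'gtcs-window-func.sql').read_text()
    act.expected_stdout = expected_stdout
    # Run the init script plus the per-test queries through isql and compare the
    # whitespace-normalized listing with the expected block.
    act.isql(switches=['-q'], input='\n'.join((sql_init, test_script)))
    assert act.clean_stdout == act.clean_expected_stdout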
- ID 5 - NAME Person 5 - DENSE_RANK 5 - DENSE_RANK 1 - RANK 5 - RANK 1 - ROW_NUMBER 5 - ROW_NUMBER 1 - MSG point-02 - ID 1 - PERSON 1 - DAT 2010-01-03 - VAL 2.30 - DENSE_RANK 2 - RANK 2 - ROW_NUMBER 2 - MSG point-02 - ID 2 - PERSON 2 - DAT 2010-01-04 - VAL 4.30 - DENSE_RANK 4 - RANK 5 - ROW_NUMBER 5 - MSG point-02 - ID 3 - PERSON 3 - DAT 2010-01-05 - VAL 6.30 - DENSE_RANK 5 - RANK 6 - ROW_NUMBER 6 - MSG point-02 - ID 4 - PERSON 4 - DAT 2010-01-06 - VAL 8.30 - DENSE_RANK 7 - RANK 9 - ROW_NUMBER 9 - MSG point-02 - ID 5 - PERSON 5 - DAT 2010-01-07 - VAL 10.30 - DENSE_RANK 9 - RANK 12 - ROW_NUMBER 12 - MSG point-02 - ID 6 - PERSON 1 - DAT 2010-02-02 - VAL 3.40 - DENSE_RANK 3 - RANK 3 - ROW_NUMBER 3 - MSG point-02 - ID 7 - PERSON 2 - DAT 2010-02-03 - VAL 6.40 - DENSE_RANK 6 - RANK 7 - ROW_NUMBER 7 - MSG point-02 - ID 8 - PERSON 3 - DAT 2010-02-04 - VAL 9.40 - DENSE_RANK 8 - RANK 10 - ROW_NUMBER 10 - MSG point-02 - ID 9 - PERSON 4 - DAT 2010-02-05 - VAL 12.40 - DENSE_RANK 10 - RANK 13 - ROW_NUMBER 13 - MSG point-02 - ID 10 - PERSON 5 - DAT 2010-02-06 - VAL 15.40 - DENSE_RANK 11 - RANK 15 - ROW_NUMBER 15 - MSG point-02 - ID 11 - PERSON 1 - DAT 2010-03-02 - VAL 3.40 - DENSE_RANK 3 - RANK 3 - ROW_NUMBER 4 - MSG point-02 - ID 12 - PERSON 2 - DAT 2010-03-03 - VAL 6.40 - DENSE_RANK 6 - RANK 7 - ROW_NUMBER 8 - MSG point-02 - ID 13 - PERSON 3 - DAT 2010-03-04 - VAL 9.40 - DENSE_RANK 8 - RANK 10 - ROW_NUMBER 11 - MSG point-02 - ID 14 - PERSON 4 - DAT 2010-03-05 - VAL 12.40 - DENSE_RANK 10 - RANK 13 - ROW_NUMBER 14 - MSG point-02 - ID 15 - PERSON 5 - DAT 2010-03-06 - VAL 15.40 - DENSE_RANK 11 - RANK 15 - ROW_NUMBER 16 - MSG point-02 - ID 16 - PERSON 1 - DAT - VAL - DENSE_RANK 1 - RANK 1 - ROW_NUMBER 1 - MSG point-03 - ID 1 - PERSON 1 - DAT 2010-01-03 - VAL 2.30 - DENSE_RANK 2 - RANK 2 - ROW_NUMBER 2 - MSG point-03 - ID 2 - PERSON 2 - DAT 2010-01-04 - VAL 4.30 - DENSE_RANK 1 - RANK 1 - ROW_NUMBER 1 - MSG point-03 - ID 3 - PERSON 3 - DAT 2010-01-05 - VAL 6.30 - DENSE_RANK 1 - RANK 1 - ROW_NUMBER 1 - MSG point-03 - ID 4 - PERSON 4 - DAT 2010-01-06 - VAL 8.30 - DENSE_RANK 1 - RANK 1 - ROW_NUMBER 1 - MSG point-03 - ID 5 - PERSON 5 - DAT 2010-01-07 - VAL 10.30 - DENSE_RANK 1 - RANK 1 - ROW_NUMBER 1 - MSG point-03 - ID 6 - PERSON 1 - DAT 2010-02-02 - VAL 3.40 - DENSE_RANK 3 - RANK 3 - ROW_NUMBER 3 - MSG point-03 - ID 7 - PERSON 2 - DAT 2010-02-03 - VAL 6.40 - DENSE_RANK 2 - RANK 2 - ROW_NUMBER 2 - MSG point-03 - ID 8 - PERSON 3 - DAT 2010-02-04 - VAL 9.40 - DENSE_RANK 2 - RANK 2 - ROW_NUMBER 2 - MSG point-03 - ID 9 - PERSON 4 - DAT 2010-02-05 - VAL 12.40 - DENSE_RANK 2 - RANK 2 - ROW_NUMBER 2 - MSG point-03 - ID 10 - PERSON 5 - DAT 2010-02-06 - VAL 15.40 - DENSE_RANK 2 - RANK 2 - ROW_NUMBER 2 - MSG point-03 - ID 11 - PERSON 1 - DAT 2010-03-02 - VAL 3.40 - DENSE_RANK 4 - RANK 4 - ROW_NUMBER 4 - MSG point-03 - ID 12 - PERSON 2 - DAT 2010-03-03 - VAL 6.40 - DENSE_RANK 3 - RANK 3 - ROW_NUMBER 3 - MSG point-03 - ID 13 - PERSON 3 - DAT 2010-03-04 - VAL 9.40 - DENSE_RANK 3 - RANK 3 - ROW_NUMBER 3 - MSG point-03 - ID 14 - PERSON 4 - DAT 2010-03-05 - VAL 12.40 - DENSE_RANK 3 - RANK 3 - ROW_NUMBER 3 - MSG point-03 - ID 15 - PERSON 5 - DAT 2010-03-06 - VAL 15.40 - DENSE_RANK 3 - RANK 3 - ROW_NUMBER 3 - MSG point-03 - ID 16 - PERSON 1 - DAT - VAL - DENSE_RANK 1 - RANK 1 - ROW_NUMBER 1 - MSG point-04 - PERSON 1 - SUM 4 - SUM 4 - SUM 10 - MSG point-04 - PERSON 2 - SUM 3 - SUM 3 - SUM 6 - MSG point-04 - PERSON 3 - SUM 3 - SUM 3 - SUM 6 - MSG point-04 - PERSON 4 - SUM 3 - SUM 3 - SUM 6 - MSG point-04 - PERSON 5 - SUM 3 - SUM 3 
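Both the dropped and the new expected listings in these modules are compared only after the substitutions passed to python_act, here ('[ \t]+', ' '), have been applied, so the column padding produced by SET LIST ON does not need to match character for character. A rough sketch of that normalization follows; the assumption is that each substitution is applied as a regular-expression replacement to both the actual and the expected text before the comparison.

import re

def normalize(text: str, substitutions=(('[ \t]+', ' '),)) -> str:
    # Apply each (pattern, replacement) pair, mirroring the plugin's substitutions.
    for pattern, replacement in substitutions:
        text = re.sub(pattern, replacement, text)
    return text

# Runs of blanks collapse to a single space, so these two spellings compare as equal.
assert normalize('  MSG    point-01') == ' MSG point-01'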
- SUM 6 - MSG point-05 - SUM 16 - SUM 16 - SUM 34 - MSG point-06 - ID 1 - PERSON 1 - DAT 2010-01-03 - VAL 2.30 - SRN 1 - ROW_NUMBER 1 - MSG point-06 - ID 2 - PERSON 2 - DAT 2010-01-04 - VAL 4.30 - SRN 1 - ROW_NUMBER 2 - MSG point-06 - ID 3 - PERSON 3 - DAT 2010-01-05 - VAL 6.30 - SRN 1 - ROW_NUMBER 3 - MSG point-06 - ID 4 - PERSON 4 - DAT 2010-01-06 - VAL 8.30 - SRN 1 - ROW_NUMBER 4 - MSG point-06 - ID 5 - PERSON 5 - DAT 2010-01-07 - VAL 10.30 - SRN 1 - ROW_NUMBER 5 - MSG point-06 - ID 6 - PERSON 1 - DAT 2010-02-02 - VAL 3.40 - SRN 2 - ROW_NUMBER 6 - MSG point-06 - ID 7 - PERSON 2 - DAT 2010-02-03 - VAL 6.40 - SRN 2 - ROW_NUMBER 7 - MSG point-06 - ID 8 - PERSON 3 - DAT 2010-02-04 - VAL 9.40 - SRN 2 - ROW_NUMBER 8 - MSG point-06 - ID 9 - PERSON 4 - DAT 2010-02-05 - VAL 12.40 - SRN 2 - ROW_NUMBER 9 - MSG point-06 - ID 10 - PERSON 5 - DAT 2010-02-06 - VAL 15.40 - SRN 2 - ROW_NUMBER 10 - MSG point-06 - ID 11 - PERSON 1 - DAT 2010-03-02 - VAL 3.40 - SRN 3 - ROW_NUMBER 11 - MSG point-06 - ID 12 - PERSON 2 - DAT 2010-03-03 - VAL 6.40 - SRN 3 - ROW_NUMBER 12 - MSG point-06 - ID 13 - PERSON 3 - DAT 2010-03-04 - VAL 9.40 - SRN 3 - ROW_NUMBER 13 - MSG point-06 - ID 14 - PERSON 4 - DAT 2010-03-05 - VAL 12.40 - SRN 3 - ROW_NUMBER 14 - MSG point-06 - ID 15 - PERSON 5 - DAT 2010-03-06 - VAL 15.40 - SRN 3 - ROW_NUMBER 15 - MSG point-06 - ID 16 - PERSON 1 - DAT - VAL - SRN 4 - ROW_NUMBER 16 -""" - -@pytest.mark.version('>=3.0') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - - diff --git a/tests/functional/gtcs/test_window_func_04.py b/tests/functional/gtcs/test_window_func_04.py index 04c4b3b6..b2629fb5 100644 --- a/tests/functional/gtcs/test_window_func_04.py +++ b/tests/functional/gtcs/test_window_func_04.py @@ -1,51 +1,592 @@ #coding:utf-8 -# -# id: functional.gtcs.window_func_04 -# title: GTCS/tests/FB_SQL_WINDOW_FUNC_04 - set of miscelaneous tests for verification of windowed functions. -# decription: -# Statements from this test are added to initial SQL which is stored in: ... 
-# bt-repo -# iles\\gtcs-window-func.sql -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/FB_SQL_WINDOW_FUNC_04.script -# -# Checked on 4.0.0.1854; 3.0.6.33277 -# -# tracker_id: -# min_versions: ['3.0'] -# versions: 3.0 -# qmid: None + +""" +ID: gtcs.window-func-04 +TITLE: Set of miscelaneous tests for verification of windowed functions +DESCRIPTION: + Statements from this test are added to initial SQL which is stored in: /files/gtcs-window-func.sql + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/FB_SQL_WINDOW_FUNC_04.script +FBTEST: functional.gtcs.window_func_04 +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory() -substitutions_1 = [('[ \t]+', ' ')] +act = python_act('db', substitutions=[('[ \t]+', ' ')]) -init_script_1 = """""" +expected_stdout = """ + MSG point-1 + ID 1 + PERSON 1 + DAT 2010-01-03 + VAL 2.30 + FIRST_VALUE + FIRST_VALUE 2.30 + FIRST_VALUE 2.30 + NTH_VALUE + NTH_VALUE 2.30 + NTH_VALUE 2.30 + NTH_VALUE 2.30 + NTH_VALUE + NTH_VALUE + LAST_VALUE 2.30 + LAST_VALUE 2.30 + LAST_VALUE 2.30 + MSG point-1 + ID 2 + PERSON 2 + DAT 2010-01-04 + VAL 4.30 + FIRST_VALUE + FIRST_VALUE 2.30 + FIRST_VALUE 4.30 + NTH_VALUE + NTH_VALUE 2.30 + NTH_VALUE 4.30 + NTH_VALUE 2.30 + NTH_VALUE 3.40 + NTH_VALUE + LAST_VALUE 4.30 + LAST_VALUE 4.30 + LAST_VALUE 4.30 + MSG point-1 + ID 3 + PERSON 3 + DAT 2010-01-05 + VAL 6.30 + FIRST_VALUE + FIRST_VALUE 2.30 + FIRST_VALUE 6.30 + NTH_VALUE + NTH_VALUE 2.30 + NTH_VALUE 6.30 + NTH_VALUE 2.30 + NTH_VALUE 3.40 + NTH_VALUE + LAST_VALUE 6.30 + LAST_VALUE 6.30 + LAST_VALUE 6.30 + MSG point-1 + ID 4 + PERSON 4 + DAT 2010-01-06 + VAL 8.30 + FIRST_VALUE + FIRST_VALUE 2.30 + FIRST_VALUE 8.30 + NTH_VALUE + NTH_VALUE 2.30 + NTH_VALUE 8.30 + NTH_VALUE 2.30 + NTH_VALUE 3.40 + NTH_VALUE + LAST_VALUE 8.30 + LAST_VALUE 8.30 + LAST_VALUE 8.30 + MSG point-1 + ID 5 + PERSON 5 + DAT 2010-01-07 + VAL 10.30 + FIRST_VALUE + FIRST_VALUE 2.30 + FIRST_VALUE 10.30 + NTH_VALUE + NTH_VALUE 2.30 + NTH_VALUE 10.30 + NTH_VALUE 2.30 + NTH_VALUE 3.40 + NTH_VALUE + LAST_VALUE 10.30 + LAST_VALUE 10.30 + LAST_VALUE 10.30 + MSG point-1 + ID 6 + PERSON 1 + DAT 2010-02-02 + VAL 3.40 + FIRST_VALUE + FIRST_VALUE 2.30 + FIRST_VALUE 2.30 + NTH_VALUE + NTH_VALUE 2.30 + NTH_VALUE 2.30 + NTH_VALUE 2.30 + NTH_VALUE 3.40 + NTH_VALUE 3.40 + LAST_VALUE 3.40 + LAST_VALUE 3.40 + LAST_VALUE 3.40 + MSG point-1 + ID 7 + PERSON 2 + DAT 2010-02-03 + VAL 6.40 + FIRST_VALUE + FIRST_VALUE 2.30 + FIRST_VALUE 4.30 + NTH_VALUE + NTH_VALUE 2.30 + NTH_VALUE 4.30 + NTH_VALUE 2.30 + NTH_VALUE 3.40 + NTH_VALUE 6.40 + LAST_VALUE 6.40 + LAST_VALUE 6.40 + LAST_VALUE 6.40 + MSG point-1 + ID 8 + PERSON 3 + DAT 2010-02-04 + VAL 9.40 + FIRST_VALUE + FIRST_VALUE 2.30 + FIRST_VALUE 6.30 + NTH_VALUE + NTH_VALUE 2.30 + NTH_VALUE 6.30 + NTH_VALUE 2.30 + NTH_VALUE 3.40 + NTH_VALUE 9.40 + LAST_VALUE 9.40 + LAST_VALUE 9.40 + LAST_VALUE 9.40 + MSG point-1 + ID 9 + PERSON 4 + DAT 2010-02-05 + VAL 12.40 + FIRST_VALUE + FIRST_VALUE 2.30 + FIRST_VALUE 8.30 + NTH_VALUE + NTH_VALUE 2.30 + NTH_VALUE 8.30 + NTH_VALUE 2.30 + NTH_VALUE 3.40 + NTH_VALUE 12.40 + LAST_VALUE 12.40 + LAST_VALUE 12.40 + LAST_VALUE 12.40 + MSG point-1 + ID 10 + PERSON 5 + DAT 2010-02-06 + VAL 15.40 + FIRST_VALUE + FIRST_VALUE 2.30 + FIRST_VALUE 10.30 + NTH_VALUE + NTH_VALUE 2.30 + NTH_VALUE 10.30 + NTH_VALUE 2.30 + NTH_VALUE 3.40 + NTH_VALUE 15.40 + LAST_VALUE 15.40 + LAST_VALUE 15.40 + 
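In the point-1 listing every LAST_VALUE column simply echoes VAL. That is expected: with an ORDER BY inside OVER and no explicit frame, the window ends at the current row, so the last value of the frame is the current row's own value; the leading FIRST_VALUE column is empty on every row, consistent with an ordering that puts the NULL VAL of entry 16 first. A standalone sketch of the echo effect and of how an explicit frame removes it; the query and names are illustrative, not the module's point-1 statement.

last_value_demo = """
    set list on;
    select id, val,
           last_value(val) over (order by val nulls first, id) as lv_default_frame,
           last_value(val) over (order by val nulls first, id
                                 rows between unbounded preceding
                                          and unbounded following) as lv_whole_set
    from entries
    order by id;
"""
# LV_DEFAULT_FRAME equals VAL on every row, as in point-1 above, while LV_WHOLE_SET
# is 15.40 everywhere. The explicit frame clause needs Firebird 4.0; the echo effect
# itself is already visible on 3.0.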
LAST_VALUE 15.40 + MSG point-1 + ID 11 + PERSON 1 + DAT 2010-03-02 + VAL 3.40 + FIRST_VALUE + FIRST_VALUE 2.30 + FIRST_VALUE 2.30 + NTH_VALUE + NTH_VALUE 2.30 + NTH_VALUE 2.30 + NTH_VALUE 2.30 + NTH_VALUE 3.40 + NTH_VALUE 3.40 + LAST_VALUE 3.40 + LAST_VALUE 3.40 + LAST_VALUE 3.40 + MSG point-1 + ID 12 + PERSON 2 + DAT 2010-03-03 + VAL 6.40 + FIRST_VALUE + FIRST_VALUE 2.30 + FIRST_VALUE 4.30 + NTH_VALUE + NTH_VALUE 2.30 + NTH_VALUE 4.30 + NTH_VALUE 2.30 + NTH_VALUE 3.40 + NTH_VALUE 6.40 + LAST_VALUE 6.40 + LAST_VALUE 6.40 + LAST_VALUE 6.40 + MSG point-1 + ID 13 + PERSON 3 + DAT 2010-03-04 + VAL 9.40 + FIRST_VALUE + FIRST_VALUE 2.30 + FIRST_VALUE 6.30 + NTH_VALUE + NTH_VALUE 2.30 + NTH_VALUE 6.30 + NTH_VALUE 2.30 + NTH_VALUE 3.40 + NTH_VALUE 9.40 + LAST_VALUE 9.40 + LAST_VALUE 9.40 + LAST_VALUE 9.40 + MSG point-1 + ID 14 + PERSON 4 + DAT 2010-03-05 + VAL 12.40 + FIRST_VALUE + FIRST_VALUE 2.30 + FIRST_VALUE 8.30 + NTH_VALUE + NTH_VALUE 2.30 + NTH_VALUE 8.30 + NTH_VALUE 2.30 + NTH_VALUE 3.40 + NTH_VALUE 12.40 + LAST_VALUE 12.40 + LAST_VALUE 12.40 + LAST_VALUE 12.40 + MSG point-1 + ID 15 + PERSON 5 + DAT 2010-03-06 + VAL 15.40 + FIRST_VALUE + FIRST_VALUE 2.30 + FIRST_VALUE 10.30 + NTH_VALUE + NTH_VALUE 2.30 + NTH_VALUE 10.30 + NTH_VALUE 2.30 + NTH_VALUE 3.40 + NTH_VALUE 15.40 + LAST_VALUE 15.40 + LAST_VALUE 15.40 + LAST_VALUE 15.40 + MSG point-1 + ID 16 + PERSON 1 + DAT + VAL + FIRST_VALUE + FIRST_VALUE 2.30 + FIRST_VALUE 2.30 + NTH_VALUE + NTH_VALUE 2.30 + NTH_VALUE 2.30 + NTH_VALUE + NTH_VALUE 3.40 + NTH_VALUE 3.40 + LAST_VALUE + LAST_VALUE + LAST_VALUE + MSG point-2 + PERSON 1 + SUM 9.10 + FIRST_VALUE 41.10 + NTH_VALUE 41.10 + NTH_VALUE 33.10 + LAST_VALUE 9.10 + MSG point-2 + PERSON 2 + SUM 17.10 + FIRST_VALUE 41.10 + NTH_VALUE 41.10 + NTH_VALUE 33.10 + LAST_VALUE 17.10 + MSG point-2 + PERSON 3 + SUM 25.10 + FIRST_VALUE 41.10 + NTH_VALUE 41.10 + NTH_VALUE 33.10 + LAST_VALUE 25.10 + MSG point-2 + PERSON 4 + SUM 33.10 + FIRST_VALUE 41.10 + NTH_VALUE 41.10 + NTH_VALUE 33.10 + LAST_VALUE 33.10 + MSG point-2 + PERSON 5 + SUM 41.10 + FIRST_VALUE 41.10 + NTH_VALUE 41.10 + NTH_VALUE + LAST_VALUE 41.10 + MSG point-3 + ID 1 + PERSON 1 + DAT 2010-01-03 + VAL 2.30 + LAG + LEAD 3.40 + LAG + LEAD 4.30 + LAG + LAG -2.30 + LEAD 4.30 + MSG point-3 + ID 2 + PERSON 2 + DAT 2010-01-04 + VAL 4.30 + LAG 3.40 + LEAD 6.30 + LAG 3.40 + LEAD 6.40 + LAG 3.40 + LAG 3.40 + LEAD 6.40 + MSG point-3 + ID 3 + PERSON 3 + DAT 2010-01-05 + VAL 6.30 + LAG 4.30 + LEAD 6.40 + LAG 3.40 + LEAD 8.30 + LAG 4.30 + LAG 3.40 + LEAD 8.30 + MSG point-3 + ID 4 + PERSON 4 + DAT 2010-01-06 + VAL 8.30 + LAG 6.40 + LEAD 9.40 + LAG 6.40 + LEAD 10.30 + LAG 6.40 + LAG 6.40 + LEAD 10.30 + MSG point-3 + ID 5 + PERSON 5 + DAT 2010-01-07 + VAL 10.30 + LAG 9.40 + LEAD 12.40 + LAG 9.40 + LEAD 15.40 + LAG 9.40 + LAG 9.40 + LEAD 15.40 + MSG point-3 + ID 6 + PERSON 1 + DAT 2010-02-02 + VAL 3.40 + LAG 2.30 + LEAD 3.40 + LAG + LEAD 6.30 + LAG 2.30 + LAG + LEAD 6.30 + MSG point-3 + ID 7 + PERSON 2 + DAT 2010-02-03 + VAL 6.40 + LAG 6.30 + LEAD 6.40 + LAG 4.30 + LEAD 9.40 + LAG 6.30 + LAG 4.30 + LEAD 9.40 + MSG point-3 + ID 8 + PERSON 3 + DAT 2010-02-04 + VAL 9.40 + LAG 8.30 + LEAD 9.40 + LAG 6.40 + LEAD 12.40 + LAG 8.30 + LAG 6.40 + LEAD 12.40 + MSG point-3 + ID 9 + PERSON 4 + DAT 2010-02-05 + VAL 12.40 + LAG 10.30 + LEAD 12.40 + LAG 9.40 + LEAD 15.40 + LAG 10.30 + LAG 9.40 + LEAD 15.40 + MSG point-3 + ID 10 + PERSON 5 + DAT 2010-02-06 + VAL 15.40 + LAG 12.40 + LEAD 15.40 + LAG 12.40 + LEAD + LAG 12.40 + LAG 12.40 + LEAD -1.00 + MSG point-3 + ID 11 + PERSON 1 
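The "-1.00" entries in point-3 and point-4 come from the three-argument LEAD used by this module, lead(val, 3, -1.00): the third argument replaces NULL whenever the offset runs past the end of the ordered set. A standalone sketch follows; the select list and aliases are illustrative, not the module's full point-3 statement.

lead_default_demo = """
    set list on;
    select id, val,
           lead(val)           over (order by val nulls first, id) as lead_1,
           lead(val, 3)        over (order by val nulls first, id) as lead_3,
           lead(val, 3, -1.00) over (order by val nulls first, id) as lead_3_def
    from entries
    order by id;
"""
# The last three rows in that ordering (id 14, 10 and 15) have no row three places
# ahead, so LEAD_3 is NULL while LEAD_3_DEF reports -1.00: exactly the rows that
# show "LEAD -1.00" in the listing around this hunk.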
+ DAT 2010-03-02 + VAL 3.40 + LAG 3.40 + LEAD 4.30 + LAG 2.30 + LEAD 6.40 + LAG 3.40 + LAG 2.30 + LEAD 6.40 + MSG point-3 + ID 12 + PERSON 2 + DAT 2010-03-03 + VAL 6.40 + LAG 6.40 + LEAD 8.30 + LAG 6.30 + LEAD 9.40 + LAG 6.40 + LAG 6.30 + LEAD 9.40 + MSG point-3 + ID 13 + PERSON 3 + DAT 2010-03-04 + VAL 9.40 + LAG 9.40 + LEAD 10.30 + LAG 8.30 + LEAD 12.40 + LAG 9.40 + LAG 8.30 + LEAD 12.40 + MSG point-3 + ID 14 + PERSON 4 + DAT 2010-03-05 + VAL 12.40 + LAG 12.40 + LEAD 15.40 + LAG 10.30 + LEAD + LAG 12.40 + LAG 10.30 + LEAD -1.00 + MSG point-3 + ID 15 + PERSON 5 + DAT 2010-03-06 + VAL 15.40 + LAG 15.40 + LEAD + LAG 12.40 + LEAD + LAG 15.40 + LAG 12.40 + LEAD -1.00 + MSG point-3 + ID 16 + PERSON 1 + DAT + VAL + LAG + LEAD 2.30 + LAG + LEAD 3.40 + LAG + LAG + LEAD 3.40 + MSG point-4 + PERSON 1 + SUM 9.10 + LAG + LEAD 17.10 + LAG + LEAD 33.10 + LAG -9.10 + LEAD 33.10 + MSG point-4 + PERSON 2 + SUM 17.10 + LAG 9.10 + LEAD 25.10 + LAG + LEAD 41.10 + LAG -17.10 + LEAD 41.10 + MSG point-4 + PERSON 3 + SUM 25.10 + LAG 17.10 + LEAD 33.10 + LAG 9.10 + LEAD + LAG 9.10 + LEAD -1.00 + MSG point-4 + PERSON 4 + SUM 33.10 + LAG 25.10 + LEAD 41.10 + LAG 17.10 + LEAD + LAG 17.10 + LEAD -1.00 + MSG point-4 + PERSON 5 + SUM 41.10 + LAG 33.10 + LEAD + LAG 25.10 + LEAD + LAG 25.10 + LEAD -1.00 +""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +@pytest.mark.skip('FIXME: Not IMPLEMENTED') +@pytest.mark.version('>=3.0') +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") # test_script_1 #--- -# +# # import os # import sys # import subprocess -# +# # os.environ["ISC_USER"] = user_name # os.environ["ISC_PASSWORD"] = user_password -# +# # db_conn.close() -# +# # with open( os.path.join(context['files_location'],'gtcs-window-func.sql'), 'r') as f: # sql_init = f.read() -# +# # sql_addi=''' # set list on; -# +# # select # 'point-1' as msg, # e.*, @@ -63,9 +604,9 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # last_value(val) over (partition by e.person order by val nulls last, id) # from entries e # order by id; -# +# # --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# +# # select # 'point-2' as msg, # person, @@ -77,9 +618,9 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # from entries # group by person # order by person; -# +# # --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# +# # select # 'point-3' as msg, # e.*, @@ -92,9 +633,9 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # lead(val, 3, -1.00) over (order by val nulls first, id) # from entries e # order by id; -# +# # --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# +# # select # 'point-4' as msg, # person, @@ -108,564 +649,9 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # from entries # group by person # order by person; -# +# # ''' -# +# # runProgram('isql', [ dsn], os.linesep.join( (sql_init, sql_addi) ) ) -# +# #--- -act_1 = python_act('db_1', substitutions=substitutions_1) - -expected_stdout_1 = """ - MSG point-1 - ID 1 - PERSON 1 - DAT 2010-01-03 - VAL 2.30 - FIRST_VALUE - FIRST_VALUE 2.30 - FIRST_VALUE 2.30 - NTH_VALUE - NTH_VALUE 2.30 - NTH_VALUE 2.30 - NTH_VALUE 2.30 - NTH_VALUE - NTH_VALUE - LAST_VALUE 2.30 - LAST_VALUE 2.30 - LAST_VALUE 2.30 - MSG point-1 - ID 2 - PERSON 2 - DAT 2010-01-04 - VAL 4.30 - FIRST_VALUE - FIRST_VALUE 2.30 - FIRST_VALUE 4.30 - NTH_VALUE - NTH_VALUE 2.30 - NTH_VALUE 4.30 - NTH_VALUE 2.30 - NTH_VALUE 3.40 - NTH_VALUE - LAST_VALUE 4.30 - LAST_VALUE 4.30 - LAST_VALUE 4.30 - MSG point-1 - ID 3 - PERSON 3 - DAT 2010-01-05 - VAL 
6.30 - FIRST_VALUE - FIRST_VALUE 2.30 - FIRST_VALUE 6.30 - NTH_VALUE - NTH_VALUE 2.30 - NTH_VALUE 6.30 - NTH_VALUE 2.30 - NTH_VALUE 3.40 - NTH_VALUE - LAST_VALUE 6.30 - LAST_VALUE 6.30 - LAST_VALUE 6.30 - MSG point-1 - ID 4 - PERSON 4 - DAT 2010-01-06 - VAL 8.30 - FIRST_VALUE - FIRST_VALUE 2.30 - FIRST_VALUE 8.30 - NTH_VALUE - NTH_VALUE 2.30 - NTH_VALUE 8.30 - NTH_VALUE 2.30 - NTH_VALUE 3.40 - NTH_VALUE - LAST_VALUE 8.30 - LAST_VALUE 8.30 - LAST_VALUE 8.30 - MSG point-1 - ID 5 - PERSON 5 - DAT 2010-01-07 - VAL 10.30 - FIRST_VALUE - FIRST_VALUE 2.30 - FIRST_VALUE 10.30 - NTH_VALUE - NTH_VALUE 2.30 - NTH_VALUE 10.30 - NTH_VALUE 2.30 - NTH_VALUE 3.40 - NTH_VALUE - LAST_VALUE 10.30 - LAST_VALUE 10.30 - LAST_VALUE 10.30 - MSG point-1 - ID 6 - PERSON 1 - DAT 2010-02-02 - VAL 3.40 - FIRST_VALUE - FIRST_VALUE 2.30 - FIRST_VALUE 2.30 - NTH_VALUE - NTH_VALUE 2.30 - NTH_VALUE 2.30 - NTH_VALUE 2.30 - NTH_VALUE 3.40 - NTH_VALUE 3.40 - LAST_VALUE 3.40 - LAST_VALUE 3.40 - LAST_VALUE 3.40 - MSG point-1 - ID 7 - PERSON 2 - DAT 2010-02-03 - VAL 6.40 - FIRST_VALUE - FIRST_VALUE 2.30 - FIRST_VALUE 4.30 - NTH_VALUE - NTH_VALUE 2.30 - NTH_VALUE 4.30 - NTH_VALUE 2.30 - NTH_VALUE 3.40 - NTH_VALUE 6.40 - LAST_VALUE 6.40 - LAST_VALUE 6.40 - LAST_VALUE 6.40 - MSG point-1 - ID 8 - PERSON 3 - DAT 2010-02-04 - VAL 9.40 - FIRST_VALUE - FIRST_VALUE 2.30 - FIRST_VALUE 6.30 - NTH_VALUE - NTH_VALUE 2.30 - NTH_VALUE 6.30 - NTH_VALUE 2.30 - NTH_VALUE 3.40 - NTH_VALUE 9.40 - LAST_VALUE 9.40 - LAST_VALUE 9.40 - LAST_VALUE 9.40 - MSG point-1 - ID 9 - PERSON 4 - DAT 2010-02-05 - VAL 12.40 - FIRST_VALUE - FIRST_VALUE 2.30 - FIRST_VALUE 8.30 - NTH_VALUE - NTH_VALUE 2.30 - NTH_VALUE 8.30 - NTH_VALUE 2.30 - NTH_VALUE 3.40 - NTH_VALUE 12.40 - LAST_VALUE 12.40 - LAST_VALUE 12.40 - LAST_VALUE 12.40 - MSG point-1 - ID 10 - PERSON 5 - DAT 2010-02-06 - VAL 15.40 - FIRST_VALUE - FIRST_VALUE 2.30 - FIRST_VALUE 10.30 - NTH_VALUE - NTH_VALUE 2.30 - NTH_VALUE 10.30 - NTH_VALUE 2.30 - NTH_VALUE 3.40 - NTH_VALUE 15.40 - LAST_VALUE 15.40 - LAST_VALUE 15.40 - LAST_VALUE 15.40 - MSG point-1 - ID 11 - PERSON 1 - DAT 2010-03-02 - VAL 3.40 - FIRST_VALUE - FIRST_VALUE 2.30 - FIRST_VALUE 2.30 - NTH_VALUE - NTH_VALUE 2.30 - NTH_VALUE 2.30 - NTH_VALUE 2.30 - NTH_VALUE 3.40 - NTH_VALUE 3.40 - LAST_VALUE 3.40 - LAST_VALUE 3.40 - LAST_VALUE 3.40 - MSG point-1 - ID 12 - PERSON 2 - DAT 2010-03-03 - VAL 6.40 - FIRST_VALUE - FIRST_VALUE 2.30 - FIRST_VALUE 4.30 - NTH_VALUE - NTH_VALUE 2.30 - NTH_VALUE 4.30 - NTH_VALUE 2.30 - NTH_VALUE 3.40 - NTH_VALUE 6.40 - LAST_VALUE 6.40 - LAST_VALUE 6.40 - LAST_VALUE 6.40 - MSG point-1 - ID 13 - PERSON 3 - DAT 2010-03-04 - VAL 9.40 - FIRST_VALUE - FIRST_VALUE 2.30 - FIRST_VALUE 6.30 - NTH_VALUE - NTH_VALUE 2.30 - NTH_VALUE 6.30 - NTH_VALUE 2.30 - NTH_VALUE 3.40 - NTH_VALUE 9.40 - LAST_VALUE 9.40 - LAST_VALUE 9.40 - LAST_VALUE 9.40 - MSG point-1 - ID 14 - PERSON 4 - DAT 2010-03-05 - VAL 12.40 - FIRST_VALUE - FIRST_VALUE 2.30 - FIRST_VALUE 8.30 - NTH_VALUE - NTH_VALUE 2.30 - NTH_VALUE 8.30 - NTH_VALUE 2.30 - NTH_VALUE 3.40 - NTH_VALUE 12.40 - LAST_VALUE 12.40 - LAST_VALUE 12.40 - LAST_VALUE 12.40 - MSG point-1 - ID 15 - PERSON 5 - DAT 2010-03-06 - VAL 15.40 - FIRST_VALUE - FIRST_VALUE 2.30 - FIRST_VALUE 10.30 - NTH_VALUE - NTH_VALUE 2.30 - NTH_VALUE 10.30 - NTH_VALUE 2.30 - NTH_VALUE 3.40 - NTH_VALUE 15.40 - LAST_VALUE 15.40 - LAST_VALUE 15.40 - LAST_VALUE 15.40 - MSG point-1 - ID 16 - PERSON 1 - DAT - VAL - FIRST_VALUE - FIRST_VALUE 2.30 - FIRST_VALUE 2.30 - NTH_VALUE - NTH_VALUE 2.30 - NTH_VALUE 2.30 - NTH_VALUE - NTH_VALUE 
3.40 - NTH_VALUE 3.40 - LAST_VALUE - LAST_VALUE - LAST_VALUE - MSG point-2 - PERSON 1 - SUM 9.10 - FIRST_VALUE 41.10 - NTH_VALUE 41.10 - NTH_VALUE 33.10 - LAST_VALUE 9.10 - MSG point-2 - PERSON 2 - SUM 17.10 - FIRST_VALUE 41.10 - NTH_VALUE 41.10 - NTH_VALUE 33.10 - LAST_VALUE 17.10 - MSG point-2 - PERSON 3 - SUM 25.10 - FIRST_VALUE 41.10 - NTH_VALUE 41.10 - NTH_VALUE 33.10 - LAST_VALUE 25.10 - MSG point-2 - PERSON 4 - SUM 33.10 - FIRST_VALUE 41.10 - NTH_VALUE 41.10 - NTH_VALUE 33.10 - LAST_VALUE 33.10 - MSG point-2 - PERSON 5 - SUM 41.10 - FIRST_VALUE 41.10 - NTH_VALUE 41.10 - NTH_VALUE - LAST_VALUE 41.10 - MSG point-3 - ID 1 - PERSON 1 - DAT 2010-01-03 - VAL 2.30 - LAG - LEAD 3.40 - LAG - LEAD 4.30 - LAG - LAG -2.30 - LEAD 4.30 - MSG point-3 - ID 2 - PERSON 2 - DAT 2010-01-04 - VAL 4.30 - LAG 3.40 - LEAD 6.30 - LAG 3.40 - LEAD 6.40 - LAG 3.40 - LAG 3.40 - LEAD 6.40 - MSG point-3 - ID 3 - PERSON 3 - DAT 2010-01-05 - VAL 6.30 - LAG 4.30 - LEAD 6.40 - LAG 3.40 - LEAD 8.30 - LAG 4.30 - LAG 3.40 - LEAD 8.30 - MSG point-3 - ID 4 - PERSON 4 - DAT 2010-01-06 - VAL 8.30 - LAG 6.40 - LEAD 9.40 - LAG 6.40 - LEAD 10.30 - LAG 6.40 - LAG 6.40 - LEAD 10.30 - MSG point-3 - ID 5 - PERSON 5 - DAT 2010-01-07 - VAL 10.30 - LAG 9.40 - LEAD 12.40 - LAG 9.40 - LEAD 15.40 - LAG 9.40 - LAG 9.40 - LEAD 15.40 - MSG point-3 - ID 6 - PERSON 1 - DAT 2010-02-02 - VAL 3.40 - LAG 2.30 - LEAD 3.40 - LAG - LEAD 6.30 - LAG 2.30 - LAG - LEAD 6.30 - MSG point-3 - ID 7 - PERSON 2 - DAT 2010-02-03 - VAL 6.40 - LAG 6.30 - LEAD 6.40 - LAG 4.30 - LEAD 9.40 - LAG 6.30 - LAG 4.30 - LEAD 9.40 - MSG point-3 - ID 8 - PERSON 3 - DAT 2010-02-04 - VAL 9.40 - LAG 8.30 - LEAD 9.40 - LAG 6.40 - LEAD 12.40 - LAG 8.30 - LAG 6.40 - LEAD 12.40 - MSG point-3 - ID 9 - PERSON 4 - DAT 2010-02-05 - VAL 12.40 - LAG 10.30 - LEAD 12.40 - LAG 9.40 - LEAD 15.40 - LAG 10.30 - LAG 9.40 - LEAD 15.40 - MSG point-3 - ID 10 - PERSON 5 - DAT 2010-02-06 - VAL 15.40 - LAG 12.40 - LEAD 15.40 - LAG 12.40 - LEAD - LAG 12.40 - LAG 12.40 - LEAD -1.00 - MSG point-3 - ID 11 - PERSON 1 - DAT 2010-03-02 - VAL 3.40 - LAG 3.40 - LEAD 4.30 - LAG 2.30 - LEAD 6.40 - LAG 3.40 - LAG 2.30 - LEAD 6.40 - MSG point-3 - ID 12 - PERSON 2 - DAT 2010-03-03 - VAL 6.40 - LAG 6.40 - LEAD 8.30 - LAG 6.30 - LEAD 9.40 - LAG 6.40 - LAG 6.30 - LEAD 9.40 - MSG point-3 - ID 13 - PERSON 3 - DAT 2010-03-04 - VAL 9.40 - LAG 9.40 - LEAD 10.30 - LAG 8.30 - LEAD 12.40 - LAG 9.40 - LAG 8.30 - LEAD 12.40 - MSG point-3 - ID 14 - PERSON 4 - DAT 2010-03-05 - VAL 12.40 - LAG 12.40 - LEAD 15.40 - LAG 10.30 - LEAD - LAG 12.40 - LAG 10.30 - LEAD -1.00 - MSG point-3 - ID 15 - PERSON 5 - DAT 2010-03-06 - VAL 15.40 - LAG 15.40 - LEAD - LAG 12.40 - LEAD - LAG 15.40 - LAG 12.40 - LEAD -1.00 - MSG point-3 - ID 16 - PERSON 1 - DAT - VAL - LAG - LEAD 2.30 - LAG - LEAD 3.40 - LAG - LAG - LEAD 3.40 - MSG point-4 - PERSON 1 - SUM 9.10 - LAG - LEAD 17.10 - LAG - LEAD 33.10 - LAG -9.10 - LEAD 33.10 - MSG point-4 - PERSON 2 - SUM 17.10 - LAG 9.10 - LEAD 25.10 - LAG - LEAD 41.10 - LAG -17.10 - LEAD 41.10 - MSG point-4 - PERSON 3 - SUM 25.10 - LAG 17.10 - LEAD 33.10 - LAG 9.10 - LEAD - LAG 9.10 - LEAD -1.00 - MSG point-4 - PERSON 4 - SUM 33.10 - LAG 25.10 - LEAD 41.10 - LAG 17.10 - LEAD - LAG 17.10 - LEAD -1.00 - MSG point-4 - PERSON 5 - SUM 41.10 - LAG 33.10 - LEAD - LAG 25.10 - LEAD - LAG 25.10 - LEAD -1.00 -""" - -@pytest.mark.version('>=3.0') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - - diff --git a/tests/functional/gtcs/test_window_func_05.py 
b/tests/functional/gtcs/test_window_func_05.py index f8107f24..01753792 100644 --- a/tests/functional/gtcs/test_window_func_05.py +++ b/tests/functional/gtcs/test_window_func_05.py @@ -1,62 +1,1687 @@ #coding:utf-8 -# -# id: functional.gtcs.window_func_05 -# title: GTCS/tests/FB_SQL_WINDOW_FUNC_05 - set of miscelaneous tests for verification of windowed functions. -# decription: -# Statements from this test are added to initial SQL which is stored in: ... -# bt-repo -# iles\\gtcs-window-func.sql -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/FB_SQL_WINDOW_FUNC_05.script -# -# ::: NB ::: This test used functionality that exists in FB 4.0+. -# -# Checked on 4.0.0.1854. -# -# tracker_id: -# min_versions: ['4.0'] -# versions: 4.0 -# qmid: None + +""" +ID: gtcs.window-func-05 +TITLE: Set of miscelaneous tests for verification of windowed functions +DESCRIPTION: + Statements from this test are added to initial SQL which is stored in: /files/gtcs-window-func.sql + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/FB_SQL_WINDOW_FUNC_05.script + + This test used functionality that exists in FB 4.0+. +FBTEST: functional.gtcs.window_func_05 +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() -substitutions_1 = [('[ \t]+', ' ')] +act = python_act('db', substitutions=[('[ \t]+', ' ')]) -init_script_1 = """""" +expected_stdout = """ + MSG point-01 + N1 1 + N2 1 + X1 1 + X2 10111 + X3 1 + X4 + X5 + X6 10111 + MSG point-01 + N1 1 + N2 10 + X1 11 + X2 10110 + X3 10 + X4 + X5 + X6 10110 + MSG point-01 + N1 2 + N2 20 + X1 20 + X2 20 + X3 20 + X4 + X5 + X6 20 + MSG point-01 + N1 + N2 50 + X1 50 + X2 101110 + X3 50 + X4 + X5 + X6 101110 + MSG point-01 + N1 + N2 60 + X1 110 + X2 101060 + X3 60 + X4 + X5 + X6 101060 + MSG point-01 + N1 1 + N2 100 + X1 111 + X2 10100 + X3 100 + X4 + X5 + X6 10100 + MSG point-01 + N1 3 + N2 300 + X1 300 + X2 300 + X3 300 + X4 + X5 + X6 300 + MSG point-01 + N1 5 + N2 500 + X1 500 + X2 500 + X3 500 + X4 + X5 + X6 500 + MSG point-01 + N1 + N2 1000 + X1 1110 + X2 101000 + X3 1000 + X4 + X5 + X6 101000 + MSG point-01 + N1 1 + N2 10000 + X1 10111 + X2 10000 + X3 10000 + X4 + X5 + X6 10000 + MSG point-01 + N1 + N2 100000 + X1 101110 + X2 100000 + X3 100000 + X4 + X5 + X6 100000 + MSG point-02 + N1 1 + N2 1 + X1 1 + X2 10111 + X3 1 + X4 10100 + X5 + X6 10111 + MSG point-02 + N1 1 + N2 10 + X1 11 + X2 10110 + X3 10 + X4 10000 + X5 + X6 10111 + MSG point-02 + N1 2 + N2 20 + X1 20 + X2 20 + X3 20 + X4 + X5 + X6 20 + MSG point-02 + N1 + N2 50 + X1 50 + X2 101110 + X3 50 + X4 101000 + X5 + X6 101110 + MSG point-02 + N1 + N2 60 + X1 110 + X2 101060 + X3 60 + X4 100000 + X5 + X6 101110 + MSG point-02 + N1 1 + N2 100 + X1 111 + X2 10100 + X3 100 + X4 + X5 1 + X6 10111 + MSG point-02 + N1 3 + N2 300 + X1 300 + X2 300 + X3 300 + X4 + X5 + X6 300 + MSG point-02 + N1 5 + N2 500 + X1 500 + X2 500 + X3 500 + X4 + X5 + X6 500 + MSG point-02 + N1 + N2 1000 + X1 1110 + X2 101000 + X3 1000 + X4 + X5 50 + X6 101110 + MSG point-02 + N1 1 + N2 10000 + X1 10111 + X2 10000 + X3 10000 + X4 + X5 11 + X6 10111 + MSG point-02 + N1 + N2 100000 + X1 101110 + X2 100000 + X3 100000 + X4 + X5 110 + X6 101110 + MSG point-03 + N1 + N2 50 + X1 101110 + X2 112041 + X3 101110 + X4 101110 + X5 101110 + X6 112041 + MSG point-03 + N1 + N2 60 + X1 101110 + X2 112041 + X3 101110 + X4 101110 + X5 101110 + X6 112041 + MSG point-03 + N1 + N2 1000 + X1 
101110 + X2 112041 + X3 101110 + X4 101110 + X5 101110 + X6 112041 + MSG point-03 + N1 + N2 100000 + X1 101110 + X2 112041 + X3 101110 + X4 101110 + X5 101110 + X6 112041 + MSG point-03 + N1 1 + N2 1 + X1 111221 + X2 10931 + X3 10111 + X4 300 + X5 + X6 10931 + MSG point-03 + N1 1 + N2 10 + X1 111221 + X2 10931 + X3 10111 + X4 300 + X5 + X6 10931 + MSG point-03 + N1 1 + N2 100 + X1 111221 + X2 10931 + X3 10111 + X4 300 + X5 + X6 10931 + MSG point-03 + N1 1 + N2 10000 + X1 111221 + X2 10931 + X3 10111 + X4 300 + X5 + X6 10931 + MSG point-03 + N1 2 + N2 20 + X1 111241 + X2 820 + X3 20 + X4 500 + X5 + X6 10931 + MSG point-03 + N1 3 + N2 300 + X1 111541 + X2 800 + X3 300 + X4 500 + X5 10111 + X6 10931 + MSG point-03 + N1 5 + N2 500 + X1 112041 + X2 500 + X3 500 + X4 + X5 320 + X6 820 + MSG point-04 + N1 + N2 50 + X1 50 + X2 112041 + X3 50 + X4 101000 + X5 + X6 112041 + MSG point-04 + N1 + N2 60 + X1 110 + X2 111991 + X3 60 + X4 100001 + X5 + X6 112041 + MSG point-04 + N1 + N2 1000 + X1 1110 + X2 111931 + X3 1000 + X4 11 + X5 50 + X6 112041 + MSG point-04 + N1 + N2 100000 + X1 101110 + X2 110931 + X3 100000 + X4 110 + X5 110 + X6 112041 + MSG point-04 + N1 1 + N2 1 + X1 101111 + X2 10931 + X3 1 + X4 10100 + X5 1060 + X6 111991 + MSG point-04 + N1 1 + N2 10 + X1 101121 + X2 10930 + X3 10 + X4 10020 + X5 101000 + X6 111931 + MSG point-04 + N1 1 + N2 100 + X1 101221 + X2 10920 + X3 100 + X4 320 + X5 100001 + X6 110931 + MSG point-04 + N1 1 + N2 10000 + X1 111221 + X2 10820 + X3 10000 + X4 800 + X5 11 + X6 10931 + MSG point-04 + N1 2 + N2 20 + X1 111241 + X2 820 + X3 20 + X4 500 + X5 110 + X6 10930 + MSG point-04 + N1 3 + N2 300 + X1 111541 + X2 800 + X3 300 + X4 + X5 10100 + X6 10920 + MSG point-04 + N1 5 + N2 500 + X1 112041 + X2 500 + X3 500 + X4 + X5 10020 + X6 10820 + MSG point-05 + N1 + N2 50 + X1 101110 + X2 112041 + X3 101110 + X4 101110 + X5 101110 + X6 112041 + MSG point-05 + N1 + N2 60 + X1 101110 + X2 112041 + X3 101110 + X4 101110 + X5 101110 + X6 112041 + MSG point-05 + N1 + N2 1000 + X1 101110 + X2 112041 + X3 101110 + X4 101110 + X5 101110 + X6 112041 + MSG point-05 + N1 + N2 100000 + X1 101110 + X2 112041 + X3 101110 + X4 101110 + X5 101110 + X6 112041 + MSG point-05 + N1 1 + N2 1 + X1 111221 + X2 10931 + X3 10111 + X4 300 + X5 + X6 10931 + MSG point-05 + N1 1 + N2 10 + X1 111221 + X2 10931 + X3 10111 + X4 300 + X5 + X6 10931 + MSG point-05 + N1 1 + N2 100 + X1 111221 + X2 10931 + X3 10111 + X4 300 + X5 + X6 10931 + MSG point-05 + N1 1 + N2 10000 + X1 111221 + X2 10931 + X3 10111 + X4 300 + X5 + X6 10931 + MSG point-05 + N1 2 + N2 20 + X1 111241 + X2 820 + X3 20 + X4 500 + X5 + X6 10931 + MSG point-05 + N1 3 + N2 300 + X1 111541 + X2 800 + X3 300 + X4 500 + X5 10111 + X6 10931 + MSG point-05 + N1 5 + N2 500 + X1 112041 + X2 500 + X3 500 + X4 + X5 320 + X6 820 + MSG point-06 + N1 + N2 50 + X1 50 + X2 112041 + X3 50 + X4 101000 + X5 + X6 112041 + MSG point-06 + N1 + N2 60 + X1 110 + X2 111991 + X3 60 + X4 100001 + X5 + X6 112041 + MSG point-06 + N1 + N2 1000 + X1 1110 + X2 111931 + X3 1000 + X4 11 + X5 50 + X6 112041 + MSG point-06 + N1 + N2 100000 + X1 101110 + X2 110931 + X3 100000 + X4 110 + X5 110 + X6 112041 + MSG point-06 + N1 1 + N2 1 + X1 101111 + X2 10931 + X3 1 + X4 10100 + X5 1060 + X6 111991 + MSG point-06 + N1 1 + N2 10 + X1 101121 + X2 10930 + X3 10 + X4 10020 + X5 101000 + X6 111931 + MSG point-06 + N1 1 + N2 100 + X1 101221 + X2 10920 + X3 100 + X4 320 + X5 100001 + X6 110931 + MSG point-06 + N1 1 + N2 10000 + X1 111221 + X2 10820 + X3 10000 + X4 800 + X5 11 + X6 10931 
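gtcs.window-func-05 is the only module of this group restricted to 4.0+, because its statements rely on window functionality that only exists in Firebird 4.0; explicit frame clauses and named windows are typical examples of such functionality. A generic, hedged illustration of that 4.0 syntax follows; it is not one of the module's own point-NN statements and the names are assumptions.

fb4_window_demo = """
    set list on;
    select id, val,
           sum(val) over w as running_sum,
           sum(val) over (order by id rows between 1 preceding and 1 following) as neighbour_sum
    from entries
    window w as (order by id)
    order by id;
"""
# The named window W and the explicit ROWS frame are both Firebird 4.0 additions;
# a 3.0 server rejects this statement.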
+ MSG point-06 + N1 2 + N2 20 + X1 111241 + X2 820 + X3 20 + X4 500 + X5 110 + X6 10930 + MSG point-06 + N1 3 + N2 300 + X1 111541 + X2 800 + X3 300 + X4 + X5 10100 + X6 10920 + MSG point-06 + N1 5 + N2 500 + X1 112041 + X2 500 + X3 500 + X4 + X5 10020 + X6 10820 + MSG point-07 + N1 1 + N2 1 + X1 10111 + X2 112041 + X3 10111 + X4 300 + X5 + X6 112041 + MSG point-07 + N1 1 + N2 10 + X1 10111 + X2 112041 + X3 10111 + X4 300 + X5 + X6 112041 + MSG point-07 + N1 1 + N2 100 + X1 10111 + X2 112041 + X3 10111 + X4 300 + X5 + X6 112041 + MSG point-07 + N1 1 + N2 10000 + X1 10111 + X2 112041 + X3 10111 + X4 300 + X5 + X6 112041 + MSG point-07 + N1 2 + N2 20 + X1 10131 + X2 101930 + X3 20 + X4 500 + X5 + X6 112041 + MSG point-07 + N1 3 + N2 300 + X1 10431 + X2 101910 + X3 300 + X4 500 + X5 10111 + X6 112041 + MSG point-07 + N1 5 + N2 500 + X1 10931 + X2 101610 + X3 500 + X4 + X5 320 + X6 101930 + MSG point-07 + N1 + N2 50 + X1 112041 + X2 101110 + X3 101110 + X4 101110 + X5 101110 + X6 101110 + MSG point-07 + N1 + N2 60 + X1 112041 + X2 101110 + X3 101110 + X4 101110 + X5 101110 + X6 101110 + MSG point-07 + N1 + N2 1000 + X1 112041 + X2 101110 + X3 101110 + X4 101110 + X5 101110 + X6 101110 + MSG point-07 + N1 + N2 100000 + X1 112041 + X2 101110 + X3 101110 + X4 101110 + X5 101110 + X6 101110 + MSG point-08 + N1 1 + N2 1 + X1 1 + X2 112041 + X3 1 + X4 10100 + X5 + X6 112041 + MSG point-08 + N1 1 + N2 10 + X1 11 + X2 112040 + X3 10 + X4 10020 + X5 + X6 112041 + MSG point-08 + N1 1 + N2 100 + X1 111 + X2 112030 + X3 100 + X4 320 + X5 1 + X6 112041 + MSG point-08 + N1 1 + N2 10000 + X1 10111 + X2 111930 + X3 10000 + X4 800 + X5 11 + X6 112041 + MSG point-08 + N1 2 + N2 20 + X1 10131 + X2 101930 + X3 20 + X4 550 + X5 110 + X6 112040 + MSG point-08 + N1 3 + N2 300 + X1 10431 + X2 101910 + X3 300 + X4 110 + X5 10100 + X6 112030 + MSG point-08 + N1 5 + N2 500 + X1 10931 + X2 101610 + X3 500 + X4 1060 + X5 10020 + X6 111930 + MSG point-08 + N1 + N2 50 + X1 10981 + X2 101110 + X3 50 + X4 101000 + X5 320 + X6 101930 + MSG point-08 + N1 + N2 60 + X1 11041 + X2 101060 + X3 60 + X4 100000 + X5 800 + X6 101910 + MSG point-08 + N1 + N2 1000 + X1 12041 + X2 101000 + X3 1000 + X4 + X5 550 + X6 101610 + MSG point-08 + N1 + N2 100000 + X1 112041 + X2 100000 + X3 100000 + X4 + X5 110 + X6 101110 + MSG point-09 + N1 5 + N2 500 + X1 500 + X2 112041 + X3 500 + X4 320 + X5 + X6 112041 + MSG point-09 + N1 3 + N2 300 + X1 800 + X2 111541 + X3 300 + X4 10111 + X5 500 + X6 112041 + MSG point-09 + N1 2 + N2 20 + X1 820 + X2 111241 + X3 20 + X4 + X5 500 + X6 112041 + MSG point-09 + N1 1 + N2 1 + X1 10931 + X2 111221 + X3 10111 + X4 + X5 300 + X6 111541 + MSG point-09 + N1 1 + N2 10 + X1 10931 + X2 111221 + X3 10111 + X4 + X5 300 + X6 111541 + MSG point-09 + N1 1 + N2 100 + X1 10931 + X2 111221 + X3 10111 + X4 + X5 300 + X6 111541 + MSG point-09 + N1 1 + N2 10000 + X1 10931 + X2 111221 + X3 10111 + X4 + X5 300 + X6 111541 + MSG point-09 + N1 + N2 50 + X1 112041 + X2 101110 + X3 101110 + X4 101110 + X5 101110 + X6 101110 + MSG point-09 + N1 + N2 60 + X1 112041 + X2 101110 + X3 101110 + X4 101110 + X5 101110 + X6 101110 + MSG point-09 + N1 + N2 1000 + X1 112041 + X2 101110 + X3 101110 + X4 101110 + X5 101110 + X6 101110 + MSG point-09 + N1 + N2 100000 + X1 112041 + X2 101110 + X3 101110 + X4 101110 + X5 101110 + X6 101110 + MSG point-10 + N1 5 + N2 500 + X1 500 + X2 112041 + X3 500 + X4 21 + X5 + X6 112041 + MSG point-10 + N1 3 + N2 300 + X1 800 + X2 111541 + X3 300 + X4 11 + X5 + X6 112041 + MSG point-10 + N1 2 + N2 20 + X1 820 + 
X2 111241 + X3 20 + X4 110 + X5 500 + X6 112041 + MSG point-10 + N1 1 + N2 1 + X1 821 + X2 111221 + X3 1 + X4 10100 + X5 800 + X6 112041 + MSG point-10 + N1 1 + N2 10 + X1 831 + X2 111220 + X3 10 + X4 10050 + X5 320 + X6 111541 + MSG point-10 + N1 1 + N2 100 + X1 931 + X2 111210 + X3 100 + X4 110 + X5 21 + X6 111241 + MSG point-10 + N1 1 + N2 10000 + X1 10931 + X2 111110 + X3 10000 + X4 1060 + X5 11 + X6 111221 + MSG point-10 + N1 + N2 50 + X1 10981 + X2 101110 + X3 50 + X4 101000 + X5 110 + X6 111220 + MSG point-10 + N1 + N2 60 + X1 11041 + X2 101060 + X3 60 + X4 100000 + X5 10100 + X6 111210 + MSG point-10 + N1 + N2 1000 + X1 12041 + X2 101000 + X3 1000 + X4 + X5 10050 + X6 111110 + MSG point-10 + N1 + N2 100000 + X1 112041 + X2 100000 + X3 100000 + X4 + X5 110 + X6 101110 + MSG point-11 + N1 + N2 50 + X1 101110 + X2 112041 + X3 101110 + X4 101110 + X5 101110 + X6 112041 + MSG point-11 + N1 + N2 60 + X1 101110 + X2 112041 + X3 101110 + X4 101110 + X5 101110 + X6 112041 + MSG point-11 + N1 + N2 1000 + X1 101110 + X2 112041 + X3 101110 + X4 101110 + X5 101110 + X6 112041 + MSG point-11 + N1 + N2 100000 + X1 101110 + X2 112041 + X3 101110 + X4 101110 + X5 101110 + X6 112041 + MSG point-11 + N1 5 + N2 500 + X1 101610 + X2 10931 + X3 500 + X4 320 + X5 + X6 10931 + MSG point-11 + N1 3 + N2 300 + X1 101910 + X2 10431 + X3 300 + X4 10111 + X5 500 + X6 10931 + MSG point-11 + N1 2 + N2 20 + X1 101930 + X2 10131 + X3 20 + X4 + X5 500 + X6 10931 + MSG point-11 + N1 1 + N2 1 + X1 112041 + X2 10111 + X3 10111 + X4 + X5 300 + X6 10431 + MSG point-11 + N1 1 + N2 10 + X1 112041 + X2 10111 + X3 10111 + X4 + X5 300 + X6 10431 + MSG point-11 + N1 1 + N2 100 + X1 112041 + X2 10111 + X3 10111 + X4 + X5 300 + X6 10431 + MSG point-11 + N1 1 + N2 10000 + X1 112041 + X2 10111 + X3 10111 + X4 + X5 300 + X6 10431 + MSG point-12 + N1 + N2 50 + X1 50 + X2 112041 + X3 50 + X4 101000 + X5 + X6 112041 + MSG point-12 + N1 + N2 60 + X1 110 + X2 111991 + X3 60 + X4 100500 + X5 + X6 112041 + MSG point-12 + N1 + N2 1000 + X1 1110 + X2 111931 + X3 1000 + X4 800 + X5 50 + X6 112041 + MSG point-12 + N1 + N2 100000 + X1 101110 + X2 110931 + X3 100000 + X4 320 + X5 110 + X6 112041 + MSG point-12 + N1 5 + N2 500 + X1 101610 + X2 10931 + X3 500 + X4 21 + X5 1060 + X6 111991 + MSG point-12 + N1 3 + N2 300 + X1 101910 + X2 10431 + X3 300 + X4 11 + X5 101000 + X6 111931 + MSG point-12 + N1 2 + N2 20 + X1 101930 + X2 10131 + X3 20 + X4 110 + X5 100500 + X6 110931 + MSG point-12 + N1 1 + N2 1 + X1 101931 + X2 10111 + X3 1 + X4 10100 + X5 800 + X6 10931 + MSG point-12 + N1 1 + N2 10 + X1 101941 + X2 10110 + X3 10 + X4 10000 + X5 320 + X6 10431 + MSG point-12 + N1 1 + N2 100 + X1 102041 + X2 10100 + X3 100 + X4 + X5 21 + X6 10131 + MSG point-12 + N1 1 + N2 10000 + X1 112041 + X2 10000 + X3 10000 + X4 + X5 11 + X6 10111 + MSG point-13 + N1 5 + N2 500 + X1 500 + X2 112041 + X3 500 + X4 320 + X5 + X6 112041 + MSG point-13 + N1 3 + N2 300 + X1 800 + X2 111541 + X3 300 + X4 10111 + X5 500 + X6 112041 + MSG point-13 + N1 2 + N2 20 + X1 820 + X2 111241 + X3 20 + X4 + X5 500 + X6 112041 + MSG point-13 + N1 1 + N2 1 + X1 10931 + X2 111221 + X3 10111 + X4 + X5 300 + X6 111541 + MSG point-13 + N1 1 + N2 10 + X1 10931 + X2 111221 + X3 10111 + X4 + X5 300 + X6 111541 + MSG point-13 + N1 1 + N2 100 + X1 10931 + X2 111221 + X3 10111 + X4 + X5 300 + X6 111541 + MSG point-13 + N1 1 + N2 10000 + X1 10931 + X2 111221 + X3 10111 + X4 + X5 300 + X6 111541 + MSG point-13 + N1 + N2 50 + X1 112041 + X2 101110 + X3 101110 + X4 101110 + X5 101110 + X6 101110 + MSG 
point-13 + N1 + N2 60 + X1 112041 + X2 101110 + X3 101110 + X4 101110 + X5 101110 + X6 101110 + MSG point-13 + N1 + N2 1000 + X1 112041 + X2 101110 + X3 101110 + X4 101110 + X5 101110 + X6 101110 + MSG point-13 + N1 + N2 100000 + X1 112041 + X2 101110 + X3 101110 + X4 101110 + X5 101110 + X6 101110 + MSG point-14 + N1 5 + N2 500 + X1 500 + X2 112041 + X3 500 + X4 21 + X5 + X6 112041 + MSG point-14 + N1 3 + N2 300 + X1 800 + X2 111541 + X3 300 + X4 11 + X5 + X6 112041 + MSG point-14 + N1 2 + N2 20 + X1 820 + X2 111241 + X3 20 + X4 110 + X5 500 + X6 112041 + MSG point-14 + N1 1 + N2 1 + X1 821 + X2 111221 + X3 1 + X4 10100 + X5 800 + X6 112041 + MSG point-14 + N1 1 + N2 10 + X1 831 + X2 111220 + X3 10 + X4 10050 + X5 320 + X6 111541 + MSG point-14 + N1 1 + N2 100 + X1 931 + X2 111210 + X3 100 + X4 110 + X5 21 + X6 111241 + MSG point-14 + N1 1 + N2 10000 + X1 10931 + X2 111110 + X3 10000 + X4 1060 + X5 11 + X6 111221 + MSG point-14 + N1 + N2 50 + X1 10981 + X2 101110 + X3 50 + X4 101000 + X5 110 + X6 111220 + MSG point-14 + N1 + N2 60 + X1 11041 + X2 101060 + X3 60 + X4 100000 + X5 10100 + X6 111210 + MSG point-14 + N1 + N2 1000 + X1 12041 + X2 101000 + X3 1000 + X4 + X5 10050 + X6 111110 + MSG point-14 + N1 + N2 100000 + X1 112041 + X2 100000 + X3 100000 + X4 + X5 110 + X6 101110 + MSG point-15 + N1 + N2 50 + X1 4 + X2 101110 + X3 4 + X4 101110 + MSG point-15 + N1 + N2 60 + X1 4 + X2 101110 + X3 4 + X4 101110 + MSG point-15 + N1 + N2 1000 + X1 4 + X2 101110 + X3 4 + X4 101110 + MSG point-15 + N1 + N2 100000 + X1 4 + X2 101110 + X3 4 + X4 101110 + MSG point-15 + N1 1 + N2 1 + X1 0 + X2 + X3 1 + X4 20 + MSG point-15 + N1 1 + N2 10 + X1 0 + X2 + X3 1 + X4 20 + MSG point-15 + N1 1 + N2 100 + X1 0 + X2 + X3 1 + X4 20 + MSG point-15 + N1 1 + N2 10000 + X1 0 + X2 + X3 1 + X4 20 + MSG point-15 + N1 2 + N2 20 + X1 0 + X2 + X3 0 + X4 + MSG point-15 + N1 3 + N2 300 + X1 0 + X2 + X3 0 + X4 + MSG point-15 + N1 5 + N2 500 + X1 0 + X2 + X3 0 + X4 + MSG point-16 + N1 + N2 50 + X1 1 + X2 50 + X3 1 + X4 50 + MSG point-16 + N1 + N2 60 + X1 1 + X2 60 + X3 1 + X4 60 + MSG point-16 + N1 + N2 1000 + X1 1 + X2 1000 + X3 1 + X4 1000 + MSG point-16 + N1 + N2 100000 + X1 1 + X2 100000 + X3 1 + X4 100000 + MSG point-16 + N1 1 + N2 1 + X1 1 + X2 100000 + X3 1 + X4 10 + MSG point-16 + N1 1 + N2 10 + X1 1 + X2 1 + X3 1 + X4 100 + MSG point-16 + N1 1 + N2 100 + X1 1 + X2 10 + X3 1 + X4 10000 + MSG point-16 + N1 1 + N2 10000 + X1 1 + X2 100 + X3 1 + X4 20 + MSG point-16 + N1 2 + N2 20 + X1 1 + X2 100 + X3 1 + X4 500 + MSG point-16 + N1 3 + N2 300 + X1 1 + X2 100 + X3 0 + X4 + MSG point-16 + N1 5 + N2 500 + X1 1 + X2 10 + X3 0 + X4 + MSG point-17 + N1 1 + N2 1 + X1 1 + X2 10111 + X3 1 + X4 11 + X5 10111 + MSG point-17 + N1 1 + N2 10 + X1 11 + X2 10110 + X3 10 + X4 11 + X5 10110 + MSG point-17 + N1 2 + N2 20 + X1 20 + X2 20 + X3 20 + X4 20 + X5 20 + MSG point-17 + N1 + N2 50 + X1 50 + X2 101110 + X3 50 + X4 110 + X5 101110 + MSG point-17 + N1 + N2 60 + X1 110 + X2 101060 + X3 60 + X4 110 + X5 101060 + MSG point-17 + N1 1 + N2 100 + X1 111 + X2 10100 + X3 100 + X4 100 + X5 10100 + MSG point-17 + N1 3 + N2 300 + X1 300 + X2 300 + X3 300 + X4 300 + X5 300 + MSG point-17 + N1 5 + N2 500 + X1 500 + X2 500 + X3 500 + X4 500 + X5 500 + MSG point-17 + N1 + N2 1000 + X1 1110 + X2 101000 + X3 1000 + X4 1000 + X5 101000 + MSG point-17 + N1 1 + N2 10000 + X1 10111 + X2 10000 + X3 10000 + X4 10000 + X5 10000 + MSG point-17 + N1 + N2 100000 + X1 101110 + X2 100000 + X3 100000 + X4 100000 + X5 100000 +""" -db_1 = db_factory(sql_dialect=3, 
init=init_script_1) +@pytest.mark.skip('FIXME: Not IMPLEMENTED') +@pytest.mark.version('>=4.0') +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") # test_script_1 #--- -# +# # import os # import sys # import subprocess -# +# # os.environ["ISC_USER"] = user_name # os.environ["ISC_PASSWORD"] = user_password -# +# # db_conn.close() -# +# # # NOT NEEDED FOR THIS TEST: # ########################### # # with open( os.path.join(context['files_location'],'gtcs-window-func.sql'), 'r') as f: # # sql_init = f.read() -# +# # sql_init = '' # sql_addi=''' # set list on; -# +# # recreate table t1 ( # n1 integer, # n2 integer # ); # commit; -# +# # insert into t1 values (null, 100000); # insert into t1 values (null, 1000); # insert into t1 values (1, 1); @@ -69,8 +1694,8 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # insert into t1 values (null, 50); # insert into t1 values (null, 60); # commit; -# -# select +# +# select # 'point-01' as msg, # n1, # n2, @@ -82,10 +1707,10 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # sum(n2) over (partition by n1 order by n2 range between 3 preceding and unbounded following) x6 # from t1 # order by n2, n1; -# +# # --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -# select +# +# select # 'point-02' as msg, # n1, # n2, @@ -97,10 +1722,10 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # sum(n2) over (partition by n1 order by n2, n1 rows between 3 preceding and unbounded following) x6 # from t1 # order by n2, n1; -# +# # --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -# select +# +# select # 'point-03' as msg, # n1, # n2, @@ -112,10 +1737,10 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # sum(n2) over (order by n1 range between 3 preceding and unbounded following) x6 # from t1 # order by n1, n2; -# +# # --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -# select +# +# select # 'point-04' as msg, # n1, # n2, @@ -127,10 +1752,10 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # sum(n2) over (order by n1, n2 rows between 3 preceding and unbounded following) x6 # from t1 # order by n1, n2; -# +# # --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -# select +# +# select # 'point-05' as msg, # n1, # n2, @@ -142,10 +1767,10 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # sum(n2) over (order by n1 nulls first range between 3 preceding and unbounded following) x6 # from t1 # order by n1 nulls first, n2; -# +# # --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -# select +# +# select # 'point-06' as msg, # n1, # n2, @@ -157,10 +1782,10 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # sum(n2) over (order by n1 nulls first, n2 rows between 3 preceding and unbounded following) x6 # from t1 # order by n1 nulls first, n2; -# +# # --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -# select +# +# select # 'point-07' as msg, # n1, # n2, @@ -172,10 +1797,10 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # sum(n2) over (order by n1 nulls last range between 3 preceding and unbounded following) x6 # from t1 # order by n1 nulls last, n2; -# +# # --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -# select +# +# select # 'point-08' as msg, # n1, # n2, @@ -187,10 +1812,10 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # sum(n2) over (order by n1 nulls last, n2 rows between 3 preceding and unbounded following) x6 # from t1 # order by n1 nulls last, n2; -# +# # 
--+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -# select +# +# select # 'point-09' as msg, # n1, # n2, @@ -202,10 +1827,10 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # sum(n2) over (order by n1 desc range between 3 preceding and unbounded following) x6 # from t1 # order by n1 desc, n2; -# +# # --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -# select +# +# select # 'point-10' as msg, # n1, # n2, @@ -217,10 +1842,10 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # sum(n2) over (order by n1 desc, n2 rows between 3 preceding and unbounded following) x6 # from t1 # order by n1 desc, n2; -# +# # --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -# select +# +# select # 'point-11' as msg, # n1, # n2, @@ -232,10 +1857,10 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # sum(n2) over (order by n1 desc nulls first range between 3 preceding and unbounded following) x6 # from t1 # order by n1 desc nulls first, n2; -# +# # --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -# select +# +# select # 'point-12' as msg, # n1, # n2, @@ -247,10 +1872,10 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # sum(n2) over (order by n1 desc nulls first, n2 rows between 3 preceding and unbounded following) x6 # from t1 # order by n1 desc nulls first, n2; -# +# # --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -# select +# +# select # 'point-13' as msg, # n1, # n2, @@ -262,10 +1887,10 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # sum(n2) over (order by n1 desc nulls last range between 3 preceding and unbounded following) x6 # from t1 # order by n1 desc nulls last, n2; -# +# # --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -# select +# +# select # 'point-14' as msg, # n1, # n2, @@ -277,10 +1902,10 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # sum(n2) over (order by n1 desc nulls last, n2 rows between 3 preceding and unbounded following) x6 # from t1 # order by n1 desc nulls last, n2; -# +# # --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -# select +# +# select # 'point-15' as msg, # n1, # n2, @@ -290,10 +1915,10 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # sum(n2) over (order by n1 nulls first range between coalesce(n1, 0) following and coalesce(n1, 0) following) x4 # from t1 # order by n1 nulls first, n2; -# +# # --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -# select +# +# select # 'point-16' as msg, # n1, # n2, @@ -303,10 +1928,10 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # sum(n2) over (order by n1 nulls first, n2 rows between coalesce(n1, 0) following and coalesce(n1, 0) following) x4 # from t1 # order by n1 nulls first, n2; -# +# # --+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -# select +# +# select # 'point-17' as msg, # n1, # n2, @@ -318,1646 +1943,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # from t1 # order by n2, n1; # ''' -# +# # runProgram('isql', [ dsn], os.linesep.join( (sql_init, sql_addi) ) ) -# +# #--- -act_1 = python_act('db_1', substitutions=substitutions_1) - -expected_stdout_1 = """ - MSG point-01 - N1 1 - N2 1 - X1 1 - X2 10111 - X3 1 - X4 - X5 - X6 10111 - MSG point-01 - N1 1 - N2 10 - X1 11 - X2 10110 - X3 10 - X4 - X5 - X6 10110 - MSG point-01 - N1 2 - N2 20 - X1 20 - X2 20 - X3 20 - X4 - X5 - X6 20 - MSG point-01 - N1 - N2 50 - X1 50 - X2 101110 - X3 50 - X4 - X5 - X6 101110 - MSG point-01 - N1 - N2 60 - X1 110 - X2 101060 - X3 60 - X4 - 
X5 - X6 101060 - MSG point-01 - N1 1 - N2 100 - X1 111 - X2 10100 - X3 100 - X4 - X5 - X6 10100 - MSG point-01 - N1 3 - N2 300 - X1 300 - X2 300 - X3 300 - X4 - X5 - X6 300 - MSG point-01 - N1 5 - N2 500 - X1 500 - X2 500 - X3 500 - X4 - X5 - X6 500 - MSG point-01 - N1 - N2 1000 - X1 1110 - X2 101000 - X3 1000 - X4 - X5 - X6 101000 - MSG point-01 - N1 1 - N2 10000 - X1 10111 - X2 10000 - X3 10000 - X4 - X5 - X6 10000 - MSG point-01 - N1 - N2 100000 - X1 101110 - X2 100000 - X3 100000 - X4 - X5 - X6 100000 - MSG point-02 - N1 1 - N2 1 - X1 1 - X2 10111 - X3 1 - X4 10100 - X5 - X6 10111 - MSG point-02 - N1 1 - N2 10 - X1 11 - X2 10110 - X3 10 - X4 10000 - X5 - X6 10111 - MSG point-02 - N1 2 - N2 20 - X1 20 - X2 20 - X3 20 - X4 - X5 - X6 20 - MSG point-02 - N1 - N2 50 - X1 50 - X2 101110 - X3 50 - X4 101000 - X5 - X6 101110 - MSG point-02 - N1 - N2 60 - X1 110 - X2 101060 - X3 60 - X4 100000 - X5 - X6 101110 - MSG point-02 - N1 1 - N2 100 - X1 111 - X2 10100 - X3 100 - X4 - X5 1 - X6 10111 - MSG point-02 - N1 3 - N2 300 - X1 300 - X2 300 - X3 300 - X4 - X5 - X6 300 - MSG point-02 - N1 5 - N2 500 - X1 500 - X2 500 - X3 500 - X4 - X5 - X6 500 - MSG point-02 - N1 - N2 1000 - X1 1110 - X2 101000 - X3 1000 - X4 - X5 50 - X6 101110 - MSG point-02 - N1 1 - N2 10000 - X1 10111 - X2 10000 - X3 10000 - X4 - X5 11 - X6 10111 - MSG point-02 - N1 - N2 100000 - X1 101110 - X2 100000 - X3 100000 - X4 - X5 110 - X6 101110 - MSG point-03 - N1 - N2 50 - X1 101110 - X2 112041 - X3 101110 - X4 101110 - X5 101110 - X6 112041 - MSG point-03 - N1 - N2 60 - X1 101110 - X2 112041 - X3 101110 - X4 101110 - X5 101110 - X6 112041 - MSG point-03 - N1 - N2 1000 - X1 101110 - X2 112041 - X3 101110 - X4 101110 - X5 101110 - X6 112041 - MSG point-03 - N1 - N2 100000 - X1 101110 - X2 112041 - X3 101110 - X4 101110 - X5 101110 - X6 112041 - MSG point-03 - N1 1 - N2 1 - X1 111221 - X2 10931 - X3 10111 - X4 300 - X5 - X6 10931 - MSG point-03 - N1 1 - N2 10 - X1 111221 - X2 10931 - X3 10111 - X4 300 - X5 - X6 10931 - MSG point-03 - N1 1 - N2 100 - X1 111221 - X2 10931 - X3 10111 - X4 300 - X5 - X6 10931 - MSG point-03 - N1 1 - N2 10000 - X1 111221 - X2 10931 - X3 10111 - X4 300 - X5 - X6 10931 - MSG point-03 - N1 2 - N2 20 - X1 111241 - X2 820 - X3 20 - X4 500 - X5 - X6 10931 - MSG point-03 - N1 3 - N2 300 - X1 111541 - X2 800 - X3 300 - X4 500 - X5 10111 - X6 10931 - MSG point-03 - N1 5 - N2 500 - X1 112041 - X2 500 - X3 500 - X4 - X5 320 - X6 820 - MSG point-04 - N1 - N2 50 - X1 50 - X2 112041 - X3 50 - X4 101000 - X5 - X6 112041 - MSG point-04 - N1 - N2 60 - X1 110 - X2 111991 - X3 60 - X4 100001 - X5 - X6 112041 - MSG point-04 - N1 - N2 1000 - X1 1110 - X2 111931 - X3 1000 - X4 11 - X5 50 - X6 112041 - MSG point-04 - N1 - N2 100000 - X1 101110 - X2 110931 - X3 100000 - X4 110 - X5 110 - X6 112041 - MSG point-04 - N1 1 - N2 1 - X1 101111 - X2 10931 - X3 1 - X4 10100 - X5 1060 - X6 111991 - MSG point-04 - N1 1 - N2 10 - X1 101121 - X2 10930 - X3 10 - X4 10020 - X5 101000 - X6 111931 - MSG point-04 - N1 1 - N2 100 - X1 101221 - X2 10920 - X3 100 - X4 320 - X5 100001 - X6 110931 - MSG point-04 - N1 1 - N2 10000 - X1 111221 - X2 10820 - X3 10000 - X4 800 - X5 11 - X6 10931 - MSG point-04 - N1 2 - N2 20 - X1 111241 - X2 820 - X3 20 - X4 500 - X5 110 - X6 10930 - MSG point-04 - N1 3 - N2 300 - X1 111541 - X2 800 - X3 300 - X4 - X5 10100 - X6 10920 - MSG point-04 - N1 5 - N2 500 - X1 112041 - X2 500 - X3 500 - X4 - X5 10020 - X6 10820 - MSG point-05 - N1 - N2 50 - X1 101110 - X2 112041 - X3 101110 - X4 101110 - X5 101110 - X6 112041 
- MSG point-05 - N1 - N2 60 - X1 101110 - X2 112041 - X3 101110 - X4 101110 - X5 101110 - X6 112041 - MSG point-05 - N1 - N2 1000 - X1 101110 - X2 112041 - X3 101110 - X4 101110 - X5 101110 - X6 112041 - MSG point-05 - N1 - N2 100000 - X1 101110 - X2 112041 - X3 101110 - X4 101110 - X5 101110 - X6 112041 - MSG point-05 - N1 1 - N2 1 - X1 111221 - X2 10931 - X3 10111 - X4 300 - X5 - X6 10931 - MSG point-05 - N1 1 - N2 10 - X1 111221 - X2 10931 - X3 10111 - X4 300 - X5 - X6 10931 - MSG point-05 - N1 1 - N2 100 - X1 111221 - X2 10931 - X3 10111 - X4 300 - X5 - X6 10931 - MSG point-05 - N1 1 - N2 10000 - X1 111221 - X2 10931 - X3 10111 - X4 300 - X5 - X6 10931 - MSG point-05 - N1 2 - N2 20 - X1 111241 - X2 820 - X3 20 - X4 500 - X5 - X6 10931 - MSG point-05 - N1 3 - N2 300 - X1 111541 - X2 800 - X3 300 - X4 500 - X5 10111 - X6 10931 - MSG point-05 - N1 5 - N2 500 - X1 112041 - X2 500 - X3 500 - X4 - X5 320 - X6 820 - MSG point-06 - N1 - N2 50 - X1 50 - X2 112041 - X3 50 - X4 101000 - X5 - X6 112041 - MSG point-06 - N1 - N2 60 - X1 110 - X2 111991 - X3 60 - X4 100001 - X5 - X6 112041 - MSG point-06 - N1 - N2 1000 - X1 1110 - X2 111931 - X3 1000 - X4 11 - X5 50 - X6 112041 - MSG point-06 - N1 - N2 100000 - X1 101110 - X2 110931 - X3 100000 - X4 110 - X5 110 - X6 112041 - MSG point-06 - N1 1 - N2 1 - X1 101111 - X2 10931 - X3 1 - X4 10100 - X5 1060 - X6 111991 - MSG point-06 - N1 1 - N2 10 - X1 101121 - X2 10930 - X3 10 - X4 10020 - X5 101000 - X6 111931 - MSG point-06 - N1 1 - N2 100 - X1 101221 - X2 10920 - X3 100 - X4 320 - X5 100001 - X6 110931 - MSG point-06 - N1 1 - N2 10000 - X1 111221 - X2 10820 - X3 10000 - X4 800 - X5 11 - X6 10931 - MSG point-06 - N1 2 - N2 20 - X1 111241 - X2 820 - X3 20 - X4 500 - X5 110 - X6 10930 - MSG point-06 - N1 3 - N2 300 - X1 111541 - X2 800 - X3 300 - X4 - X5 10100 - X6 10920 - MSG point-06 - N1 5 - N2 500 - X1 112041 - X2 500 - X3 500 - X4 - X5 10020 - X6 10820 - MSG point-07 - N1 1 - N2 1 - X1 10111 - X2 112041 - X3 10111 - X4 300 - X5 - X6 112041 - MSG point-07 - N1 1 - N2 10 - X1 10111 - X2 112041 - X3 10111 - X4 300 - X5 - X6 112041 - MSG point-07 - N1 1 - N2 100 - X1 10111 - X2 112041 - X3 10111 - X4 300 - X5 - X6 112041 - MSG point-07 - N1 1 - N2 10000 - X1 10111 - X2 112041 - X3 10111 - X4 300 - X5 - X6 112041 - MSG point-07 - N1 2 - N2 20 - X1 10131 - X2 101930 - X3 20 - X4 500 - X5 - X6 112041 - MSG point-07 - N1 3 - N2 300 - X1 10431 - X2 101910 - X3 300 - X4 500 - X5 10111 - X6 112041 - MSG point-07 - N1 5 - N2 500 - X1 10931 - X2 101610 - X3 500 - X4 - X5 320 - X6 101930 - MSG point-07 - N1 - N2 50 - X1 112041 - X2 101110 - X3 101110 - X4 101110 - X5 101110 - X6 101110 - MSG point-07 - N1 - N2 60 - X1 112041 - X2 101110 - X3 101110 - X4 101110 - X5 101110 - X6 101110 - MSG point-07 - N1 - N2 1000 - X1 112041 - X2 101110 - X3 101110 - X4 101110 - X5 101110 - X6 101110 - MSG point-07 - N1 - N2 100000 - X1 112041 - X2 101110 - X3 101110 - X4 101110 - X5 101110 - X6 101110 - MSG point-08 - N1 1 - N2 1 - X1 1 - X2 112041 - X3 1 - X4 10100 - X5 - X6 112041 - MSG point-08 - N1 1 - N2 10 - X1 11 - X2 112040 - X3 10 - X4 10020 - X5 - X6 112041 - MSG point-08 - N1 1 - N2 100 - X1 111 - X2 112030 - X3 100 - X4 320 - X5 1 - X6 112041 - MSG point-08 - N1 1 - N2 10000 - X1 10111 - X2 111930 - X3 10000 - X4 800 - X5 11 - X6 112041 - MSG point-08 - N1 2 - N2 20 - X1 10131 - X2 101930 - X3 20 - X4 550 - X5 110 - X6 112040 - MSG point-08 - N1 3 - N2 300 - X1 10431 - X2 101910 - X3 300 - X4 110 - X5 10100 - X6 112030 - MSG point-08 - N1 5 - N2 500 - X1 10931 - X2 
101610 - X3 500 - X4 1060 - X5 10020 - X6 111930 - MSG point-08 - N1 - N2 50 - X1 10981 - X2 101110 - X3 50 - X4 101000 - X5 320 - X6 101930 - MSG point-08 - N1 - N2 60 - X1 11041 - X2 101060 - X3 60 - X4 100000 - X5 800 - X6 101910 - MSG point-08 - N1 - N2 1000 - X1 12041 - X2 101000 - X3 1000 - X4 - X5 550 - X6 101610 - MSG point-08 - N1 - N2 100000 - X1 112041 - X2 100000 - X3 100000 - X4 - X5 110 - X6 101110 - MSG point-09 - N1 5 - N2 500 - X1 500 - X2 112041 - X3 500 - X4 320 - X5 - X6 112041 - MSG point-09 - N1 3 - N2 300 - X1 800 - X2 111541 - X3 300 - X4 10111 - X5 500 - X6 112041 - MSG point-09 - N1 2 - N2 20 - X1 820 - X2 111241 - X3 20 - X4 - X5 500 - X6 112041 - MSG point-09 - N1 1 - N2 1 - X1 10931 - X2 111221 - X3 10111 - X4 - X5 300 - X6 111541 - MSG point-09 - N1 1 - N2 10 - X1 10931 - X2 111221 - X3 10111 - X4 - X5 300 - X6 111541 - MSG point-09 - N1 1 - N2 100 - X1 10931 - X2 111221 - X3 10111 - X4 - X5 300 - X6 111541 - MSG point-09 - N1 1 - N2 10000 - X1 10931 - X2 111221 - X3 10111 - X4 - X5 300 - X6 111541 - MSG point-09 - N1 - N2 50 - X1 112041 - X2 101110 - X3 101110 - X4 101110 - X5 101110 - X6 101110 - MSG point-09 - N1 - N2 60 - X1 112041 - X2 101110 - X3 101110 - X4 101110 - X5 101110 - X6 101110 - MSG point-09 - N1 - N2 1000 - X1 112041 - X2 101110 - X3 101110 - X4 101110 - X5 101110 - X6 101110 - MSG point-09 - N1 - N2 100000 - X1 112041 - X2 101110 - X3 101110 - X4 101110 - X5 101110 - X6 101110 - MSG point-10 - N1 5 - N2 500 - X1 500 - X2 112041 - X3 500 - X4 21 - X5 - X6 112041 - MSG point-10 - N1 3 - N2 300 - X1 800 - X2 111541 - X3 300 - X4 11 - X5 - X6 112041 - MSG point-10 - N1 2 - N2 20 - X1 820 - X2 111241 - X3 20 - X4 110 - X5 500 - X6 112041 - MSG point-10 - N1 1 - N2 1 - X1 821 - X2 111221 - X3 1 - X4 10100 - X5 800 - X6 112041 - MSG point-10 - N1 1 - N2 10 - X1 831 - X2 111220 - X3 10 - X4 10050 - X5 320 - X6 111541 - MSG point-10 - N1 1 - N2 100 - X1 931 - X2 111210 - X3 100 - X4 110 - X5 21 - X6 111241 - MSG point-10 - N1 1 - N2 10000 - X1 10931 - X2 111110 - X3 10000 - X4 1060 - X5 11 - X6 111221 - MSG point-10 - N1 - N2 50 - X1 10981 - X2 101110 - X3 50 - X4 101000 - X5 110 - X6 111220 - MSG point-10 - N1 - N2 60 - X1 11041 - X2 101060 - X3 60 - X4 100000 - X5 10100 - X6 111210 - MSG point-10 - N1 - N2 1000 - X1 12041 - X2 101000 - X3 1000 - X4 - X5 10050 - X6 111110 - MSG point-10 - N1 - N2 100000 - X1 112041 - X2 100000 - X3 100000 - X4 - X5 110 - X6 101110 - MSG point-11 - N1 - N2 50 - X1 101110 - X2 112041 - X3 101110 - X4 101110 - X5 101110 - X6 112041 - MSG point-11 - N1 - N2 60 - X1 101110 - X2 112041 - X3 101110 - X4 101110 - X5 101110 - X6 112041 - MSG point-11 - N1 - N2 1000 - X1 101110 - X2 112041 - X3 101110 - X4 101110 - X5 101110 - X6 112041 - MSG point-11 - N1 - N2 100000 - X1 101110 - X2 112041 - X3 101110 - X4 101110 - X5 101110 - X6 112041 - MSG point-11 - N1 5 - N2 500 - X1 101610 - X2 10931 - X3 500 - X4 320 - X5 - X6 10931 - MSG point-11 - N1 3 - N2 300 - X1 101910 - X2 10431 - X3 300 - X4 10111 - X5 500 - X6 10931 - MSG point-11 - N1 2 - N2 20 - X1 101930 - X2 10131 - X3 20 - X4 - X5 500 - X6 10931 - MSG point-11 - N1 1 - N2 1 - X1 112041 - X2 10111 - X3 10111 - X4 - X5 300 - X6 10431 - MSG point-11 - N1 1 - N2 10 - X1 112041 - X2 10111 - X3 10111 - X4 - X5 300 - X6 10431 - MSG point-11 - N1 1 - N2 100 - X1 112041 - X2 10111 - X3 10111 - X4 - X5 300 - X6 10431 - MSG point-11 - N1 1 - N2 10000 - X1 112041 - X2 10111 - X3 10111 - X4 - X5 300 - X6 10431 - MSG point-12 - N1 - N2 50 - X1 50 - X2 112041 - X3 50 - X4 101000 - X5 
- X6 112041 - MSG point-12 - N1 - N2 60 - X1 110 - X2 111991 - X3 60 - X4 100500 - X5 - X6 112041 - MSG point-12 - N1 - N2 1000 - X1 1110 - X2 111931 - X3 1000 - X4 800 - X5 50 - X6 112041 - MSG point-12 - N1 - N2 100000 - X1 101110 - X2 110931 - X3 100000 - X4 320 - X5 110 - X6 112041 - MSG point-12 - N1 5 - N2 500 - X1 101610 - X2 10931 - X3 500 - X4 21 - X5 1060 - X6 111991 - MSG point-12 - N1 3 - N2 300 - X1 101910 - X2 10431 - X3 300 - X4 11 - X5 101000 - X6 111931 - MSG point-12 - N1 2 - N2 20 - X1 101930 - X2 10131 - X3 20 - X4 110 - X5 100500 - X6 110931 - MSG point-12 - N1 1 - N2 1 - X1 101931 - X2 10111 - X3 1 - X4 10100 - X5 800 - X6 10931 - MSG point-12 - N1 1 - N2 10 - X1 101941 - X2 10110 - X3 10 - X4 10000 - X5 320 - X6 10431 - MSG point-12 - N1 1 - N2 100 - X1 102041 - X2 10100 - X3 100 - X4 - X5 21 - X6 10131 - MSG point-12 - N1 1 - N2 10000 - X1 112041 - X2 10000 - X3 10000 - X4 - X5 11 - X6 10111 - MSG point-13 - N1 5 - N2 500 - X1 500 - X2 112041 - X3 500 - X4 320 - X5 - X6 112041 - MSG point-13 - N1 3 - N2 300 - X1 800 - X2 111541 - X3 300 - X4 10111 - X5 500 - X6 112041 - MSG point-13 - N1 2 - N2 20 - X1 820 - X2 111241 - X3 20 - X4 - X5 500 - X6 112041 - MSG point-13 - N1 1 - N2 1 - X1 10931 - X2 111221 - X3 10111 - X4 - X5 300 - X6 111541 - MSG point-13 - N1 1 - N2 10 - X1 10931 - X2 111221 - X3 10111 - X4 - X5 300 - X6 111541 - MSG point-13 - N1 1 - N2 100 - X1 10931 - X2 111221 - X3 10111 - X4 - X5 300 - X6 111541 - MSG point-13 - N1 1 - N2 10000 - X1 10931 - X2 111221 - X3 10111 - X4 - X5 300 - X6 111541 - MSG point-13 - N1 - N2 50 - X1 112041 - X2 101110 - X3 101110 - X4 101110 - X5 101110 - X6 101110 - MSG point-13 - N1 - N2 60 - X1 112041 - X2 101110 - X3 101110 - X4 101110 - X5 101110 - X6 101110 - MSG point-13 - N1 - N2 1000 - X1 112041 - X2 101110 - X3 101110 - X4 101110 - X5 101110 - X6 101110 - MSG point-13 - N1 - N2 100000 - X1 112041 - X2 101110 - X3 101110 - X4 101110 - X5 101110 - X6 101110 - MSG point-14 - N1 5 - N2 500 - X1 500 - X2 112041 - X3 500 - X4 21 - X5 - X6 112041 - MSG point-14 - N1 3 - N2 300 - X1 800 - X2 111541 - X3 300 - X4 11 - X5 - X6 112041 - MSG point-14 - N1 2 - N2 20 - X1 820 - X2 111241 - X3 20 - X4 110 - X5 500 - X6 112041 - MSG point-14 - N1 1 - N2 1 - X1 821 - X2 111221 - X3 1 - X4 10100 - X5 800 - X6 112041 - MSG point-14 - N1 1 - N2 10 - X1 831 - X2 111220 - X3 10 - X4 10050 - X5 320 - X6 111541 - MSG point-14 - N1 1 - N2 100 - X1 931 - X2 111210 - X3 100 - X4 110 - X5 21 - X6 111241 - MSG point-14 - N1 1 - N2 10000 - X1 10931 - X2 111110 - X3 10000 - X4 1060 - X5 11 - X6 111221 - MSG point-14 - N1 - N2 50 - X1 10981 - X2 101110 - X3 50 - X4 101000 - X5 110 - X6 111220 - MSG point-14 - N1 - N2 60 - X1 11041 - X2 101060 - X3 60 - X4 100000 - X5 10100 - X6 111210 - MSG point-14 - N1 - N2 1000 - X1 12041 - X2 101000 - X3 1000 - X4 - X5 10050 - X6 111110 - MSG point-14 - N1 - N2 100000 - X1 112041 - X2 100000 - X3 100000 - X4 - X5 110 - X6 101110 - MSG point-15 - N1 - N2 50 - X1 4 - X2 101110 - X3 4 - X4 101110 - MSG point-15 - N1 - N2 60 - X1 4 - X2 101110 - X3 4 - X4 101110 - MSG point-15 - N1 - N2 1000 - X1 4 - X2 101110 - X3 4 - X4 101110 - MSG point-15 - N1 - N2 100000 - X1 4 - X2 101110 - X3 4 - X4 101110 - MSG point-15 - N1 1 - N2 1 - X1 0 - X2 - X3 1 - X4 20 - MSG point-15 - N1 1 - N2 10 - X1 0 - X2 - X3 1 - X4 20 - MSG point-15 - N1 1 - N2 100 - X1 0 - X2 - X3 1 - X4 20 - MSG point-15 - N1 1 - N2 10000 - X1 0 - X2 - X3 1 - X4 20 - MSG point-15 - N1 2 - N2 20 - X1 0 - X2 - X3 0 - X4 - MSG point-15 - N1 3 - N2 300 - X1 0 
- X2 - X3 0 - X4 - MSG point-15 - N1 5 - N2 500 - X1 0 - X2 - X3 0 - X4 - MSG point-16 - N1 - N2 50 - X1 1 - X2 50 - X3 1 - X4 50 - MSG point-16 - N1 - N2 60 - X1 1 - X2 60 - X3 1 - X4 60 - MSG point-16 - N1 - N2 1000 - X1 1 - X2 1000 - X3 1 - X4 1000 - MSG point-16 - N1 - N2 100000 - X1 1 - X2 100000 - X3 1 - X4 100000 - MSG point-16 - N1 1 - N2 1 - X1 1 - X2 100000 - X3 1 - X4 10 - MSG point-16 - N1 1 - N2 10 - X1 1 - X2 1 - X3 1 - X4 100 - MSG point-16 - N1 1 - N2 100 - X1 1 - X2 10 - X3 1 - X4 10000 - MSG point-16 - N1 1 - N2 10000 - X1 1 - X2 100 - X3 1 - X4 20 - MSG point-16 - N1 2 - N2 20 - X1 1 - X2 100 - X3 1 - X4 500 - MSG point-16 - N1 3 - N2 300 - X1 1 - X2 100 - X3 0 - X4 - MSG point-16 - N1 5 - N2 500 - X1 1 - X2 10 - X3 0 - X4 - MSG point-17 - N1 1 - N2 1 - X1 1 - X2 10111 - X3 1 - X4 11 - X5 10111 - MSG point-17 - N1 1 - N2 10 - X1 11 - X2 10110 - X3 10 - X4 11 - X5 10110 - MSG point-17 - N1 2 - N2 20 - X1 20 - X2 20 - X3 20 - X4 20 - X5 20 - MSG point-17 - N1 - N2 50 - X1 50 - X2 101110 - X3 50 - X4 110 - X5 101110 - MSG point-17 - N1 - N2 60 - X1 110 - X2 101060 - X3 60 - X4 110 - X5 101060 - MSG point-17 - N1 1 - N2 100 - X1 111 - X2 10100 - X3 100 - X4 100 - X5 10100 - MSG point-17 - N1 3 - N2 300 - X1 300 - X2 300 - X3 300 - X4 300 - X5 300 - MSG point-17 - N1 5 - N2 500 - X1 500 - X2 500 - X3 500 - X4 500 - X5 500 - MSG point-17 - N1 - N2 1000 - X1 1110 - X2 101000 - X3 1000 - X4 1000 - X5 101000 - MSG point-17 - N1 1 - N2 10000 - X1 10111 - X2 10000 - X3 10000 - X4 10000 - X5 10000 - MSG point-17 - N1 - N2 100000 - X1 101110 - X2 100000 - X3 100000 - X4 100000 - X5 100000 -""" - -@pytest.mark.version('>=4.0') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - - diff --git a/tests/functional/index/alter/test_01.py b/tests/functional/index/alter/test_01.py index ebea3f04..da1a0d78 100644 --- a/tests/functional/index/alter/test_01.py +++ b/tests/functional/index/alter/test_01.py @@ -1,36 +1,24 @@ #coding:utf-8 -# -# id: functional.index.alter.01 -# title: ALTER INDEX - INACTIVE -# decription: ALTER INDEX - INACTIVE -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# CREATE INDEX -# Basic SELECT -# tracker_id: -# min_versions: [] -# versions: 2.0 -# qmid: functional.index.alter.alter_index_01 + +""" +ID: index.alter-01 +TITLE: ALTER INDEX - INACTIVE +DESCRIPTION: +FBTEST: functional.index.alter.01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """ +init_script = """ create table test( a integer); create index test_idx on test(a); commit; """ -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """ +test_script = """ alter index test_idx inactive; commit; set list on; @@ -41,16 +29,15 @@ test_script_1 = """ where rdb$index_name=upper('test_idx'); """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ - IDX_NAME TEST_IDX +expected_stdout = """ + IDX_NAME TEST_IDX IS_INACTIVE 1 """ -@pytest.mark.version('>=2.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git 
a/tests/functional/index/alter/test_02.py b/tests/functional/index/alter/test_02.py index 80fbfda9..c56cca27 100644 --- a/tests/functional/index/alter/test_02.py +++ b/tests/functional/index/alter/test_02.py @@ -1,38 +1,25 @@ #coding:utf-8 -# -# id: functional.index.alter.02 -# title: ALTER INDEX -# decription: ALTER INDEX -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# CREATE INDEX -# ALTER INDEX -# Basic SELECT -# tracker_id: -# min_versions: [] -# versions: 2.0 -# qmid: functional.index.alter.alter_index_02 + +""" +ID: index.alter-02 +TITLE: ALTER INDEX +DESCRIPTION: +FBTEST: functional.index.alter.02 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """ +init_script = """ create table test_active_state_toggle( a integer); commit; create index test_active_state_toggle_idx on test_active_state_toggle(a); commit; """ -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """ +test_script = """ alter index test_active_state_toggle_idx inactive; alter index test_active_state_toggle_idx active; commit; @@ -42,16 +29,15 @@ test_script_1 = """ where rdb$index_name=upper('test_active_state_toggle_idx'); """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ RDB$INDEX_NAME TEST_ACTIVE_STATE_TOGGLE_IDX RDB$INDEX_INACTIVE 0 """ -@pytest.mark.version('>=2.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/index/alter/test_03.py b/tests/functional/index/alter/test_03.py index c860a4ac..aed27bf0 100644 --- a/tests/functional/index/alter/test_03.py +++ b/tests/functional/index/alter/test_03.py @@ -1,36 +1,24 @@ #coding:utf-8 -# -# id: functional.index.alter.03 -# title: ALTER INDEX - INACTIVE UNIQUE INDEX -# decription: ALTER INDEX - INACTIVE UNIQUE INDEX -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# CREATE INDEX -# Basic SELECT -# tracker_id: -# min_versions: [] -# versions: 2.0 -# qmid: functional.index.alter.alter_index_03 + +""" +ID: index.alter-03 +TITLE: ALTER INDEX - INACTIVE UNIQUE INDEX +DESCRIPTION: +FBTEST: functional.index.alter.03 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """ +init_script = """ create table test( a integer); create unique index test_idx_unq on test(a); commit; """ -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """ +test_script = """ alter index test_idx_unq inactive; commit; set list on; @@ -42,17 +30,16 @@ test_script_1 = """ where rdb$index_name=upper('test_idx_unq'); """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ - IDX_NAME TEST_IDX_UNQ +expected_stdout = """ + IDX_NAME TEST_IDX_UNQ IS_INACTIVE 1 IS_UNIQUE 1 """ -@pytest.mark.version('>=2.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - 
+@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/index/alter/test_04.py b/tests/functional/index/alter/test_04.py index 93eb3570..d3cdae9b 100644 --- a/tests/functional/index/alter/test_04.py +++ b/tests/functional/index/alter/test_04.py @@ -1,37 +1,27 @@ #coding:utf-8 -# -# id: functional.index.alter.04 -# title: ALTER INDEX - INACTIVE PRIMARY KEY -# decription: ALTER INDEX - INACTIVE PRIMARY KEY -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE with PRIMARY KEY -# tracker_id: -# min_versions: [] -# versions: 3.0 -# qmid: functional.index.alter.alter_index_04 + +""" +ID: index.alter-04 +TITLE: ALTER INDEX - INACTIVE PRIMARY KEY +DESCRIPTION: +FBTEST: functional.index.alter.04 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE t( a INTEGER NOT NULL, +init_script = """CREATE TABLE t( a INTEGER NOT NULL, CONSTRAINT pkindx PRIMARY KEY(a) ); commit;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """ALTER INDEX pkindx INACTIVE;""" +test_script = """ALTER INDEX pkindx INACTIVE;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stderr_1 = """Statement failed, SQLSTATE = 27000 +expected_stderr = """Statement failed, SQLSTATE = 27000 unsuccessful metadata update -ALTER INDEX PKINDX failed -action cancelled by trigger (3) to preserve data integrity @@ -39,8 +29,7 @@ unsuccessful metadata update """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - +def test_1(act: Action): + act.expected_stderr = expected_stderr + act.execute() + assert act.clean_stderr == act.clean_expected_stderr diff --git a/tests/functional/index/alter/test_05.py b/tests/functional/index/alter/test_05.py index 232aa7f7..69aebf92 100644 --- a/tests/functional/index/alter/test_05.py +++ b/tests/functional/index/alter/test_05.py @@ -1,30 +1,16 @@ #coding:utf-8 -# -# id: functional.index.alter.05 -# title: ALTER INDEX - INACTIVE FOREIGN KEY -# decription: ALTER INDEX - INACTIVE FOREIGN KEY -# -# Note !SF 609538 -# Bad error message "-Cannot deactivate primary index" -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE with PRIMARY KEY -# CREATE TABLE with FOREIGN KEY -# tracker_id: -# min_versions: [] -# versions: 3.0 -# qmid: functional.index.alter.alter_index_05 + +""" +ID: index.alter-05 +TITLE: ALTER INDEX - INACTIVE FOREIGN KEY +DESCRIPTION: +FBTEST: functional.index.alter.05 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE pk( a INTEGER NOT NULL, +init_script = """CREATE TABLE pk( a INTEGER NOT NULL, CONSTRAINT pkindx PRIMARY KEY(a) ); commit; @@ -33,13 +19,13 @@ CREATE TABLE fk( a INTEGER NOT NULL, ); commit;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """ALTER INDEX fkindx INACTIVE;""" +test_script = """ALTER INDEX fkindx INACTIVE;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stderr_1 = 
"""Statement failed, SQLSTATE = 27000 +expected_stderr = """Statement failed, SQLSTATE = 27000 unsuccessful metadata update -ALTER INDEX FKINDX failed -action cancelled by trigger (2) to preserve data integrity @@ -47,8 +33,7 @@ unsuccessful metadata update """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - +def test_1(act: Action): + act.expected_stderr = expected_stderr + act.execute() + assert act.clean_stderr == act.clean_expected_stderr diff --git a/tests/functional/index/create/test_01.py b/tests/functional/index/create/test_01.py index c021bdac..20531d6f 100644 --- a/tests/functional/index/create/test_01.py +++ b/tests/functional/index/create/test_01.py @@ -1,41 +1,29 @@ #coding:utf-8 -# -# id: functional.index.create.01 -# title: CREATE INDEX -# decription: CREATE INDEX -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# SHOW INDEX -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.index.create.create_index_01 + +""" +ID: index.create-01 +TITLE: CREATE INDEX +DESCRIPTION: +FBTEST: functional.index.create.01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE t( a INTEGER); +init_script = """CREATE TABLE t( a INTEGER); commit;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """CREATE INDEX test ON t(a); +test_script = """CREATE INDEX test ON t(a); SHOW INDEX test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """TEST INDEX ON T(A)""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """TEST INDEX ON T(A)""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/index/create/test_02.py b/tests/functional/index/create/test_02.py index 89b9c758..eb04c289 100644 --- a/tests/functional/index/create/test_02.py +++ b/tests/functional/index/create/test_02.py @@ -1,42 +1,30 @@ #coding:utf-8 -# -# id: functional.index.create.02 -# title: CREATE UNIQUE INDEX -# decription: CREATE UNIQUE INDEX -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# SHOW INDEX -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.index.create.create_index_02 + +""" +ID: index.create-02 +TITLE: CREATE UNIQUE INDEX +DESCRIPTION: +FBTEST: functional.index.create.02 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE t( a INTEGER); +init_script = """CREATE TABLE t( a INTEGER); commit; """ -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """CREATE UNIQUE INDEX test ON t(a); +test_script = """CREATE UNIQUE INDEX test ON t(a); SHOW INDEX test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """TEST UNIQUE INDEX ON T(A)""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - 
act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """TEST UNIQUE INDEX ON T(A)""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/index/create/test_03.py b/tests/functional/index/create/test_03.py index ec0e9388..bcd74aa4 100644 --- a/tests/functional/index/create/test_03.py +++ b/tests/functional/index/create/test_03.py @@ -1,41 +1,29 @@ #coding:utf-8 -# -# id: functional.index.create.03 -# title: CREATE ASC INDEX -# decription: CREATE ASC INDEX -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# SHOW INDEX -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.index.create.create_index_03 + +""" +ID: index.create-03 +TITLE: CREATE ASC INDEX +DESCRIPTION: +FBTEST: functional.index.create.03 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE t( a INTEGER); +init_script = """CREATE TABLE t( a INTEGER); commit;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """CREATE ASC INDEX test ON t(a); +test_script = """CREATE ASC INDEX test ON t(a); SHOW INDEX test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """TEST INDEX ON T(A)""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """TEST INDEX ON T(A)""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/index/create/test_04.py b/tests/functional/index/create/test_04.py index 06c9faa8..62a09110 100644 --- a/tests/functional/index/create/test_04.py +++ b/tests/functional/index/create/test_04.py @@ -1,41 +1,29 @@ #coding:utf-8 -# -# id: functional.index.create.04 -# title: CREATE ASCENDING INDEX -# decription: CREATE ASCENDING INDEX -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# SHOW INDEX -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.index.create.create_index_04 + +""" +ID: index.create-04 +TITLE: CREATE ASCENDING INDEX +DESCRIPTION: +FBTEST: functional.index.create.04 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE t( a INTEGER); +init_script = """CREATE TABLE t( a INTEGER); commit;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """CREATE ASCENDING INDEX test ON t(a); +test_script = """CREATE ASCENDING INDEX test ON t(a); SHOW INDEX test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """TEST INDEX ON T(A)""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """TEST INDEX ON T(A)""" +@pytest.mark.version('>=3') +def test_1(act: Action): + 
act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/index/create/test_05.py b/tests/functional/index/create/test_05.py index 0710f797..ed85fc03 100644 --- a/tests/functional/index/create/test_05.py +++ b/tests/functional/index/create/test_05.py @@ -1,40 +1,28 @@ #coding:utf-8 -# -# id: functional.index.create.05 -# title: CREATE DESC INDEX -# decription: CREATE DESC INDEX -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# SHOW INDEX -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.index.create.create_index_05 + +""" +ID: index.create-05 +TITLE: CREATE DESC INDEX +DESCRIPTION: +FBTEST: functional.index.create.05 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +init_script = """CREATE TABLE t( a INTEGER);""" -substitutions_1 = [] +db = db_factory(init=init_script) -init_script_1 = """CREATE TABLE t( a INTEGER);""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """CREATE DESC INDEX test ON t(a); +test_script = """CREATE DESC INDEX test ON t(a); SHOW INDEX test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """TEST DESCENDING INDEX ON T(A)""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """TEST DESCENDING INDEX ON T(A)""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/index/create/test_06.py b/tests/functional/index/create/test_06.py index eb73f794..742a9191 100644 --- a/tests/functional/index/create/test_06.py +++ b/tests/functional/index/create/test_06.py @@ -1,42 +1,30 @@ #coding:utf-8 -# -# id: functional.index.create.06 -# title: CREATE DESCENDING INDEX -# decription: CREATE DESCENDING INDEX -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# SHOW INDEX -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.index.create.create_index_06 + +""" +ID: index.create-06 +TITLE: CREATE DESCENDING INDEX +DESCRIPTION: +FBTEST: functional.index.create.06 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE t( a INTEGER); +init_script = """CREATE TABLE t( a INTEGER); commit; """ -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """CREATE DESCENDING INDEX test ON t(a); +test_script = """CREATE DESCENDING INDEX test ON t(a); SHOW INDEX test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """TEST DESCENDING INDEX ON T(A)""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """TEST DESCENDING INDEX ON T(A)""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/index/create/test_07.py 
b/tests/functional/index/create/test_07.py index 619275b8..c6807e8a 100644 --- a/tests/functional/index/create/test_07.py +++ b/tests/functional/index/create/test_07.py @@ -1,42 +1,30 @@ #coding:utf-8 -# -# id: functional.index.create.07 -# title: CREATE INDEX - Multi column -# decription: CREATE INDEX - Multi column -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# SHOW INDEX -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.index.create.create_index_07 + +""" +ID: index.create-07 +TITLE: CREATE INDEX - Multi column +DESCRIPTION: +FBTEST: functional.index.create.07 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE t( a INTEGER, b INT, c INT, d INT); +init_script = """CREATE TABLE t( a INTEGER, b INT, c INT, d INT); commit; """ -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """CREATE INDEX test ON t(a,b,c,d); +test_script = """CREATE INDEX test ON t(a,b,c,d); SHOW INDEX test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """TEST INDEX ON T(A, B, C, D)""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """TEST INDEX ON T(A, B, C, D)""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/index/create/test_08.py b/tests/functional/index/create/test_08.py index 397d0508..b1a6e2a8 100644 --- a/tests/functional/index/create/test_08.py +++ b/tests/functional/index/create/test_08.py @@ -1,28 +1,16 @@ #coding:utf-8 -# -# id: functional.index.create.08 -# title: CREATE INDEX - Table with data -# decription: CREATE INDEX - Table with data -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# INSERT -# SHOW INDEX -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.index.create.create_index_08 + +""" +ID: index.create-08 +TITLE: CREATE INDEX - Table with data +DESCRIPTION: +FBTEST: functional.index.create.08 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE t( a INTEGER); +init_script = """CREATE TABLE t( a INTEGER); commit; INSERT INTO t VALUES(0); INSERT INTO t VALUES(0); @@ -33,18 +21,17 @@ INSERT INTO t VALUES(4); INSERT INTO t VALUES(null); COMMIT;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """CREATE INDEX test ON t(a); +test_script = """CREATE INDEX test ON t(a); SHOW INDEX test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """TEST INDEX ON T(A)""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """TEST INDEX ON T(A)""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git 
a/tests/functional/index/create/test_09.py b/tests/functional/index/create/test_09.py index 8753b023..dab06073 100644 --- a/tests/functional/index/create/test_09.py +++ b/tests/functional/index/create/test_09.py @@ -1,28 +1,16 @@ #coding:utf-8 -# -# id: functional.index.create.09 -# title: CREATE UNIQUE INDEX - Table with data -# decription: CREATE UNIQUE INDEX - Table with data -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# INSERT -# SHOW INDEX -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.index.create.create_index_09 + +""" +ID: index.create-09 +TITLE: CREATE UNIQUE INDEX - Table with data +DESCRIPTION: +FBTEST: functional.index.create.09 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE t( a INTEGER); +init_script = """CREATE TABLE t( a INTEGER); commit; INSERT INTO t VALUES(0); INSERT INTO t VALUES(1); @@ -31,18 +19,17 @@ INSERT INTO t VALUES(3); INSERT INTO t VALUES(4); COMMIT;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """CREATE UNIQUE INDEX test ON t(a); +test_script = """CREATE UNIQUE INDEX test ON t(a); SHOW INDEX test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """TEST UNIQUE INDEX ON T(A)""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """TEST UNIQUE INDEX ON T(A)""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/index/create/test_10.py b/tests/functional/index/create/test_10.py index cc9c1a40..9e56591c 100644 --- a/tests/functional/index/create/test_10.py +++ b/tests/functional/index/create/test_10.py @@ -1,47 +1,34 @@ #coding:utf-8 -# -# id: functional.index.create.10 -# title: CREATE INDEX - try create index with same name -# decription: CREATE INDEX - try create index with same name -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# INSERT -# SHOW INDEX -# tracker_id: -# min_versions: [] -# versions: 3.0 -# qmid: functional.index.create.create_index_10 + +""" +ID: index.create-10 +TITLE: CREATE INDEX - try create index with same name +DESCRIPTION: +FBTEST: functional.index.create.10 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE t( a INTEGER); +init_script = """CREATE TABLE t( a INTEGER); CREATE INDEX test ON t(a); commit; """ -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(sql_dialect=3, init=init_script) -test_script_1 = """CREATE INDEX test ON t(a);""" +test_script = """CREATE INDEX test ON t(a);""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stderr_1 = """Statement failed, SQLSTATE = 42S11 +expected_stderr = """Statement failed, SQLSTATE = 42S11 unsuccessful metadata update -CREATE INDEX TEST failed -Index TEST already exists """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == 
act_1.clean_expected_stderr - +def test_1(act: Action): + act.expected_stderr = expected_stderr + act.execute() + assert act.clean_stderr == act.clean_expected_stderr diff --git a/tests/functional/index/create/test_11.py b/tests/functional/index/create/test_11.py index a5cee7f6..ed65561c 100644 --- a/tests/functional/index/create/test_11.py +++ b/tests/functional/index/create/test_11.py @@ -1,28 +1,16 @@ #coding:utf-8 -# -# id: functional.index.create.11 -# title: CREATE UNIQUE INDEX - Non unique data in table -# decription: CREATE UNIQUE INDEX - Non unique data in table -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# INSERT -# SHOW INDEX -# tracker_id: -# min_versions: [] -# versions: 2.5.3 -# qmid: functional.index.create.create_index_11 + +""" +ID: index.create-11 +TITLE: CREATE UNIQUE INDEX - Non unique data in table +DESCRIPTION: +FBTEST: functional.index.create.11 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5.3 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE t( a INTEGER); +init_script = """CREATE TABLE t( a INTEGER); commit; INSERT INTO t VALUES(0); INSERT INTO t VALUES(0); @@ -33,19 +21,18 @@ INSERT INTO t VALUES(4); COMMIT; """ -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """CREATE UNIQUE INDEX test ON t(a);""" +test_script = """CREATE UNIQUE INDEX test ON t(a);""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stderr_1 = """Statement failed, SQLSTATE = 23000 +expected_stderr = """Statement failed, SQLSTATE = 23000 attempt to store duplicate value (visible to active transactions) in unique index "TEST" -Problematic key value is ("A" = 0)""" -@pytest.mark.version('>=2.5.3') -def test_1(act_1: Action): - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stderr = expected_stderr + act.execute() + assert act.clean_stderr == act.clean_expected_stderr diff --git a/tests/functional/index/create/test_12.py b/tests/functional/index/create/test_12.py index b2ab5efa..18c996ea 100644 --- a/tests/functional/index/create/test_12.py +++ b/tests/functional/index/create/test_12.py @@ -1,30 +1,16 @@ #coding:utf-8 -# -# id: functional.index.create.12 -# title: CREATE UNIQUE INDEX - Null value in table -# decription: CREATE UNIQUE INDEX - Null value in table -# -# Note: Misinterpretable message (attempt to store duplicate value) -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# INSERT -# SHOW INDEX -# tracker_id: -# min_versions: [] -# versions: 1.5 -# qmid: functional.index.create.create_index_12 + +""" +ID: index.create-12 +TITLE: CREATE UNIQUE INDEX - Null value in table +DESCRIPTION: +FBTEST: functional.index.create.12 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.5 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE t( a INTEGER); +init_script = """CREATE TABLE t( a INTEGER); commit; INSERT INTO t VALUES(null); INSERT INTO t VALUES(0); @@ -34,14 +20,12 @@ INSERT INTO t VALUES(3); INSERT INTO t VALUES(4); COMMIT;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """CREATE UNIQUE INDEX test ON t(a);""" +test_script = """CREATE UNIQUE INDEX test ON t(a);""" -act_1 = 
isql_act('db_1', test_script_1, substitutions=substitutions_1) - - -@pytest.mark.version('>=1.5') -def test_1(act_1: Action): - act_1.execute() +act = isql_act('db', test_script) +@pytest.mark.version('>=3') +def test_1(act: Action): + act.execute() diff --git a/tests/functional/intfunc/avg/test_01.py b/tests/functional/intfunc/avg/test_01.py index 2b5c43f5..f40cc559 100644 --- a/tests/functional/intfunc/avg/test_01.py +++ b/tests/functional/intfunc/avg/test_01.py @@ -1,45 +1,32 @@ #coding:utf-8 -# -# id: functional.intfunc.avg.01 -# title: AVG from single integer row -# decription: AVG from single integer row -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# INSERT -# Basic SELECT -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.intfunc.avg.avg_01 + +""" +ID: intfunc.avg-01 +TITLE: AVG from single integer row +DESCRIPTION: +FBTEST: functional.intfunc.avg.01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE test( id INTEGER NOT NULL CONSTRAINT unq UNIQUE, +init_script = """CREATE TABLE test( id INTEGER NOT NULL CONSTRAINT unq UNIQUE, text VARCHAR(32)); INSERT INTO test VALUES(5,null);""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """SELECT AVG(id) FROM test;""" +act = isql_act('db', "SELECT AVG(id) FROM test;") -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ AVG +expected_stdout = """ +AVG ===================== -5""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +5 +""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/avg/test_02.py b/tests/functional/intfunc/avg/test_02.py index 3a0bdf50..45b87358 100644 --- a/tests/functional/intfunc/avg/test_02.py +++ b/tests/functional/intfunc/avg/test_02.py @@ -1,45 +1,32 @@ #coding:utf-8 -# -# id: functional.intfunc.avg.02 -# title: AVG - Test for INTEGER -# decription: If X is exactly halfway between two whole numbers, the result is always the even number. -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# INSERT -# Basic SELECT -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.intfunc.avg.avg_02 + +""" +ID: intfunc.avg-02 +TITLE: AVG - Test for INTEGER +DESCRIPTION: If X is exactly halfway between two whole numbers, the result is always the even number. 
+FBTEST: functional.intfunc.avg.02 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE test( id INTEGER NOT NULL); +init_script = """CREATE TABLE test( id INTEGER NOT NULL); INSERT INTO test VALUES(5); INSERT INTO test VALUES(6);""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """SELECT AVG(id) FROM test;""" +act = isql_act('db', "SELECT AVG(id) FROM test;") -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ AVG +expected_stdout = """ +AVG ===================== -5""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +5 +""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/avg/test_03.py b/tests/functional/intfunc/avg/test_03.py index 33505dc4..f4185592 100644 --- a/tests/functional/intfunc/avg/test_03.py +++ b/tests/functional/intfunc/avg/test_03.py @@ -1,46 +1,33 @@ #coding:utf-8 -# -# id: functional.intfunc.avg.03 -# title: AVG - Test for INTEGER -# decription: Round down (16/3 = 5.3) -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# INSERT -# Basic SELECT -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.intfunc.avg.avg_03 + +""" +ID: intfunc.avg-03 +TITLE: AVG - Test for INTEGER +DESCRIPTION: Round down (16/3 = 5.3) +FBTEST: functional.intfunc.avg.03 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE test( id INTEGER NOT NULL); +init_script = """CREATE TABLE test( id INTEGER NOT NULL); INSERT INTO test VALUES(5); INSERT INTO test VALUES(5); INSERT INTO test VALUES(6);""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """SELECT AVG(id) FROM test;""" +act = isql_act('db', "SELECT AVG(id) FROM test;") -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ AVG +expected_stdout = """ +AVG ===================== -5""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +5 +""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/avg/test_04.py b/tests/functional/intfunc/avg/test_04.py index 86560189..b79db8f0 100644 --- a/tests/functional/intfunc/avg/test_04.py +++ b/tests/functional/intfunc/avg/test_04.py @@ -1,47 +1,35 @@ #coding:utf-8 -# -# id: functional.intfunc.avg.04 -# title: AVG - Test for INTEGER -# decription: Round up (17/3 = 5,66666666666667) -# But it's from INTEGER columns, so 5 is expected. 
-# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# INSERT -# Basic SELECT -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.intfunc.avg.avg_04 + +""" +ID: intfunc.avg-04 +TITLE: AVG - Test for INTEGER +DESCRIPTION: + Round up (17/3 = 5,66666666666667) + But it's from INTEGER columns, so 5 is expected. +FBTEST: functional.intfunc.avg.04 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE test( id INTEGER NOT NULL); +init_script = """CREATE TABLE test( id INTEGER NOT NULL); INSERT INTO test VALUES(5); INSERT INTO test VALUES(6); INSERT INTO test VALUES(6);""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """SELECT AVG(id) FROM test;""" +act = isql_act('db', "SELECT AVG(id) FROM test;") -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ AVG +expected_stdout = """ +AVG ===================== -5""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +5 +""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/avg/test_05.py b/tests/functional/intfunc/avg/test_05.py index 19352ca7..1186577a 100644 --- a/tests/functional/intfunc/avg/test_05.py +++ b/tests/functional/intfunc/avg/test_05.py @@ -1,44 +1,33 @@ #coding:utf-8 -# -# id: functional.intfunc.avg.05 -# title: AVG - DISTINCT -# decription: Dependencies: -# CREATE DATABASE -# CREATE TABLE -# INSERT -# Basic SELECT -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.intfunc.avg.avg_05 + +""" +ID: intfunc.avg-05 +TITLE: AVG - DISTINCT +DESCRIPTION: +FBTEST: functional.intfunc.avg.05 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE test( id INTEGER NOT NULL); +init_script = """CREATE TABLE test( id INTEGER NOT NULL); INSERT INTO test VALUES(5); INSERT INTO test VALUES(5); INSERT INTO test VALUES(7);""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """SELECT AVG(DISTINCT id) FROM test;""" +act = isql_act('db', "SELECT AVG(DISTINCT id) FROM test;") -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ AVG +expected_stdout = """ +AVG ===================== -6""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +6 +""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/avg/test_06.py b/tests/functional/intfunc/avg/test_06.py index 0be5514c..71743456 100644 --- a/tests/functional/intfunc/avg/test_06.py +++ b/tests/functional/intfunc/avg/test_06.py @@ -1,31 +1,23 @@ #coding:utf-8 -# -# id: functional.intfunc.avg.06 -# title: AVG - Integer OverFlow -# decription: -# Refactored 14.10.2019: adjusted expected_stdout/stderr -# 25.06.2020, 4.0.0.2076: changed types 
in SQLDA from numeric to int128 // after discuss with Alex about CORE-6342. -# 09.07.2020, 4.0.0.2091: -# NO more overflow since INT128 was introduced. AVG() is evaluated successfully. -# Removed error message from expected_stderr, added result into expected_stdout. -# -# 27.07.2021: changed sqltype in FB 4.x+ to 580 INT64: this is needed since fix #6874. -# Checked on 5.0.0.113, 4.0.1.2539. -# -# tracker_id: -# min_versions: [] -# versions: 3.0, 4.0 -# qmid: functional.intfunc.avg.avg_06 + +""" +ID: intfunc.avg-06 +TITLE: AVG - Integer OverFlow +DESCRIPTION: +NOTES: +[14.10.2019] Refactored: adjusted expected_stdout/stderr +[25.06.2020] 4.0.0.2076: changed types in SQLDA from numeric to int128 // after discuss with Alex about CORE-6342. +[09.07.2020], 4.0.0.2091: + NO more overflow since INT128 was introduced. AVG() is evaluated successfully. + Removed error message from expected_stderr, added result into expected_stdout. +[27.07.2021] changed sqltype in FB 4.x+ to 580 INT64: this is needed since fix #6874. +FBTEST: functional.intfunc.avg.06 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None - -substitutions_1 = [('[ \t]+', ' ')] - -init_script_1 = """ +init_script = """ recreate table test( id integer not null); insert into test values(2100000000); insert into test values(2100000000); @@ -34,17 +26,19 @@ init_script_1 = """ commit; create or alter view v_test as select avg(2100000000*id)as avg_result from test; commit; - """ +""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """ +test_script = """ set list on; set sqlda_display on; select * from v_test; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('^((?!sqltype|AVG_RESULT).)*$', ''), ('[ \t]+', ' ')]) + +# version: 3.0 expected_stdout_1 = """ INPUT message field count: 0 @@ -53,57 +47,32 @@ expected_stdout_1 = """ : name: AVG_RESULT alias: AVG_RESULT : table: V_TEST owner: SYSDBA """ + expected_stderr_1 = """ Statement failed, SQLSTATE = 22003 Integer overflow. The result of an integer operation caused the most significant bit of the result to carry. 
""" @pytest.mark.version('>=3.0,<4.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - - assert act_1.clean_stdout == act_1.clean_expected_stdout +def test_1(act: Action): + act.expected_stdout = expected_stdout_1 + act.expected_stderr = expected_stderr_1 + act.execute() + assert (act.clean_stderr == act.clean_expected_stderr and + act.clean_stdout == act.clean_expected_stdout) # version: 4.0 -# resources: None - -substitutions_2 = [('^((?!sqltype|AVG_RESULT).)*$', ''), ('[ \t]+', ' ')] - -init_script_2 = """ - recreate table test( id integer not null); - insert into test values(2100000000); - insert into test values(2100000000); - insert into test values(2100000000); - insert into test values(2100000000); - commit; - create or alter view v_test as select avg(2100000000*id)as avg_result from test; - commit; - """ - -db_2 = db_factory(sql_dialect=3, init=init_script_2) - -test_script_2 = """ - set list on; - set sqlda_display on; - select * from v_test; -""" - -act_2 = isql_act('db_2', test_script_2, substitutions=substitutions_2) expected_stdout_2 = """ 01: sqltype: 580 INT64 Nullable scale: 0 subtype: 0 len: 8 : name: AVG_RESULT alias: AVG_RESULT : table: V_TEST owner: SYSDBA - AVG_RESULT 4410000000000000000 + AVG_RESULT 4410000000000000000 """ @pytest.mark.version('>=4.0') -def test_2(act_2: Action): - act_2.expected_stdout = expected_stdout_2 - act_2.execute() - assert act_2.clean_stdout == act_2.clean_expected_stdout - +def test_2(act: Action): + act.expected_stdout = expected_stdout_2 + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/avg/test_07.py b/tests/functional/intfunc/avg/test_07.py index 86f615f6..e39bbcf2 100644 --- a/tests/functional/intfunc/avg/test_07.py +++ b/tests/functional/intfunc/avg/test_07.py @@ -1,45 +1,34 @@ #coding:utf-8 -# -# id: functional.intfunc.avg.07 -# title: AVG - Integer with NULL -# decription: Dependencies: -# CREATE DATABASE -# CREATE TABLE -# INSERT -# SELECT -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.intfunc.avg.avg_07 + +""" +ID: intfunc.avg-07 +TITLE: AVG - Integer with NULL +DESCRIPTION: +FBTEST: functional.intfunc.avg.07 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE test( id INTEGER); +init_script = """CREATE TABLE test( id INTEGER); INSERT INTO test VALUES(12); INSERT INTO test VALUES(13); INSERT INTO test VALUES(14); INSERT INTO test VALUES(NULL);""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """SELECT AVG(id) FROM test;""" +act = isql_act('db', "SELECT AVG(id) FROM test;") -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ AVG +expected_stdout = """ +AVG ===================== -13""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +13 +""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/avg/test_08.py b/tests/functional/intfunc/avg/test_08.py index c238681f..29dd9a38 100644 
--- a/tests/functional/intfunc/avg/test_08.py +++ b/tests/functional/intfunc/avg/test_08.py @@ -1,42 +1,31 @@ #coding:utf-8 -# -# id: functional.intfunc.avg.08 -# title: AVG - NULL test -# decription: Dependencies: -# CREATE DATABASE -# CREATE TABLE -# INSERT -# SELECT -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.intfunc.avg.avg_08 + +""" +ID: intfunc.avg-08 +TITLE: AVG - NULL test +DESCRIPTION: +FBTEST: functional.intfunc.avg.08 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE test( id INTEGER); +init_script = """CREATE TABLE test( id INTEGER); INSERT INTO test VALUES(NULL);""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """SELECT AVG(id) FROM test;""" +act = isql_act('db', "SELECT AVG(id) FROM test;") -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ AVG +expected_stdout = """ +AVG ===================== -""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout + +""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/avg/test_09.py b/tests/functional/intfunc/avg/test_09.py index 33ac430d..c9abc8ae 100644 --- a/tests/functional/intfunc/avg/test_09.py +++ b/tests/functional/intfunc/avg/test_09.py @@ -1,44 +1,31 @@ #coding:utf-8 -# -# id: functional.intfunc.avg.09 -# title: AVG - DOUBLE PRECISION -# decription: AVG from single DOUBLE PRECISION row -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# INSERT -# Basic SELECT -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.intfunc.avg.avg_09 + +""" +ID: intfunc.avg-09 +TITLE: AVG - DOUBLE PRECISION +DESCRIPTION: AVG from single DOUBLE PRECISION row +FBTEST: functional.intfunc.avg.09 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE test( id DOUBLE PRECISION NOT NULL); +init_script = """CREATE TABLE test( id DOUBLE PRECISION NOT NULL); INSERT INTO test VALUES(5.123456789);""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """SELECT AVG(id) FROM test;""" +act = isql_act('db', "SELECT AVG(id) FROM test;") -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ AVG +expected_stdout = """ +AVG ======================= -5.123456789000000""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +5.123456789000000 +""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/binary/test_and_01.py b/tests/functional/intfunc/binary/test_and_01.py index 1cd070f7..220fe7e3 100644 --- a/tests/functional/intfunc/binary/test_and_01.py +++ b/tests/functional/intfunc/binary/test_and_01.py @@ -1,34 +1,25 @@ #coding:utf-8 -# -# id: functional.intfunc.binary.and_01 -# 
title: New Built-in Functions, Firebird 2.1 : BIN_AND( [, ...] ) -# decription: test of BIN_AND -# -# Returns the result of a binary AND operation performed on all arguments. -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.binary.bin_and_01 + +""" +ID: intfunc.binary.and +TITLE: New Built-in Functions, Firebird 2.1 : BIN_AND( [, ...] ) +DESCRIPTION: Returns the result of a binary AND operation performed on all arguments. +FBTEST: functional.intfunc.binary.and_01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +db = db_factory() -substitutions_1 = [] +test_script = """select BIN_AND( 1, 1) from rdb$database; +select BIN_AND( 1, 0) from rdb$database; +""" -init_script_1 = """""" +act = isql_act('db', test_script) -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """select BIN_AND( 1, 1) from rdb$database; - -select BIN_AND( 1, 0) from rdb$database;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ BIN_AND +expected_stdout = """ + BIN_AND ============ 1 @@ -39,9 +30,8 @@ expected_stdout_1 = """ BIN_AND """ -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/binary/test_or_01.py b/tests/functional/intfunc/binary/test_or_01.py index 0b69057a..d408df46 100644 --- a/tests/functional/intfunc/binary/test_or_01.py +++ b/tests/functional/intfunc/binary/test_or_01.py @@ -1,36 +1,25 @@ #coding:utf-8 -# -# id: functional.intfunc.binary.or_01 -# title: New Built-in Functions, Firebird 2.1 : BIN_OR( [, ...] ) -# decription: test of BIN_OR -# -# -# Returns the result of a binary OR operation performed on all arguments. -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.binary.bin_or_01 + +""" +ID: intfunc.binary.or +TITLE: New Built-in Functions, Firebird 2.1 : BIN_OR( [, ...] ) +DESCRIPTION: Returns the result of a binary OR operation performed on all arguments. 
+FBTEST: functional.intfunc.binary.or_01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None - -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """select BIN_OR( 1, 1) from rdb$database; +db = db_factory() +test_script = """select BIN_OR( 1, 1) from rdb$database; select BIN_OR( 1, 0) from rdb$database; select BIN_OR( 0, 0) from rdb$database;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ BIN_OR +expected_stdout = """ + BIN_OR ============ 1 @@ -44,12 +33,10 @@ expected_stdout_1 = """ BIN_OR ============ 0 - """ -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/binary/test_shl_01.py b/tests/functional/intfunc/binary/test_shl_01.py index b930c1b5..7baa077f 100644 --- a/tests/functional/intfunc/binary/test_shl_01.py +++ b/tests/functional/intfunc/binary/test_shl_01.py @@ -1,41 +1,27 @@ #coding:utf-8 -# -# id: functional.intfunc.binary.shl_01 -# title: New Built-in Functions, Firebird 2.1 : BIN_SHL( , ) -# decription: test of BIN_SHL -# -# Returns the result of a binary shift left operation performed on the arguments (first << second). -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.binary.bin_shl_01 - -import pytest -from firebird.qa import db_factory, isql_act, Action - -# version: 2.1 -# resources: None - -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """select BIN_SHL( 8,1) from rdb$database;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ BIN_SHL -===================== - 16 - +""" +ID: intfunc.binary.shl +TITLE: New Built-in Functions, Firebird 2.1 : BIN_SHL( , ) +DESCRIPTION: Returns the result of a binary shift left operation performed on the arguments (first << second). +FBTEST: functional.intfunc.binary.shl_01 """ -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +import pytest +from firebird.qa import * +db = db_factory() + +act = isql_act('db', "select BIN_SHL(8, 1) from rdb$database;") + +expected_stdout = """ + BIN_SHL +===================== + 16 +""" + +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/binary/test_shr_01.py b/tests/functional/intfunc/binary/test_shr_01.py index fd168464..d65ba0aa 100644 --- a/tests/functional/intfunc/binary/test_shr_01.py +++ b/tests/functional/intfunc/binary/test_shr_01.py @@ -1,42 +1,27 @@ #coding:utf-8 -# -# id: functional.intfunc.binary.shr_01 -# title: New Built-in Functions, Firebird 2.1 : BIN_SHR( , ) -# decription: test of BIN_SHR -# -# Returns the result of a binary shift left operation performed on the arguments (first << second). 
-# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.binary.bin_shr_01 - -import pytest -from firebird.qa import db_factory, isql_act, Action - -# version: 2.1 -# resources: None - -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """select BIN_SHR( 8,1) from rdb$database;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ BIN_SHR -===================== - 4 - - +""" +ID: intfunc.binary.shr +TITLE: New Built-in Functions, Firebird 2.1 : BIN_SHR( , ) +DESCRIPTION: Returns the result of a binary shift right operation performed on the arguments (first >> second). +FBTEST: functional.intfunc.binary.shr_01 """ -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +import pytest +from firebird.qa import * +db = db_factory() + +act = isql_act('db', "select BIN_SHR(8, 1) from rdb$database;") + +expected_stdout = """ + BIN_SHR +===================== + 4 +""" + +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/binary/test_xor_01.py b/tests/functional/intfunc/binary/test_xor_01.py index f7bfdab1..15b25eed 100644 --- a/tests/functional/intfunc/binary/test_xor_01.py +++ b/tests/functional/intfunc/binary/test_xor_01.py @@ -1,34 +1,26 @@ #coding:utf-8 -# -# id: functional.intfunc.binary.xor_01 -# title: New Built-in Functions, Firebird 2.1 : BIN_XOR( [, ...] ) -# decription: test of BIN_XOR -# -# Returns the result of a binary XOR operation performed on all arguments. -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.binary.bin_xor_01 + +""" +ID: intfunc.binary.xor +TITLE: New Built-in Functions, Firebird 2.1 : BIN_XOR( [, ...] ) +DESCRIPTION: Returns the result of a binary XOR operation performed on all arguments.
+FBTEST: functional.intfunc.binary.xor_01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """select BIN_XOR( 0,1) from rdb$database; +test_script = """select BIN_XOR( 0,1) from rdb$database; select BIN_XOR( 0,0) from rdb$database; -select BIN_XOR( 1,1) from rdb$database;""" +select BIN_XOR( 1,1) from rdb$database; +""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ BIN_XOR +expected_stdout = """ + BIN_XOR ============ 1 @@ -41,14 +33,10 @@ expected_stdout_1 = """ BIN_XOR BIN_XOR ============ 0 - - - """ -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/cast/test_01.py b/tests/functional/intfunc/cast/test_01.py index 8831aa5c..d3b772ef 100644 --- a/tests/functional/intfunc/cast/test_01.py +++ b/tests/functional/intfunc/cast/test_01.py @@ -1,41 +1,27 @@ #coding:utf-8 -# -# id: functional.intfunc.cast.01 -# title: CAST Numeric -> CHAR -# decription: CAST Numeric -> CHAR -# -# Dependencies: -# CREATE DATABASE -# Basic SELECT -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.intfunc.cast.cast_01 + +""" +ID: intfunc.cast-01 +TITLE: CAST Numeric -> CHAR +DESCRIPTION: +FBTEST: functional.intfunc.cast.01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +db = db_factory() -substitutions_1 = [] +act = isql_act('db', "SELECT CAST(1.25001 AS CHAR(21)) FROM rdb$Database;") -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """SELECT CAST(1.25001 AS CHAR(21)) FROM rdb$Database;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """CAST +expected_stdout = """CAST ===================== -1.25001""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +1.25001 +""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/cast/test_02.py b/tests/functional/intfunc/cast/test_02.py index 6752e11f..b1957f7a 100644 --- a/tests/functional/intfunc/cast/test_02.py +++ b/tests/functional/intfunc/cast/test_02.py @@ -1,41 +1,27 @@ #coding:utf-8 -# -# id: functional.intfunc.cast.02 -# title: CAST Numeric -> VARCHAR -# decription: CAST Numeric -> VARCHAR -# -# Dependencies: -# CREATE DATABASE -# Basic SELECT -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.intfunc.cast.cast_02 + +""" +ID: intfunc.cast-02 +TITLE: CAST Numeric -> VARCHAR +DESCRIPTION: +FBTEST: functional.intfunc.cast.02 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +db = db_factory() -substitutions_1 = [] +act = isql_act('db', "SELECT 
CAST(1.25001 AS VARCHAR(21)) FROM rdb$Database;") -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """SELECT CAST(1.25001 AS VARCHAR(21)) FROM rdb$Database;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """CAST +expected_stdout = """CAST ===================== -1.25001""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +1.25001 +""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/cast/test_03.py b/tests/functional/intfunc/cast/test_03.py index 3bda5147..b43a8d34 100644 --- a/tests/functional/intfunc/cast/test_03.py +++ b/tests/functional/intfunc/cast/test_03.py @@ -1,47 +1,32 @@ #coding:utf-8 -# -# id: functional.intfunc.cast.03 -# title: CAST Numeric -> DATE -# decription: Convert from number to date is not (yet) supported -# -# CAST Numeric -> DATE -# -# Dependencies: -# CREATE DATABASE -# Basic SELECT -# tracker_id: -# min_versions: [] -# versions: 2.5 -# qmid: functional.intfunc.cast.cast_03 + +""" +ID: intfunc.cast-03 +TITLE: CAST Numeric -> DATE +DESCRIPTION: Convert from number to date is not (yet) supported +FBTEST: functional.intfunc.cast.03 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory() -substitutions_1 = [] +act = isql_act('db', "SELECT CAST(CAST(1.25001 AS INT) AS DATE) FROM rdb$Database;") -init_script_1 = """""" +expected_stdout = """CAST +=========== +""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +expected_stderr = """Statement failed, SQLSTATE = 22018 -test_script_1 = """SELECT CAST(CAST(1.25001 AS INT) AS DATE) FROM rdb$Database;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """CAST -===========""" -expected_stderr_1 = '''Statement failed, SQLSTATE = 22018 - -conversion error from string "1"''' - -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - - assert act_1.clean_stdout == act_1.clean_expected_stdout +conversion error from string "1" +""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.expected_stderr = expected_stderr + act.execute() + assert (act.clean_stderr == act.clean_expected_stderr and + act.clean_stdout == act.clean_expected_stdout) diff --git a/tests/functional/intfunc/cast/test_04.py b/tests/functional/intfunc/cast/test_04.py index c9d1d641..19e97dc5 100644 --- a/tests/functional/intfunc/cast/test_04.py +++ b/tests/functional/intfunc/cast/test_04.py @@ -1,42 +1,28 @@ #coding:utf-8 -# -# id: functional.intfunc.cast.04 -# title: CAST Numeric -> Numeric (Round down) -# decription: CAST Numeric -> Numeric -# Round down -# -# Dependencies: -# CREATE DATABASE -# Basic SELECT -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.intfunc.cast.cast_04 + +""" +ID: intfunc.cast-04 +TITLE: CAST Numeric -> Numeric (Round down) +DESCRIPTION: +FBTEST: functional.intfunc.cast.04 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * 
-# version: 1.0 -# resources: None +db = db_factory() -substitutions_1 = [] +act = isql_act('db', "SELECT CAST(1.24999 AS NUMERIC(2,1)) FROM rdb$Database;") -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """SELECT CAST(1.24999 AS NUMERIC(2,1)) FROM rdb$Database;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ CAST +expected_stdout = """ +CAST ======= -1.2""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +1.2 +""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/cast/test_05.py b/tests/functional/intfunc/cast/test_05.py index 7af50899..a55faf9a 100644 --- a/tests/functional/intfunc/cast/test_05.py +++ b/tests/functional/intfunc/cast/test_05.py @@ -1,42 +1,28 @@ #coding:utf-8 -# -# id: functional.intfunc.cast.05 -# title: CAST Numeric -> Numeric (Round up) -# decription: CAST Numeric -> Numeric -# Round up -# -# Dependencies: -# CREATE DATABASE -# Basic SELECT -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.intfunc.cast.cast_05 + +""" +ID: intfunc.cast-05 +TITLE: CAST Numeric -> Numeric (Round up) +DESCRIPTION: +FBTEST: functional.intfunc.cast.05 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +db = db_factory() -substitutions_1 = [] +act = isql_act('db', "SELECT CAST(1.25001 AS NUMERIC(2,1)) FROM rdb$Database;") -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """SELECT CAST(1.25001 AS NUMERIC(2,1)) FROM rdb$Database;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ CAST +expected_stdout = """ +CAST ======= -1.3""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +1.3 +""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/cast/test_06.py b/tests/functional/intfunc/cast/test_06.py index af0b8343..d410961b 100644 --- a/tests/functional/intfunc/cast/test_06.py +++ b/tests/functional/intfunc/cast/test_06.py @@ -1,42 +1,28 @@ #coding:utf-8 -# -# id: functional.intfunc.cast.06 -# title: CAST CHAR -> INTEGER -# decription: CAST CHAR -> INTEGER -# Round down -# -# Dependencies: -# CREATE DATABASE -# Basic SELECT -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.intfunc.cast.cast_06 + +""" +ID: intfunc.cast-06 +TITLE: CAST CHAR -> INTEGER (Round down) +DESCRIPTION: +FBTEST: functional.intfunc.cast.06 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +db = db_factory() -substitutions_1 = [] +act = isql_act('db', "SELECT CAST('1.25001' AS INTEGER) FROM rdb$Database;") -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """SELECT CAST('1.25001' AS INTEGER) FROM rdb$Database;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - 
-expected_stdout_1 = """ CAST +expected_stdout = """ +CAST ============ -1""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +1 +""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/cast/test_07.py b/tests/functional/intfunc/cast/test_07.py index b782e396..07e45ed1 100644 --- a/tests/functional/intfunc/cast/test_07.py +++ b/tests/functional/intfunc/cast/test_07.py @@ -1,41 +1,27 @@ #coding:utf-8 -# -# id: functional.intfunc.cast.07 -# title: CAST CHAR -> INTEGER -# decription: CAST CHAR -> INTEGER -# -# Dependencies: -# CREATE DATABASE -# Basic SELECT -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.cast.cast_07 + +""" +ID: intfunc.cast-07 +TITLE: CAST CHAR -> INTEGER +DESCRIPTION: +FBTEST: functional.intfunc.cast.07 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +db = db_factory() -substitutions_1 = [] +act = isql_act('db', "SELECT CAST('1.5001' AS INTEGER) FROM rdb$Database;") -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """SELECT CAST('1.5001' AS INTEGER) FROM rdb$Database;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """CAST +expected_stdout = """ +CAST ============ 2 """ -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/cast/test_08.py b/tests/functional/intfunc/cast/test_08.py index b06cd0db..36799c75 100644 --- a/tests/functional/intfunc/cast/test_08.py +++ b/tests/functional/intfunc/cast/test_08.py @@ -1,43 +1,30 @@ #coding:utf-8 -# -# id: functional.intfunc.cast.08 -# title: CAST CHAR -> DATE -# decription: CAST CHAR -> DATE -# Be careful about date format on FB server ! -# Universal format is not defined or not documented. -# -# Dependencies: -# CREATE DATABASE -# Basic SELECT -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.intfunc.cast.cast_08 + +""" +ID: intfunc.cast-08 +TITLE: CAST CHAR -> DATE +DESCRIPTION: + Be careful about date format on FB server ! + Universal format is not defined or not documented. 
+FBTEST: functional.intfunc.cast.08 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +db = db_factory() -substitutions_1 = [] +act = isql_act('db', "SELECT CAST('28.1.2001' AS DATE) FROM rdb$Database;") -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """SELECT CAST('28.1.2001' AS DATE) FROM rdb$Database;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ CAST +expected_stdout = """ +CAST =========== -2001-01-28""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +2001-01-28 +""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/cast/test_09.py b/tests/functional/intfunc/cast/test_09.py index 8d227828..4964a9cb 100644 --- a/tests/functional/intfunc/cast/test_09.py +++ b/tests/functional/intfunc/cast/test_09.py @@ -1,47 +1,35 @@ #coding:utf-8 -# -# id: functional.intfunc.cast.09 -# title: CAST CHAR -> DATE -# decription: CAST CHAR -> DATE -# Be careful about date format on FB server ! -# Universal format is not defined or not documented. -# -# Dependencies: -# CREATE DATABASE -# Basic SELECT -# tracker_id: -# min_versions: [] -# versions: 2.5 -# qmid: functional.intfunc.cast.cast_09 + +""" +ID: intfunc.cast-09 +TITLE: CAST CHAR -> DATE +DESCRIPTION: + Be careful about date format on FB server ! + Universal format is not defined or not documented. +FBTEST: functional.intfunc.cast.09 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory() -substitutions_1 = [] +act = isql_act('db', "SELECT CAST('29.2.2002' AS DATE) FROM rdb$Database;") -init_script_1 = """""" +expected_stdout = """ +CAST +=========== +""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +expected_stderr = """Statement failed, SQLSTATE = 22018 -test_script_1 = """SELECT CAST('29.2.2002' AS DATE) FROM rdb$Database;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ CAST -===========""" -expected_stderr_1 = '''Statement failed, SQLSTATE = 22018 - -conversion error from string "29.2.2002"''' - -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - - assert act_1.clean_stdout == act_1.clean_expected_stdout +conversion error from string "29.2.2002" +""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.expected_stderr = expected_stderr + act.execute() + assert (act.clean_stderr == act.clean_expected_stderr and + act.clean_stdout == act.clean_expected_stdout) diff --git a/tests/functional/intfunc/cast/test_10.py b/tests/functional/intfunc/cast/test_10.py index 2cef484b..5bd7ef84 100644 --- a/tests/functional/intfunc/cast/test_10.py +++ b/tests/functional/intfunc/cast/test_10.py @@ -1,43 +1,30 @@ #coding:utf-8 -# -# id: functional.intfunc.cast.10 -# title: CAST CHAR -> TIME -# decription: CAST CHAR -> TIME -# Be careful about time format on FB server ! 
-# Universal format is not defined or not documented. -# -# Dependencies: -# CREATE DATABASE -# Basic SELECT -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.intfunc.cast.cast_10 + +""" +ID: intfunc.cast-10 +TITLE: CAST CHAR -> TIME +DESCRIPTION: + Be careful about time format on FB server ! + Universal format is not defined or not documented. +FBTEST: functional.intfunc.cast.10 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +db = db_factory() -substitutions_1 = [] +act = isql_act('db', "SELECT CAST('14:34:59.1234' AS TIME) FROM rdb$Database;") -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """SELECT CAST('14:34:59.1234' AS TIME) FROM rdb$Database;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ CAST +expected_stdout = """ +CAST ============= -14:34:59.1234""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +14:34:59.1234 +""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/cast/test_11.py b/tests/functional/intfunc/cast/test_11.py index b5b0e371..4bca4bd4 100644 --- a/tests/functional/intfunc/cast/test_11.py +++ b/tests/functional/intfunc/cast/test_11.py @@ -1,42 +1,29 @@ #coding:utf-8 -# -# id: functional.intfunc.cast.11 -# title: CAST CHAR -> TIME -# decription: CAST CHAR -> TIME -# Be careful about time format on FB server ! -# Universal format is not defined or not documented. -# -# Dependencies: -# CREATE DATABASE -# Basic SELECT -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.intfunc.cast.cast_11 + +""" +ID: intfunc.cast-11 +TITLE: CAST CHAR -> TIME +DESCRIPTION: + Be careful about time format on FB server ! + Universal format is not defined or not documented. +FBTEST: functional.intfunc.cast.11 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +db = db_factory() -substitutions_1 = [] +act = isql_act('db', "SELECT CAST('14:34:59.1234' AS TIME) FROM rdb$Database;") -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """SELECT CAST('14:34:59.1234' AS TIME) FROM rdb$Database;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ CAST +expected_stdout = """ +CAST ============= -14:34:59.1234""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +14:34:59.1234 +""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/cast/test_12.py b/tests/functional/intfunc/cast/test_12.py index 49f0bfaa..acc2d6d9 100644 --- a/tests/functional/intfunc/cast/test_12.py +++ b/tests/functional/intfunc/cast/test_12.py @@ -1,47 +1,35 @@ #coding:utf-8 -# -# id: functional.intfunc.cast.12 -# title: CAST CHAR -> TIME -# decription: CAST CHAR -> TIME -# Be careful about date format on FB server ! 
-# Universal format is not defined or not documented. -# -# Dependencies: -# CREATE DATABASE -# Basic SELECT -# tracker_id: -# min_versions: [] -# versions: 2.5 -# qmid: functional.intfunc.cast.cast_12 + +""" +ID: intfunc.cast-12 +TITLE: CAST CHAR -> TIME +DESCRIPTION: + Be careful about time format on FB server ! + Universal format is not defined or not documented. +FBTEST: functional.intfunc.cast.12 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory() -substitutions_1 = [] +act = isql_act('db', "SELECT CAST('9:11:60' AS TIME) FROM rdb$Database;") -init_script_1 = """""" +expected_stdout = """ +CAST +============= +""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +expected_stderr = """Statement failed, SQLSTATE = 22018 -test_script_1 = """SELECT CAST('9:11:60' AS TIME) FROM rdb$Database;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ CAST -=============""" -expected_stderr_1 = '''Statement failed, SQLSTATE = 22018 - -conversion error from string "9:11:60"''' - -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - - assert act_1.clean_stdout == act_1.clean_expected_stdout +conversion error from string "9:11:60" +""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.expected_stderr = expected_stderr + act.execute() + assert (act.clean_stderr == act.clean_expected_stderr and + act.clean_stdout == act.clean_expected_stdout) diff --git a/tests/functional/intfunc/cast/test_13.py b/tests/functional/intfunc/cast/test_13.py index d493501e..26dbf194 100644 --- a/tests/functional/intfunc/cast/test_13.py +++ b/tests/functional/intfunc/cast/test_13.py @@ -1,43 +1,30 @@ #coding:utf-8 -# -# id: functional.intfunc.cast.13 -# title: CAST CHAR -> TIMESTAM -# decription: CAST CHAR -> TIMESTAMP -# Be careful about time format on FB server ! -# Universal format is not defined or not documented. -# -# Dependencies: -# CREATE DATABASE -# Basic SELECT -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.intfunc.cast.cast_13 + +""" +ID: intfunc.cast-13 +TITLE: CAST CHAR -> TIMESTAM +DESCRIPTION: + Be careful about time format on FB server ! + Universal format is not defined or not documented. 
+FBTEST: functional.intfunc.cast.13 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +db = db_factory() -substitutions_1 = [] +act = isql_act('db', "SELECT CAST('10.2.1489 14:34:59.1234' AS TIMESTAMP) FROM rdb$Database;") -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """SELECT CAST('10.2.1489 14:34:59.1234' AS TIMESTAMP) FROM rdb$Database;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ CAST +expected_stdout = """ +CAST ========================= -1489-02-10 14:34:59.1234""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +1489-02-10 14:34:59.1234 +""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/cast/test_14.py b/tests/functional/intfunc/cast/test_14.py index 2677a9fa..f5bbd042 100644 --- a/tests/functional/intfunc/cast/test_14.py +++ b/tests/functional/intfunc/cast/test_14.py @@ -1,43 +1,30 @@ #coding:utf-8 -# -# id: functional.intfunc.cast.14 -# title: CAST CHAR -> TIMESTAMP -# decription: CAST CHAR -> TIMESTAMP -# Be careful about time format on FB server ! -# Universal format is not defined or not documented. -# -# Dependencies: -# CREATE DATABASE -# Basic SELECT -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.intfunc.cast.cast_14 + +""" +ID: intfunc.cast-14 +TITLE: CAST CHAR -> TIMESTAMP +DESCRIPTION: + Be careful about time format on FB server ! + Universal format is not defined or not documented. +FBTEST: functional.intfunc.cast.14 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +db = db_factory() -substitutions_1 = [] +act = isql_act('db', "SELECT CAST('10.2.1489 14:34:59.1234' AS TIMESTAMP) FROM rdb$Database;") -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """SELECT CAST('10.2.1489 14:34:59.1234' AS TIMESTAMP) FROM rdb$Database;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ CAST +expected_stdout = """ +CAST ========================= -1489-02-10 14:34:59.1234""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +1489-02-10 14:34:59.1234 +""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/cast/test_15.py b/tests/functional/intfunc/cast/test_15.py index 085e5a7d..d2ea836c 100644 --- a/tests/functional/intfunc/cast/test_15.py +++ b/tests/functional/intfunc/cast/test_15.py @@ -1,44 +1,30 @@ #coding:utf-8 -# -# id: functional.intfunc.cast.15 -# title: CAST DATE -> CHAR -# decription: CAST DATE -> CHAR -# Be careful about time format on FB server ! -# Universal format is not defined or not documented. 
-# -# Dependencies: -# CREATE DATABASE -# CAST CHAR -> DATE -# Basic SELECT -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.intfunc.cast.cast_15 + +""" +ID: intfunc.cast-15 +TITLE: CAST DATE -> CHAR +DESCRIPTION: + Be careful about date format on FB server ! + Universal format is not defined or not documented. +FBTEST: functional.intfunc.cast.15 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +db = db_factory() -substitutions_1 = [] +act = isql_act('db', "SELECT CAST(CAST('10.2.1973' AS DATE) AS CHAR(32)) FROM rdb$Database;") -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """SELECT CAST(CAST('10.2.1973' AS DATE) AS CHAR(32)) FROM rdb$Database;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """CAST +expected_stdout = """ +CAST ================================ -1973-02-10""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +1973-02-10 +""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/cast/test_16.py b/tests/functional/intfunc/cast/test_16.py index 986d9111..d40f874b 100644 --- a/tests/functional/intfunc/cast/test_16.py +++ b/tests/functional/intfunc/cast/test_16.py @@ -1,44 +1,30 @@ #coding:utf-8 -# -# id: functional.intfunc.cast.16 -# title: CAST DATE -> VARCHAR -# decription: CAST DATE -> VARCHAR -# Be careful about time format on FB server ! -# Universal format is not defined or not documented. -# -# Dependencies: -# CREATE DATABASE -# CAST CHAR -> DATE -# Basic SELECT -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.intfunc.cast.cast_16 + +""" +ID: intfunc.cast-16 +TITLE: CAST DATE -> VARCHAR +DESCRIPTION: + Be careful about date format on FB server ! + Universal format is not defined or not documented. 
+FBTEST: functional.intfunc.cast.16 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +db = db_factory() -substitutions_1 = [] +act = isql_act('db', "SELECT CAST(CAST('10.2.1973' AS DATE) AS VARCHAR(40)) FROM rdb$Database;") -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """SELECT CAST(CAST('10.2.1973' AS DATE) AS VARCHAR(40)) FROM rdb$Database;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """CAST +expected_stdout = """ +CAST ======================================== -1973-02-10""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +1973-02-10 +""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/cast/test_17.py b/tests/functional/intfunc/cast/test_17.py index f6b61387..43149e4c 100644 --- a/tests/functional/intfunc/cast/test_17.py +++ b/tests/functional/intfunc/cast/test_17.py @@ -1,44 +1,30 @@ #coding:utf-8 -# -# id: functional.intfunc.cast.17 -# title: CAST DATE -> TIMESTAMP -# decription: CAST DATE -> TIMESTAMP -# Be careful about date/time format on FB server ! -# Universal format is not defined or not documented. -# -# Dependencies: -# CREATE DATABASE -# CAST CHAR -> DATE -# Basic SELECT -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.intfunc.cast.cast_17 + +""" +ID: intfunc.cast-17 +TITLE: CAST DATE -> TIMESTAMP +DESCRIPTION: + Be careful about date/time format on FB server ! + Universal format is not defined or not documented. +FBTEST: functional.intfunc.cast.17 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +db = db_factory() -substitutions_1 = [] +act = isql_act('db', "SELECT CAST(CAST('10.2.1973' AS DATE) AS TIMESTAMP) FROM rdb$Database;") -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """SELECT CAST(CAST('10.2.1973' AS DATE) AS TIMESTAMP) FROM rdb$Database;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ CAST +expected_stdout = """ +CAST ========================= -1973-02-10 00:00:00.0000""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +1973-02-10 00:00:00.0000 +""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/cast/test_18.py b/tests/functional/intfunc/cast/test_18.py index 27af0120..3d891ff7 100644 --- a/tests/functional/intfunc/cast/test_18.py +++ b/tests/functional/intfunc/cast/test_18.py @@ -1,44 +1,30 @@ #coding:utf-8 -# -# id: functional.intfunc.cast.18 -# title: CAST TIME -> CHAR -# decription: CAST TIME -> CHAR -# Be careful about date/time format on FB server ! -# Universal format is not defined or not documented. 
-# -# Dependencies: -# CREATE DATABASE -# CAST CHAR -> TIME -# Basic SELECT -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.intfunc.cast.cast_18 + +""" +ID: intfunc.cast-18 +TITLE: CAST TIME -> CHAR +DESCRIPTION: + Be careful about time format on FB server ! + Universal format is not defined or not documented. +FBTEST: functional.intfunc.cast.18 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +db = db_factory() -substitutions_1 = [] +act = isql_act('db', "SELECT CAST(CAST('13:28:45' AS TIME) AS CHAR(32)) FROM rdb$Database;") -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """SELECT CAST(CAST('13:28:45' AS TIME) AS CHAR(32)) FROM rdb$Database;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """CAST +expected_stdout = """ +CAST ================================ -13:28:45.0000""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +13:28:45.0000 +""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/cast/test_19.py b/tests/functional/intfunc/cast/test_19.py index 6cf22047..e273e671 100644 --- a/tests/functional/intfunc/cast/test_19.py +++ b/tests/functional/intfunc/cast/test_19.py @@ -1,44 +1,30 @@ #coding:utf-8 -# -# id: functional.intfunc.cast.19 -# title: CAST TIME -> VARCHAR -# decription: CAST TIME -> VARCHAR -# Be careful about date/time format on FB server ! -# Universal format is not defined or not documented. -# -# Dependencies: -# CREATE DATABASE -# CAST CHAR -> TIME -# Basic SELECT -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.intfunc.cast.cast_19 + +""" +ID: intfunc.cast-19 +TITLE: CAST TIME -> VARCHAR +DESCRIPTION: + Be careful about time format on FB server ! + Universal format is not defined or not documented. 
+FBTEST: functional.intfunc.cast.19 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +db = db_factory() -substitutions_1 = [] +act = isql_act('db', "SELECT CAST(CAST('13:28:45' AS TIME) AS VARCHAR(32)) FROM rdb$Database;") -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """SELECT CAST(CAST('13:28:45' AS TIME) AS VARCHAR(32)) FROM rdb$Database;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """CAST +expected_stdout = """ +CAST ================================ -13:28:45.0000""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +13:28:45.0000 +""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/cast/test_20.py b/tests/functional/intfunc/cast/test_20.py index af940681..0f775177 100644 --- a/tests/functional/intfunc/cast/test_20.py +++ b/tests/functional/intfunc/cast/test_20.py @@ -1,44 +1,30 @@ #coding:utf-8 -# -# id: functional.intfunc.cast.20 -# title: CAST TIMESTAMP -> CHAR -# decription: CAST TIMESTAMP -> CHAR -# Be careful about date/time format on FB server ! -# Universal format is not defined or not documented. -# -# Dependencies: -# CREATE DATABASE -# CAST CHAR -> TIMESTAMP -# Basic SELECT -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.intfunc.cast.cast_20 + +""" +ID: intfunc.cast-20 +TITLE: CAST TIMESTAMP -> CHAR +DESCRIPTION: + Be careful about date/time format on FB server ! + Universal format is not defined or not documented. +FBTEST: functional.intfunc.cast.20 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +db = db_factory() -substitutions_1 = [] +act = isql_act('db', "SELECT CAST(CAST('1.4.2002 0:59:59.1' AS TIMESTAMP) AS CHAR(50)) FROM rdb$Database;") -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """SELECT CAST(CAST('1.4.2002 0:59:59.1' AS TIMESTAMP) AS CHAR(50)) FROM rdb$Database;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """CAST +expected_stdout = """ +CAST ================================================== -2002-04-01 00:59:59.1000""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +2002-04-01 00:59:59.1000 +""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/cast/test_21.py b/tests/functional/intfunc/cast/test_21.py index e9ffa4be..8e0fdd37 100644 --- a/tests/functional/intfunc/cast/test_21.py +++ b/tests/functional/intfunc/cast/test_21.py @@ -1,44 +1,30 @@ #coding:utf-8 -# -# id: functional.intfunc.cast.21 -# title: CAST TIMESTAMP -> VARCHAR -# decription: CAST TIMESTAMP -> VARCHAR -# Be careful about date/time format on FB server ! -# Universal format is not defined or not documented. 
-# -# Dependencies: -# CREATE DATABASE -# CAST CHAR -> TIMESTAMP -# Basic SELECT -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.intfunc.cast.cast_21 + +""" +ID: intfunc.cast-21 +TITLE: CAST TIMESTAMP -> VARCHAR +DESCRIPTION: + Be careful about date/time format on FB server ! + Universal format is not defined or not documented. +FBTEST: functional.intfunc.cast.21 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +db = db_factory() -substitutions_1 = [] +act = isql_act('db', "SELECT CAST(CAST('1.4.2002 0:59:59.1' AS TIMESTAMP) AS VARCHAR(50)) FROM rdb$Database;") -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """SELECT CAST(CAST('1.4.2002 0:59:59.1' AS TIMESTAMP) AS VARCHAR(50)) FROM rdb$Database;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """CAST +expected_stdout = """ +CAST ================================================== -2002-04-01 00:59:59.1000""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +2002-04-01 00:59:59.1000 +""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/cast/test_22.py b/tests/functional/intfunc/cast/test_22.py index 34071dc1..7f006245 100644 --- a/tests/functional/intfunc/cast/test_22.py +++ b/tests/functional/intfunc/cast/test_22.py @@ -1,44 +1,30 @@ #coding:utf-8 -# -# id: functional.intfunc.cast.22 -# title: CAST TIMESTAMP -> DATE -# decription: CAST TIMESTAMP -> DATE -# Be careful about date/time format on FB server ! -# Universal format is not defined or not documented. -# -# Dependencies: -# CREATE DATABASE -# CAST CHAR -> TIMESTAMP -# Basic SELECT -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.intfunc.cast.cast_22 + +""" +ID: intfunc.cast-22 +TITLE: CAST TIMESTAMP -> DATE +DESCRIPTION: + Be careful about date/time format on FB server ! + Universal format is not defined or not documented. 
+FBTEST: functional.intfunc.cast.22 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +db = db_factory() -substitutions_1 = [] +act = isql_act('db', "SELECT CAST(CAST('1.4.2002 0:59:59.1' AS TIMESTAMP) AS DATE) FROM rdb$Database;") -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """SELECT CAST(CAST('1.4.2002 0:59:59.1' AS TIMESTAMP) AS DATE) FROM rdb$Database;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ CAST +expected_stdout = """ +CAST =========== -2002-04-01""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +2002-04-01 +""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/cast/test_23.py b/tests/functional/intfunc/cast/test_23.py index 79ad8f43..67cc749b 100644 --- a/tests/functional/intfunc/cast/test_23.py +++ b/tests/functional/intfunc/cast/test_23.py @@ -1,44 +1,30 @@ #coding:utf-8 -# -# id: functional.intfunc.cast.23 -# title: CAST TIMESTAMP -> TIME -# decription: CAST TIMESTAMP -> TIME -# Be careful about date/time format on FB server ! -# Universal format is not defined or not documented. -# -# Dependencies: -# CREATE DATABASE -# CAST CHAR -> TIMESTAMP -# Basic SELECT -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.intfunc.cast.cast_23 + +""" +ID: intfunc.cast-23 +TITLE: CAST TIMESTAMP -> TIME +DESCRIPTION: + Be careful about date/time format on FB server ! + Universal format is not defined or not documented. 
+FBTEST: functional.intfunc.cast.23 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +db = db_factory() -substitutions_1 = [] +act = isql_act('db', "SELECT CAST(CAST('1.4.2002 0:59:59.1' AS TIMESTAMP) AS TIME) FROM rdb$Database;") -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """SELECT CAST(CAST('1.4.2002 0:59:59.1' AS TIMESTAMP) AS TIME) FROM rdb$Database;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ CAST +expected_stdout = """ +CAST ============= -00:59:59.1000""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +00:59:59.1000 +""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/count/test_01.py b/tests/functional/intfunc/count/test_01.py index 3aa73c9f..3049d02a 100644 --- a/tests/functional/intfunc/count/test_01.py +++ b/tests/functional/intfunc/count/test_01.py @@ -1,42 +1,27 @@ #coding:utf-8 -# -# id: functional.intfunc.count.01 -# title: COUNT - empty -# decription: COUNT - Select from empty table -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# Basic SELECT -# tracker_id: -# min_versions: [] -# versions: 3.0 -# qmid: functional.intfunc.count.count_01 + +""" +ID: intfunc.count-01 +TITLE: COUNT - Select from empty table +DESCRIPTION: +FBTEST: functional.intfunc.count.01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory(init="CREATE TABLE test( id INTEGER);") -substitutions_1 = [] +act = isql_act('db', "SELECT COUNT(*) FROM test;") -init_script_1 = """CREATE TABLE test( id INTEGER);""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """SELECT COUNT(*) FROM test;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ COUNT +expected_stdout = """ +COUNT ===================== 0 """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/count/test_02.py b/tests/functional/intfunc/count/test_02.py index 23a407e6..8dd33c59 100644 --- a/tests/functional/intfunc/count/test_02.py +++ b/tests/functional/intfunc/count/test_02.py @@ -1,28 +1,16 @@ #coding:utf-8 -# -# id: functional.intfunc.count.02 -# title: COUNT -# decription: Count of Not Null values and count of rows and count of distinct values -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# INSERT -# Basic SELECT -# tracker_id: -# min_versions: [] -# versions: 3.0 -# qmid: functional.intfunc.count.count_02 + +""" +ID: intfunc.count-02 +TITLE: COUNT +DESCRIPTION: Count of Not Null values and count of rows and count of distinct values +FBTEST: functional.intfunc.count.02 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE test( id 
INTEGER); +init_script = """CREATE TABLE test( id INTEGER); INSERT INTO test VALUES(0); INSERT INTO test VALUES(0); INSERT INTO test VALUES(null); @@ -34,20 +22,17 @@ INSERT INTO test VALUES(1); INSERT INTO test VALUES(1); """ -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """SELECT COUNT(*), COUNT(ID), COUNT(DISTINCT ID) FROM test;""" +act = isql_act('db', "SELECT COUNT(*), COUNT(ID), COUNT(DISTINCT ID) FROM test;") -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ COUNT COUNT COUNT +expected_stdout = """ COUNT COUNT COUNT ===================== ===================== ===================== 9 6 2 """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/date/test_dateadd_01.py b/tests/functional/intfunc/date/test_dateadd_01.py index 7edb4a33..0ee20bc0 100644 --- a/tests/functional/intfunc/date/test_dateadd_01.py +++ b/tests/functional/intfunc/date/test_dateadd_01.py @@ -1,31 +1,24 @@ #coding:utf-8 -# -# id: functional.intfunc.date.dateadd_01 -# title: test de la fonction dateadd pour l'ajout d'un jour -# decription: Returns a date/time/timestamp value increased (or decreased, when negative) by the specified amount of time. -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.date.dateadd_01 + +""" +ID: intfunc.date.dateadd-01 +TITLE: DATEADD +DESCRIPTION: + Returns a date/time/timestamp value increased (or decreased, when negative) by the specified amount of time. +FBTEST: functional.intfunc.date.dateadd_01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """select dateadd(-1 day TO date '2008-02-06' ) as yesterday from rdb$database; +test_script = """select dateadd(-1 day TO date '2008-02-06' ) as yesterday from rdb$database; select dateadd(day,-1, date '2008-02-06' ) as yesterday from rdb$database;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ YESTERDAY =========== 2008-02-05 @@ -36,9 +29,8 @@ expected_stdout_1 = """ 2008-02-05 """ -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/date/test_dateadd_02.py b/tests/functional/intfunc/date/test_dateadd_02.py index da9fd87d..1142da7c 100644 --- a/tests/functional/intfunc/date/test_dateadd_02.py +++ b/tests/functional/intfunc/date/test_dateadd_02.py @@ -1,31 +1,24 @@ #coding:utf-8 -# -# id: functional.intfunc.date.dateadd_02 -# title: Test results of DATEADD function for MONTH -# decription: Returns a date/time/timestamp value increased (or decreased, when negative) by the specified amount of time. 
-# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: functional.intfunc.date.dateadd_02 + +""" +ID: intfunc.date.dateadd-02 +TITLE: DATEADD function for MONTH +DESCRIPTION: + Returns a date/time/timestamp value increased (or decreased, when negative) by the specified amount of time. +FBTEST: functional.intfunc.date.dateadd_02 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory() -substitutions_1 = [] +test_script = """ -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ - - -- doc\\sql.extensions\\README.builtin_functions.txt + -- doc\\sql.extensions\\README.builtin_functions.txt -- 5) When using YEAR or MONTH and the input day is greater than the maximum possible day in the -- result year/month, the result day is returned in the last day of the result year/month. - + set list on; select dateadd( 1 month to date '2004-01-31') as leap_jan_31_plus__01_month from rdb$database; @@ -57,9 +50,9 @@ test_script_1 = """ -- select dateadd(month,-1, date '2008-02-06' ) as yesterday from rdb$database; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ LEAP_JAN_31_PLUS__01_MONTH 2004-02-29 LEAP_FEB_28_PLUS__01_MONTH 2004-03-28 LEAP_FEB_29_PLUS__01_MONTH 2004-03-29 @@ -85,9 +78,8 @@ expected_stdout_1 = """ NONL_MAR_31_MINUS_01_MONTH 2003-02-28 """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/date/test_dateadd_03.py b/tests/functional/intfunc/date/test_dateadd_03.py index 7204cdee..79ea0f87 100644 --- a/tests/functional/intfunc/date/test_dateadd_03.py +++ b/tests/functional/intfunc/date/test_dateadd_03.py @@ -1,31 +1,24 @@ #coding:utf-8 -# -# id: functional.intfunc.date.dateadd_03 -# title: test de la fonction dateadd pour l'ajout d'une année -# decription: Returns a date/time/timestamp value increased (or decreased, when negative) by the specified amount of time. -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.date.dateadd_03 + +""" +ID: intfunc.date.dateadd-03 +TITLE: DATEADD +DESCRIPTION: + Returns a date/time/timestamp value increased (or decreased, when negative) by the specified amount of time. 
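The doc comment quoted in dateadd_02 above (README.builtin_functions.txt, item 5) is the interesting rule: when adding YEAR or MONTH pushes the day past the end of the result month, the day is clamped to the last day of that month. A small standard-library sketch of that clamping, cross-checked against two of the expected values above; the helper name is ours and is not part of firebird-qa:

import calendar
from datetime import date

def add_months(d: date, n: int) -> date:
    # Shift year/month, then clamp the day to the length of the target month,
    # which is the behaviour dateadd_02 verifies for DATEADD(... MONTH/YEAR ...).
    y, m = divmod(d.year * 12 + (d.month - 1) + n, 12)
    last_day = calendar.monthrange(y, m + 1)[1]
    return date(y, m + 1, min(d.day, last_day))

assert add_months(date(2004, 1, 31), 1) == date(2004, 2, 29)   # LEAP_JAN_31_PLUS__01_MONTH
assert add_months(date(2003, 3, 31), -1) == date(2003, 2, 28)  # NONL_MAR_31_MINUS_01_MONTH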
+FBTEST: functional.intfunc.date.dateadd_03 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """select dateadd(-1 year TO date '2008-02-06' ) as yesterday from rdb$database; +test_script = """select dateadd(-1 year TO date '2008-02-06' ) as yesterday from rdb$database; select dateadd(year,-1, date '2008-02-06' ) as yesterday from rdb$database;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ YESTERDAY =========== 2007-02-06 @@ -36,9 +29,8 @@ expected_stdout_1 = """ 2007-02-06 """ -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/date/test_dateadd_04.py b/tests/functional/intfunc/date/test_dateadd_04.py index 6bb03d61..82228ce5 100644 --- a/tests/functional/intfunc/date/test_dateadd_04.py +++ b/tests/functional/intfunc/date/test_dateadd_04.py @@ -1,31 +1,24 @@ #coding:utf-8 -# -# id: functional.intfunc.date.dateadd_04 -# title: test de la fonction dateadd pour l'ajout d'une semaine -# decription: Returns a date/time/timestamp value increased (or decreased, when negative) by the specified amount of time. -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.date.dateadd_04 + +""" +ID: intfunc.date.dateadd-04 +TITLE: DATEADD +DESCRIPTION: + Returns a date/time/timestamp value increased (or decreased, when negative) by the specified amount of time. 
+FBTEST: functional.intfunc.date.dateadd_04 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """select dateadd(-1 day TO timestamp '2008-02-06 10:10:00' ) as yesterday from rdb$database; +test_script = """select dateadd(-1 day TO timestamp '2008-02-06 10:10:00' ) as yesterday from rdb$database; select dateadd(day,-1, timestamp '2008-02-06 10:10:00' ) as yesterday from rdb$database;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ YESTERDAY ========================= 2008-02-05 10:10:00.0000 @@ -36,9 +29,8 @@ expected_stdout_1 = """ 2008-02-05 10:10:00.0000 """ -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/date/test_dateadd_05.py b/tests/functional/intfunc/date/test_dateadd_05.py index 271fa5b2..22861ed8 100644 --- a/tests/functional/intfunc/date/test_dateadd_05.py +++ b/tests/functional/intfunc/date/test_dateadd_05.py @@ -1,31 +1,24 @@ #coding:utf-8 -# -# id: functional.intfunc.date.dateadd_05 -# title: test de la fonction dateadd pour l'ajout d'une heure -# decription: Returns a date/time/timestamp value increased (or decreased, when negative) by the specified amount of time. -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.date.dateadd_05 + +""" +ID: intfunc.date.dateadd-05 +TITLE: DATEADD +DESCRIPTION: + Returns a date/time/timestamp value increased (or decreased, when negative) by the specified amount of time. 
+FBTEST: functional.intfunc.date.dateadd_05 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """select dateadd(-1 hour to time '12:12:00' ) as yesterday from rdb$database; +test_script = """select dateadd(-1 hour to time '12:12:00' ) as yesterday from rdb$database; select dateadd(hour,-1, time '12:12:00' ) as yesterday from rdb$database;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ YESTERDAY ============= 11:12:00.0000 @@ -36,9 +29,8 @@ expected_stdout_1 = """ 11:12:00.0000 """ -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/date/test_dateadd_06.py b/tests/functional/intfunc/date/test_dateadd_06.py index 861f95a0..e575e6c4 100644 --- a/tests/functional/intfunc/date/test_dateadd_06.py +++ b/tests/functional/intfunc/date/test_dateadd_06.py @@ -1,31 +1,24 @@ #coding:utf-8 -# -# id: functional.intfunc.date.dateadd_06 -# title: test de la fonction dateadd pour l'ajout de minute a une heure -# decription: Returns a date/time/timestamp value increased (or decreased, when negative) by the specified amount of time. -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.date.dateadd_06 + +""" +ID: intfunc.date.dateadd-06 +TITLE: DATEADD +DESCRIPTION: + Returns a date/time/timestamp value increased (or decreased, when negative) by the specified amount of time. 
+FBTEST: functional.intfunc.date.dateadd_06 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """select dateadd(-1 minute to time '12:12:00' ) as yesterday from rdb$database; +test_script = """select dateadd(-1 minute to time '12:12:00' ) as yesterday from rdb$database; select dateadd(minute,-1, time '12:12:00' ) as yesterday from rdb$database;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ YESTERDAY ============= 12:11:00.0000 @@ -36,9 +29,8 @@ expected_stdout_1 = """ 12:11:00.0000 """ -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/date/test_dateadd_07.py b/tests/functional/intfunc/date/test_dateadd_07.py index 55e9ef08..4c518c09 100644 --- a/tests/functional/intfunc/date/test_dateadd_07.py +++ b/tests/functional/intfunc/date/test_dateadd_07.py @@ -1,35 +1,29 @@ #coding:utf-8 -# -# id: functional.intfunc.date.dateadd_07 -# title: test de la fonction dateadd pour l'ajout d'une seconde -# decription: Returns a date/time/timestamp value increased (or decreased, when negative) by the specified amount of time. -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.date.dateadd_07 + +""" +ID: intfunc.date.dateadd-07 +TITLE: DATEADD +DESCRIPTION: + Returns a date/time/timestamp value increased (or decreased, when negative) by the specified amount of time. 
+FBTEST: functional.intfunc.date.dateadd_07 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +db = db_factory() -substitutions_1 = [('^((?!sqltype:|DD_).)*$', ''), ('[ \t]+', ' '), ('.*alias:.*', '')] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set list on; set sqlda_display on; select dateadd(-1 second to time '12:12:00' ) as tx_1 from rdb$database; select dateadd(second,-1, time '12:12:00' ) as tx_2 from rdb$database; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('^((?!sqltype:|DD_).)*$', ''), + ('[ \t]+', ' '), ('.*alias:.*', '')]) -expected_stdout_1 = """ +expected_stdout = """ 01: sqltype: 560 TIME scale: 0 subtype: 0 len: 4 TX_1 12:11:59.0000 @@ -37,9 +31,8 @@ expected_stdout_1 = """ TX_2 12:11:59.0000 """ -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/date/test_dateadd_08.py b/tests/functional/intfunc/date/test_dateadd_08.py index ea655d0a..2833fbfb 100644 --- a/tests/functional/intfunc/date/test_dateadd_08.py +++ b/tests/functional/intfunc/date/test_dateadd_08.py @@ -1,31 +1,25 @@ #coding:utf-8 -# -# id: functional.intfunc.date.dateadd_08 -# title: Dateadd milliseconds -# decription: -# tracker_id: CORE-1387 -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.date.dateadd_08 + +""" +ID: intfunc.date.dateadd-08 +ISSUE: 1805 +TITLE: Dateadd milliseconds +DESCRIPTION: +JIRA: CORE-1387 +FBTEST: functional.intfunc.date.dateadd_08 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """select dateadd(-1 millisecond to time '12:12:00:0000' ) as test from rdb$database; +test_script = """select dateadd(-1 millisecond to time '12:12:00:0000' ) as test from rdb$database; select dateadd(millisecond,-1, time '12:12:00:0000' ) as test from rdb$database;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ TEST ============= 12:11:59.9990 @@ -36,9 +30,8 @@ expected_stdout_1 = """ 12:11:59.9990 """ -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/date/test_datediff_01.py b/tests/functional/intfunc/date/test_datediff_01.py index 060075c8..ba7326e3 100644 --- a/tests/functional/intfunc/date/test_datediff_01.py +++ b/tests/functional/intfunc/date/test_datediff_01.py @@ -1,31 +1,24 @@ #coding:utf-8 -# -# id: functional.intfunc.date.datediff_01 -# title: test de la fonction datediff pour avoir un resultat en seconde -# decription: Returns an exact numeric value 
representing the interval of time from the first date/time/timestamp value to the second one. -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.date.datediff_01 + +""" +ID: intfunc.date.datediff-01 +TITLE: DATEDIFF +DESCRIPTION: + Returns an exact numeric value representing the interval of time from the first date/time/timestamp value to the second one. +FBTEST: functional.intfunc.date.datediff_01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """select datediff(SECOND,cast( '12/02/2008 13:33:33' as timestamp),cast( '12/02/2008 13:33:35' as timestamp)) from rdb$database; +test_script = """select datediff(SECOND,cast( '12/02/2008 13:33:33' as timestamp),cast( '12/02/2008 13:33:35' as timestamp)) from rdb$database; select datediff(SECOND FROM cast( '12/02/2008 13:33:33' as timestamp) TO cast( '12/02/2008 13:33:35' as timestamp)) from rdb$database;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ DATEDIFF ===================== 2 @@ -36,9 +29,8 @@ expected_stdout_1 = """ 2 """ -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/date/test_datediff_02.py b/tests/functional/intfunc/date/test_datediff_02.py index b5d145f5..da50fcff 100644 --- a/tests/functional/intfunc/date/test_datediff_02.py +++ b/tests/functional/intfunc/date/test_datediff_02.py @@ -1,31 +1,24 @@ #coding:utf-8 -# -# id: functional.intfunc.date.datediff_02 -# title: test de la fonction datediff pour avoir le resultat en minute -# decription: Returns an exact numeric value representing the interval of time from the first date/time/timestamp value to the second one. -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.date.datediff_02 + +""" +ID: intfunc.date.datediff-02 +TITLE: DATEDIFF +DESCRIPTION: + Returns an exact numeric value representing the interval of time from the first date/time/timestamp value to the second one. 
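datediff_01 above checks both syntaxes of DATEDIFF(SECOND ...) against the same pair of timestamps and expects 2. The figure can be cross-checked with plain datetime arithmetic (standard library only; the literals are copied from the test script, and only their difference matters here, so the month/day order of the string is irrelevant):

from datetime import datetime

fmt = "%m/%d/%Y %H:%M:%S"
t1 = datetime.strptime("12/02/2008 13:33:33", fmt)
t2 = datetime.strptime("12/02/2008 13:33:35", fmt)

# DATEDIFF(SECOND, t1, t2) counts the seconds between the two values.
assert (t2 - t1).total_seconds() == 2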
+FBTEST: functional.intfunc.date.datediff_02 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """select datediff(MINUTE,cast( '12/02/2008 13:33:33' as timestamp),cast( '12/02/2008 13:34:35' as timestamp)) from rdb$database; +test_script = """select datediff(MINUTE,cast( '12/02/2008 13:33:33' as timestamp),cast( '12/02/2008 13:34:35' as timestamp)) from rdb$database; select datediff(MINUTE FROM cast( '12/02/2008 13:33:33' as timestamp) TO cast( '12/02/2008 13:34:35' as timestamp)) from rdb$database;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ DATEDIFF ===================== 1 @@ -36,9 +29,8 @@ expected_stdout_1 = """ 1 """ -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/date/test_datediff_03.py b/tests/functional/intfunc/date/test_datediff_03.py index d84b8dcc..0d3d9972 100644 --- a/tests/functional/intfunc/date/test_datediff_03.py +++ b/tests/functional/intfunc/date/test_datediff_03.py @@ -1,31 +1,24 @@ #coding:utf-8 -# -# id: functional.intfunc.date.datediff_03 -# title: test de la fonction datediff pour avoir le resultat en heure -# decription: Returns an exact numeric value representing the interval of time from the first date/time/timestamp value to the second one. -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.date.datediff_03 + +""" +ID: intfunc.date.datediff-03 +TITLE: DATEDIFF +DESCRIPTION: + Returns an exact numeric value representing the interval of time from the first date/time/timestamp value to the second one. 
+FBTEST: functional.intfunc.date.datediff_03 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """select datediff(HOUR,cast( '12/02/2008 13:33:33' as timestamp),cast( '12/02/2008 14:34:35' as timestamp)) from rdb$database; +test_script = """select datediff(HOUR,cast( '12/02/2008 13:33:33' as timestamp),cast( '12/02/2008 14:34:35' as timestamp)) from rdb$database; select datediff(HOUR FROM cast( '12/02/2008 13:33:33' as timestamp) TO cast( '12/02/2008 14:34:35' as timestamp)) from rdb$database;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ DATEDIFF ===================== 1 @@ -36,9 +29,8 @@ expected_stdout_1 = """ 1 """ -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/date/test_datediff_04.py b/tests/functional/intfunc/date/test_datediff_04.py index 928f5c99..0132f43b 100644 --- a/tests/functional/intfunc/date/test_datediff_04.py +++ b/tests/functional/intfunc/date/test_datediff_04.py @@ -1,31 +1,24 @@ #coding:utf-8 -# -# id: functional.intfunc.date.datediff_04 -# title: test de la fonction datediff pour avoir le resultat en annee -# decription: Returns an exact numeric value representing the interval of time from the first date/time/timestamp value to the second one. -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.date.datediff_04 + +""" +ID: intfunc.date.datediff-04 +TITLE: DATEDIFF +DESCRIPTION: + Returns an exact numeric value representing the interval of time from the first date/time/timestamp value to the second one. 
+FBTEST: functional.intfunc.date.datediff_04 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """select datediff(YEAR,cast( '12/02/2008 13:33:33' as timestamp),cast( '12/02/2009 13:34:35' as timestamp)) from rdb$database; +test_script = """select datediff(YEAR,cast( '12/02/2008 13:33:33' as timestamp),cast( '12/02/2009 13:34:35' as timestamp)) from rdb$database; select datediff(YEAR FROM cast( '12/02/2008 13:33:33' as timestamp) TO cast( '12/02/2009 13:34:35' as timestamp)) from rdb$database;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ DATEDIFF ===================== 1 @@ -36,9 +29,8 @@ expected_stdout_1 = """ 1 """ -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/date/test_datediff_05.py b/tests/functional/intfunc/date/test_datediff_05.py index 1fcb83f0..efc94b52 100644 --- a/tests/functional/intfunc/date/test_datediff_05.py +++ b/tests/functional/intfunc/date/test_datediff_05.py @@ -1,31 +1,24 @@ #coding:utf-8 -# -# id: functional.intfunc.date.datediff_05 -# title: test de la fonction datediff pour avoir le resultat en mois -# decription: Returns an exact numeric value representing the interval of time from the first date/time/timestamp value to the second one. -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.date.datediff_05 + +""" +ID: intfunc.date.datediff-05 +TITLE: DATEDIFF +DESCRIPTION: + Returns an exact numeric value representing the interval of time from the first date/time/timestamp value to the second one. 
+FBTEST: functional.intfunc.date.datediff_05 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """select datediff(MONTH,cast( '12/02/2008 13:33:33' as timestamp),cast( '12/02/2009 13:34:35' as timestamp)) from rdb$database; +test_script = """select datediff(MONTH,cast( '12/02/2008 13:33:33' as timestamp),cast( '12/02/2009 13:34:35' as timestamp)) from rdb$database; select datediff(MONTH FROM cast( '12/02/2008 13:33:33' as timestamp) TO cast( '12/02/2009 13:34:35' as timestamp)) from rdb$database;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ DATEDIFF ===================== 12 @@ -36,9 +29,8 @@ expected_stdout_1 = """ 12 """ -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/date/test_datediff_06.py b/tests/functional/intfunc/date/test_datediff_06.py index 337e62ef..6bde3b2e 100644 --- a/tests/functional/intfunc/date/test_datediff_06.py +++ b/tests/functional/intfunc/date/test_datediff_06.py @@ -1,31 +1,24 @@ #coding:utf-8 -# -# id: functional.intfunc.date.datediff_06 -# title: test de la fonction datediff pour avoir le resultat en jour -# decription: Returns an exact numeric value representing the interval of time from the first date/time/timestamp value to the second one. -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.date.datediff_06 + +""" +ID: intfunc.date.datediff-06 +TITLE: DATEDIFF +DESCRIPTION: + Returns an exact numeric value representing the interval of time from the first date/time/timestamp value to the second one. 
+FBTEST: functional.intfunc.date.datediff_06 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """select datediff(DAY,cast( '12/02/2008 13:33:33' as timestamp),cast( '12/02/2009 13:34:35' as timestamp)) from rdb$database; +test_script = """select datediff(DAY,cast( '12/02/2008 13:33:33' as timestamp),cast( '12/02/2009 13:34:35' as timestamp)) from rdb$database; select datediff(DAY FROM cast( '12/02/2008 13:33:33' as timestamp) TO cast( '12/02/2009 13:34:35' as timestamp)) from rdb$database;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ DATEDIFF ===================== 365 @@ -36,9 +29,8 @@ expected_stdout_1 = """ 365 """ -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/date/test_datediff_07.py b/tests/functional/intfunc/date/test_datediff_07.py index c8bc32ad..f1895cef 100644 --- a/tests/functional/intfunc/date/test_datediff_07.py +++ b/tests/functional/intfunc/date/test_datediff_07.py @@ -1,26 +1,19 @@ #coding:utf-8 -# -# id: functional.intfunc.date.datediff_07 -# title: test de la fonction datediff pour avoir le resultat en minute -# decription: Returns an exact numeric value representing the interval of time from the first date/time/timestamp value to the second one. -# tracker_id: -# min_versions: [] -# versions: 3.0 -# qmid: functional.intfunc.date.datediff_07 + +""" +ID: intfunc.date.datediff-07 +TITLE: DATEDIFF +DESCRIPTION: + Returns an exact numeric value representing the interval of time from the first date/time/timestamp value to the second one. 
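datediff_07 here moves from whole units to MILLISECOND precision; its expected output just below includes 86399999.8 for the time-only pair. The same figure falls out of ordinary datetime arithmetic (standard library; the date part is arbitrary because only the time of day differs):

from datetime import datetime

lo = datetime(2000, 1, 1, 0, 0, 0, 100)        # 00:00:00.0001
hi = datetime(2000, 1, 1, 23, 59, 59, 999900)  # 23:59:59.9999

# DATEDIFF(MILLISECOND FROM lo TO hi) for the time-only pair -> 86399999.8
assert round((hi - lo).total_seconds() * 1000, 1) == 86399999.8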
+FBTEST: functional.intfunc.date.datediff_07 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set list on; set list on; select datediff( millisecond, @@ -43,12 +36,12 @@ test_script_1 = """ from cast( '00:00:00.0001' as time) to cast( '23:59:59.9999' as time) ) as dd_02b from rdb$database; - + """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ DD_01A 315537897599999.8 DD_01B 86399999.8 DD_02A 315537897599999.8 @@ -56,8 +49,7 @@ expected_stdout_1 = """ """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/date/test_extract_01.py b/tests/functional/intfunc/date/test_extract_01.py index 1d349278..0204b4b9 100644 --- a/tests/functional/intfunc/date/test_extract_01.py +++ b/tests/functional/intfunc/date/test_extract_01.py @@ -1,38 +1,29 @@ #coding:utf-8 -# -# id: functional.intfunc.date.extract_01 -# title: Test the extract week function -# decription: -# tracker_id: CORE-663 -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.date.extract_01 + +""" +ID: intfunc.date.extract-01 +ISSUE: 1029 +TITLE: EXTRACT(WEEK FROM DATE) +DESCRIPTION: Test the extract week function +JIRA: CORE-663 +FBTEST: functional.intfunc.date.extract_01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +db = db_factory() -substitutions_1 = [] +act = isql_act('db', "select extract(week from date '30.12.2008'), extract(week from date '30.12.2009') from rdb$database;") -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """select extract(week from date '30.12.2008'), extract(week from date '30.12.2009') from rdb$database;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ +expected_stdout = """ EXTRACT EXTRACT ======= ======= 1 53 """ -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/date/test_extract_02.py b/tests/functional/intfunc/date/test_extract_02.py index eab5b594..8b35c23a 100644 --- a/tests/functional/intfunc/date/test_extract_02.py +++ b/tests/functional/intfunc/date/test_extract_02.py @@ -1,31 +1,25 @@ #coding:utf-8 -# -# id: functional.intfunc.date.extract_02 -# title: test extract function with miliseconds -# decription: -# tracker_id: CORE-1387 -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.date.extract_02 + +""" +ID: intfunc.date.extract-02 +ISSUE: 1805 +TITLE: EXTRACT - MILLISECONDS +DESCRIPTION: Test the extract function with miliseconds +JIRA: CORE-1387 +FBTEST: functional.intfunc.date.extract_02 +""" import 
pytest from firebird.qa import db_factory, isql_act, Action -# version: 2.1 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """select extract(millisecond from time '12:12:00.1111' ) as test from rdb$database; +test_script = """select extract(millisecond from time '12:12:00.1111' ) as test from rdb$database; select extract(millisecond from timestamp '2008-12-08 12:12:00.1111' ) as test from rdb$database;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ TEST ============ 111.1 @@ -36,9 +30,8 @@ expected_stdout_1 = """ 111.1 """ -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/encryption/test_block_cipher_basic.py b/tests/functional/intfunc/encryption/test_block_cipher_basic.py index 2e49b098..d948dea0 100644 --- a/tests/functional/intfunc/encryption/test_block_cipher_basic.py +++ b/tests/functional/intfunc/encryption/test_block_cipher_basic.py @@ -1,32 +1,20 @@ #coding:utf-8 -# -# id: functional.intfunc.encryption.block_cipher_basic -# title: -# Verify block crypto algorithms that are implemented in ENCRYPT/DECRYPT built-in functions. -# See doc\\sql.extensions\\README.builtin_functions.txt for details. -# -# Checked on 4.0.0.1691: OK, 1.561s. -# -# decription: -# -# tracker_id: -# min_versions: ['4.0'] -# versions: 4.0 -# qmid: None + +""" +ID: intfunc.encryption.block-cipher-basic +TITLE: ENCRYPT/DECRYPT built-in functions +DESCRIPTION: + Verify block crypto algorithms that are implemented in ENCRYPT/DECRYPT built-in functions. + See doc/sql.extensions/README.builtin_functions.txt for details. 
+FBTEST: functional.intfunc.encryption.block_cipher_basic +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set list on; set blob all; @@ -56,7 +44,7 @@ test_script_1 = """ 'cfb', lpad('', cast(rdb$get_context('USER_SESSION', 'DATA_LEN') as int),uuid_to_char(gen_uuid()) ), '0101010101010101', - lpad('',16, uuid_to_char( gen_uuid() )) + lpad('',16, uuid_to_char( gen_uuid() )) ); @@ -71,7 +59,7 @@ test_script_1 = """ 'ctr', lpad('', cast(rdb$get_context('USER_SESSION', 'DATA_LEN') as int),uuid_to_char(gen_uuid()) ), lpad('',16, replace(uuid_to_char(gen_uuid()),'-','') ), - lpad('',16, uuid_to_char( gen_uuid() )) + lpad('',16, uuid_to_char( gen_uuid() )) ); @@ -102,7 +90,7 @@ test_script_1 = """ 'ofb', lpad('', cast(rdb$get_context('USER_SESSION', 'DATA_LEN') as int),uuid_to_char(gen_uuid()) ), lpad('',16, replace(uuid_to_char(gen_uuid()),'-','') ), - lpad('',16, uuid_to_char( gen_uuid() )) + lpad('',16, uuid_to_char( gen_uuid() )) ); ------------------------------------------------------ @@ -117,7 +105,7 @@ test_script_1 = """ from test_char where crypto_alg = 'AES' ; - + insert into test_char select @@ -141,7 +129,7 @@ test_script_1 = """ from test_char where crypto_alg = 'AES' ; - + insert into test_char select @@ -153,7 +141,7 @@ test_script_1 = """ from test_char where crypto_alg = 'AES' ; - + insert into test_char select @@ -165,7 +153,7 @@ test_script_1 = """ from test_char where crypto_alg = 'AES' ; - + insert into test_char select @@ -229,7 +217,7 @@ test_script_1 = """ do begin v_encrypt_sttm = 'select encrypt( q''{' || c.source_text || '}'' using ' || c.crypto_alg || coalesce( ' mode ' || c.mode , '' ) || ' key q''{' || c.crypto_key || '}''' || coalesce(' iv q''{' || c.crypto_iv || '}'' ', '') || ') from rdb$database'; execute statement v_encrypt_sttm into v_encrypted; - + --v_decrypt_sttm = 'select decrypt( q''{' || v_encrypted || '}'' using ' || c.crypto_alg || coalesce( ' mode ' || c.mode , '' ) || ' key q''{' || c.crypto_key || '}''' || coalesce(' iv q''{' || c.crypto_iv || '}'' ', '') || ') from rdb$database'; --v_decrypt_sttm = 'select decrypt( x''' || v_encrypted || ''' using ' || c.crypto_alg || coalesce( ' mode ' || c.mode , '' ) || ' key q''{' || c.crypto_key || '}''' || coalesce(' iv q''{' || c.crypto_iv || '}'' ', '') || ') from rdb$database'; v_decrypt_sttm = 'select decrypt( cast(? as varbinary(32700)) using ' || c.crypto_alg || coalesce( ' mode ' || c.mode , '' ) || ' key q''{' || c.crypto_key || '}''' || coalesce(' iv q''{' || c.crypto_iv || '}'' ', '') || ') from rdb$database'; @@ -250,233 +238,232 @@ test_script_1 = """ set term ;^ commit; - select * from sp_char_block_test; + select * from sp_char_block_test; commit; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ CRYPTO_ALG AES MODE CFB - RESULT_MSG Source and decrypted strings are identical. + RESULT_MSG Source and decrypted strings are identical. SRC_TEXT DECRYPTED_TEXT CRYPTO_ALG AES MODE CTR - RESULT_MSG Source and decrypted strings are identical. + RESULT_MSG Source and decrypted strings are identical. SRC_TEXT DECRYPTED_TEXT CRYPTO_ALG AES MODE ECB - RESULT_MSG Source and decrypted strings are identical. 
+ RESULT_MSG Source and decrypted strings are identical. SRC_TEXT DECRYPTED_TEXT CRYPTO_ALG AES MODE OFB - RESULT_MSG Source and decrypted strings are identical. + RESULT_MSG Source and decrypted strings are identical. SRC_TEXT DECRYPTED_TEXT CRYPTO_ALG ANUBIS MODE CFB - RESULT_MSG Source and decrypted strings are identical. + RESULT_MSG Source and decrypted strings are identical. SRC_TEXT DECRYPTED_TEXT CRYPTO_ALG ANUBIS MODE CTR - RESULT_MSG Source and decrypted strings are identical. + RESULT_MSG Source and decrypted strings are identical. SRC_TEXT DECRYPTED_TEXT CRYPTO_ALG ANUBIS MODE ECB - RESULT_MSG Source and decrypted strings are identical. + RESULT_MSG Source and decrypted strings are identical. SRC_TEXT DECRYPTED_TEXT CRYPTO_ALG ANUBIS MODE OFB - RESULT_MSG Source and decrypted strings are identical. + RESULT_MSG Source and decrypted strings are identical. SRC_TEXT DECRYPTED_TEXT CRYPTO_ALG BLOWFISH MODE CFB - RESULT_MSG Source and decrypted strings are identical. + RESULT_MSG Source and decrypted strings are identical. SRC_TEXT DECRYPTED_TEXT CRYPTO_ALG BLOWFISH MODE CTR - RESULT_MSG Source and decrypted strings are identical. + RESULT_MSG Source and decrypted strings are identical. SRC_TEXT DECRYPTED_TEXT CRYPTO_ALG BLOWFISH MODE ECB - RESULT_MSG Source and decrypted strings are identical. + RESULT_MSG Source and decrypted strings are identical. SRC_TEXT DECRYPTED_TEXT CRYPTO_ALG BLOWFISH MODE OFB - RESULT_MSG Source and decrypted strings are identical. + RESULT_MSG Source and decrypted strings are identical. SRC_TEXT DECRYPTED_TEXT CRYPTO_ALG KHAZAD MODE CFB - RESULT_MSG Source and decrypted strings are identical. + RESULT_MSG Source and decrypted strings are identical. SRC_TEXT DECRYPTED_TEXT CRYPTO_ALG KHAZAD MODE CTR - RESULT_MSG Source and decrypted strings are identical. + RESULT_MSG Source and decrypted strings are identical. SRC_TEXT DECRYPTED_TEXT CRYPTO_ALG KHAZAD MODE ECB - RESULT_MSG Source and decrypted strings are identical. + RESULT_MSG Source and decrypted strings are identical. SRC_TEXT DECRYPTED_TEXT CRYPTO_ALG KHAZAD MODE OFB - RESULT_MSG Source and decrypted strings are identical. + RESULT_MSG Source and decrypted strings are identical. SRC_TEXT DECRYPTED_TEXT CRYPTO_ALG RC5 MODE CFB - RESULT_MSG Source and decrypted strings are identical. + RESULT_MSG Source and decrypted strings are identical. SRC_TEXT DECRYPTED_TEXT CRYPTO_ALG RC5 MODE CTR - RESULT_MSG Source and decrypted strings are identical. + RESULT_MSG Source and decrypted strings are identical. SRC_TEXT DECRYPTED_TEXT CRYPTO_ALG RC5 MODE ECB - RESULT_MSG Source and decrypted strings are identical. + RESULT_MSG Source and decrypted strings are identical. SRC_TEXT DECRYPTED_TEXT CRYPTO_ALG RC5 MODE OFB - RESULT_MSG Source and decrypted strings are identical. + RESULT_MSG Source and decrypted strings are identical. SRC_TEXT DECRYPTED_TEXT CRYPTO_ALG RC6 MODE CFB - RESULT_MSG Source and decrypted strings are identical. + RESULT_MSG Source and decrypted strings are identical. SRC_TEXT DECRYPTED_TEXT CRYPTO_ALG RC6 MODE CTR - RESULT_MSG Source and decrypted strings are identical. + RESULT_MSG Source and decrypted strings are identical. SRC_TEXT DECRYPTED_TEXT CRYPTO_ALG RC6 MODE ECB - RESULT_MSG Source and decrypted strings are identical. + RESULT_MSG Source and decrypted strings are identical. SRC_TEXT DECRYPTED_TEXT CRYPTO_ALG RC6 MODE OFB - RESULT_MSG Source and decrypted strings are identical. + RESULT_MSG Source and decrypted strings are identical. 
SRC_TEXT DECRYPTED_TEXT CRYPTO_ALG "SAFER+" MODE CFB - RESULT_MSG Source and decrypted strings are identical. + RESULT_MSG Source and decrypted strings are identical. SRC_TEXT DECRYPTED_TEXT CRYPTO_ALG "SAFER+" MODE CTR - RESULT_MSG Source and decrypted strings are identical. + RESULT_MSG Source and decrypted strings are identical. SRC_TEXT DECRYPTED_TEXT CRYPTO_ALG "SAFER+" MODE ECB - RESULT_MSG Source and decrypted strings are identical. + RESULT_MSG Source and decrypted strings are identical. SRC_TEXT DECRYPTED_TEXT CRYPTO_ALG "SAFER+" MODE OFB - RESULT_MSG Source and decrypted strings are identical. + RESULT_MSG Source and decrypted strings are identical. SRC_TEXT DECRYPTED_TEXT CRYPTO_ALG TWOFISH MODE CFB - RESULT_MSG Source and decrypted strings are identical. + RESULT_MSG Source and decrypted strings are identical. SRC_TEXT DECRYPTED_TEXT CRYPTO_ALG TWOFISH MODE CTR - RESULT_MSG Source and decrypted strings are identical. + RESULT_MSG Source and decrypted strings are identical. SRC_TEXT DECRYPTED_TEXT CRYPTO_ALG TWOFISH MODE ECB - RESULT_MSG Source and decrypted strings are identical. + RESULT_MSG Source and decrypted strings are identical. SRC_TEXT DECRYPTED_TEXT CRYPTO_ALG TWOFISH MODE OFB - RESULT_MSG Source and decrypted strings are identical. + RESULT_MSG Source and decrypted strings are identical. SRC_TEXT DECRYPTED_TEXT CRYPTO_ALG XTEA MODE CFB - RESULT_MSG Source and decrypted strings are identical. + RESULT_MSG Source and decrypted strings are identical. SRC_TEXT DECRYPTED_TEXT CRYPTO_ALG XTEA MODE CTR - RESULT_MSG Source and decrypted strings are identical. + RESULT_MSG Source and decrypted strings are identical. SRC_TEXT DECRYPTED_TEXT CRYPTO_ALG XTEA MODE ECB - RESULT_MSG Source and decrypted strings are identical. + RESULT_MSG Source and decrypted strings are identical. SRC_TEXT DECRYPTED_TEXT CRYPTO_ALG XTEA MODE OFB - RESULT_MSG Source and decrypted strings are identical. + RESULT_MSG Source and decrypted strings are identical. SRC_TEXT DECRYPTED_TEXT """ @pytest.mark.version('>=4.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/encryption/test_block_cipher_special.py b/tests/functional/intfunc/encryption/test_block_cipher_special.py index 7c2cca5e..07bb7cdd 100644 --- a/tests/functional/intfunc/encryption/test_block_cipher_special.py +++ b/tests/functional/intfunc/encryption/test_block_cipher_special.py @@ -1,33 +1,21 @@ #coding:utf-8 -# -# id: functional.intfunc.encryption.block_cipher_special -# title: -# Verify block crypto algorithms that are implemented in ENCRYPT/DECRYPT built-in functions. -# Additional tests for key length = 192 and 256 bits. -# See doc\\sql.extensions\\README.builtin_functions.txt for details. -# -# Checked on 4.0.0.1691: OK, 1.343s. -# -# decription: -# -# tracker_id: -# min_versions: ['4.0'] -# versions: 4.0 -# qmid: None + +""" +ID: intfunc.encryption.block-cipher-special +TITLE: ENCRYPT/DECRYPT built-in functions +DESCRIPTION: + Verify block crypto algorithms that are implemented in ENCRYPT/DECRYPT built-in functions. + Additional tests for key length = 192 and 256 bits. + See doc/sql.extensions/README.builtin_functions.txt for details. 
+FBTEST: functional.intfunc.encryption.block_cipher_special +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set list on; create or alter procedure sp_char_block_test(a_alg varchar(30)) as begin end; commit; @@ -43,7 +31,7 @@ test_script_1 = """ ^ set term ;^ - + --############################ AES mode OFB ########################## insert into test_char( crypto_alg, @@ -56,7 +44,7 @@ test_script_1 = """ 'ofb', lpad('', cast(rdb$get_context('USER_SESSION', 'DATA_LEN') as int),uuid_to_char(gen_uuid()) ), lpad('', 24, '01'), -- 192 bits - lpad('',16, uuid_to_char( gen_uuid() )) + lpad('',16, uuid_to_char( gen_uuid() )) ); insert into test_char( @@ -70,7 +58,7 @@ test_script_1 = """ 'ofb', lpad('', cast(rdb$get_context('USER_SESSION', 'DATA_LEN') as int),uuid_to_char(gen_uuid()) ), lpad('', 32, '01'), -- 256 bits - lpad('',16, uuid_to_char( gen_uuid() )) + lpad('',16, uuid_to_char( gen_uuid() )) ); @@ -87,7 +75,7 @@ test_script_1 = """ 'cfb', lpad('', cast(rdb$get_context('USER_SESSION', 'DATA_LEN') as int),uuid_to_char(gen_uuid()) ), lpad('', 24, '01'), -- 192 bits - lpad('',16, uuid_to_char( gen_uuid() )) + lpad('',16, uuid_to_char( gen_uuid() )) ); insert into test_char( @@ -101,7 +89,7 @@ test_script_1 = """ 'cfb', lpad('', cast(rdb$get_context('USER_SESSION', 'DATA_LEN') as int),uuid_to_char(gen_uuid()) ), lpad('', 32, '01'), -- 256 bits - lpad('',16, uuid_to_char( gen_uuid() )) + lpad('',16, uuid_to_char( gen_uuid() )) ); @@ -117,7 +105,7 @@ test_script_1 = """ 'ctr', lpad('', cast(rdb$get_context('USER_SESSION', 'DATA_LEN') as int),uuid_to_char(gen_uuid()) ), lpad('', 24, '01'), -- 192 bits - lpad('',16, uuid_to_char( gen_uuid() )) + lpad('',16, uuid_to_char( gen_uuid() )) ); insert into test_char( @@ -131,7 +119,7 @@ test_script_1 = """ 'ctr', lpad('', cast(rdb$get_context('USER_SESSION', 'DATA_LEN') as int),uuid_to_char(gen_uuid()) ), lpad('', 32, '01'), -- 256 bits - lpad('',16, uuid_to_char( gen_uuid() )) + lpad('',16, uuid_to_char( gen_uuid() )) ); @@ -173,7 +161,7 @@ test_script_1 = """ 'cbc', lpad('', cast(rdb$get_context('USER_SESSION', 'DATA_LEN') as int),uuid_to_char(gen_uuid()) ), lpad('', 24, '01'), -- 192 bits - lpad('', 16, uuid_to_char( gen_uuid() )) + lpad('', 16, uuid_to_char( gen_uuid() )) ); insert into test_char( @@ -230,7 +218,7 @@ select encrypt( lpad('', 16, 'A') using aes mode cbc key '1234567890123456789012 do begin v_encrypt_sttm = 'select encrypt( q''{' || c.source_text || '}'' using ' || c.crypto_alg || coalesce( ' mode ' || c.mode , '' ) || ' key q''{' || c.crypto_key || '}''' || coalesce(' iv q''{' || c.crypto_iv || '}'' ', '') || ') from rdb$database'; execute statement v_encrypt_sttm into v_encrypted; - + v_decrypt_sttm = 'select decrypt( cast(? 
as varbinary(32700)) using ' || c.crypto_alg || coalesce( ' mode ' || c.mode , '' ) || ' key q''{' || c.crypto_key || '}''' || coalesce(' iv q''{' || c.crypto_iv || '}'' ', '') || ') from rdb$database'; execute statement ( v_decrypt_sttm ) ( v_encrypted ) into v_decrypted; @@ -249,77 +237,76 @@ select encrypt( lpad('', 16, 'A') using aes mode cbc key '1234567890123456789012 set term ;^ commit; - select * from sp_char_block_test; + select * from sp_char_block_test; commit; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ CRYPTO_ALG AES MODE OFB - RESULT_MSG Source and decrypted strings are identical. + RESULT_MSG Source and decrypted strings are identical. SRC_TEXT DECRYPTED_TEXT CRYPTO_ALG AES MODE OFB - RESULT_MSG Source and decrypted strings are identical. + RESULT_MSG Source and decrypted strings are identical. SRC_TEXT DECRYPTED_TEXT CRYPTO_ALG AES MODE CFB - RESULT_MSG Source and decrypted strings are identical. + RESULT_MSG Source and decrypted strings are identical. SRC_TEXT DECRYPTED_TEXT CRYPTO_ALG AES MODE CFB - RESULT_MSG Source and decrypted strings are identical. + RESULT_MSG Source and decrypted strings are identical. SRC_TEXT DECRYPTED_TEXT CRYPTO_ALG AES MODE CTR - RESULT_MSG Source and decrypted strings are identical. + RESULT_MSG Source and decrypted strings are identical. SRC_TEXT DECRYPTED_TEXT CRYPTO_ALG AES MODE CTR - RESULT_MSG Source and decrypted strings are identical. + RESULT_MSG Source and decrypted strings are identical. SRC_TEXT DECRYPTED_TEXT CRYPTO_ALG AES MODE ECB - RESULT_MSG Source and decrypted strings are identical. + RESULT_MSG Source and decrypted strings are identical. SRC_TEXT DECRYPTED_TEXT CRYPTO_ALG AES MODE ECB - RESULT_MSG Source and decrypted strings are identical. + RESULT_MSG Source and decrypted strings are identical. SRC_TEXT DECRYPTED_TEXT CRYPTO_ALG AES MODE CBC - RESULT_MSG Source and decrypted strings are identical. + RESULT_MSG Source and decrypted strings are identical. SRC_TEXT DECRYPTED_TEXT CRYPTO_ALG AES MODE CBC - RESULT_MSG Source and decrypted strings are identical. + RESULT_MSG Source and decrypted strings are identical. SRC_TEXT DECRYPTED_TEXT """ @pytest.mark.version('>=4.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/encryption/test_rsa_family.py b/tests/functional/intfunc/encryption/test_rsa_family.py index 2da5dc91..8b474217 100644 --- a/tests/functional/intfunc/encryption/test_rsa_family.py +++ b/tests/functional/intfunc/encryption/test_rsa_family.py @@ -1,50 +1,45 @@ #coding:utf-8 -# -# id: functional.intfunc.encryption.rsa_family -# title: Basic test for RSA-family: rsa_private(), rsa_public(), rsa_encrypt(), rsa_decrypt(), rsa_sign_hash() and rsa_verify_hash() -# decription: -# We create table with one record and write in it UTF8 text with enough length. -# Then we get random substring from this text and use it as "source message" that will be encrypted further. 
-# After this test generates private and public keys and uses them for two tasks: -# 1) get RSA signature of crypted_hash of source message (using private key) -# and verify it (using public key); RSA_VERIFY_HASH must return ; -# 2) encrypt source message, then decrypt it and compare result of decryption with original text. They must be equal. -# -# All these actions are applied against four algorithms: MD5, SHA1, SHA256 and SHA512. -# -# NB: code for FB 5.x is separated from FB 4.x because of renamed functions rsa_sign_hash() and rsa_verify_hash(), -# see: https://github.com/FirebirdSQL/firebird/issues/6806 -# -# ::: NOTE ::: -# It was encountered maximal number of octets in the source text must NOT exceed 126. -# Otherwise usage of SHA512 will fail with: -# Statement failed, SQLSTATE = 22023 -# TomCrypt library error: Invalid sized parameter. -# -Encrypting using cipher RSA -# Because of this, we must limit length of generated UTF8 "source text" is limited by 63 - see variable 'lorem' in EB. -# (greec and cyrillic characters are used here in the source text; they both require 2 bytes per character). -# -# Checked on 4.0.0.2479; 5.0.0.20. -# 22.05.2021: correction because of renamed functions rsa_sign and rsa_verify: suffix "_hash" was added to their names after ~14.05.2021 -# -# tracker_id: -# min_versions: ['4.0'] -# versions: 4.0, 5.0 -# qmid: None + +""" +ID: intfunc.encryption.rsa-family +TITLE: Basic test for RSA-family: rsa_private(), rsa_public(), rsa_encrypt(), + rsa_decrypt(), rsa_sign_hash() and rsa_verify_hash() +DESCRIPTION: + We create a table with one record and write into it UTF8 text of sufficient length. + Then we get a random substring from this text and use it as the "source message" that will be encrypted further. + After this, the test generates private and public keys and uses them for two tasks: + 1) get the RSA signature of the crypt_hash() of the source message (using the private key) + and verify it (using the public key); RSA_VERIFY_HASH must return true; + 2) encrypt the source message, then decrypt it and compare the result of decryption with the original text. They must be equal. + + All these actions are applied against four algorithms: MD5, SHA1, SHA256 and SHA512. + + NB: code for FB 5.x is separated from FB 4.x because of renamed functions rsa_sign_hash() and rsa_verify_hash(), + see: https://github.com/FirebirdSQL/firebird/issues/6806 + + ::: NOTE ::: + It was found that the maximal number of octets in the source text must NOT exceed 126. + Otherwise usage of SHA512 will fail with: + Statement failed, SQLSTATE = 22023 + TomCrypt library error: Invalid sized parameter. + -Encrypting using cipher RSA + Because of this, the length of the generated UTF8 "source text" is limited to 63 characters - see variable 'lorem' in EB. + (Greek and Cyrillic characters are used here in the source text; they both require 2 bytes per character). +NOTES: +[22.05.2021] + Correction because of renamed functions rsa_sign and rsa_verify: the suffix "_hash" was added to their names after ~14.05.2021. +[03.02.2022] pcisar + Removed version for 5.0, because both test_script and expected_output were identical.
+ Renamed functions rsa_sign_hash() and rsa_verify_hash() are part of v4.0 +FBTEST: functional.intfunc.encryption.rsa_family +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set list on; recreate table rsa( text_unencrypted varchar(256) @@ -83,7 +78,7 @@ test_script_1 = """ begin lorem = 'ΛορεμιπσθμδολορσιταμετvισιδνοvθμαλτερθμcομπλεcτιτθρτεναμηομεροβλανδιτμελανqθοδμοδοδολορεμΑτσεδγραεcιδελεcτθσcθειθσπροπριαεμολεστιαεθσθανηισqθοδσιποστθλαντπερτιναcιαΛαθδεμεqθιδεματηισΔθοπορροτολλιτπλατονεμαναλιιπαθλοcονστιτθαμqθοανΠρορεπθδιαρεcονσεqθθντθρεξνεcεθθνθμσολεαττεvελσθασcασελεγιμθσΝεεvερτιτθραππελλαντθρvισθτναμνοστερμολλισvολθπταριαΑγαμδολορεφφιcιαντθραννεcΑφφερτσιγνιφερθμqθεπριεισθασλθδθσδεσερθισσειδπερΠροβοπονδερθμετvιξετσιτστετφαcερρεφερρεντθρΕιαεqθετολλιτπροπριαεμεαvιξεθσθασδισπθτανδοΛαβορεφαcετεvολθπτατθμαδcθμηαβεοvιρισδολορετμελΑγαμλιβερβλανδιτηασαδvιμcθαεqθειντερεσσετVισεξvενιαμομνεσqθεινvενιρεCασεcονσετετθρvισατεvερτιφορενσιβθσσθσcιπιαντθρεξηισΕτπερτολλιτεριπθιτσαπιεντεμvελνεqθαεqθεποστθλανταεqθεηονεστατισεοσανΣονετεριπθιτεθvισλεγερεαδολεσcενσετναμCιβοαβηορρεαντινμεαναμηαβεοvιταειναδηισμαλισριδενσcορρθμπιτΝεcαδπριμισμενανδριμαγναqθαεστιοεξπλιcαριεθθσθνεqθοcομμοδοαετερνοαργθμεντθμΣθμοvιτθπερατοριβθσεστθτΜειτεvενιαμσεμπερΠαθλοαλβθcιθσvισατCθεαμφερριδοcτθσοφφενδιτεστατνατθμμθτατΕαμελτριτανιελαβοραρετλθδθσσcριπτορεμπερετΣεαειανιμαλcοντεντιονεσομιτταμλθcιλιθσπερνεΑπεριριπερσιθσαλτερθμθσθνονεcαπειριανοπορτερενεΑδμειμεισφαcερπθταντvιμcασεμοvεττραcτατοσεξΑππετερεμανδαμθσνοηιστιμεαμqθαεqθεδελενιτετεστvιρτθτετεμποριβθσδθοτεΜοδοινιμιcθσεισεαεοσιναφφερττεμπορcομμοδοΕνιμqθοτειρμοδcθπεραδολεσcενσπηιλοσοπηιαvιξεαΘτιναμνοστρθδvιστεπροειφαλλιλατινερεφορμιδανσΕθμαθδιαμεξπετενδισλιβεραvισσεεαεξοcθρρερετπροδεσσετσιτΝαμεξcοντεντιονεσδετερρθισσετvιστεδθισαθδιρεΕξμαλορθμcονσεqθατμνεσαρcηθμσεδΑδπροομνεσqθεcονστιτθαμλιβεραvισσεεξνθλλαμδοcτθσινδοcτθμεαμΕιηασμολλισομιτταμνεcδιcθντλθcιλιθσανταντασποσσιμιθvαρετεοσετΕιεραντμαιεστατισνεcεθμμοδθσαλιqθανδοεαVιμεθισμοδτορqθατοσδεσερθισσεειεαπροπονδερθμπερφεcτοQθοvιρισcαθσαετιβιqθετεσεδδιcοεσσελαορεετνοΑσσθμλθδθσβλανδιτεοσεξΦαcιλισοφφενδιτεαμεξατσιτπορροφαcιλισΗασπερφεcτοσεντεντιαεαccομμοδαρενοΗαβεοαλβθcιθσcονcλθσιονεμqθεατεοσμελαττατιονcονσετετθρjθστοαδιπισcιτεεοσΔιcαμρατιονιβθσσιγνιφερθμqθεεθμνομεισολεταδμοδθμνοφαcερταcιματεσεξπετενδισηισαδΗισcθπρομπτατορqθατοσvιξαδμελιορεπερcιπιτΕqθιδεμταcιματεσεθριπιδισιδπερμελατιλλθμεqθιδεμαccθμσανΕιαπεριαμινcορρθπτενεcΕιδθοδελιcαταρεφορμιδανσσολθμεθισμοδεθπερΔθορεβθμαλιqθιπδενιqθεθτcθqθιεσσεελιτδεσερθντΑμετσcριπταπαρτιενδονεvιμνοθσθιλλθμπορροπθτεντιδλαβορεσαλιενθμπροΑφφερτατομορθμcορρθμπιτναμαδαλιqθαμινερμισμεντιτθμεαμ' || 
'ЛоремипсумдолорситаметехяуилибердоцендицоррумпитЕраталиенумносеаЕхтотатациматесирацундиаеумехерцицонсецтетуеридцумПробоатяуиделицатиссимиехяуосумосусципиантурхасехПродебитисмандамусеунеприиллудсонетрегионеТееамлегеремандамусПереталиатинцидунтНецибодицамлаудемусуанприталенуллаДолоресвертеремсплендидецумеуадмелопортересигниферумяуеАнвитаеорнатусопортеатеосадвисвениаминермисделенитиВихсуавитатенеглегентуртееумелталеиллуммалуиссетЕросмелиорехисеуПутентпосидониумтенецДицантделенитменандриутмеаадяуолатинеаргументумнемеиалтерумерудитицомплецтитурЦибототавидитеумцуменандрилаборамусеунамадагамграецохисЕхеумассумяуаестиоатеосцибомоллисТемелнолуиссеинтеллегатделицатиссимиНолаудемцонституамаппеллантурцумнецяуотграеценеЕпицуреипхаедруместехнулладебетеумутЕтхисенимаугуемуциуссцрипсеритвелидЕтвимунумволуптариасеабрутеинтеллегамцуМеатеелеифендмнесарчумяуисверотемпорибусусуанидперуллуминтеллегатХомероевертитурпосидониумиусетеимелвероаеяуеФацерпосситпробатусеинецСиттецонгуецонцептамседфацермоллислабитуринХабеосусципиантуридхасцасемаиоруминцорруптеехеосеосомнисвивендумаппеллантуранЕхмеаплацератплатонемцонцлусионемяуеПосситперицулаеамеаетдуофацерволуптариаяуаерендумДесеруиссесцриптореминяуиеивимпромптафеугиатхисеуетиамимпедитНовисопортеатволуптарианеглегентурдицампоссимеаяуиусуидаппетереинцидеринтцонцлудатуряуеДицоцорпорамеицуПервениамсаперетнеяуиеадолорессапиентемалияуандоансимуларгументумперВереарнонуместоряуатосмелетвисрецтеяуемолестиаецонсеяуунтуреиеоснеоффендитволуптуаратионибусСеаноессесаперетреформидансвимяуидамратионибусвитуператаеисединвидевидитДесерунтрепудиаределицатиссимицувиханперсиусерипуитцотидиеяуеприЯуимагнаиуваретфацилисцуинмоветириуреатоморумеамВисимпетусрецтеяуеевертитуреабрутериденсдолореснамнеНееффициантурделицатиссимияуоМеиеааеяуепосситнемореНовимяуаслудусаццусатаеиеумвертеремлуцилиусперсеяуерисНевелунумвероелояуентиамехпроеррорцонститутоантиопамеррорибусяуитеЕосепицуриоцурреретулламцорперноЕтомниуминструцтиорнамСитинутинамевертинихилдесеруиссееиперСедтеверосимулдигниссимпетентиумвулпутатенамеуНостроалияуамеуцумАлиипондерумхонестатисеапроутвихевертииндоцтумсапиентеметсеаодиоцаусаеПротеимпердиетсигниферумяуеЕамеиаццумсаномиттантурДуиснобиспертинахнецанцуцумалиенумпатриояуеадолесценсдебетнумяуамехприПаулоаперирилаореетяуиинмелнофастидиипетентиумЗриланциллаесадипсцингин'; - + -- while (1=1) do -- begin p_beg = cast( 1 + rand() * ( char_length(lorem) - 63 ) as smallint); @@ -154,9 +149,9 @@ test_script_1 = """ """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ RSA_SIGN_LEN_MG5 256 RSA_VRFY_MD5 @@ -182,162 +177,8 @@ expected_stdout_1 = """ DECRYPTION_RESULT_SHA512 OK. 
""" -@pytest.mark.version('>=4.0,<5.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - -# version: 5.0 -# resources: None - -substitutions_2 = [] - -init_script_2 = """""" - -db_2 = db_factory(sql_dialect=3, init=init_script_2) - -test_script_2 = """ - set list on; - recreate table rsa( - text_unencrypted varchar(256) - ,k_prv varbinary(16384) - ,k_pub varbinary(8192) - ,text_rsa_sign varchar(8192) - ,text_rsa_vrfy boolean - ,text_encrypted varchar(256) - ,text_decrypted varchar(256) - ); - insert into rsa default values; - - set term ^; - execute block returns( - rsa_sign_len_mg5 smallint - ,rsa_vrfy_md5 boolean - ,rsa_sign_len_sha1 smallint - ,rsa_vrfy_sha1 boolean - ,rsa_sign_len_sha256 smallint - ,rsa_vrfy_sha256 boolean - ,rsa_sign_len_sha512 smallint - ,rsa_vrfy_sha512 boolean - ,encr_octet_len_md5 smallint - ,decryption_result_md5 varchar(3) - ,encr_octet_len_sha1 smallint - ,decryption_result_sha1 varchar(3) - ,encr_octet_len_sha256 smallint - ,decryption_result_sha256 varchar(3) - ,encr_octet_len_sha512 smallint - ,decryption_result_sha512 varchar(3) - ) - as - declare lorem varchar(8190) character set utf8; - declare p_beg smallint; - declare n_for smallint; - begin - lorem = 'ΛορεμιπσθμδολορσιταμετvισιδνοvθμαλτερθμcομπλεcτιτθρτεναμηομεροβλανδιτμελανqθοδμοδοδολορεμΑτσεδγραεcιδελεcτθσcθειθσπροπριαεμολεστιαεθσθανηισqθοδσιποστθλαντπερτιναcιαΛαθδεμεqθιδεματηισΔθοπορροτολλιτπλατονεμαναλιιπαθλοcονστιτθαμqθοανΠρορεπθδιαρεcονσεqθθντθρεξνεcεθθνθμσολεαττεvελσθασcασελεγιμθσΝεεvερτιτθραππελλαντθρvισθτναμνοστερμολλισvολθπταριαΑγαμδολορεφφιcιαντθραννεcΑφφερτσιγνιφερθμqθεπριεισθασλθδθσδεσερθισσειδπερΠροβοπονδερθμετvιξετσιτστετφαcερρεφερρεντθρΕιαεqθετολλιτπροπριαεμεαvιξεθσθασδισπθτανδοΛαβορεφαcετεvολθπτατθμαδcθμηαβεοvιρισδολορετμελΑγαμλιβερβλανδιτηασαδvιμcθαεqθειντερεσσετVισεξvενιαμομνεσqθεινvενιρεCασεcονσετετθρvισατεvερτιφορενσιβθσσθσcιπιαντθρεξηισΕτπερτολλιτεριπθιτσαπιεντεμvελνεqθαεqθεποστθλανταεqθεηονεστατισεοσανΣονετεριπθιτεθvισλεγερεαδολεσcενσετναμCιβοαβηορρεαντινμεαναμηαβεοvιταειναδηισμαλισριδενσcορρθμπιτΝεcαδπριμισμενανδριμαγναqθαεστιοεξπλιcαριεθθσθνεqθοcομμοδοαετερνοαργθμεντθμΣθμοvιτθπερατοριβθσεστθτΜειτεvενιαμσεμπερΠαθλοαλβθcιθσvισατCθεαμφερριδοcτθσοφφενδιτεστατνατθμμθτατΕαμελτριτανιελαβοραρετλθδθσσcριπτορεμπερετΣεαειανιμαλcοντεντιονεσομιτταμλθcιλιθσπερνεΑπεριριπερσιθσαλτερθμθσθνονεcαπειριανοπορτερενεΑδμειμεισφαcερπθταντvιμcασεμοvεττραcτατοσεξΑππετερεμανδαμθσνοηιστιμεαμqθαεqθεδελενιτετεστvιρτθτετεμποριβθσδθοτεΜοδοινιμιcθσεισεαεοσιναφφερττεμπορcομμοδοΕνιμqθοτειρμοδcθπεραδολεσcενσπηιλοσοπηιαvιξεαΘτιναμνοστρθδvιστεπροειφαλλιλατινερεφορμιδανσΕθμαθδιαμεξπετενδισλιβεραvισσεεαεξοcθρρερετπροδεσσετσιτΝαμεξcοντεντιονεσδετερρθισσετvιστεδθισαθδιρεΕξμαλορθμcονσεqθατμνεσαρcηθμσεδΑδπροομνεσqθεcονστιτθαμλιβεραvισσεεξνθλλαμδοcτθσινδοcτθμεαμΕιηασμολλισομιτταμνεcδιcθντλθcιλιθσανταντασποσσιμιθvαρετεοσετΕιεραντμαιεστατισνεcεθμμοδθσαλιqθανδοεαVιμεθισμοδτορqθατοσδεσερθισσεειεαπροπονδερθμπερφεcτοQθοvιρισcαθσαετιβιqθετεσεδδιcοεσσελαορεετνοΑσσθμλθδθσβλανδιτεοσεξΦαcιλισοφφενδιτεαμεξατσιτπορροφαcιλισΗασπερφεcτοσεντεντιαεαccομμοδαρενοΗαβεοαλβθcιθσcονcλθσιονεμqθεατεοσμελαττατιονcονσετετθρjθστοαδιπισcιτεεοσΔιcαμρατιονιβθσσιγνιφερθμqθεεθμνομεισολεταδμοδθμνοφαcερταcιματεσεξπετενδισηισαδΗισcθπρομπτατορqθατοσvιξαδμελιορεπερcιπιτΕqθιδεμταcιματεσεθριπιδισιδπερμελατιλλθμεqθιδεμαccθμσανΕιαπεριαμινcορρθπτενεcΕιδθοδελιcαταρεφορμιδανσσολθμεθισμοδεθπερΔθορεβθμαλιqθιπδενιqθεθτcθqθιεσσεελιτδεσερθντΑμετσcριπταπαρτιενδονεvιμνοθσθιλ
λθμπορροπθτεντιδλαβορεσαλιενθμπροΑφφερτατομορθμcορρθμπιτναμαδαλιqθαμινερμισμεντιτθμεαμ' - || 'ЛоремипсумдолорситаметехяуилибердоцендицоррумпитЕраталиенумносеаЕхтотатациматесирацундиаеумехерцицонсецтетуеридцумПробоатяуиделицатиссимиехяуосумосусципиантурхасехПродебитисмандамусеунеприиллудсонетрегионеТееамлегеремандамусПереталиатинцидунтНецибодицамлаудемусуанприталенуллаДолоресвертеремсплендидецумеуадмелопортересигниферумяуеАнвитаеорнатусопортеатеосадвисвениаминермисделенитиВихсуавитатенеглегентуртееумелталеиллуммалуиссетЕросмелиорехисеуПутентпосидониумтенецДицантделенитменандриутмеаадяуолатинеаргументумнемеиалтерумерудитицомплецтитурЦибототавидитеумцуменандрилаборамусеунамадагамграецохисЕхеумассумяуаестиоатеосцибомоллисТемелнолуиссеинтеллегатделицатиссимиНолаудемцонституамаппеллантурцумнецяуотграеценеЕпицуреипхаедруместехнулладебетеумутЕтхисенимаугуемуциуссцрипсеритвелидЕтвимунумволуптариасеабрутеинтеллегамцуМеатеелеифендмнесарчумяуисверотемпорибусусуанидперуллуминтеллегатХомероевертитурпосидониумиусетеимелвероаеяуеФацерпосситпробатусеинецСиттецонгуецонцептамседфацермоллислабитуринХабеосусципиантуридхасцасемаиоруминцорруптеехеосеосомнисвивендумаппеллантуранЕхмеаплацератплатонемцонцлусионемяуеПосситперицулаеамеаетдуофацерволуптариаяуаерендумДесеруиссесцриптореминяуиеивимпромптафеугиатхисеуетиамимпедитНовисопортеатволуптарианеглегентурдицампоссимеаяуиусуидаппетереинцидеринтцонцлудатуряуеДицоцорпорамеицуПервениамсаперетнеяуиеадолорессапиентемалияуандоансимуларгументумперВереарнонуместоряуатосмелетвисрецтеяуемолестиаецонсеяуунтуреиеоснеоффендитволуптуаратионибусСеаноессесаперетреформидансвимяуидамратионибусвитуператаеисединвидевидитДесерунтрепудиаределицатиссимицувиханперсиусерипуитцотидиеяуеприЯуимагнаиуваретфацилисцуинмоветириуреатоморумеамВисимпетусрецтеяуеевертитуреабрутериденсдолореснамнеНееффициантурделицатиссимияуоМеиеааеяуепосситнемореНовимяуаслудусаццусатаеиеумвертеремлуцилиусперсеяуерисНевелунумвероелояуентиамехпроеррорцонститутоантиопамеррорибусяуитеЕосепицуриоцурреретулламцорперноЕтомниуминструцтиорнамСитинутинамевертинихилдесеруиссееиперСедтеверосимулдигниссимпетентиумвулпутатенамеуНостроалияуамеуцумАлиипондерумхонестатисеапроутвихевертииндоцтумсапиентеметсеаодиоцаусаеПротеимпердиетсигниферумяуеЕамеиаццумсаномиттантурДуиснобиспертинахнецанцуцумалиенумпатриояуеадолесценсдебетнумяуамехприПаулоаперирилаореетяуиинмелнофастидиипетентиумЗриланциллаесадипсцингин'; - - -- while (1=1) do - -- begin - p_beg = cast( 1 + rand() * ( char_length(lorem) - 63 ) as smallint); - n_for = 63; -- cast( 8 + rand() * (63-8) as smallint); - update rsa set text_unencrypted = substring(:lorem from :p_beg for :n_for); - - rdb$set_context('USER_SESSION', 'SOURCE_TEXT', (select text_unencrypted from rsa rows 1)); - - update rsa set k_prv = rsa_private(256); - update rsa set k_pub = rsa_public(k_prv); - -- update rsa set text_decrypted = rsa_decrypt(cast(text_encrypted as varbinary(32760)) key k_prv ); - - ------------------------------------------------- - -- RSA_SIGN_HASH ( KEY [HASH ] [SALT_LENGTH ] ) - -- RSA_VERIFY_HASH ( SIGNATURE KEY [HASH ] [SALT_LENGTH ] ) - -- RSA_ENCRYPT ( KEY [LPARAM ] [HASH ]) - -- ::= { MD5 | SHA1 | SHA256 | SHA512 } ; Default is SHA256. 
- - update rsa set text_rsa_sign = rsa_sign_hash( crypt_hash(text_unencrypted using md5) key k_prv hash md5) returning octet_length(text_rsa_sign) into rsa_sign_len_mg5; - update rsa set text_rsa_vrfy = rsa_verify_hash( crypt_hash(text_unencrypted using md5) signature text_rsa_sign key k_pub hash md5) returning text_rsa_vrfy into rsa_vrfy_md5; - - update rsa set text_rsa_sign = rsa_sign_hash( crypt_hash(text_unencrypted using sha1) key k_prv hash sha1) returning octet_length(text_rsa_sign) into rsa_sign_len_sha1; - update rsa set text_rsa_vrfy = rsa_verify_hash( crypt_hash(text_unencrypted using sha1) signature text_rsa_sign key k_pub hash sha1) returning text_rsa_vrfy into rsa_vrfy_sha1; - - update rsa set text_rsa_sign = rsa_sign_hash( crypt_hash(text_unencrypted using sha256) key k_prv hash sha256) returning octet_length(text_rsa_sign) into rsa_sign_len_sha256; - update rsa set text_rsa_vrfy = rsa_verify_hash( crypt_hash(text_unencrypted using sha256) signature text_rsa_sign key k_pub hash sha256) returning text_rsa_vrfy into rsa_vrfy_sha256; - - update rsa set text_rsa_sign = rsa_sign_hash( crypt_hash(text_unencrypted using sha512) key k_prv hash sha512) returning octet_length(text_rsa_sign) into rsa_sign_len_sha512; - update rsa set text_rsa_vrfy = rsa_verify_hash( crypt_hash(text_unencrypted using sha512) signature text_rsa_sign key k_pub hash sha512) returning text_rsa_vrfy into rsa_vrfy_sha512; - - --################################################# - - update rsa set text_encrypted = rsa_encrypt(text_unencrypted key k_pub hash md5) returning octet_length(text_encrypted) into encr_octet_len_md5; - update rsa set text_decrypted = rsa_decrypt(text_encrypted key k_prv hash md5) returning iif(text_unencrypted = text_decrypted, 'OK.','BAD') into decryption_result_md5; - ------------------------------------------------- - update rsa set text_encrypted = rsa_encrypt(text_unencrypted key k_pub hash sha1) returning octet_length(text_encrypted) into encr_octet_len_sha1; - update rsa set text_decrypted = rsa_decrypt(text_encrypted key k_prv hash sha1) returning iif(text_unencrypted = text_decrypted, 'OK.','BAD') into decryption_result_sha1; - ------------------------------------------------- - update rsa set text_encrypted = rsa_encrypt(text_unencrypted key k_pub hash sha256) returning octet_length(text_encrypted) into encr_octet_len_sha256; - update rsa set text_decrypted = rsa_decrypt(text_encrypted key k_prv hash sha256) returning iif(text_unencrypted = text_decrypted, 'OK.','BAD') into decryption_result_sha256; - ------------------------------------------------- - update rsa set text_encrypted = rsa_encrypt(text_unencrypted key k_pub hash sha512) returning octet_length(text_encrypted) into encr_octet_len_sha512; - update rsa set text_decrypted = rsa_decrypt(text_encrypted key k_prv hash sha512) returning iif(text_unencrypted = text_decrypted, 'OK.','BAD') into decryption_result_sha512; - ------------------------------------------------- - - rdb$set_context('USER_SESSION', 'SOURCE_TEXT', null); - - -- end - suspend; - - end - ^ - execute block returns(problem_text varchar(512) character set utf8, problem_octets_len smallint) as - begin - for - select problem_text, octet_length(problem_text) - from ( - select rdb$get_context('USER_SESSION', 'SOURCE_TEXT') as problem_text - from rdb$database - ) - where problem_text is not null - into problem_text, problem_octets_len - do - suspend; - end - ^ - set term ;^ - -""" - -act_2 = isql_act('db_2', test_script_2, substitutions=substitutions_2) - 
-expected_stdout_2 = """ - RSA_SIGN_LEN_MG5 256 - RSA_VRFY_MD5 - - RSA_SIGN_LEN_SHA1 256 - RSA_VRFY_SHA1 - - RSA_SIGN_LEN_SHA256 256 - RSA_VRFY_SHA256 - - RSA_SIGN_LEN_SHA512 256 - RSA_VRFY_SHA512 - - ENCR_OCTET_LEN_MD5 256 - DECRYPTION_RESULT_MD5 OK. - - ENCR_OCTET_LEN_SHA1 256 - DECRYPTION_RESULT_SHA1 OK. - - ENCR_OCTET_LEN_SHA256 256 - DECRYPTION_RESULT_SHA256 OK. - - ENCR_OCTET_LEN_SHA512 256 - DECRYPTION_RESULT_SHA512 OK. -""" - -@pytest.mark.version('>=5.0') -def test_2(act_2: Action): - act_2.expected_stdout = expected_stdout_2 - act_2.execute() - assert act_2.clean_stdout == act_2.clean_expected_stdout - +@pytest.mark.version('>=4.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/encryption/test_rsa_family_pkcs_1_5.py b/tests/functional/intfunc/encryption/test_rsa_family_pkcs_1_5.py index c97ead0e..e6b27a98 100644 --- a/tests/functional/intfunc/encryption/test_rsa_family_pkcs_1_5.py +++ b/tests/functional/intfunc/encryption/test_rsa_family_pkcs_1_5.py @@ -1,29 +1,19 @@ #coding:utf-8 -# -# id: functional.intfunc.encryption.rsa_family_pkcs_1_5 -# title: RSA-family function, attitional test: verify ability to use PKCS_1_5 keyword -# decription: -# See ticket: https://github.com/FirebirdSQL/firebird/issues/6929 -# Checked on 5.0.0.169. -# -# tracker_id: -# min_versions: ['5.0'] -# versions: 5.0 -# qmid: None + +""" +ID: intfunc.encryption.rsa-family-pkcs15 +ISSUE: 6929 +TITLE: RSA-family function, additional test: verify ability to use PKCS_1_5 keyword +DESCRIPTION: +FBTEST: functional.intfunc.encryption.rsa_family_pkcs_1_5 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 5.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set list on; recreate table rsa( text_unencrypted varchar(256) @@ -62,7 +52,7 @@ test_script_1 = """ begin lorem =
'ΛορεμιπσθμδολορσιταμετvισιδνοvθμαλτερθμcομπλεcτιτθρτεναμηομεροβλανδιτμελανqθοδμοδοδολορεμΑτσεδγραεcιδελεcτθσcθειθσπροπριαεμολεστιαεθσθανηισqθοδσιποστθλαντπερτιναcιαΛαθδεμεqθιδεματηισΔθοπορροτολλιτπλατονεμαναλιιπαθλοcονστιτθαμqθοανΠρορεπθδιαρεcονσεqθθντθρεξνεcεθθνθμσολεαττεvελσθασcασελεγιμθσΝεεvερτιτθραππελλαντθρvισθτναμνοστερμολλισvολθπταριαΑγαμδολορεφφιcιαντθραννεcΑφφερτσιγνιφερθμqθεπριεισθασλθδθσδεσερθισσειδπερΠροβοπονδερθμετvιξετσιτστετφαcερρεφερρεντθρΕιαεqθετολλιτπροπριαεμεαvιξεθσθασδισπθτανδοΛαβορεφαcετεvολθπτατθμαδcθμηαβεοvιρισδολορετμελΑγαμλιβερβλανδιτηασαδvιμcθαεqθειντερεσσετVισεξvενιαμομνεσqθεινvενιρεCασεcονσετετθρvισατεvερτιφορενσιβθσσθσcιπιαντθρεξηισΕτπερτολλιτεριπθιτσαπιεντεμvελνεqθαεqθεποστθλανταεqθεηονεστατισεοσανΣονετεριπθιτεθvισλεγερεαδολεσcενσετναμCιβοαβηορρεαντινμεαναμηαβεοvιταειναδηισμαλισριδενσcορρθμπιτΝεcαδπριμισμενανδριμαγναqθαεστιοεξπλιcαριεθθσθνεqθοcομμοδοαετερνοαργθμεντθμΣθμοvιτθπερατοριβθσεστθτΜειτεvενιαμσεμπερΠαθλοαλβθcιθσvισατCθεαμφερριδοcτθσοφφενδιτεστατνατθμμθτατΕαμελτριτανιελαβοραρετλθδθσσcριπτορεμπερετΣεαειανιμαλcοντεντιονεσομιτταμλθcιλιθσπερνεΑπεριριπερσιθσαλτερθμθσθνονεcαπειριανοπορτερενεΑδμειμεισφαcερπθταντvιμcασεμοvεττραcτατοσεξΑππετερεμανδαμθσνοηιστιμεαμqθαεqθεδελενιτετεστvιρτθτετεμποριβθσδθοτεΜοδοινιμιcθσεισεαεοσιναφφερττεμπορcομμοδοΕνιμqθοτειρμοδcθπεραδολεσcενσπηιλοσοπηιαvιξεαΘτιναμνοστρθδvιστεπροειφαλλιλατινερεφορμιδανσΕθμαθδιαμεξπετενδισλιβεραvισσεεαεξοcθρρερετπροδεσσετσιτΝαμεξcοντεντιονεσδετερρθισσετvιστεδθισαθδιρεΕξμαλορθμcονσεqθατμνεσαρcηθμσεδΑδπροομνεσqθεcονστιτθαμλιβεραvισσεεξνθλλαμδοcτθσινδοcτθμεαμΕιηασμολλισομιτταμνεcδιcθντλθcιλιθσανταντασποσσιμιθvαρετεοσετΕιεραντμαιεστατισνεcεθμμοδθσαλιqθανδοεαVιμεθισμοδτορqθατοσδεσερθισσεειεαπροπονδερθμπερφεcτοQθοvιρισcαθσαετιβιqθετεσεδδιcοεσσελαορεετνοΑσσθμλθδθσβλανδιτεοσεξΦαcιλισοφφενδιτεαμεξατσιτπορροφαcιλισΗασπερφεcτοσεντεντιαεαccομμοδαρενοΗαβεοαλβθcιθσcονcλθσιονεμqθεατεοσμελαττατιονcονσετετθρjθστοαδιπισcιτεεοσΔιcαμρατιονιβθσσιγνιφερθμqθεεθμνομεισολεταδμοδθμνοφαcερταcιματεσεξπετενδισηισαδΗισcθπρομπτατορqθατοσvιξαδμελιορεπερcιπιτΕqθιδεμταcιματεσεθριπιδισιδπερμελατιλλθμεqθιδεμαccθμσανΕιαπεριαμινcορρθπτενεcΕιδθοδελιcαταρεφορμιδανσσολθμεθισμοδεθπερΔθορεβθμαλιqθιπδενιqθεθτcθqθιεσσεελιτδεσερθντΑμετσcριπταπαρτιενδονεvιμνοθσθιλλθμπορροπθτεντιδλαβορεσαλιενθμπροΑφφερτατομορθμcορρθμπιτναμαδαλιqθαμινερμισμεντιτθμεαμ' || 
'ЛоремипсумдолорситаметехяуилибердоцендицоррумпитЕраталиенумносеаЕхтотатациматесирацундиаеумехерцицонсецтетуеридцумПробоатяуиделицатиссимиехяуосумосусципиантурхасехПродебитисмандамусеунеприиллудсонетрегионеТееамлегеремандамусПереталиатинцидунтНецибодицамлаудемусуанприталенуллаДолоресвертеремсплендидецумеуадмелопортересигниферумяуеАнвитаеорнатусопортеатеосадвисвениаминермисделенитиВихсуавитатенеглегентуртееумелталеиллуммалуиссетЕросмелиорехисеуПутентпосидониумтенецДицантделенитменандриутмеаадяуолатинеаргументумнемеиалтерумерудитицомплецтитурЦибототавидитеумцуменандрилаборамусеунамадагамграецохисЕхеумассумяуаестиоатеосцибомоллисТемелнолуиссеинтеллегатделицатиссимиНолаудемцонституамаппеллантурцумнецяуотграеценеЕпицуреипхаедруместехнулладебетеумутЕтхисенимаугуемуциуссцрипсеритвелидЕтвимунумволуптариасеабрутеинтеллегамцуМеатеелеифендмнесарчумяуисверотемпорибусусуанидперуллуминтеллегатХомероевертитурпосидониумиусетеимелвероаеяуеФацерпосситпробатусеинецСиттецонгуецонцептамседфацермоллислабитуринХабеосусципиантуридхасцасемаиоруминцорруптеехеосеосомнисвивендумаппеллантуранЕхмеаплацератплатонемцонцлусионемяуеПосситперицулаеамеаетдуофацерволуптариаяуаерендумДесеруиссесцриптореминяуиеивимпромптафеугиатхисеуетиамимпедитНовисопортеатволуптарианеглегентурдицампоссимеаяуиусуидаппетереинцидеринтцонцлудатуряуеДицоцорпорамеицуПервениамсаперетнеяуиеадолорессапиентемалияуандоансимуларгументумперВереарнонуместоряуатосмелетвисрецтеяуемолестиаецонсеяуунтуреиеоснеоффендитволуптуаратионибусСеаноессесаперетреформидансвимяуидамратионибусвитуператаеисединвидевидитДесерунтрепудиаределицатиссимицувиханперсиусерипуитцотидиеяуеприЯуимагнаиуваретфацилисцуинмоветириуреатоморумеамВисимпетусрецтеяуеевертитуреабрутериденсдолореснамнеНееффициантурделицатиссимияуоМеиеааеяуепосситнемореНовимяуаслудусаццусатаеиеумвертеремлуцилиусперсеяуерисНевелунумвероелояуентиамехпроеррорцонститутоантиопамеррорибусяуитеЕосепицуриоцурреретулламцорперноЕтомниуминструцтиорнамСитинутинамевертинихилдесеруиссееиперСедтеверосимулдигниссимпетентиумвулпутатенамеуНостроалияуамеуцумАлиипондерумхонестатисеапроутвихевертииндоцтумсапиентеметсеаодиоцаусаеПротеимпердиетсигниферумяуеЕамеиаццумсаномиттантурДуиснобиспертинахнецанцуцумалиенумпатриояуеадолесценсдебетнумяуамехприПаулоаперирилаореетяуиинмелнофастидиипетентиумЗриланциллаесадипсцингин'; - + p_beg = cast( 1 + rand() * ( char_length(lorem) - 63 ) as smallint); n_for = 63; -- cast( 8 + rand() * (63-8) as smallint); update rsa set text_unencrypted = substring(:lorem from :p_beg for :n_for); @@ -137,9 +127,9 @@ test_script_1 = """ """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ RSA_SIGN_LEN_MG5 256 RSA_VRFY_MD5 @@ -166,8 +156,7 @@ expected_stdout_1 = """ """ @pytest.mark.version('>=5.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/list/test_01.py b/tests/functional/intfunc/list/test_01.py index 94d35355..3696ac85 100644 --- a/tests/functional/intfunc/list/test_01.py +++ b/tests/functional/intfunc/list/test_01.py @@ -1,22 +1,18 @@ #coding:utf-8 -# -# id: functional.intfunc.list.01 -# title: List with default options -# decription: -# tracker_id: CORE-964 -# min_versions: [] -# versions: 2.1 -# qmid: 
functional.intfunc.list.list_01 + +""" +ID: intfunc.list-01 +ISSUE: 1367 +TITLE: List with default options +DESCRIPTION: +JIRA: CORE-964 +FBTEST: functional.intfunc.list.01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None - -substitutions_1 = [('list_blob_id.*', '')] - -init_script_1 = """ +init_script = """ recreate table test( rel_name char(31) character set unicode_fss ,idx_name char(31) character set unicode_fss @@ -82,9 +78,9 @@ init_script_1 = """ commit; """ -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """ +test_script = """ set list on; set blob all; select x.rel_name, list(trim(x.idx_name)) "list_blob_id" @@ -93,137 +89,136 @@ test_script_1 = """ group by 1; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('list_blob_id.*', '')]) -expected_stdout_1 = """ +expected_stdout = """ REL_NAME RDB$AUTH_MAPPING list_blob_id 0:1 RDB$INDEX_52 - + REL_NAME RDB$BACKUP_HISTORY list_blob_id 0:2 RDB$INDEX_44 - + REL_NAME RDB$CHARACTER_SETS list_blob_id 0:3 RDB$INDEX_19,RDB$INDEX_25 - + REL_NAME RDB$CHECK_CONSTRAINTS list_blob_id 0:4 RDB$INDEX_14,RDB$INDEX_40 - + REL_NAME RDB$COLLATIONS list_blob_id 0:5 RDB$INDEX_20,RDB$INDEX_26 - + REL_NAME RDB$DEPENDENCIES list_blob_id 0:6 RDB$INDEX_27,RDB$INDEX_28 - + REL_NAME RDB$EXCEPTIONS list_blob_id 0:7 RDB$INDEX_23,RDB$INDEX_24 - + REL_NAME RDB$FIELDS list_blob_id 0:8 RDB$INDEX_2 - + REL_NAME RDB$FIELD_DIMENSIONS list_blob_id 0:9 RDB$INDEX_36 - + REL_NAME RDB$FILTERS list_blob_id 0:a RDB$INDEX_17,RDB$INDEX_45 - + REL_NAME RDB$FORMATS list_blob_id 0:b RDB$INDEX_16 - + REL_NAME RDB$FUNCTIONS list_blob_id 0:c RDB$INDEX_9 - + REL_NAME RDB$FUNCTION_ARGUMENTS list_blob_id 0:d RDB$INDEX_10,RDB$INDEX_49,RDB$INDEX_51 - + REL_NAME RDB$GENERATORS list_blob_id 0:e RDB$INDEX_11,RDB$INDEX_46 - + REL_NAME RDB$INDEX_SEGMENTS list_blob_id 0:f RDB$INDEX_6 - + REL_NAME RDB$INDICES list_blob_id 0:10 RDB$INDEX_5,RDB$INDEX_31,RDB$INDEX_41 - + REL_NAME RDB$PACKAGES list_blob_id 0:11 RDB$INDEX_47 - + REL_NAME RDB$PROCEDURES list_blob_id 0:12 RDB$INDEX_21,RDB$INDEX_22 - + REL_NAME RDB$PROCEDURE_PARAMETERS list_blob_id 0:13 RDB$INDEX_18,RDB$INDEX_48,RDB$INDEX_50 - + REL_NAME RDB$REF_CONSTRAINTS list_blob_id 0:14 RDB$INDEX_13 - + REL_NAME RDB$RELATIONS list_blob_id 0:15 RDB$INDEX_0,RDB$INDEX_1 - + REL_NAME RDB$RELATION_CONSTRAINTS list_blob_id 0:16 RDB$INDEX_12,RDB$INDEX_42,RDB$INDEX_43 - + REL_NAME RDB$RELATION_FIELDS list_blob_id 0:17 RDB$INDEX_3,RDB$INDEX_4,RDB$INDEX_15 - + REL_NAME RDB$ROLES list_blob_id 0:18 RDB$INDEX_39 - + REL_NAME RDB$SECURITY_CLASSES list_blob_id 0:19 RDB$INDEX_7 - + REL_NAME RDB$TRANSACTIONS list_blob_id 0:1a RDB$INDEX_32 - + REL_NAME RDB$TRIGGERS list_blob_id 0:1b RDB$INDEX_8,RDB$INDEX_38 - + REL_NAME RDB$TRIGGER_MESSAGES list_blob_id 0:1c RDB$INDEX_35 - + REL_NAME RDB$TYPES list_blob_id 0:1d RDB$INDEX_37 - + REL_NAME RDB$USER_PRIVILEGES list_blob_id 0:1e RDB$INDEX_29,RDB$INDEX_30 - + REL_NAME RDB$VIEW_RELATIONS list_blob_id 0:1f RDB$INDEX_33,RDB$INDEX_34 """ -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git 
a/tests/functional/intfunc/list/test_02.py b/tests/functional/intfunc/list/test_02.py index a05b2325..157eb736 100644 --- a/tests/functional/intfunc/list/test_02.py +++ b/tests/functional/intfunc/list/test_02.py @@ -1,22 +1,18 @@ #coding:utf-8 -# -# id: functional.intfunc.list.02 -# title: List function with delimiter specified -# decription: -# tracker_id: CORE-964 -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.list.list_02 + +""" +ID: intfunc.list-02 +ISSUE: 1367 +TITLE: List function with delimiter specified +DESCRIPTION: +JIRA: CORE-964 +FBTEST: functional.intfunc.list.02 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None - -substitutions_1 = [('list_blob_id.*', '')] - -init_script_1 = """ +init_script = """ recreate table test( rel_name char(31) character set unicode_fss ,idx_name char(31) character set unicode_fss @@ -82,9 +78,9 @@ init_script_1 = """ commit; """ -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """ +test_script = """ set list on; set blob all; select x.rel_name, list(trim(x.idx_name), ':') "list_blob_id" @@ -93,137 +89,136 @@ test_script_1 = """ group by 1; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('list_blob_id.*', '')]) -expected_stdout_1 = """ +expected_stdout = """ REL_NAME RDB$AUTH_MAPPING list_blob_id 0:1 RDB$INDEX_52 - + REL_NAME RDB$BACKUP_HISTORY list_blob_id 0:2 RDB$INDEX_44 - + REL_NAME RDB$CHARACTER_SETS list_blob_id 0:3 RDB$INDEX_19:RDB$INDEX_25 - + REL_NAME RDB$CHECK_CONSTRAINTS list_blob_id 0:4 RDB$INDEX_14:RDB$INDEX_40 - + REL_NAME RDB$COLLATIONS list_blob_id 0:5 RDB$INDEX_20:RDB$INDEX_26 - + REL_NAME RDB$DEPENDENCIES list_blob_id 0:6 RDB$INDEX_27:RDB$INDEX_28 - + REL_NAME RDB$EXCEPTIONS list_blob_id 0:7 RDB$INDEX_23:RDB$INDEX_24 - + REL_NAME RDB$FIELDS list_blob_id 0:8 RDB$INDEX_2 - + REL_NAME RDB$FIELD_DIMENSIONS list_blob_id 0:9 RDB$INDEX_36 - + REL_NAME RDB$FILTERS list_blob_id 0:a RDB$INDEX_17:RDB$INDEX_45 - + REL_NAME RDB$FORMATS list_blob_id 0:b RDB$INDEX_16 - + REL_NAME RDB$FUNCTIONS list_blob_id 0:c RDB$INDEX_9 - + REL_NAME RDB$FUNCTION_ARGUMENTS list_blob_id 0:d RDB$INDEX_10:RDB$INDEX_49:RDB$INDEX_51 - + REL_NAME RDB$GENERATORS list_blob_id 0:e RDB$INDEX_11:RDB$INDEX_46 - + REL_NAME RDB$INDEX_SEGMENTS list_blob_id 0:f RDB$INDEX_6 - + REL_NAME RDB$INDICES list_blob_id 0:10 RDB$INDEX_5:RDB$INDEX_31:RDB$INDEX_41 - + REL_NAME RDB$PACKAGES list_blob_id 0:11 RDB$INDEX_47 - + REL_NAME RDB$PROCEDURES list_blob_id 0:12 RDB$INDEX_21:RDB$INDEX_22 - + REL_NAME RDB$PROCEDURE_PARAMETERS list_blob_id 0:13 RDB$INDEX_18:RDB$INDEX_48:RDB$INDEX_50 - + REL_NAME RDB$REF_CONSTRAINTS list_blob_id 0:14 RDB$INDEX_13 - + REL_NAME RDB$RELATIONS list_blob_id 0:15 RDB$INDEX_0:RDB$INDEX_1 - + REL_NAME RDB$RELATION_CONSTRAINTS list_blob_id 0:16 RDB$INDEX_12:RDB$INDEX_42:RDB$INDEX_43 - + REL_NAME RDB$RELATION_FIELDS list_blob_id 0:17 RDB$INDEX_3:RDB$INDEX_4:RDB$INDEX_15 - + REL_NAME RDB$ROLES list_blob_id 0:18 RDB$INDEX_39 - + REL_NAME RDB$SECURITY_CLASSES list_blob_id 0:19 RDB$INDEX_7 - + REL_NAME RDB$TRANSACTIONS list_blob_id 0:1a RDB$INDEX_32 - + REL_NAME RDB$TRIGGERS list_blob_id 0:1b RDB$INDEX_8:RDB$INDEX_38 - + REL_NAME RDB$TRIGGER_MESSAGES list_blob_id 0:1c RDB$INDEX_35 - + REL_NAME RDB$TYPES list_blob_id 0:1d RDB$INDEX_37 - + REL_NAME RDB$USER_PRIVILEGES list_blob_id 0:1e RDB$INDEX_29:RDB$INDEX_30 - + REL_NAME 
RDB$VIEW_RELATIONS list_blob_id 0:1f RDB$INDEX_33:RDB$INDEX_34 """ -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/list/test_03.py b/tests/functional/intfunc/list/test_03.py index 16f32b45..d126501e 100644 --- a/tests/functional/intfunc/list/test_03.py +++ b/tests/functional/intfunc/list/test_03.py @@ -1,41 +1,29 @@ #coding:utf-8 -# -# id: functional.intfunc.list.03 -# title: List function with distinct option -# decription: -# tracker_id: CORE-964 -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.list.list_03 + +""" +ID: intfunc.list-03 +ISSUE: 1367 +TITLE: List function with distinct option +DESCRIPTION: +JIRA: CORE-964 +FBTEST: functional.intfunc.list.03 +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -# test_script_1 -#--- -# c = db_conn.cursor() -# c.execute("SELECT RDB$SYSTEM_FLAG, LIST(DISTINCT TRIM(RDB$OWNER_NAME)) FROM RDB$RELATIONS WHERE RDB$SYSTEM_FLAG=1 GROUP BY 1;") -# -# printData(c) -#--- -act_1 = python_act('db_1', substitutions=substitutions_1) - -expected_stdout_1 = """RDB$SYSTEM_FLAG LIST ---------------- ---- -1 SYSDBA""" - -@pytest.mark.version('>=2.1') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") +test_script = """ +SELECT RDB$SYSTEM_FLAG, LIST(DISTINCT TRIM(RDB$OWNER_NAME)) FROM RDB$RELATIONS WHERE RDB$SYSTEM_FLAG=1 GROUP BY 1; +""" +act = python_act('db') +@pytest.mark.version('>=3') +def test_1(act: Action): + with act.db.connect() as con: + with con.cursor() as c: + c.execute("SELECT RDB$SYSTEM_FLAG, LIST(DISTINCT TRIM(RDB$OWNER_NAME)) FROM RDB$RELATIONS WHERE RDB$SYSTEM_FLAG=1 GROUP BY 1") + result = c.fetchall() + assert result == [(1, 'SYSDBA')] diff --git a/tests/functional/intfunc/math/test_abs_01.py b/tests/functional/intfunc/math/test_abs_01.py index dc375a5f..948478b0 100644 --- a/tests/functional/intfunc/math/test_abs_01.py +++ b/tests/functional/intfunc/math/test_abs_01.py @@ -1,36 +1,27 @@ #coding:utf-8 -# -# id: functional.intfunc.math.abs_01 -# title: New Built-in Functions, Firebird 2.1 : ABS( ) -# decription: test of ABS( ) function Returns the absolute value of a number. -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.math.abs_01 + +""" +ID: intfunc.math.abs +TITLE: ABS( ) +DESCRIPTION: Test of ABS( ) function returns the absolute value of a number. 
+FBTEST: functional.intfunc.math.abs_01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +db = db_factory() -substitutions_1 = [] +act = isql_act('db', "select ABS( -1 ) from rdb$database;") -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """select ABS( -1 ) from rdb$database;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ ABS +expected_stdout = """ +ABS ===================== -1""" - -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +1 +""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/math/test_acos_01.py b/tests/functional/intfunc/math/test_acos_01.py index 6fdcab4b..21281666 100644 --- a/tests/functional/intfunc/math/test_acos_01.py +++ b/tests/functional/intfunc/math/test_acos_01.py @@ -1,40 +1,29 @@ #coding:utf-8 -# -# id: functional.intfunc.math.acos_01 -# title: New Built-in Functions, Firebird 2.1 : ACOS( ) -# decription: test of ACOS -# -# Returns the arc cosine of a number. Argument to ACOS must be in the range -1 to 1. Returns a value in the range 0 to PI. er. -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.math.acos_01 + +""" +ID: intfunc.math.acos +TITLE: ACOS( ) +DESCRIPTION: + Returns the arc cosine of a number. The argument to ACOS must be in the range -1 to 1. + Returns a value in the range 0 to PI. +FBTEST: functional.intfunc.math.acos_01 +""" import pytest from firebird.qa import db_factory, isql_act, Action -# version: 2.1 -# resources: None +db = db_factory() -substitutions_1 = [] +act = isql_act('db', "select cast( ACOS( 1 ) AS DECIMAL(18,15)) from rdb$database;") -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """select cast( ACOS( 1 ) AS DECIMAL(18,15)) from rdb$database;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ +expected_stdout = """ CAST ===================== 0.000000000000000 """ -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/math/test_asin_01.py b/tests/functional/intfunc/math/test_asin_01.py index f7b77892..e229101a 100644 --- a/tests/functional/intfunc/math/test_asin_01.py +++ b/tests/functional/intfunc/math/test_asin_01.py @@ -1,38 +1,28 @@ #coding:utf-8 -# -# id: functional.intfunc.math.asin_01 -# title: New Built-in Functions, Firebird 2.1 : ASIN( ) -# decription: test of ASIN -# -# Returns the arc sine of a number. The argument to ASIN must be in the range -1 to 1. It returns a result in the range -PI/2 to PI/2.
+FBTEST: functional.intfunc.math.asin_01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +db = db_factory() -substitutions_1 = [] +act = isql_act('db', "select asin( 1 ) from rdb$database;") -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """select asin( 1 ) from rdb$database;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ ASIN +expected_stdout = """ +ASIN ======================= 1.570796326794897""" -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/math/test_atan2_01.py b/tests/functional/intfunc/math/test_atan2_01.py index 69134ba1..6d4e9e3f 100644 --- a/tests/functional/intfunc/math/test_atan2_01.py +++ b/tests/functional/intfunc/math/test_atan2_01.py @@ -1,39 +1,29 @@ #coding:utf-8 -# -# id: functional.intfunc.math.atan2_01 -# title: New Built-in Functions, Firebird 2.1 : ATAN2( , ) -# decription: test of ATAN2 -# -# Returns the arc tangent of the first number / the second number. Returns a value in the range -PI to PI. -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.math.atan2_01 + +""" +ID: intfunc.math.atan2 +TITLE: ATAN2( , ) +DESCRIPTION: + Returns the arc tangent of the first number / the second number. + Returns a value in the range -PI to PI. +FBTEST: functional.intfunc.math.atan2_01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +db = db_factory() -substitutions_1 = [] +act = isql_act('db', "select ATAN2( 1, 1) from rdb$database;") -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """select ATAN2( 1, 1) from rdb$database;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ ATAN2 +expected_stdout = """ +ATAN2 ======================= 0.7853981633974483 """ -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/math/test_atan_01.py b/tests/functional/intfunc/math/test_atan_01.py index ab157e92..339c2bb7 100644 --- a/tests/functional/intfunc/math/test_atan_01.py +++ b/tests/functional/intfunc/math/test_atan_01.py @@ -1,39 +1,28 @@ #coding:utf-8 -# -# id: functional.intfunc.math.atan_01 -# title: New Built-in Functions, Firebird 2.1 : ATAN( ) -# decription: test of ATAN -# -# Returns the arc tangent of a number. Returns a value in the range -PI/2 to PI/2. -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.math.atan_01 + +""" +ID: intfunc.math.atan +TITLE: ATAN( ) +DESCRIPTION: + Returns the arc tangent of a number. Returns a value in the range -PI/2 to PI/2. 
+FBTEST: functional.intfunc.math.atan_01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +db = db_factory() -substitutions_1 = [] +act = isql_act('db', "select ATAN( 1 ) from rdb$database;") -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """select ATAN( 1 ) from rdb$database;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ ATAN +expected_stdout = """ +ATAN ======================= 0.7853981633974483 """ -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/math/test_ceil_01.py b/tests/functional/intfunc/math/test_ceil_01.py index 96e8813a..97611af9 100644 --- a/tests/functional/intfunc/math/test_ceil_01.py +++ b/tests/functional/intfunc/math/test_ceil_01.py @@ -1,33 +1,26 @@ #coding:utf-8 -# -# id: functional.intfunc.math.ceil_01 -# title: New Built-in Functions, Firebird 2.1 : CEIL( ) -# decription: test of CEIL -# Returns a value representing the smallest integer that is greater than or equal to the input argument. -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.math.ceil_01 + +""" +ID: intfunc.math.ceil +TITLE: CEIL( ) +DESCRIPTION: + Returns a value representing the smallest integer that is greater than or equal to the input argument. +FBTEST: functional.intfunc.math.ceil_01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """select CEIL( 2.1) from rdb$database; +test_script = """select CEIL( 2.1) from rdb$database; select CEIL( -2.1) from rdb$database; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ CEIL +expected_stdout = """ +CEIL ===================== 3 CEIL @@ -38,9 +31,8 @@ expected_stdout_1 = """ CEIL """ -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/math/test_cos_01.py b/tests/functional/intfunc/math/test_cos_01.py index 3b99dcb3..8d9414e9 100644 --- a/tests/functional/intfunc/math/test_cos_01.py +++ b/tests/functional/intfunc/math/test_cos_01.py @@ -1,46 +1,36 @@ #coding:utf-8 -# -# id: functional.intfunc.math.cos_01 -# title: New Built-in Functions, Firebird 2.1 : COS( ) -# decription: test of COS -# Returns the cosine of a number. The angle is specified in radians and returns a value in the range -1 to 1. -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.math.cos_01 + +""" +ID: intfunc.math.cos +TITLE: COS( ) +DESCRIPTION: + Returns the cosine of a number. The angle is specified in radians and returns a value in the range -1 to 1. 
+FBTEST: functional.intfunc.math.cos_01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """select COS( 14) from rdb$database; +test_script = """select COS( 14) from rdb$database; select COS( 0) from rdb$database; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ COS - ======================= - 0.1367372182078336 - COS - ======================= - 1.000000000000000 - +act = isql_act('db', test_script) +expected_stdout = """ +COS +======================= +0.1367372182078336 +COS +======================= +1.000000000000000 """ -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/math/test_cosh_01.py b/tests/functional/intfunc/math/test_cosh_01.py index a69f28cb..4ea21690 100644 --- a/tests/functional/intfunc/math/test_cosh_01.py +++ b/tests/functional/intfunc/math/test_cosh_01.py @@ -1,33 +1,25 @@ #coding:utf-8 -# -# id: functional.intfunc.math.cosh_01 -# title: New Built-in Functions, Firebird 2.1 : COSH( ) -# decription: test of COSH -# Returns the hyperbolic cosine of a number. -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.math.cosh_01 + +""" +ID: intfunc.math.cosh +TITLE: COSH( ) +DESCRIPTION: Returns the hyperbolic cosine of a number. 
+FBTEST: functional.intfunc.math.cosh_01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """select COSH( 1) from rdb$database; +test_script = """select COSH( 1) from rdb$database; select COSH( 0) from rdb$database; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ COSH +expected_stdout = """ + COSH ======================= 1.543080634815244 @@ -35,14 +27,10 @@ expected_stdout_1 = """ COSH COSH ======================= 1.000000000000000 - - - """ -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/math/test_cot_01.py b/tests/functional/intfunc/math/test_cot_01.py index 4dd57e7d..5aedff57 100644 --- a/tests/functional/intfunc/math/test_cot_01.py +++ b/tests/functional/intfunc/math/test_cot_01.py @@ -1,38 +1,27 @@ #coding:utf-8 -# -# id: functional.intfunc.math.cot_01 -# title: test de la fonction cot -# decription: returns 1 / tan(argument) -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.math.cot_01 + +""" +ID: intfunc.math.cot +TITLE: COT( ) +DESCRIPTION: returns 1 / tan(argument) +FBTEST: functional.intfunc.math.cot_01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +db = db_factory() -substitutions_1 = [] +act = isql_act('db', "select cast(COT(1) AS DECIMAL(18,15)) from rdb$database;") -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """select cast(COT(1) AS DECIMAL(18,15)) from rdb$database;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ +expected_stdout = """ CAST ===================== 0.642092615934331 """ -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/math/test_exp_01.py b/tests/functional/intfunc/math/test_exp_01.py index c1be8395..9d52609b 100644 --- a/tests/functional/intfunc/math/test_exp_01.py +++ b/tests/functional/intfunc/math/test_exp_01.py @@ -1,37 +1,27 @@ #coding:utf-8 -# -# id: functional.intfunc.math.exp_01 -# title: test for EXP( ) -# decription: Returns the exponential e to the argument. -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.math.exp_01 + +""" +ID: intfunc.math.exp +TITLE: EXP( ) +DESCRIPTION: Returns the exponential e to the argument. 
+FBTEST: functional.intfunc.math.exp_01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +db = db_factory() -substitutions_1 = [] +act = isql_act('db', "select EXP(3) from rdb$database;") -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ -select EXP( 3) from rdb$database;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ EXP - ======================= -20.08553692318767""" - -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """ +EXP +======================= +20.08553692318767 +""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/math/test_floor_01.py b/tests/functional/intfunc/math/test_floor_01.py index 5b4ac42c..f1b86aed 100644 --- a/tests/functional/intfunc/math/test_floor_01.py +++ b/tests/functional/intfunc/math/test_floor_01.py @@ -1,42 +1,36 @@ #coding:utf-8 -# -# id: functional.intfunc.math.floor_01 -# title: test for FLOOR -# decription: Returns a value representing the largest integer that is less than or equal to the input argument -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.math.floor_01 + +""" +ID: intfunc.math.floor +TITLE: FLOOR( ) +DESCRIPTION: + Returns a value representing the largest integer that is less than or equal to the input argument +FBTEST: functional.intfunc.math.floor_01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """select FLOOR(2.1) from rdb$database; +test_script = """select FLOOR(2.1) from rdb$database; select FLOOR(-4.4) from rdb$database;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ FLOOR - ===================== - 2 +expected_stdout = """ +FLOOR +===================== +2 - FLOOR - ===================== --5""" - -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +FLOOR +===================== +-5 +""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/math/test_ln_01.py b/tests/functional/intfunc/math/test_ln_01.py index 7349e68d..0b53bd40 100644 --- a/tests/functional/intfunc/math/test_ln_01.py +++ b/tests/functional/intfunc/math/test_ln_01.py @@ -1,40 +1,27 @@ #coding:utf-8 -# -# id: functional.intfunc.math.ln_01 -# title: test for LN function -# decription: LN( ) -# Returns the natural logarithm of a number. 
-# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.math.ln_01 - -import pytest -from firebird.qa import db_factory, isql_act, Action - -# version: 2.1 -# resources: None - -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ - select ln(5) from rdb$database; +""" +ID: intfunc.math.ln +TITLE: LN( ) +DESCRIPTION: Returns the natural logarithm of a number. +FBTEST: functional.intfunc.math.ln_01 """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +import pytest +from firebird.qa import * -expected_stdout_1 = """ LN - ======================= -1.609437912434100""" +db = db_factory() -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +act = isql_act('db', "select ln(5) from rdb$database;") +expected_stdout = """ +LN +======================= +1.609437912434100 +""" + +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/math/test_log10_01.py b/tests/functional/intfunc/math/test_log10_01.py index 593ede02..a0504367 100644 --- a/tests/functional/intfunc/math/test_log10_01.py +++ b/tests/functional/intfunc/math/test_log10_01.py @@ -1,38 +1,27 @@ #coding:utf-8 -# -# id: functional.intfunc.math.log10_01 -# title: test for LOG10 function -# decription: LOG10( ) -# -# Returns the logarithm base ten of a number. -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.math.log10_01 + +""" +ID: intfunc.math.log10 +TITLE: LOG10( ) +DESCRIPTION: Returns the logarithm base ten of a number. +FBTEST: functional.intfunc.math.log10_01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +db = db_factory() -substitutions_1 = [] +act = isql_act('db', "select log10(6) from rdb$database;") -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """select log10(6) from rdb$database;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ LOG10 - ======================= -0.7781512503836436""" - -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """ +LOG10 +======================= +0.7781512503836436 +""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/math/test_log_01.py b/tests/functional/intfunc/math/test_log_01.py index 51f91f7c..7124b693 100644 --- a/tests/functional/intfunc/math/test_log_01.py +++ b/tests/functional/intfunc/math/test_log_01.py @@ -1,39 +1,27 @@ #coding:utf-8 -# -# id: functional.intfunc.math.log_01 -# title: test for LOG function -# decription: LOG( , ) -# -# returns the logarithm base x of y. -# -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.math.log_01 + +""" +ID: intfunc.math.log +TITLE: LOG( , ) +DESCRIPTION: Returns the logarithm base x of y. 
+FBTEST: functional.intfunc.math.log_01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +db = db_factory() -substitutions_1 = [] +act = isql_act('db', "select log(6, 10) from rdb$database;") -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """select log(6, 10) from rdb$database;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ LOG - ======================= -1.285097208938469""" - -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """ +LOG +======================= +1.285097208938469 +""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/math/test_maxvalue_01.py b/tests/functional/intfunc/math/test_maxvalue_01.py index 7c1e7bec..4ab52683 100644 --- a/tests/functional/intfunc/math/test_maxvalue_01.py +++ b/tests/functional/intfunc/math/test_maxvalue_01.py @@ -1,39 +1,27 @@ #coding:utf-8 -# -# id: functional.intfunc.math.maxvalue_01 -# title: test for MAXVALUE function -# decription: MAXVALUE( [, ...] ) -# -# Returns the maximum value of a list of values. -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.math.maxvalue_01 -import pytest -from firebird.qa import db_factory, isql_act, Action - -# version: 2.1 -# resources: None - -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ select maxvalue(54, 87, 10) from rdb$database; +""" +ID: intfunc.math.maxvalue +TITLE: MAXVALUE( [, ...] ) +DESCRIPTION: Returns the maximum value of a list of values. +FBTEST: functional.intfunc.math.maxvalue_01 """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +import pytest +from firebird.qa import * -expected_stdout_1 = """ MAXVALUE - ============ -87""" +db = db_factory() -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +act = isql_act('db', "select maxvalue(54, 87, 10) from rdb$database;") +expected_stdout = """ +MAXVALUE +============ +87 +""" + +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/math/test_minvalue_01.py b/tests/functional/intfunc/math/test_minvalue_01.py index bf0107b1..3757930f 100644 --- a/tests/functional/intfunc/math/test_minvalue_01.py +++ b/tests/functional/intfunc/math/test_minvalue_01.py @@ -1,40 +1,27 @@ #coding:utf-8 -# -# id: functional.intfunc.math.minvalue_01 -# title: test for MINVALUE function -# decription: MINVALUE( [, ... ) -# -# Returns the minimun value of a list of values. -# -# -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.math.minvalue_01 + +""" +ID: intfunc.math.minvalue +TITLE: MINVALUE( [, ...] ) +DESCRIPTION: Returns the minimum value of a list of values.
+FBTEST: functional.intfunc.math.minvalue_01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +db = db_factory() -substitutions_1 = [] +act = isql_act('db', "select minvalue(9, 7, 10) from rdb$database;") -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """select minvalue(9, 7, 10) from rdb$database;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ MINVALUE - ============ -7""" - -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """ +MINVALUE +============ +7 +""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/math/test_mod_01.py b/tests/functional/intfunc/math/test_mod_01.py index 08023157..afb9d35c 100644 --- a/tests/functional/intfunc/math/test_mod_01.py +++ b/tests/functional/intfunc/math/test_mod_01.py @@ -1,39 +1,27 @@ #coding:utf-8 -# -# id: functional.intfunc.math.mod_01 -# title: test for MOD function -# decription: -# MOD( , ) -# -# Modulo: MOD(X, Y) returns the remainder part of the division of X by Y. -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.math.mod_01 + +""" +ID: intfunc.math.mod +TITLE: MOD( , ) +DESCRIPTION: Modulo: MOD(X, Y) returns the remainder part of the division of X by Y. +FBTEST: functional.intfunc.math.mod_01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +db = db_factory() -substitutions_1 = [] +act = isql_act('db', "select MOD(11,10) from rdb$database;") -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """select MOD(11,10) from rdb$database;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ MOD - ============ -1""" - -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """ +MOD +============ +1 +""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/math/test_pi_01.py b/tests/functional/intfunc/math/test_pi_01.py index 5d6eaa85..88b11e30 100644 --- a/tests/functional/intfunc/math/test_pi_01.py +++ b/tests/functional/intfunc/math/test_pi_01.py @@ -1,38 +1,27 @@ #coding:utf-8 -# -# id: functional.intfunc.math.pi_01 -# title: test for PI function -# decription: PI() -# -# Returns the PI constant (3.1459...). -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.math.pi_01 + +""" +ID: intfunc.math.pi +TITLE: PI() +DESCRIPTION: Returns the PI constant (3.14159...).
+FBTEST: functional.intfunc.math.pi_01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +db = db_factory() -substitutions_1 = [] +act = isql_act('db', "select PI() from rdb$database;") -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """select PI() from rdb$database;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ PI - ======================= -3.141592653589793""" - -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """ +PI +======================= +3.141592653589793 +""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/math/test_power_01.py b/tests/functional/intfunc/math/test_power_01.py index 96ac5c09..571a3819 100644 --- a/tests/functional/intfunc/math/test_power_01.py +++ b/tests/functional/intfunc/math/test_power_01.py @@ -1,39 +1,27 @@ #coding:utf-8 -# -# id: functional.intfunc.math.power_01 -# title: test for POWER function -# decription: POWER( , ) -# -# POWER(X, Y) returns X to the power of Y. -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.math.power_01 -import pytest -from firebird.qa import db_factory, isql_act, Action - -# version: 2.1 -# resources: None - -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ select power(2, 3) from rdb$database; +""" +ID: intfunc.math.power +TITLE: POWER( , ) +DESCRIPTION: POWER(X, Y) returns X to the power of Y. +FBTEST: functional.intfunc.math.power_01 """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +import pytest +from firebird.qa import * -expected_stdout_1 = """ POWER - ======================= -8.000000000000000""" +db = db_factory() -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +act = isql_act('db', "select power(2, 3) from rdb$database;") +expected_stdout = """ +POWER +======================= +8.000000000000000 +""" + +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/math/test_rand_01.py b/tests/functional/intfunc/math/test_rand_01.py index 76af6e5e..a6976939 100644 --- a/tests/functional/intfunc/math/test_rand_01.py +++ b/tests/functional/intfunc/math/test_rand_01.py @@ -1,28 +1,18 @@ #coding:utf-8 -# -# id: functional.intfunc.math.rand_01 -# title: test for RAND function -# decription: -# RAND() -# Returns a random number between 0 and 1. -# tracker_id: -# min_versions: [] -# versions: 3.0 -# qmid: functional.intfunc.math.rand_01 + +""" +ID: intfunc.math.rand +TITLE: RAND() +DESCRIPTION: Returns a random number between 0 and 1. 
+FBTEST: functional.intfunc.math.rand_01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """create table test( id char(30) ); +test_script = """create table test( id char(30) ); --on verrifie qu'il y en a pas deux identique insert into test values(CAST(rand() AS VARCHAR(255)) ); @@ -41,9 +31,10 @@ insert into test values(CAST(rand() AS VARCHAR(255)) ); select count(id) from test group by id;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ COUNT +expected_stdout = """ +COUNT ===================== 1 1 @@ -60,8 +51,7 @@ expected_stdout_1 = """ COUNT """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/math/test_round_01.py b/tests/functional/intfunc/math/test_round_01.py index 35c795d3..178316d6 100644 --- a/tests/functional/intfunc/math/test_round_01.py +++ b/tests/functional/intfunc/math/test_round_01.py @@ -1,40 +1,27 @@ #coding:utf-8 -# -# id: functional.intfunc.math.round_01 -# title: test for ROUND function -# decription: -# -# ROUND( , ) -# -# Returns a number rounded to the specified scale. -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.math.round_01 + +""" +ID: intfunc.math.round +TITLE: ROUND( , ) +DESCRIPTION: Returns a number rounded to the specified scale. +FBTEST: functional.intfunc.math.round_01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +db = db_factory() -substitutions_1 = [] +act = isql_act('db', "select ROUND(5.7778, 3) from rdb$database;") -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """select ROUND(5.7778, 3) from rdb$database;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ ROUND - ===================== -5.7780""" - -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """ +ROUND +===================== +5.7780 +""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/math/test_sign_01.py b/tests/functional/intfunc/math/test_sign_01.py index d72fcc1a..461a9426 100644 --- a/tests/functional/intfunc/math/test_sign_01.py +++ b/tests/functional/intfunc/math/test_sign_01.py @@ -1,51 +1,42 @@ #coding:utf-8 -# -# id: functional.intfunc.math.sign_01 -# title: test for SIGN function -# decription: -# SIGN( ) -# -# Returns 1, 0, or -1 depending on whether the input value is positive, zero or negative, respectively. 
-# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.math.sign_01 + +""" +ID: intfunc.math.sign +TITLE: SIGN( ) +DESCRIPTION: + Returns 1, 0, or -1 depending on whether the input value is positive, zero or negative, respectively. +FBTEST: functional.intfunc.math.sign_01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +db = db_factory() -substitutions_1 = [] +test_script = """ +select SIGN(-9) from rdb$database; +select SIGN(8) from rdb$database; +select SIGN(0) from rdb$database; +""" -init_script_1 = """""" +act = isql_act('db', test_script) -db_1 = db_factory(sql_dialect=3, init=init_script_1) +expected_stdout = """ +SIGN +======= + -1 -test_script_1 = """ select SIGN(-9) from rdb$database; - select SIGN(8) from rdb$database; -select SIGN(0) from rdb$database;""" +SIGN +======= + 1 -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ SIGN - ======= - -1 - - - SIGN - ======= - 1 - - - SIGN - ======= -0""" - -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +SIGN +======= +0 +""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/math/test_sin_01.py b/tests/functional/intfunc/math/test_sin_01.py index 1fd8bdc4..17b60a4b 100644 --- a/tests/functional/intfunc/math/test_sin_01.py +++ b/tests/functional/intfunc/math/test_sin_01.py @@ -1,43 +1,27 @@ #coding:utf-8 -# -# id: functional.intfunc.math.sin_01 -# title: test for SIN function -# decription: -# -# SIN( ) -# -# -# Returns the sine of an input number that is expressed in radians. -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.math.sin_01 + +""" +ID: intfunc.math.sin +TITLE: SIN( ) +DESCRIPTION: Returns the sine of an input number that is expressed in radians. +FBTEST: functional.intfunc.math.sin_01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +db = db_factory() -substitutions_1 = [] +act = isql_act('db', "select CAST(SIN(12) AS DECIMAL(18,15)) from rdb$database;") -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """select CAST(SIN(12) AS DECIMAL(18,15)) from rdb$database;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ +expected_stdout = """ CAST ===================== -0.536572918000435 """ -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/math/test_sinh_01.py b/tests/functional/intfunc/math/test_sinh_01.py index 63094dc2..4d967fd1 100644 --- a/tests/functional/intfunc/math/test_sinh_01.py +++ b/tests/functional/intfunc/math/test_sinh_01.py @@ -1,41 +1,27 @@ #coding:utf-8 -# -# id: functional.intfunc.math.sinh_01 -# title: test for SINH function -# decription: -# -# SINH( ) -# -# -# Returns the hyperbolic sine of a number. 
-# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.math.sinh_01 + +""" +ID: intfunc.math.sinh +TITLE: SINH( ) +DESCRIPTION: Returns the hyperbolic sine of a number. +FBTEST: functional.intfunc.math.sinh_01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +db = db_factory() -substitutions_1 = [] +act = isql_act('db', "select SINH(4) from rdb$database;") -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """select SINH(4) from rdb$database;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ SINH - ======================= -27.28991719712775""" - -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """ +SINH +======================= +27.28991719712775 +""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/math/test_sqrt_01.py b/tests/functional/intfunc/math/test_sqrt_01.py index d613cc77..c3f4747e 100644 --- a/tests/functional/intfunc/math/test_sqrt_01.py +++ b/tests/functional/intfunc/math/test_sqrt_01.py @@ -1,39 +1,27 @@ #coding:utf-8 -# -# id: functional.intfunc.math.sqrt_01 -# title: test for SQRT function -# decription: -# SQRT( ) -# -# Returns the square root of a number. -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.math.sqrt_01 + +""" +ID: intfunc.math.sqrt +TITLE: SQRT( ) +DESCRIPTION: Returns the square root of a number. +FBTEST: functional.intfunc.math.sqrt_01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +db = db_factory() -substitutions_1 = [] +act = isql_act('db', "select SQRT(4) from rdb$database;") -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """select SQRT(4) from rdb$database;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ SQRT - ======================= -2.000000000000000""" - -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """ +SQRT +======================= +2.000000000000000 +""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/math/test_tan_01.py b/tests/functional/intfunc/math/test_tan_01.py index 682abdc4..c96aee78 100644 --- a/tests/functional/intfunc/math/test_tan_01.py +++ b/tests/functional/intfunc/math/test_tan_01.py @@ -1,39 +1,27 @@ #coding:utf-8 -# -# id: functional.intfunc.math.tan_01 -# title: test for TAN function -# decription: -# TAN( ) -# -# Returns the tangent of an input number that is expressed in radians. -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.math.tan_01 + +""" +ID: intfunc.math.tan +TITLE: TAN( ) +DESCRIPTION: Returns the tangent of an input number that is expressed in radians. 
+FBTEST: functional.intfunc.math.tan_01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +db = db_factory() -substitutions_1 = [] +act = isql_act('db', "select TAN(43) from rdb$database;") -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """select TAN(43) from rdb$database;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ TAN - ======================= --1.498387338855171""" - -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """ +TAN +======================= +-1.498387338855171 +""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/math/test_tanh_01.py b/tests/functional/intfunc/math/test_tanh_01.py index bb0b491d..6454885e 100644 --- a/tests/functional/intfunc/math/test_tanh_01.py +++ b/tests/functional/intfunc/math/test_tanh_01.py @@ -1,40 +1,29 @@ #coding:utf-8 -# -# id: functional.intfunc.math.tanh_01 -# title: test for TANH function -# decription: -# TANH( ) -# -# Returns the hyperbolic tangent of a number. -# select tanh(x) from y; -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.math.tanh_01 + +""" +ID: intfunc.math.tanh +TITLE: TANH( ) +DESCRIPTION: + Returns the hyperbolic tangent of a number. + select tanh(x) from y; +FBTEST: functional.intfunc.math.tanh_01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +db = db_factory() -substitutions_1 = [] +act = isql_act('db', "select TANH(5) from rdb$database;") -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """select TANH(5) from rdb$database;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ TANH - ======================= -0.9999092042625951""" - -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """ +TANH +======================= +0.9999092042625951 +""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/math/test_trunc_01.py b/tests/functional/intfunc/math/test_trunc_01.py index 3138cc13..f6c77710 100644 --- a/tests/functional/intfunc/math/test_trunc_01.py +++ b/tests/functional/intfunc/math/test_trunc_01.py @@ -1,46 +1,37 @@ #coding:utf-8 -# -# id: functional.intfunc.math.trunc_01 -# title: test for TRUNC function -# decription: TRUNC( [, ] ) -# -# Returns the integral part (up to the specified scale) of a number. -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.string.trunc_01 + +""" +ID: intfunc.math.trunc +TITLE: TRUNC( [, ] ) +DESCRIPTION: Returns the integral part (up to the specified scale) of a number. 
+FBTEST: functional.intfunc.math.trunc_01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +db = db_factory() -substitutions_1 = [] +test_script = """ +select trunc(-2.8), trunc(2.8) from rdb$database; -- returns -2, 2 +select trunc(987.65, 1), trunc(987.65, -1) from rdb$database; -- returns 987.60, 980.00 +""" -init_script_1 = """""" +act = isql_act('db', test_script) -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """select trunc(-2.8), trunc(2.8) - from rdb$database; -- returns -2, 2 -select trunc(987.65, 1), trunc(987.65, -1) -from rdb$database; -- returns 987.60, 980.00""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ TRUNC TRUNC - ===================== ===================== - -2 2 +expected_stdout = """ + TRUNC TRUNC +===================== ===================== + -2 2 - TRUNC TRUNC - ===================== ===================== -987.60 980.00""" - -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout + TRUNC TRUNC +===================== ===================== +987.60 980.00 +""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/misc/test_decode_01.py b/tests/functional/intfunc/misc/test_decode_01.py index 14f2af05..7c106be5 100644 --- a/tests/functional/intfunc/misc/test_decode_01.py +++ b/tests/functional/intfunc/misc/test_decode_01.py @@ -1,26 +1,18 @@ #coding:utf-8 -# -# id: functional.intfunc.misc.decode_01 -# title: test de la fonction decode -# decription: decode is a shortcut for a case when else expreession. -# tracker_id: -# min_versions: [] -# versions: 3.0 -# qmid: functional.intfunc.misc.decode_01 + +""" +ID: intfunc.misc.decode +TITLE: DECODE() +DESCRIPTION: DECODE is a shortcut for a case when else expression.
+FBTEST: functional.intfunc.misc.decode_01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """SET ECHO OFF; +test_script = """SET ECHO OFF; CREATE TABLE TMPTEST( id INTEGER ); insert into TMPTEST(id) @@ -37,9 +29,10 @@ values(5); -- count doit etre egal a 0 dans ce cas select decode(id,1,'un',2,'deux',3,'trois','plus grand') from TMPTEST;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """DECODE +expected_stdout = """ +DECODE ========== un deux @@ -49,8 +42,7 @@ plus grand """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/misc/test_gen_uuid_01.py b/tests/functional/intfunc/misc/test_gen_uuid_01.py index 7e9b38b3..9f67ccc1 100644 --- a/tests/functional/intfunc/misc/test_gen_uuid_01.py +++ b/tests/functional/intfunc/misc/test_gen_uuid_01.py @@ -1,26 +1,18 @@ #coding:utf-8 -# -# id: functional.intfunc.misc.gen_uuid_01 -# title: test for GEN_UUID() -# decription: Returns a universal unique number. -# tracker_id: -# min_versions: [] -# versions: 3.0 -# qmid: functional.intfunc.misc.gen_uuid_01 + +""" +ID: intfunc.misc.gen_uuid +TITLE: GEN_UUID() +DESCRIPTION: Returns a universal unique number. +FBTEST: functional.intfunc.misc.gen_uuid_01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ create table test( id char(30) ); --on verrifie qu'il y en a pas deux identique @@ -40,9 +32,10 @@ insert into test values(CAST(GEN_UUID() AS VARCHAR(255)) ); select count(id) from test group by id;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ COUNT +expected_stdout = """ +COUNT ===================== 1 1 @@ -59,8 +52,7 @@ expected_stdout_1 = """ COUNT """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/misc/test_hash_01.py b/tests/functional/intfunc/misc/test_hash_01.py index f319b633..27f3d1a2 100644 --- a/tests/functional/intfunc/misc/test_hash_01.py +++ b/tests/functional/intfunc/misc/test_hash_01.py @@ -1,39 +1,27 @@ #coding:utf-8 -# -# id: functional.intfunc.misc.hash_01 -# title: test for HASH -# decription: Returns a HASH of a value. -# -# HASH( ) -# -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.misc.hash_01 + +""" +ID: intfunc.misc.hash +TITLE: HASH( ) +DESCRIPTION: Returns a HASH of a value. 
+FBTEST: functional.intfunc.misc.hash_01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +db = db_factory() -substitutions_1 = [] +act = isql_act('db', "select hash('toto') from rdb$database;") -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """select hash('toto') from rdb$database;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ HASH - ===================== -505519""" - -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """ +HASH +===================== +505519 +""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/string/test_ascii_01.py b/tests/functional/intfunc/string/test_ascii_01.py index cd2d076f..f257a504 100644 --- a/tests/functional/intfunc/string/test_ascii_01.py +++ b/tests/functional/intfunc/string/test_ascii_01.py @@ -1,39 +1,29 @@ #coding:utf-8 -# -# id: functional.intfunc.string.ascii_01 -# title: New Built-in Functions, Firebird 2.1 : ASCII_CHAR( ) -# decription: test of ASCII_CHAR -# -# Returns the ASCII character with the specified code. The argument to ASCII_CHAR must be in the range 0 to 255. The result is returned in character set NONE. -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.string.ascii_char_01 + +""" +ID: intfunc.string.ascii +TITLE: ASCII_CHAR( ) +DESCRIPTION: + Returns the ASCII character with the specified code. The argument to ASCII_CHAR must be + in the range 0 to 255. The result is returned in character set NONE. +FBTEST: functional.intfunc.string.ascii_01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +db = db_factory() -substitutions_1 = [] +act = isql_act('db', "select ASCII_CHAR( 065 ) from rdb$database;") -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """select ASCII_CHAR( 065 ) from rdb$database;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ASCII_CHAR +expected_stdout = """ +ASCII_CHAR ========== A """ -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/string/test_ascii_val_01.py b/tests/functional/intfunc/string/test_ascii_val_01.py index 39283c8c..0550241d 100644 --- a/tests/functional/intfunc/string/test_ascii_val_01.py +++ b/tests/functional/intfunc/string/test_ascii_val_01.py @@ -1,88 +1,33 @@ #coding:utf-8 -# -# id: functional.intfunc.string.ascii_val_01 -# title: New Built-in Functions, Firebird 2.1 : ASCII_VAL( ) -# decription: test of ASCII_VAL -# -# Returns the ASCII code of the first character of the specified string. -# -# 1. -# -# Returns 0 if the string is empty -# 2. 
-# -# Throws an error if the first character is multi-byte -# -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.string.ascii_val_01 + +""" +ID: intfunc.string.ascii_val +TITLE: ASCII_VAL( ) +DESCRIPTION: + Returns the ASCII code of the first character of the specified string. + 1. Returns 0 if the string is empty + 2. Throws an error if the first character is multi-byte +FBTEST: functional.intfunc.string.ascii_val_01 +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None - -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(charset='UTF8', sql_dialect=3, init=init_script_1) - -# test_script_1 -#--- -# c = db_conn.cursor() -# try: -# c.execute("select ascii_val( 'A' ) from rdb$database") -# print (c.fetchall()) -# except Exception,e: -# print ("Test Failed for ascii_val( 'A' )") -# print (e) -# else: -# pass -# try: -# c.execute("select ascii_val( 'Ã' ) from rdb$database") -# print (c.fetchall()) -# except: -# pass -# else: -# print ("Test Failed for ascii_val( 'Ã' )") -# try: -# c.execute("select ascii_val(cast('A' as BLOB)) from rdb$database") -# print (c.fetchall()) -# except Exception,e: -# print ("Test Failed for ascii_val(CAST('A' AS BLOB))") -# print (e) -# else: -# pass -# try: -# c.execute("select ascii_val(NULL) from rdb$database") -# print (c.fetchall()) -# except Exception,e: -# print ("Test Failed for ascii_val(NULL)") -# print (e) -# else: -# pass -# try: -# c.execute("select ascii_val('') from rdb$database") -# print (c.fetchall()) -# except Exception,e: -# print ("Test Failed for ascii_val('')") -# print (e) -# else: -# pass -#--- -act_1 = python_act('db_1', substitutions=substitutions_1) - -expected_stdout_1 = """[(65,)] -[(65,)] -[(None,)] -[(0,)]""" - -@pytest.mark.version('>=2.1') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") +db = db_factory(charset='UTF8') +act = python_act('db') +@pytest.mark.version('>=3') +def test_1(act: Action): + with act.db.connect() as con: + with con.cursor() as c: + result = c.execute("select ascii_val('A') from rdb$database").fetchone() + assert result == (65,) + result = c.execute("select ascii_val('Ã') from rdb$database").fetchone() + assert result == (195,) + result = c.execute("select ascii_val(cast('A' as BLOB)) from rdb$database").fetchone() + assert result == (65,) + result = c.execute("select ascii_val(NULL) from rdb$database").fetchone() + assert result == (None,) + result = c.execute("select ascii_val('') from rdb$database").fetchone() + assert result == (0,) diff --git a/tests/functional/intfunc/string/test_left_01.py b/tests/functional/intfunc/string/test_left_01.py index a81211ef..8b5f208d 100644 --- a/tests/functional/intfunc/string/test_left_01.py +++ b/tests/functional/intfunc/string/test_left_01.py @@ -1,39 +1,31 @@ #coding:utf-8 -# -# id: functional.intfunc.string.left_01 -# title: test for LEFT function -# decription: Returns the substring of a specified length that appears at the start of a left-to-right string. -# -# LEFT -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.string.left_01 + +""" +ID: intfunc.string.left +TITLE: LEFT function +DESCRIPTION: + Returns the substring of a specified length that appears at the start of a left-to-right string. 
+FBTEST: functional.intfunc.string.left_01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """select left('bonjour', 3) +test_script = """select left('bonjour', 3) from rdb$database;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ LEFT - ======= -bon""" - -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """ +LEFT +======= +bon +""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/string/test_lpad_01.py b/tests/functional/intfunc/string/test_lpad_01.py index 398ace70..73648368 100644 --- a/tests/functional/intfunc/string/test_lpad_01.py +++ b/tests/functional/intfunc/string/test_lpad_01.py @@ -1,153 +1,29 @@ #coding:utf-8 -# -# id: functional.intfunc.string.lpad_01 -# title: test for LPAD function, including non-ascii characters. -# decription: -# 03-mar-2021. Re-implemented in order to have ability to run this test on Linux. -# Added tests from some COREs which have no apropriate .fbt -# Test creates table and fills it with non-ascii characters using charset = UTF8. -# Then it generates .sql script for running it in separate ISQL process. -# This script makes connection to test DB using charset = ISO8859_1 and perform needed DML. -# Result will be redirected to .log which will be opened via codecs.open(...encoding='iso-8859-1'). -# Its content will be converted to UTF8 for showing in expected_stdout. -# -# Checked on: -# * Windows: 4.0.0.2377, 3.0.8.33420 -# * Linux: 4.0.0.2377, 3.0.8.33415 -# -# tracker_id: -# min_versions: [] -# versions: 3.0 -# qmid: + +""" +ID: intfunc.string.lpad +TITLE: LPAD function, including non-ascii characters +DESCRIPTION: +NOTES: +[03.03.2021] + Re-implemented in order to have ability to run this test on Linux. + Added tests from some COREs which have no apropriate .fbt + Test creates table and fills it with non-ascii characters using charset = UTF8. + Then it generates .sql script for running it in separate ISQL process. + This script makes connection to test DB using charset = ISO8859_1 and perform needed DML. + Result will be redirected to .log which will be opened via codecs.open(...encoding='iso-8859-1'). + Its content will be converted to UTF8 for showing in expected_stdout. 
+FBTEST: functional.intfunc.string.lpad_01 +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory(charset='ISO8859_1') -substitutions_1 = [('BLOB_ID_.*', ''), ('.*After line \\d+.*', '')] +act = python_act('db', substitutions=[('BLOB_ID_.*', ''), ('.*After line \\d+.*', '')]) -init_script_1 = """""" - -db_1 = db_factory(charset='ISO8859_1', sql_dialect=3, init=init_script_1) - -# test_script_1 -#--- -# -# import os -# import codecs -# import subprocess -# import time -# -# db_conn.close() -# -# #-------------------------------------------- -# -# def flush_and_close( file_handle ): -# # https://docs.python.org/2/library/os.html#os.fsync -# # If you're starting with a Python file object f, -# # first do f.flush(), and -# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. -# global os -# -# file_handle.flush() -# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: -# # otherwise: "OSError: [Errno 9] Bad file descriptor"! -# os.fsync(file_handle.fileno()) -# file_handle.close() -# -# #-------------------------------------------- -# -# def cleanup( f_names_list ): -# global os -# for i in range(len( f_names_list )): -# if type(f_names_list[i]) == file: -# del_name = f_names_list[i].name -# elif type(f_names_list[i]) == str: -# del_name = f_names_list[i] -# else: -# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.') -# del_name = None -# -# if del_name and os.path.isfile( del_name ): -# os.remove( del_name ) -# -# #-------------------------------------------- -# # NB: do NOT include here character "­ ­ soft hyphen" -# # It can not be properly represented on linux in utf8 codepage! -# # https://jkorpela.fi/shy.html -# data = "¡¢£¤¥¦§¨©ª«¬®¯°±²³´µ¶·¸¹º»¼½¾¿ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖ×ØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõö÷øùúûüýþÿ" -# -# null_dev = os.devnull -# -# sql_txt=''' set names ISO8859_1; -# connect '%(dsn)s' user '%(user_name)s' password '%(user_password)s'; -# set list on; -# recreate table test(c varchar(32765), b blob sub_type text); -# insert into test(c, b) values( '%(data)s', '%(data)s' ); -# select lpad(c, 2 * char_length(c), b) as blob_id_01 from test; -# select lpad(b, 2 * char_length(b), c) as blob_id_02 from test; -# -# -- from CORE-2745 ("build in function LPAD result is wrong if argument is longer then length to padd parameter"): -# select lpad(c, 1, 'ÿ') as txt_lpad_03 from test; -# select lpad(b, 1, 'ÿ') as blob_id_04 from test; -# -# -- from CORE-2597: LPAD result must be varchar(1) instead of varchar(32765) in this example: -# select rpad('¿', 1, '¡') as txt_lpad_05a, rpad('À', 1, 'ÿ') as txt_lpad_05b from rdb$database; -# -# select -# lpad('÷øùúûüýþÿ',32765-char_length('÷øùúûüýþÿ'), '%(data)s') as txt_lpad_06a -# ,lpad('÷øùúûüýþÿ',32765-char_length('÷øùúûüýþÿ'), '%(data)s') as txt_lpad_06b -# from rdb$database; -# -# out %(null_dev)s; -# select lpad(c, 32765, c) from test; -- this must pass -# select lpad(c, 32766, c) from test; -- must fail: SQLSTATE = 22001 / ... 
string truncation / -expected length 32765, actual 32766 -# out; -# -# -- select char_length(lpad('', 2147483647, b)) as r_03 from test; -- <<< 152s :-) -# -# -# ''' % dict(globals(), **locals()) -# -# f_run_sql = open( os.path.join(context['temp_directory'], 'tmp_func_lpad_iso8859_1.sql'), 'w' ) -# f_run_sql.write( sql_txt.decode('utf8').encode('iso-8859-1') ) -# flush_and_close( f_run_sql ) -# # result: file tmp_func_lpad_iso8859_1.sql is encoded in iso8859_1 (aka 'latin-1') -# -# f_run_log = open( os.path.splitext(f_run_sql.name)[0]+'.log', 'w') -# f_run_err = open( os.path.splitext(f_run_sql.name)[0]+'.err', 'w') -# subprocess.call( [ context['isql_path'], '-q', '-i', f_run_sql.name ], -# stdout = f_run_log, -# stderr = f_run_err -# ) -# flush_and_close( f_run_log ) -# flush_and_close( f_run_err ) -# -# # result: output will be encoded in iso9959_1, error log must be empty. -# with codecs.open(f_run_log.name, 'r', encoding='iso-8859-1' ) as f: -# stdout_encoded_in_latin_1 = f.readlines() -# -# with codecs.open(f_run_err.name, 'r', encoding='iso-8859-1' ) as f: -# stderr_encoded_in_latin_1 = f.readlines() -# -# for i in stdout_encoded_in_latin_1: -# print( i.encode('utf8') ) -# -# # NO error must occur: -# ###################### -# for i in stderr_encoded_in_latin_1: -# print( 'EXPECTED STDERR: ', i.encode('utf8') ) -# -# # cleanup: -# ########### -# cleanup( (f_run_sql, f_run_log, f_run_err) ) -# -#--- -act_1 = python_act('db_1', substitutions=substitutions_1) - -expected_stdout_1 = """ +expected_stdout = """ ¡¢£¤¥¦§¨©ª«¬®¯°±²³´µ¶·¸¹º»¼½¾¿ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖ×ØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõö÷øùúûüýþÿ¡¢£¤¥¦§¨©ª«¬®¯°±²³´µ¶·¸¹º»¼½¾¿ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖ×ØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõö÷øùúûüýþÿ ¡¢£¤¥¦§¨©ª«¬®¯°±²³´µ¶·¸¹º»¼½¾¿ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖ×ØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõö÷øùúûüýþÿ¡¢£¤¥¦§¨©ª«¬®¯°±²³´µ¶·¸¹º»¼½¾¿ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖ×ØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõö÷øùúûüýþÿ TXT_LPAD_03 ¡ @@ -162,9 +38,121 @@ expected_stdout_1 = """ EXPECTED STDERR: -expected length 32765, actual 32766 """ +@pytest.mark.skip('FIXME: Not IMPLEMENTED') @pytest.mark.version('>=3.0') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") +# test_script_1 +#--- +# +# import os +# import codecs +# import subprocess +# import time +# +# db_conn.close() +# +# #-------------------------------------------- +# +# def flush_and_close( file_handle ): +# # https://docs.python.org/2/library/os.html#os.fsync +# # If you're starting with a Python file object f, +# # first do f.flush(), and +# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. +# global os +# +# file_handle.flush() +# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: +# # otherwise: "OSError: [Errno 9] Bad file descriptor"! 
+# os.fsync(file_handle.fileno()) +# file_handle.close() +# +# #-------------------------------------------- +# +# def cleanup( f_names_list ): +# global os +# for i in range(len( f_names_list )): +# if type(f_names_list[i]) == file: +# del_name = f_names_list[i].name +# elif type(f_names_list[i]) == str: +# del_name = f_names_list[i] +# else: +# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.') +# del_name = None +# +# if del_name and os.path.isfile( del_name ): +# os.remove( del_name ) +# +# #-------------------------------------------- +# # NB: do NOT include here character "­ ­ soft hyphen" +# # It can not be properly represented on linux in utf8 codepage! +# # https://jkorpela.fi/shy.html +# data = "¡¢£¤¥¦§¨©ª«¬®¯°±²³´µ¶·¸¹º»¼½¾¿ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖ×ØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõö÷øùúûüýþÿ" +# +# null_dev = os.devnull +# +# sql_txt=''' set names ISO8859_1; +# connect '%(dsn)s' user '%(user_name)s' password '%(user_password)s'; +# set list on; +# recreate table test(c varchar(32765), b blob sub_type text); +# insert into test(c, b) values( '%(data)s', '%(data)s' ); +# select lpad(c, 2 * char_length(c), b) as blob_id_01 from test; +# select lpad(b, 2 * char_length(b), c) as blob_id_02 from test; +# +# -- from CORE-2745 ("build in function LPAD result is wrong if argument is longer then length to padd parameter"): +# select lpad(c, 1, 'ÿ') as txt_lpad_03 from test; +# select lpad(b, 1, 'ÿ') as blob_id_04 from test; +# +# -- from CORE-2597: LPAD result must be varchar(1) instead of varchar(32765) in this example: +# select rpad('¿', 1, '¡') as txt_lpad_05a, rpad('À', 1, 'ÿ') as txt_lpad_05b from rdb$database; +# +# select +# lpad('÷øùúûüýþÿ',32765-char_length('÷øùúûüýþÿ'), '%(data)s') as txt_lpad_06a +# ,lpad('÷øùúûüýþÿ',32765-char_length('÷øùúûüýþÿ'), '%(data)s') as txt_lpad_06b +# from rdb$database; +# +# out %(null_dev)s; +# select lpad(c, 32765, c) from test; -- this must pass +# select lpad(c, 32766, c) from test; -- must fail: SQLSTATE = 22001 / ... string truncation / -expected length 32765, actual 32766 +# out; +# +# -- select char_length(lpad('', 2147483647, b)) as r_03 from test; -- <<< 152s :-) +# +# +# ''' % dict(globals(), **locals()) +# +# f_run_sql = open( os.path.join(context['temp_directory'], 'tmp_func_lpad_iso8859_1.sql'), 'w' ) +# f_run_sql.write( sql_txt.decode('utf8').encode('iso-8859-1') ) +# flush_and_close( f_run_sql ) +# # result: file tmp_func_lpad_iso8859_1.sql is encoded in iso8859_1 (aka 'latin-1') +# +# f_run_log = open( os.path.splitext(f_run_sql.name)[0]+'.log', 'w') +# f_run_err = open( os.path.splitext(f_run_sql.name)[0]+'.err', 'w') +# subprocess.call( [ context['isql_path'], '-q', '-i', f_run_sql.name ], +# stdout = f_run_log, +# stderr = f_run_err +# ) +# flush_and_close( f_run_log ) +# flush_and_close( f_run_err ) +# +# # result: output will be encoded in iso9959_1, error log must be empty. 
+# with codecs.open(f_run_log.name, 'r', encoding='iso-8859-1' ) as f: +# stdout_encoded_in_latin_1 = f.readlines() +# +# with codecs.open(f_run_err.name, 'r', encoding='iso-8859-1' ) as f: +# stderr_encoded_in_latin_1 = f.readlines() +# +# for i in stdout_encoded_in_latin_1: +# print( i.encode('utf8') ) +# +# # NO error must occur: +# ###################### +# for i in stderr_encoded_in_latin_1: +# print( 'EXPECTED STDERR: ', i.encode('utf8') ) +# +# # cleanup: +# ########### +# cleanup( (f_run_sql, f_run_log, f_run_err) ) +# +#--- diff --git a/tests/functional/intfunc/string/test_overlay_01.py b/tests/functional/intfunc/string/test_overlay_01.py index 79b22585..87f9eab9 100644 --- a/tests/functional/intfunc/string/test_overlay_01.py +++ b/tests/functional/intfunc/string/test_overlay_01.py @@ -1,38 +1,28 @@ #coding:utf-8 -# -# id: functional.intfunc.string.overlay_01 -# title: test for OVERLAY function -# decription: OVERLAY See below Returns string1 replacing the substring FROM start FOR length by string2. -# -# -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.string.overlay_01 + +""" +ID: intfunc.string.overlay +TITLE: OVERLAY function +DESCRIPTION: + Returns string1 replacing the substring FROM start FOR length by string2. +FBTEST: functional.intfunc.string.overlay_01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +db = db_factory() -substitutions_1 = [] +act = isql_act('db', "select OVERLAY( 'il fait beau dans le sud de la france' PLACING 'NORD' FROM 22 for 4 ) from rdb$database;") -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """select OVERLAY( 'il fait beau dans le sud de la france' PLACING 'NORD' FROM 22 for 4 ) from rdb$database;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ OVERLAY - ========================================== -il fait beau dans le NORD de la france""" - -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """ +OVERLAY +========================================== +il fait beau dans le NORD de la france +""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/string/test_position_01.py b/tests/functional/intfunc/string/test_position_01.py index 719b1b2b..c057ef4d 100644 --- a/tests/functional/intfunc/string/test_position_01.py +++ b/tests/functional/intfunc/string/test_position_01.py @@ -1,39 +1,44 @@ #coding:utf-8 -# -# id: functional.intfunc.string.position_01 -# title: test for POSITION function -# decription: POSITION( IN ) -# -# POSITION(X IN Y) returns the position of the substring X in the string Y. Returns 0 if X is not found within Y. 
-# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.string.position_01 -import pytest -from firebird.qa import db_factory, isql_act, Action - -# version: 2.1 -# resources: None - -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ select position('beau' IN 'il fait beau dans le nord' ) from rdb$database; +""" +ID: intfunc.string.position +ISSUE: 1926 +TITLE: POSITION( IN ) +DESCRIPTION: + POSITION(X IN Y) returns the position of the substring X in the string Y. + Returns 0 if X is not found within Y. +NOTES: +[03.02.2022] pcisar + Merged with "functional.intfunc.string.position_02" test to simplify the suite structure. + Now each string function has only one test file. +FBTEST: functional.intfunc.string.position_01 +JIRA: CORE-1511 """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +import pytest +from firebird.qa import * -expected_stdout_1 = """ POSITION - ============ -9""" +db = db_factory() -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +test_script = """ +select position('beau' IN 'il fait beau dans le nord' ) from rdb$database; +-- next is from functional.intfunc.string.position_02 +SELECT POSITION('beau','beau,il fait beau') C1,POSITION('beau','beau,il fait beau',2) C2 FROM RDB$DATABASE; +""" +act = isql_act('db', test_script) + +expected_stdout = """ +POSITION +============ +9 + C1 C2 +============ ============ + 1 14 +""" + +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/string/test_position_02.py b/tests/functional/intfunc/string/test_position_02.py deleted file mode 100644 index 5bcb6f25..00000000 --- a/tests/functional/intfunc/string/test_position_02.py +++ /dev/null @@ -1,38 +0,0 @@ -#coding:utf-8 -# -# id: functional.intfunc.string.position_02 -# title: Test Position -# decription: -# tracker_id: CORE-1511 -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.string.position_02 - -import pytest -from firebird.qa import db_factory, isql_act, Action - -# version: 2.1 -# resources: None - -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """SELECT POSITION('beau','beau,il fait beau') C1,POSITION('beau','beau,il fait beau',2) C2 FROM RDB$DATABASE;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ - C1 C2 -============ ============ - 1 14 -""" - -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - diff --git a/tests/functional/intfunc/string/test_replace_01.py b/tests/functional/intfunc/string/test_replace_01.py index 91cf4ee6..257de424 100644 --- a/tests/functional/intfunc/string/test_replace_01.py +++ b/tests/functional/intfunc/string/test_replace_01.py @@ -1,38 +1,28 @@ #coding:utf-8 -# -# id: functional.intfunc.string.replace_01 -# title: test for REPLACE function -# decription: REPLACE( , , ) -# -# Replaces all occurrences of in with . 
-# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.string.replace_01 + +""" +ID: intfunc.string.replace +TITLE: REPLACE( , , ) +DESCRIPTION: + Replaces all occurrences of in with . +FBTEST: functional.intfunc.string.replace_01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +db = db_factory() -substitutions_1 = [] +act = isql_act('db', "select REPLACE('toto','o','i') from rdb$database;") -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """select REPLACE('toto','o','i') from rdb$database;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ REPLACE - ======= -titi""" - -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """ +REPLACE +======= +titi +""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/string/test_reverse_01.py b/tests/functional/intfunc/string/test_reverse_01.py index 6936449d..5619b83b 100644 --- a/tests/functional/intfunc/string/test_reverse_01.py +++ b/tests/functional/intfunc/string/test_reverse_01.py @@ -1,39 +1,29 @@ #coding:utf-8 -# -# id: functional.intfunc.string.reverse_01 -# title: test for REVERSE function -# decription: REVERSE( ) -# -# Returns a string in reverse order. Useful function for creating an expression index that indexes strings from right to left. -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.string.reverse_01 + +""" +ID: intfunc.string.reverse +TITLE: REVERSE( ) +DESCRIPTION: + Returns a string in reverse order. Useful function for creating an expression index that + indexes strings from right to left. 
+FBTEST: functional.intfunc.string.reverse_01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +db = db_factory() -substitutions_1 = [] +act = isql_act('db', "select REVERSE('DRON') from rdb$database;") -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """select REVERSE('DRON') from rdb$database;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ - REVERSE - ======= -NORD""" - -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """ +REVERSE +======= +NORD +""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/string/test_right_01.py b/tests/functional/intfunc/string/test_right_01.py index 2cb8d0c4..ffbce81d 100644 --- a/tests/functional/intfunc/string/test_right_01.py +++ b/tests/functional/intfunc/string/test_right_01.py @@ -1,39 +1,28 @@ #coding:utf-8 -# -# id: functional.intfunc.string.right_01 -# title: test for RIGHT function -# decription: -# RIGHT( , ) -# -# Returns the substring, of the specified length, from the right-hand end of a string -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.intfunc.string.right_01 + +""" +ID: intfunc.string.right +TITLE: RIGHT( , ) +DESCRIPTION: + Returns the substring, of the specified length, from the right-hand end of a string. +FBTEST: functional.intfunc.string.right_01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +db = db_factory() -substitutions_1 = [] +act = isql_act('db', "select RIGHT('NORD PAS DE CALAIS', 13) from rdb$database;") -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """select RIGHT('NORD PAS DE CALAIS', 13) from rdb$database;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ RIGHT - ================== -PAS DE CALAIS""" - -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """ +RIGHT +================== +PAS DE CALAIS +""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/intfunc/string/test_rpad_01.py b/tests/functional/intfunc/string/test_rpad_01.py index 90b399fc..4029631f 100644 --- a/tests/functional/intfunc/string/test_rpad_01.py +++ b/tests/functional/intfunc/string/test_rpad_01.py @@ -1,153 +1,29 @@ #coding:utf-8 -# -# id: functional.intfunc.string.rpad_01 -# title: test for RPAD function, including non-ascii characters. -# decription: -# 03-mar-2021. Re-implemented in order to have ability to run this test on Linux. -# Added tests from some COREs which have no apropriate .fbt -# Test creates table and fills it with non-ascii characters using charset = UTF8. -# Then it generates .sql script for running it in separate ISQL process. -# This script makes connection to test DB using charset = ISO8859_1 and perform needed DML. 
-# Result will be redirected to .log which will be opened via codecs.open(...encoding='iso-8859-1'). -# Its content will be converted to UTF8 for showing in expected_stdout. -# -# Checked on: -# * Windows: 4.0.0.2377, 3.0.8.33420 -# * Linux: 4.0.0.2377, 3.0.8.33415 -# -# tracker_id: -# min_versions: [] -# versions: 3.0 -# qmid: + +""" +ID: intfunc.string.rpad +TITLE: RPAD function, including non-ascii characters +DESCRIPTION: +NOTES: +[03.03.2021] + Re-implemented in order to have ability to run this test on Linux. + Added tests from some COREs which have no apropriate .fbt + Test creates table and fills it with non-ascii characters using charset = UTF8. + Then it generates .sql script for running it in separate ISQL process. + This script makes connection to test DB using charset = ISO8859_1 and perform needed DML. + Result will be redirected to .log which will be opened via codecs.open(...encoding='iso-8859-1'). + Its content will be converted to UTF8 for showing in expected_stdout. +FBTEST: functional.intfunc.string.rpad_01 +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory(charset='ISO8859_1') -substitutions_1 = [('BLOB_ID_.*', ''), ('.*After line \\d+.*', '')] +act = python_act('db', substitutions=[('BLOB_ID_.*', ''), ('.*After line \\d+.*', '')]) -init_script_1 = """""" - -db_1 = db_factory(charset='ISO8859_1', sql_dialect=3, init=init_script_1) - -# test_script_1 -#--- -# -# import os -# import codecs -# import subprocess -# import time -# -# db_conn.close() -# -# #-------------------------------------------- -# -# def flush_and_close( file_handle ): -# # https://docs.python.org/2/library/os.html#os.fsync -# # If you're starting with a Python file object f, -# # first do f.flush(), and -# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. -# global os -# -# file_handle.flush() -# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: -# # otherwise: "OSError: [Errno 9] Bad file descriptor"! -# os.fsync(file_handle.fileno()) -# file_handle.close() -# -# #-------------------------------------------- -# -# def cleanup( f_names_list ): -# global os -# for i in range(len( f_names_list )): -# if type(f_names_list[i]) == file: -# del_name = f_names_list[i].name -# elif type(f_names_list[i]) == str: -# del_name = f_names_list[i] -# else: -# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.') -# del_name = None -# -# if del_name and os.path.isfile( del_name ): -# os.remove( del_name ) -# -# #-------------------------------------------- -# # NB: do NOT include here character "­ ­ soft hyphen" -# # It can not be properly represented on linux in utf8 codepage! 
-# # https://jkorpela.fi/shy.html -# data = "¡¢£¤¥¦§¨©ª«¬®¯°±²³´µ¶·¸¹º»¼½¾¿ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖ×ØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõö÷øùúûüýþÿ" -# -# null_dev = os.devnull -# -# sql_txt=''' set names ISO8859_1; -# connect '%(dsn)s' user '%(user_name)s' password '%(user_password)s'; -# set list on; -# recreate table test(c varchar(32765), b blob sub_type text); -# insert into test(c, b) values( '%(data)s', '%(data)s' ); -# select rpad(c, 2 * char_length(c), b) as blob_id_01 from test; -# select rpad(b, 2 * char_length(b), c) as blob_id_02 from test; -# -# -- from CORE-2745 ("build in function RPAD result is wrong if argument is longer then length to padd parameter"): -# select rpad(c, 1, 'ÿ') as txt_rpad_03 from test; -# select rpad(b, 1, 'ÿ') as blob_id_04 from test; -# -# -- from CORE-2597: RPAD result must be varchar(1) instead of varchar(32765) in this example: -# select rpad('¿', 1, '¡') as txt_rpad_05a, rpad('À', 1, 'ÿ') as txt_rpad_05b from rdb$database; -# -# select -# rpad('÷øùúûüýþÿ',32765-char_length('÷øùúûüýþÿ'), '%(data)s') as txt_rpad_06a -# ,rpad('÷øùúûüýþÿ',32765-char_length('÷øùúûüýþÿ'), '%(data)s') as txt_rpad_06b -# from rdb$database; -# -# out %(null_dev)s; -# select rpad(c, 32765, c) from test; -- this must pass -# select rpad(c, 32766, c) from test; -- must fail: SQLSTATE = 22001 / ... string truncation / -expected length 32765, actual 32766 -# out; -# -# -- select char_length(rpad('', 2147483647, b)) as r_03 from test; -- <<< 152s :-) -# -# -# ''' % dict(globals(), **locals()) -# -# f_run_sql = open( os.path.join(context['temp_directory'], 'tmp_func_rpad_iso8859_1.sql'), 'w' ) -# f_run_sql.write( sql_txt.decode('utf8').encode('iso-8859-1') ) -# flush_and_close( f_run_sql ) -# # result: file tmp_func_rpad_iso8859_1.sql is encoded in iso8859_1 (aka 'latin-1') -# -# f_run_log = open( os.path.splitext(f_run_sql.name)[0]+'.log', 'w') -# f_run_err = open( os.path.splitext(f_run_sql.name)[0]+'.err', 'w') -# subprocess.call( [ context['isql_path'], '-q', '-i', f_run_sql.name ], -# stdout = f_run_log, -# stderr = f_run_err -# ) -# flush_and_close( f_run_log ) -# flush_and_close( f_run_err ) -# -# # result: output will be encoded in iso9959_1, error log must be empty. 
-# with codecs.open(f_run_log.name, 'r', encoding='iso-8859-1' ) as f: -# stdout_encoded_in_latin_1 = f.readlines() -# -# with codecs.open(f_run_err.name, 'r', encoding='iso-8859-1' ) as f: -# stderr_encoded_in_latin_1 = f.readlines() -# -# for i in stdout_encoded_in_latin_1: -# print( i.encode('utf8') ) -# -# # NO error must occur: -# ###################### -# for i in stderr_encoded_in_latin_1: -# print( 'EXPECTED STDERR: ', i.encode('utf8') ) -# -# # cleanup: -# ########### -# cleanup( (f_run_sql, f_run_log, f_run_err) ) -# -#--- -act_1 = python_act('db_1', substitutions=substitutions_1) - -expected_stdout_1 = """ +expected_stdout = """ ¡¢£¤¥¦§¨©ª«¬®¯°±²³´µ¶·¸¹º»¼½¾¿ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖ×ØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõö÷øùúûüýþÿ¡¢£¤¥¦§¨©ª«¬®¯°±²³´µ¶·¸¹º»¼½¾¿ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖ×ØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõö÷øùúûüýþÿ ¡¢£¤¥¦§¨©ª«¬®¯°±²³´µ¶·¸¹º»¼½¾¿ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖ×ØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõö÷øùúûüýþÿ¡¢£¤¥¦§¨©ª«¬®¯°±²³´µ¶·¸¹º»¼½¾¿ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖ×ØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõö÷øùúûüýþÿ TXT_RPAD_03 ¡ @@ -163,9 +39,121 @@ expected_stdout_1 = """ EXPECTED STDERR: -expected length 32765, actual 32766 """ +@pytest.mark.skip('FIXME: Not IMPLEMENTED') @pytest.mark.version('>=3.0') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") +# test_script_1 +#--- +# +# import os +# import codecs +# import subprocess +# import time +# +# db_conn.close() +# +# #-------------------------------------------- +# +# def flush_and_close( file_handle ): +# # https://docs.python.org/2/library/os.html#os.fsync +# # If you're starting with a Python file object f, +# # first do f.flush(), and +# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. +# global os +# +# file_handle.flush() +# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: +# # otherwise: "OSError: [Errno 9] Bad file descriptor"! +# os.fsync(file_handle.fileno()) +# file_handle.close() +# +# #-------------------------------------------- +# +# def cleanup( f_names_list ): +# global os +# for i in range(len( f_names_list )): +# if type(f_names_list[i]) == file: +# del_name = f_names_list[i].name +# elif type(f_names_list[i]) == str: +# del_name = f_names_list[i] +# else: +# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.') +# del_name = None +# +# if del_name and os.path.isfile( del_name ): +# os.remove( del_name ) +# +# #-------------------------------------------- +# # NB: do NOT include here character "­ ­ soft hyphen" +# # It can not be properly represented on linux in utf8 codepage! 
+# # https://jkorpela.fi/shy.html +# data = "¡¢£¤¥¦§¨©ª«¬®¯°±²³´µ¶·¸¹º»¼½¾¿ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖ×ØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõö÷øùúûüýþÿ" +# +# null_dev = os.devnull +# +# sql_txt=''' set names ISO8859_1; +# connect '%(dsn)s' user '%(user_name)s' password '%(user_password)s'; +# set list on; +# recreate table test(c varchar(32765), b blob sub_type text); +# insert into test(c, b) values( '%(data)s', '%(data)s' ); +# select rpad(c, 2 * char_length(c), b) as blob_id_01 from test; +# select rpad(b, 2 * char_length(b), c) as blob_id_02 from test; +# +# -- from CORE-2745 ("build in function RPAD result is wrong if argument is longer then length to padd parameter"): +# select rpad(c, 1, 'ÿ') as txt_rpad_03 from test; +# select rpad(b, 1, 'ÿ') as blob_id_04 from test; +# +# -- from CORE-2597: RPAD result must be varchar(1) instead of varchar(32765) in this example: +# select rpad('¿', 1, '¡') as txt_rpad_05a, rpad('À', 1, 'ÿ') as txt_rpad_05b from rdb$database; +# +# select +# rpad('÷øùúûüýþÿ',32765-char_length('÷øùúûüýþÿ'), '%(data)s') as txt_rpad_06a +# ,rpad('÷øùúûüýþÿ',32765-char_length('÷øùúûüýþÿ'), '%(data)s') as txt_rpad_06b +# from rdb$database; +# +# out %(null_dev)s; +# select rpad(c, 32765, c) from test; -- this must pass +# select rpad(c, 32766, c) from test; -- must fail: SQLSTATE = 22001 / ... string truncation / -expected length 32765, actual 32766 +# out; +# +# -- select char_length(rpad('', 2147483647, b)) as r_03 from test; -- <<< 152s :-) +# +# +# ''' % dict(globals(), **locals()) +# +# f_run_sql = open( os.path.join(context['temp_directory'], 'tmp_func_rpad_iso8859_1.sql'), 'w' ) +# f_run_sql.write( sql_txt.decode('utf8').encode('iso-8859-1') ) +# flush_and_close( f_run_sql ) +# # result: file tmp_func_rpad_iso8859_1.sql is encoded in iso8859_1 (aka 'latin-1') +# +# f_run_log = open( os.path.splitext(f_run_sql.name)[0]+'.log', 'w') +# f_run_err = open( os.path.splitext(f_run_sql.name)[0]+'.err', 'w') +# subprocess.call( [ context['isql_path'], '-q', '-i', f_run_sql.name ], +# stdout = f_run_log, +# stderr = f_run_err +# ) +# flush_and_close( f_run_log ) +# flush_and_close( f_run_err ) +# +# # result: output will be encoded in iso9959_1, error log must be empty. 
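+# # Porting sketch -- untested, and it assumes Action.isql() accepts 'input'
+# # and 'charset' keyword arguments -- the manual subprocess/codecs handling
+# # above could then shrink to roughly:
+# #
+# #     def test_1(act: Action):
+# #         act.expected_stdout = expected_stdout
+# #         act.isql(switches=['-q'], input=sql_txt, charset='iso8859_1')
+# #         assert act.clean_stdout == act.clean_expected_stdout
+# #
+# # with sql_txt built as a plain Python-3 str, letting the connection charset
+# # handle the ISO8859_1 round-trip instead of encoding the file by hand.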
+# with codecs.open(f_run_log.name, 'r', encoding='iso-8859-1' ) as f: +# stdout_encoded_in_latin_1 = f.readlines() +# +# with codecs.open(f_run_err.name, 'r', encoding='iso-8859-1' ) as f: +# stderr_encoded_in_latin_1 = f.readlines() +# +# for i in stdout_encoded_in_latin_1: +# print( i.encode('utf8') ) +# +# # NO error must occur: +# ###################### +# for i in stderr_encoded_in_latin_1: +# print( 'EXPECTED STDERR: ', i.encode('utf8') ) +# +# # cleanup: +# ########### +# cleanup( (f_run_sql, f_run_log, f_run_err) ) +# +#--- diff --git a/tests/functional/monitoring/test_01.py b/tests/functional/monitoring/test_01.py index 2dd06fa4..812b6cad 100644 --- a/tests/functional/monitoring/test_01.py +++ b/tests/functional/monitoring/test_01.py @@ -1,40 +1,31 @@ #coding:utf-8 -# -# id: functional.monitoring.01 -# title: Get isolation level of the current transaction -# decription: -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.monitoring.monitoring_01 + +""" +ID: monitoring-tables-01 +TITLE: Get isolation level of the current transaction +DESCRIPTION: +FBTEST: functional.monitoring.01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """SELECT MON$ISOLATION_MODE +test_script = """SELECT MON$ISOLATION_MODE FROM MON$TRANSACTIONS WHERE MON$TRANSACTION_ID = CURRENT_TRANSACTION;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ MON$ISOLATION_MODE ================== 1 """ -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/monitoring/test_02.py b/tests/functional/monitoring/test_02.py index 29956f1b..84a153b8 100644 --- a/tests/functional/monitoring/test_02.py +++ b/tests/functional/monitoring/test_02.py @@ -1,26 +1,18 @@ #coding:utf-8 -# -# id: functional.monitoring.02 -# title: Monitoring: get data about active statements from current attachment (WHERE-filter: mon$statements.mon$state=1). -# decription: -# tracker_id: -# min_versions: [] -# versions: 3.0 -# qmid: functional.monitoring.monitoring_02 + +""" +ID: monitoring-tables-02 +TITLE: Get data about active statements from current attachment (WHERE-filter: mon$statements.mon$state=1). 
+DESCRIPTION: +FBTEST: functional.monitoring.02 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory() -substitutions_1 = [('SQL_TEXT.*', '')] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set width usr 31; set list on; select a.mon$user usr, s.mon$sql_text sql_text @@ -31,10 +23,10 @@ test_script_1 = """ and s.mon$state = 1; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('SQL_TEXT.*', '')]) -expected_stdout_1 = """ - USR SYSDBA +expected_stdout = """ + USR SYSDBA SQL_TEXT 0:1 select a.mon$user usr, s.mon$sql_text sql_text from mon$attachments a @@ -45,8 +37,7 @@ expected_stdout_1 = """ """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/monitoring/test_04.py b/tests/functional/monitoring/test_04.py index bddc8a6f..f5fd6928 100644 --- a/tests/functional/monitoring/test_04.py +++ b/tests/functional/monitoring/test_04.py @@ -1,22 +1,16 @@ #coding:utf-8 -# -# id: functional.monitoring.04 -# title: Monitoring: SYSDBA must see all attachments and their transactions, non-privileged user - only those which was of his login. -# decription: -# tracker_id: -# min_versions: [] -# versions: 3.0 -# qmid: functional.monitoring.monitoring_04 + +""" +ID: monitoring-tables-03 +TITLE: SYSDBA must see all attachments and their transactions, non-privileged user - only those which was of his login. +DESCRIPTION: +FBTEST: functional.monitoring.04 +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None - -substitutions_1 = [('[=]{1,}', ''), ('[ \t]+', ' ')] - -init_script_1 = """ +init_script = """ set wng off; create or alter user u01 password '123'; create or alter user u02 password '456'; @@ -30,10 +24,10 @@ init_script_1 = """ -- 15.01.2019: removed detailed info about read committed TIL because of read consistency TIL that 4.0 introduces. -- Any record with t.mon$isolation_mode = 4 now is considered just as read committed, w/o any detalization (this not much needed here). ,decode( t.mon$isolation_mode, 0,'CONSISTENCY', 1,'SNAPSHOT', 2, 'READ_COMMITTED', 3, 'READ_COMMITTED', 4, 'READ_COMMITTED', '??' 
) as isol_descr - from - mon$attachments a + from + mon$attachments a LEFT join mon$transactions t using(mon$attachment_id) - where + where a.mon$attachment_id is distinct from current_connection and a.mon$system_flag is distinct from 1 -- remove Cache Writer and Garbage Collector from resultset order by a.mon$user, t.mon$transaction_id; @@ -46,67 +40,11 @@ init_script_1 = """ commit; """ -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -# test_script_1 -#--- -# -# db_conn.close() -# -# #---------------------------------------------------------------------- -# -# con_1 = fdb.connect(dsn=dsn, user=user_name, password=user_password ) -# con_2 = fdb.connect(dsn=dsn, user='U01', password='123') -# con_3 = fdb.connect(dsn=dsn, user='U01', password='123') -# -# con_1.begin() -# con_2.begin() -# con_3.begin() -# -# #---------------------------------------------------------------------- -# -# custom_tpb4 = fdb.TPB() -# custom_tpb4.isolation_level = (fdb.isc_tpb_read_committed, fdb.isc_tpb_rec_version) -# -# custom_tpb5 = fdb.TPB() -# custom_tpb5.isolation_level = fdb.isc_tpb_concurrency -# -# custom_tpb6 = fdb.TPB() -# custom_tpb6.isolation_level = fdb.isc_tpb_consistency -# -# con_4 = fdb.connect(dsn=dsn, user='U02', password='456') -# con_5 = fdb.connect(dsn=dsn, user='U02', password='456') -# con_6 = fdb.connect(dsn=dsn, user='U02', password='456') -# -# con_4.begin( tpb = custom_tpb4 ) -# con_5.begin( tpb = custom_tpb5 ) -# con_6.begin( tpb = custom_tpb6 ) -# -# sql_chk=''' -# set width who_am_i 12; -# set width who_else 12; -# set width isol_descr 30; -# set count on; -# -# connect '$(DSN)' user 'u01' password '123'; -# select 1 as check_no, v.* from v_who v; -# commit; -# -# connect '$(DSN)' user 'SYSDBA' password 'masterkey'; -# -# select 2 as check_no, v.* from v_who v; -# commit; -# -# drop user u01; -# drop user u02; -# commit; -# ''' -# -# runProgram('isql',[ '-pag','99999','-q' ], sql_chk) -#--- -act_1 = python_act('db_1', substitutions=substitutions_1) +act = python_act('db', substitutions=[('[=]{1,}', ''), ('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ CHECK_NO WHO_AM_I WHO_ELSE TID_ROWN ISOL_MODE ISOL_DESCR 1 U01 U01 1 2 READ_COMMITTED 1 U01 U01 2 2 READ_COMMITTED @@ -122,9 +60,64 @@ expected_stdout_1 = """ Records affected: 6 """ +@pytest.mark.skip('FIXME: Not IMPLEMENTED') @pytest.mark.version('>=3.0') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") +# test_script_1 +#--- +# +# db_conn.close() +# +# #---------------------------------------------------------------------- +# +# con_1 = fdb.connect(dsn=dsn, user=user_name, password=user_password ) +# con_2 = fdb.connect(dsn=dsn, user='U01', password='123') +# con_3 = fdb.connect(dsn=dsn, user='U01', password='123') +# +# con_1.begin() +# con_2.begin() +# con_3.begin() +# +# #---------------------------------------------------------------------- +# +# custom_tpb4 = fdb.TPB() +# custom_tpb4.isolation_level = (fdb.isc_tpb_read_committed, fdb.isc_tpb_rec_version) +# +# custom_tpb5 = fdb.TPB() +# custom_tpb5.isolation_level = fdb.isc_tpb_concurrency +# +# custom_tpb6 = fdb.TPB() +# custom_tpb6.isolation_level = fdb.isc_tpb_consistency +# +# con_4 = fdb.connect(dsn=dsn, user='U02', password='456') +# con_5 = fdb.connect(dsn=dsn, user='U02', password='456') +# con_6 = fdb.connect(dsn=dsn, user='U02', password='456') +# +# con_4.begin( tpb = custom_tpb4 ) +# con_5.begin( tpb = 
custom_tpb5 ) +# con_6.begin( tpb = custom_tpb6 ) +# +# sql_chk=''' +# set width who_am_i 12; +# set width who_else 12; +# set width isol_descr 30; +# set count on; +# +# connect '$(DSN)' user 'u01' password '123'; +# select 1 as check_no, v.* from v_who v; +# commit; +# +# connect '$(DSN)' user 'SYSDBA' password 'masterkey'; +# +# select 2 as check_no, v.* from v_who v; +# commit; +# +# drop user u01; +# drop user u02; +# commit; +# ''' +# +# runProgram('isql',[ '-pag','99999','-q' ], sql_chk) +#--- diff --git a/tests/functional/procedure/alter/test_01.py b/tests/functional/procedure/alter/test_01.py index ced882e1..03915c62 100644 --- a/tests/functional/procedure/alter/test_01.py +++ b/tests/functional/procedure/alter/test_01.py @@ -1,26 +1,16 @@ #coding:utf-8 -# -# id: functional.procedure.alter.01 -# title: ALTER PROCEDURE - Simple ALTER -# decription: ALTER PROCEDURE - Simple ALTER -# -# Dependencies: -# CREATE DATABASE -# CREATE PROCEDURE -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.procedure.alter.alter_procedure_01 + +""" +ID: procedure.alter-01 +TITLE: ALTER PROCEDURE - Simple ALTER +DESCRIPTION: +FBTEST: functional.procedure.alter.01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """SET TERM ^; +init_script = """SET TERM ^; CREATE PROCEDURE test RETURNS (id INTEGER)AS BEGIN id=1; @@ -29,9 +19,9 @@ SET TERM ;^ commit; """ -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """SET TERM ^; +test_script = """SET TERM ^; ALTER PROCEDURE test RETURNS (id INTEGER)AS BEGIN id=2; @@ -40,15 +30,16 @@ SET TERM ;^ commit; EXECUTE PROCEDURE test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ ID +expected_stdout = """ +ID ============ -2""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +2 +""" +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/procedure/alter/test_02.py b/tests/functional/procedure/alter/test_02.py index e2f5338a..22005b2e 100644 --- a/tests/functional/procedure/alter/test_02.py +++ b/tests/functional/procedure/alter/test_02.py @@ -1,45 +1,33 @@ #coding:utf-8 -# -# id: functional.procedure.alter.02 -# title: ALTER PROCEDURE - Alter non exists procedure -# decription: ALTER PROCEDURE - Alter non exists procedure -# -# Dependencies: -# CREATE DATABASE -# tracker_id: -# min_versions: [] -# versions: 3.0 -# qmid: functional.procedure.alter.alter_procedure_02 + +""" +ID: procedure.alter-02 +TITLE: ALTER PROCEDURE - Alter non exists procedure +DESCRIPTION: +FBTEST: functional.procedure.alter.02 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """SET TERM ^; +test_script = """SET TERM ^; ALTER PROCEDURE test RETURNS (id INTEGER)AS BEGIN id=2; END ^ SET TERM ;^""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stderr_1 = 
"""Statement failed, SQLSTATE = 42000 +expected_stderr = """Statement failed, SQLSTATE = 42000 unsuccessful metadata update -ALTER PROCEDURE TEST failed -Procedure TEST not found""" @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - +def test_1(act: Action): + act.expected_stderr = expected_stderr + act.execute() + assert act.clean_stderr == act.clean_expected_stderr diff --git a/tests/functional/procedure/create/test_01.py b/tests/functional/procedure/create/test_01.py index 0292aef0..94dd7ebe 100644 --- a/tests/functional/procedure/create/test_01.py +++ b/tests/functional/procedure/create/test_01.py @@ -1,29 +1,18 @@ #coding:utf-8 -# -# id: functional.procedure.create.01 -# title: CREATE PROCEDURE -# decription: CREATE PROCEDURE -# -# Dependencies: -# CREATE DATABASE -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.procedure.create.create_procedure_01 + +""" +ID: procedure.create-01 +TITLE: CREATE PROCEDURE +DESCRIPTION: +FBTEST: functional.procedure.create.01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """SET TERM ^; +test_script = """SET TERM ^; CREATE PROCEDURE test AS BEGIN POST_EVENT 'Test'; @@ -32,18 +21,17 @@ SET TERM ;^ commit; SHOW PROCEDURE test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """Procedure text: +expected_stdout = """Procedure text: ============================================================================= BEGIN POST_EVENT 'Test'; END =============================================================================""" -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/procedure/create/test_02.py b/tests/functional/procedure/create/test_02.py index bd5564d4..58bad5a3 100644 --- a/tests/functional/procedure/create/test_02.py +++ b/tests/functional/procedure/create/test_02.py @@ -1,29 +1,18 @@ #coding:utf-8 -# -# id: functional.procedure.create.02 -# title: CREATE PROCEDURE - Input parameters -# decription: CREATE PROCEDURE - Input parameters -# -# Dependencies: -# CREATE DATABASE -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.procedure.create.create_procedure_02 + +""" +ID: procedure.create-02 +TITLE: CREATE PROCEDURE - Input parameters +DESCRIPTION: +FBTEST: functional.procedure.create.02 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """SET TERM ^; +test_script = """SET TERM ^; CREATE PROCEDURE test( p1 SMALLINT, p2 INTEGER, p3 FLOAT, p4 DOUBLE PRECISION, p5 DECIMAL(9,3), p6 NUMERIC(10,4), p7 DATE, p8 TIME, p9 TIMESTAMP, p10 CHAR(40), p11 VARCHAR(60), p12 NCHAR(70)) @@ -35,9 +24,9 @@ SET TERM ;^ commit; SHOW PROCEDURE test;""" 
-act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """Procedure text: +expected_stdout = """Procedure text: ============================================================================= BEGIN POST_EVENT 'Test'; @@ -57,9 +46,8 @@ P10 INPUT CHAR(40) P11 INPUT VARCHAR(60) P12 INPUT CHAR(70) CHARACTER SET ISO8859_1""" -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/procedure/create/test_03.py b/tests/functional/procedure/create/test_03.py index e5238ab0..36fa1750 100644 --- a/tests/functional/procedure/create/test_03.py +++ b/tests/functional/procedure/create/test_03.py @@ -1,29 +1,18 @@ #coding:utf-8 -# -# id: functional.procedure.create.03 -# title: CREATE PROCEDURE - Output paramaters -# decription: CREATE PROCEDURE - Output paramaters -# -# Dependencies: -# CREATE DATABASE -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.procedure.create.create_procedure_03 + +""" +ID: procedure.create-03 +TITLE: CREATE PROCEDURE - Output paramaters +DESCRIPTION: +FBTEST: functional.procedure.create.03 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """SET TERM ^; +test_script = """SET TERM ^; CREATE PROCEDURE test RETURNS( p1 SMALLINT, p2 INTEGER, p3 FLOAT, p4 DOUBLE PRECISION, p5 DECIMAL(9,3), p6 NUMERIC(10,4), p7 DATE, p8 TIME, p9 TIMESTAMP, p10 CHAR(40), p11 VARCHAR(60), p12 NCHAR(70)) @@ -46,9 +35,9 @@ SET TERM ;^ commit; SHOW PROCEDURE test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """Procedure text: +expected_stdout = """Procedure text: ============================================================================= BEGIN p1=1; @@ -79,9 +68,8 @@ P10 OUTPUT CHAR(40) P11 OUTPUT VARCHAR(60) P12 OUTPUT CHAR(70) CHARACTER SET ISO8859_1""" -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/procedure/create/test_04.py b/tests/functional/procedure/create/test_04.py index aa29d1d2..30e65223 100644 --- a/tests/functional/procedure/create/test_04.py +++ b/tests/functional/procedure/create/test_04.py @@ -1,29 +1,18 @@ #coding:utf-8 -# -# id: functional.procedure.create.04 -# title: CREATE PROCEDURE - Output paramaters -# decription: CREATE PROCEDURE - Output paramaters -# -# Dependencies: -# CREATE DATABASE -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.procedure.create.create_procedure_04 + +""" +ID: procedure.create-04 +TITLE: CREATE PROCEDURE - Output paramaters +DESCRIPTION: +FBTEST: functional.procedure.create.04 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# 
resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """SET TERM ^; +test_script = """SET TERM ^; CREATE PROCEDURE test AS DECLARE VARIABLE p1 SMALLINT; @@ -56,9 +45,9 @@ SET TERM ;^ commit; SHOW PROCEDURE test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """Procedure text: +expected_stdout = """Procedure text: ============================================================================= DECLARE VARIABLE p1 SMALLINT; DECLARE VARIABLE p2 INTEGER; @@ -88,9 +77,8 @@ BEGIN END =============================================================================""" -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/procedure/create/test_05.py b/tests/functional/procedure/create/test_05.py index 3d8ff454..27b48d25 100644 --- a/tests/functional/procedure/create/test_05.py +++ b/tests/functional/procedure/create/test_05.py @@ -1,33 +1,22 @@ #coding:utf-8 -# -# id: functional.procedure.create.05 -# title: CREATE PROCEDURE - PSQL Stataments -# decription: CREATE PROCEDURE - PSQL Stataments -# -# Dependencies: -# CREATE DATABASE -# CREATE EXCEPTION -# CREATE TABLE -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.procedure.create.create_procedure_05 + +""" +ID: procedure.create-05 +TITLE: CREATE PROCEDURE - PSQL Stataments +DESCRIPTION: +FBTEST: functional.procedure.create.05 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE EXCEPTION test 'test exception'; +init_script = """CREATE EXCEPTION test 'test exception'; CREATE TABLE tb(id INT, text VARCHAR(32)); commit;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """SET TERM ^; +test_script = """SET TERM ^; CREATE PROCEDURE dummy (id INT) AS BEGIN id=id; @@ -77,9 +66,9 @@ SET TERM ;^ commit; SHOW PROCEDURE test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """Procedure text: +expected_stdout = """Procedure text: ============================================================================= DECLARE VARIABLE p1 SMALLINT; BEGIN @@ -116,9 +105,8 @@ BEGIN END =============================================================================""" -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/procedure/create/test_06.py b/tests/functional/procedure/create/test_06.py index 71bed281..a55398b7 100644 --- a/tests/functional/procedure/create/test_06.py +++ b/tests/functional/procedure/create/test_06.py @@ -1,29 +1,18 @@ #coding:utf-8 -# -# id: functional.procedure.create.06 -# title: CREATE PROCEDURE - PSQL Stataments - SUSPEND -# decription: 
CREATE PROCEDURE - PSQL Stataments - SUSPEND -# -# Dependencies: -# CREATE DATABASE -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.procedure.create.create_procedure_06 + +""" +ID: procedure.create-06 +TITLE: CREATE PROCEDURE - PSQL Stataments - SUSPEND +DESCRIPTION: +FBTEST: functional.procedure.create.06 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """SET TERM ^; +test_script = """SET TERM ^; CREATE PROCEDURE test RETURNS(id INT)AS BEGIN ID=4; @@ -35,9 +24,9 @@ SET TERM ;^ commit; SHOW PROCEDURE test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """Procedure text: +expected_stdout = """Procedure text: ============================================================================= BEGIN @@ -50,9 +39,8 @@ END Parameters: ID OUTPUT INTEGER""" -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/procedure/create/test_07.py b/tests/functional/procedure/create/test_07.py index 8fbdaa5e..c506f065 100644 --- a/tests/functional/procedure/create/test_07.py +++ b/tests/functional/procedure/create/test_07.py @@ -1,26 +1,16 @@ #coding:utf-8 -# -# id: functional.procedure.create.07 -# title: CREATE PROCEDURE - try create SP with same name -# decription: CREATE PROCEDURE - try create SP with same name -# -# Dependencies: -# CREATE DATABASE -# CREATE PROCEDURE -# tracker_id: -# min_versions: [] -# versions: 3.0 -# qmid: functional.procedure.create.create_procedure_07 + +""" +ID: procedure.create-07 +TITLE: CREATE PROCEDURE - try create SP with same name +DESCRIPTION: +FBTEST: functional.procedure.create.07 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """SET TERM ^; +init_script = """SET TERM ^; CREATE PROCEDURE test RETURNS(id INT)AS BEGIN ID=4; @@ -29,9 +19,9 @@ END ^ SET TERM ;^ commit;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """SET TERM ^; +test_script = """SET TERM ^; CREATE PROCEDURE test RETURNS(id INT)AS BEGIN ID=5; @@ -39,16 +29,15 @@ BEGIN END ^ SET TERM ;^""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stderr_1 = """Statement failed, SQLSTATE = 42000 +expected_stderr = """Statement failed, SQLSTATE = 42000 unsuccessful metadata update -CREATE PROCEDURE TEST failed -Procedure TEST already exists""" @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - +def test_1(act: Action): + act.expected_stderr = expected_stderr + act.execute() + assert act.clean_stderr == act.clean_expected_stderr diff --git a/tests/functional/procedure/create/test_08.py b/tests/functional/procedure/create/test_08.py index 00d51cc5..514eb047 100644 --- 
a/tests/functional/procedure/create/test_08.py +++ b/tests/functional/procedure/create/test_08.py @@ -1,47 +1,35 @@ #coding:utf-8 -# -# id: functional.procedure.create.08 -# title: CREATE PROCEDURE - COMMIT in SP is not alowed -# decription: CREATE PROCEDURE - COMMIT in SP is not alowed -# -# Dependencies: -# CREATE DATABASE -# tracker_id: -# min_versions: [] -# versions: 2.5.0 -# qmid: functional.procedure.create.create_procedure_08 + +""" +ID: procedure.create-08 +TITLE: CREATE PROCEDURE - COMMIT in SP is not alowed +DESCRIPTION: +FBTEST: functional.procedure.create.08 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """SET TERM ^; +test_script = """SET TERM ^; CREATE PROCEDURE test RETURNS(id INT)AS BEGIN COMMIT; END ^ SET TERM ;^""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stderr_1 = """Statement failed, SQLSTATE = 42000 +expected_stderr = """Statement failed, SQLSTATE = 42000 Dynamic SQL Error -SQL error code = -104 -Token unknown - line 3, column 3 -COMMIT""" -@pytest.mark.version('>=2.5.0') -def test_1(act_1: Action): - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stderr = expected_stderr + act.execute() + assert act.clean_stderr == act.clean_expected_stderr diff --git a/tests/functional/procedure/create/test_15.py b/tests/functional/procedure/create/test_15.py index d72d24ca..8e4c441b 100644 --- a/tests/functional/procedure/create/test_15.py +++ b/tests/functional/procedure/create/test_15.py @@ -1,22 +1,18 @@ #coding:utf-8 -# -# id: functional.procedure.create.15 -# title: COLLATE IN STORED PROCEDURE -# decription: -# tracker_id: CORE-684 -# min_versions: [] -# versions: 2.1 -# qmid: functional.procedure.create.create_procedure_15 + +""" +ID: procedure.create-09 +ISSUE: 1049 +TITLE: COLLATE IN STORED PROCEDURE +DESCRIPTION: +FBTEST: functional.procedure.create.15 +JIRA: CORE-684 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None - -substitutions_1 = [] - -init_script_1 = """SET TERM !!; +init_script = """SET TERM !!; CREATE PROCEDURE NEW_PROCEDURE (NOM1 VARCHAR(20) CHARACTER SET ISO8859_1 COLLATE FR_FR) RETURNS (NOM3 VARCHAR(20) CHARACTER SET ISO8859_1 COLLATE ISO8859_1) AS @@ -28,19 +24,16 @@ BEGIN END !! SET TERM ;!! 
COMMIT; - - - """ -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """SHOW PROCEDURE NEW_PROCEDURE; +test_script = """SHOW PROCEDURE NEW_PROCEDURE; SELECT * FROM NEW_PROCEDURE('TEST');""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """Procedure text: +expected_stdout = """Procedure text: ============================================================================= DECLARE VARIABLE NOM2 VARCHAR(20) CHARACTER SET ISO8859_1 COLLATE FR_CA; BEGIN @@ -58,9 +51,8 @@ NOM3 TEST """ -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/procedure/create/test_16.py b/tests/functional/procedure/create/test_16.py index d9c03523..b972f152 100644 --- a/tests/functional/procedure/create/test_16.py +++ b/tests/functional/procedure/create/test_16.py @@ -1,27 +1,18 @@ #coding:utf-8 -# -# id: functional.procedure.create.16 -# title: Type Flaq for Stored Procedures -# decription: -# Checked on: -# 2.5.9.27126: OK, 0.579s. -# 3.0.5.33086: OK, 1.219s. -# 4.0.0.1378: OK, 8.219s. -# -# tracker_id: CORE-779 -# min_versions: [] -# versions: 2.5 -# qmid: functional.procedure.create.create_procedure_16 + +""" +ID: procedure.create-10 +ISSUE: 1161 +TITLE: Type Flag for Stored Procedures +DESCRIPTION: +FBTEST: functional.procedure.create.16 +JIRA: CORE-779 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None - -substitutions_1 = [] - -init_script_1 = """ +init_script = """ set term ^; create or alter procedure with_suspend (nom1 varchar(20) character set iso8859_1 collate fr_fr) returns (nom3 varchar(20) character set iso8859_1 collate iso8859_1) as @@ -31,7 +22,7 @@ init_script_1 = """ nom3=nom2; suspend; end ^ - + create or alter procedure no_suspend returns(p1 smallint) as begin p1=1; @@ -40,19 +31,19 @@ init_script_1 = """ commit; """ -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """ +test_script = """ set list on; select p.rdb$procedure_name, p.rdb$procedure_type from rdb$procedures p where upper(p.rdb$procedure_name) in ( upper('with_suspend'), upper('no_suspend') ) - order by 1; + order by 1; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ RDB$PROCEDURE_NAME NO_SUSPEND RDB$PROCEDURE_TYPE 2 @@ -60,9 +51,8 @@ expected_stdout_1 = """ RDB$PROCEDURE_TYPE 1 """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/replication/test_blob_not_found_in_rw_replica_if_target_row_exists.py b/tests/functional/replication/test_blob_not_found_in_rw_replica_if_target_row_exists.py index 32b78c75..e3317ece 100644 --- a/tests/functional/replication/test_blob_not_found_in_rw_replica_if_target_row_exists.py +++ 
b/tests/functional/replication/test_blob_not_found_in_rw_replica_if_target_row_exists.py @@ -1,153 +1,155 @@ #coding:utf-8 -# -# id: tests.functional.replication.blob_not_found_in_rw_replica_if_target_row_exists -# title: Error "BLOB is not found" while replication converts INSERT into UPDATE for a conflicting record -# decription: -# See: https://github.com/FirebirdSQL/firebird/issues/7070 -# -# Test temporary changes mode of replica using external call: gfix -replica read_write ... -# Then we create table TEST(ID int, b blob) on master, without adding records in it, and WAIT -# until this table will appear on replica DB. -# Maximal waiting time is limited by variable MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG). -# Message starting with phrase 'POINT-1A ...' is printed when this table appears in replica DB. -# -# NB. Despite that we can create such table at the same time on replica, this ticket issue can be -# reproduced only when we make table on master and wait until it will be replicated on replica. -# -# After this we: -# * add one record with ID = 1 and non-empty blob into this table on REPLICA database and do commit. -# * add record with the same ID = 1 on MASTER database and do commit. -# -# Message "WARNING: Record being inserted into table TEST already exists, updating instead" must appear -# in the replication log at this point but after that message about successfully replicated segment must also be. -# Message 'ERROR: Blob ... is not found for table TEST' must NOT appear. -# If these conditions are met then script issues message starting with 'Point-1B ...' -# -# Further, we invoke ISQL with executing auxiliary script for drop all DB objects on master (with '-nod' command switch). -# After all objects will be dropped, we have to wait again until replica becomes actual with master (see 'POINT-2'). -# -# Finally, we extract metadata for master and replica and compare them (see 'f_meta_diff'). -# The only difference in metadata must be 'CREATE DATABASE' statement with different DB names - we suppress it, -# thus metadata difference must not be issued. -# -# #################### -# ### CRUCIAL NOTE ### -# #################### -# Currently, 25.06.2021, there is bug in FB 4.x and 5.x which can be seen on SECOND run of this test: message with text -# "ERROR: Record format with length 68 is not found for table TEST" will appear in it after inserting 1st record in master. -# The reason of that is "dirty" pages that remain in RDB$RELATION_FIELDS both on master and replica after dropping table. -# Following query show different data that appear in replica DB on 1st and 2nd run (just after table was created on master): -# ======= -# set blobdisplay 6; -# select rdb$descriptor as fmt_descr -# from rdb$formats natural join rdb$relations where rdb$relation_name = 'TEST'; -# ======= -# This bug was explained by dimitr, see letters 25.06.2021 11:49 and 25.06.2021 16:56. -# It will be fixed later. -# -# The only workaround to solve this problem is to make SWEEP after all DB objects have been dropped. -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -# !NB! BOTH master and replica must be cleaned up by sweep! -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -# -# ################ -# ### N O T E ### -# ################ -# Test assumes that master and replica DB have been created beforehand. -# Also, it assumes that %FB_HOME% -# eplication.conf has been prepared with apropriate parameters for replication. 
-# Particularly, name of directories and databases must have info about checked FB major version and ServerMode. -# * verbose = true // in order to find out line with message that required segment was replicated -# * section for master database with specified parameters: -# journal_directory = "!fbt_repo! mp -# b-replication.!fb_major!.!server_mode!.journal" -# journal_archive_directory = "!fbt_repo! mp -# b-replication.!fb_major!.!server_mode!.archive" -# journal_archive_command = "copy $(pathname) $(archivepathname)" -# journal_archive_timeout = 10 -# * section for replica database with specified parameter: -# journal_source_directory = "!fbt_repo! mp -# b-replication.!fb_major!.!server_mode!.archive" -# -# Master and replica databases must be created in "!fbt_repo! mp" directory and have names like these: -# 'fbt-main.fb40.SS.fdb'; 'fbt-repl.fb40.SS.fdb'; - for FB 4.x ('SS' = Super; 'CS' = Classic) -# 'fbt-main.fb50.SS.fdb'; 'fbt-repl.fb50.SS.fdb'; - for FB 5.x ('SS' = Super; 'CS' = Classic) -# NB: fixed numeric value ('40' or '50') must be used for any minor FB version (4.0; 4.0.1; 4.1; 5.0; 5.1 etc) -# -# These two databases must NOT be dropped in any of tests related to replication! -# They are created and dropped in the batch scenario which prepares FB instance to be checked for each ServerMode -# and make cleanup after it, i.e. when all tests will be completed. -# -# NB. Currently this task was implemented only in Windows batch, thus test has attribute platform = 'Windows'. -# -# Temporary comment. For debug purpoces: -# 1) find out SUFFIX of the name of FB service which is to be tested (e.g. 'DefaultInstance', '40SS' etc); -# 2) copy file %fbt-repo% ests -# unctional abloidatches\\setup-fb-for-replication.bat.txt -# to some place and rename it "*.bat"; -# 3) open this .bat in editor and asjust value of 'fbt_repo' variable; -# 4) run: setup-fb-for-replication.bat [SUFFIX_OF_FB_SERVICE] -# where SUFFIX_OF_FB_SERVICE is ending part of FB service which you want to check: -# DefaultInstance ; 40ss ; 40cs ; 50ss ; 50cs etc -# 5) batch 'setup-fb-for-replication.bat' will: -# * stop selected FB instance -# * create test databases (in !fbt_repo! mp\\); -# * prepare journal/archive sub-folders for replication (also in !fbt_repo! mp\\); -# * replace %fb_home% -# eplication.conf with apropriate -# * start selected FB instance -# 6) run this test (FB instance will be already launched by setup-fb-for-replication.bat): -# %fpt_repo% -# bt-run2.bat dblevel-triggers-must-not-fire-on-replica.fbt 50ss, etc -# -# Confirmed bug on 4.0.1.2682 and 5.0.0.338, got in the replication.log: -# ERROR: Blob 128.0 is not found for table TEST -# Checked on: -# 4.0.1.2691 SS/CS (32.3s/33.9s) -# 5.0.0.351 SS/CS (28.5s/35.3s) -# -# -# tracker_id: -# min_versions: ['4.0.1'] -# versions: 4.0.1 -# qmid: None + +""" +ID: replication.blob_not_found_in_rw_replica_if_target_row_exists +ISSUE: 7070 +TITLE: Error "BLOB is not found" while replication converts INSERT into UPDATE for a conflicting record +DESCRIPTION: + Test temporary changes mode of replica using external call: gfix -replica read_write ... + Then we create table TEST(ID int, b blob) on master, without adding records in it, and WAIT + until this table will appear on replica DB. + Maximal waiting time is limited by variable MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG). + Message starting with phrase 'POINT-1A ...' is printed when this table appears in replica DB. + + NB. 
Despite that we can create such table at the same time on replica, this ticket issue can be + reproduced only when we make table on master and wait until it will be replicated on replica. + + After this we: + * add one record with ID = 1 and non-empty blob into this table on REPLICA database and do commit. + * add record with the same ID = 1 on MASTER database and do commit. + + Message "WARNING: Record being inserted into table TEST already exists, updating instead" must appear + in the replication log at this point but after that message about successfully replicated segment must also be. + Message 'ERROR: Blob ... is not found for table TEST' must NOT appear. + If these conditions are met then script issues message starting with 'Point-1B ...' + + Further, we invoke ISQL with executing auxiliary script for drop all DB objects on master (with '-nod' command switch). + After all objects will be dropped, we have to wait again until replica becomes actual with master (see 'POINT-2'). + + Finally, we extract metadata for master and replica and compare them (see 'f_meta_diff'). + The only difference in metadata must be 'CREATE DATABASE' statement with different DB names - we suppress it, + thus metadata difference must not be issued. + + #################### + ### CRUCIAL NOTE ### + #################### + Currently, 25.06.2021, there is bug in FB 4.x and 5.x which can be seen on SECOND run of this test: message with text + "ERROR: Record format with length 68 is not found for table TEST" will appear in it after inserting 1st record in master. + The reason of that is "dirty" pages that remain in RDB$RELATION_FIELDS both on master and replica after dropping table. + Following query show different data that appear in replica DB on 1st and 2nd run (just after table was created on master): + ======= + set blobdisplay 6; + select rdb$descriptor as fmt_descr + from rdb$formats natural join rdb$relations where rdb$relation_name = 'TEST'; + ======= + This bug was explained by dimitr, see letters 25.06.2021 11:49 and 25.06.2021 16:56. + It will be fixed later. + + The only workaround to solve this problem is to make SWEEP after all DB objects have been dropped. + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + !NB! BOTH master and replica must be cleaned up by sweep! + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + ################ + ### N O T E ### + ################ + Test assumes that master and replica DB have been created beforehand. + Also, it assumes that %FB_HOME%/replication.conf has been prepared with appropriate parameters for replication. + Particularly, name of directories and databases must have info about checked FB major version and ServerMode. + * verbose = true // in order to find out line with message that required segment was replicated + * section for master database with specified parameters: + journal_directory = "!fbt_repo!/tmp/fb-replication.!fb_major!.!server_mode!.journal" + journal_archive_directory = "!fbt_repo!/tmp/fb-replication.!fb_major!.!server_mode!.archive" + journal_archive_command = "copy $(pathname) $(archivepathname)" + journal_archive_timeout = 10 + * section for replica database with specified parameter: + journal_source_directory = "!fbt_repo!/tmp/fb-replication.!fb_major!.!server_mode!.archive" + + Master and replica databases must be created in "!fbt_repo!/tmp" directory and have names like these:
+ 'fbt-main.fb40.SS.fdb'; 'fbt-repl.fb40.SS.fdb'; - for FB 4.x ('SS' = Super; 'CS' = Classic) + 'fbt-main.fb50.SS.fdb'; 'fbt-repl.fb50.SS.fdb'; - for FB 5.x ('SS' = Super; 'CS' = Classic) + NB: fixed numeric value ('40' or '50') must be used for any minor FB version (4.0; 4.0.1; 4.1; 5.0; 5.1 etc) + + These two databases must NOT be dropped in any of tests related to replication! + They are created and dropped in the batch scenario which prepares FB instance to be checked for each ServerMode + and make cleanup after it, i.e. when all tests will be completed. + + NB. Currently this task was implemented only in Windows batch, thus test has attribute platform = 'Windows'. + + Temporary comment. For debug purposes: + 1) find out SUFFIX of the name of FB service which is to be tested (e.g. 'DefaultInstance', '40SS' etc); + 2) copy file %fbt-repo%/tests/functional/tabloid/batches/setup-fb-for-replication.bat.txt + to some place and rename it "*.bat"; + 3) open this .bat in editor and adjust value of 'fbt_repo' variable; + 4) run: setup-fb-for-replication.bat [SUFFIX_OF_FB_SERVICE] + where SUFFIX_OF_FB_SERVICE is ending part of FB service which you want to check: + DefaultInstance ; 40ss ; 40cs ; 50ss ; 50cs etc + 5) batch 'setup-fb-for-replication.bat' will: + * stop selected FB instance + * create test databases (in !fbt_repo!/tmp); + * prepare journal/archive sub-folders for replication (also in !fbt_repo!/tmp); + * replace %fb_home%/replication.conf with appropriate + * start selected FB instance + 6) run this test (FB instance will be already launched by setup-fb-for-replication.bat): + %fpt_repo%/fbt-run2.bat dblevel-triggers-must-not-fire-on-replica.fbt 50ss, etc + + Confirmed bug on 4.0.1.2682 and 5.0.0.338, got in the replication.log: + ERROR: Blob 128.0 is not found for table TEST + Checked on: + 4.0.1.2691 SS/CS (32.3s/33.9s) + 5.0.0.351 SS/CS (28.5s/35.3s) +FBTEST: tests.functional.replication.blob_not_found_in_rw_replica_if_target_row_exists +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 4.0.1 -# resources: None +substitutions = [('Start removing objects in:.*', 'Start removing objects'), + ('Finish. Total objects removed: [1-9]\\d*', 'Finish. Total objects removed'), + ('.* CREATE DATABASE .*', ''), ('FMT_DESCR .*', 'FMT_DESCR')] -substitutions_1 = [('Start removing objects in:.*', 'Start removing objects'), ('Finish. Total objects removed: [1-9]\\d*', 'Finish. Total objects removed'), ('.* CREATE DATABASE .*', ''), ('FMT_DESCR .*', 'FMT_DESCR')] +db = db_factory() -init_script_1 = """""" +act = python_act('db', substitutions=substitutions) -db_1 = db_factory(sql_dialect=3, init=init_script_1) +expected_stdout = """ + POINT-1A FOUND message about replicated segment. + POINT-1B FOUND message about replicated segment. + Start removing objects + Finish. Total objects removed + POINT-2 FOUND message about replicated segment.
+""" + +@pytest.mark.skip('FIXME: Not IMPLEMENTED') +@pytest.mark.version('>=4.0.1') +@pytest.mark.platform('Windows') +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") # test_script_1 #--- -# +# # import os # import subprocess # import re # import difflib # import shutil # import time -# +# # os.environ["ISC_USER"] = user_name # os.environ["ISC_PASSWORD"] = user_password -# +# # ##################################### # MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 65 # ##################################### -# +# # svc = fdb.services.connect(host='localhost', user=user_name, password=user_password) # FB_HOME = svc.get_home_directory() # svc.close() -# +# # engine = db_conn.engine_version # 4.0; 4.1; 5.0 etc -- type float # fb_major = 'fb' + str(engine)[:1] + '0' # 'fb40'; 'fb50' -# +# # cur = db_conn.cursor() # cur.execute("select rdb$config_value from rdb$config where upper(rdb$config_name) = upper('ServerMode')") # server_mode = 'XX' @@ -159,36 +161,36 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # elif r[0] == 'Classic': # server_mode = 'CS' # cur.close() -# +# # # 'fbt-main.fb50.ss.fdb' etc: # db_main = os.path.join( context['temp_directory'], 'fbt-main.' + fb_major + '.' + server_mode + '.fdb' ) # db_repl = db_main.replace( 'fbt-main.', 'fbt-repl.') -# +# # runProgram('gfix', ['-replica', 'read_write', 'localhost:' + db_repl]) -# +# # # Folders for journalling and archieving segments. # repl_journal_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.journal' ) # repl_archive_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.archive' ) -# +# # db_conn.close() -# +# # #-------------------------------------------- -# +# # def flush_and_close( file_handle ): # # https://docs.python.org/2/library/os.html#os.fsync # # If you're starting with a Python file object f, # # first do f.flush(), and # # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. # global os -# +# # file_handle.flush() # if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: # # otherwise: "OSError: [Errno 9] Bad file descriptor"! 
# os.fsync(file_handle.fileno()) # file_handle.close() -# +# # #-------------------------------------------- -# +# # def cleanup( f_names_list ): # global os # for i in range(len( f_names_list )): @@ -199,22 +201,22 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # else: # print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.') # del_name = None -# +# # if del_name and os.path.isfile( del_name ): # os.remove( del_name ) -# +# # #-------------------------------------------- -# +# # def wait_for_data_in_replica( fb_home, max_allowed_time_for_wait, db_main, prefix_msg = '' ): -# +# # global re # global difflib # global time -# +# # replold_lines = [] # with open( os.path.join(fb_home,'replication.log'), 'r') as f: # replold_lines = f.readlines() -# +# # con = fdb.connect( dsn = 'localhost:' + db_main, no_db_triggers = 1) # cur = con.cursor() # cur.execute("select rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') from rdb$database") @@ -222,150 +224,150 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # last_generated_repl_segment = r[0] # cur.close() # con.close() -# +# # #print('last_generated_repl_segment:', last_generated_repl_segment) -# +# # # VERBOSE: Segment 1 (2582 bytes) is replicated in 1 second(s), deleting the file # p_expected=re.compile( '\\+\\s+verbose:\\s+segment\\s+%(last_generated_repl_segment)s\\s+\\(\\d+\\s+bytes\\)\\s+is\\s+replicated.*deleting' % locals(), re.IGNORECASE) # p_unexpected = re.compile( '\\+\\s+ERROR: Blob .* not found', re.IGNORECASE) -# +# # found_required_message, found_unexpected_message = False, False # for i in range(0,max_allowed_time_for_wait): # time.sleep(1) -# +# # # Get content of fb_home replication.log _after_ isql finish: # f_repllog_new = open( os.path.join(fb_home,'replication.log'), 'r') # diff_data = difflib.unified_diff( -# replold_lines, +# replold_lines, # f_repllog_new.readlines() # ) # f_repllog_new.close() -# +# # for k,d in enumerate(diff_data): # if p_unexpected.search(d): # print( (prefix_msg + ' ' if prefix_msg else '') + 'UNEXPECTED message encountered:' ) # print(d) # found_unexpected_message = True # break -# +# # if p_expected.search(d): # print( (prefix_msg + ' ' if prefix_msg else '') + 'FOUND message about replicated segment.' ) # found_required_message = True # break -# +# # if found_required_message or found_unexpected_message: # break -# +# # if not found_required_message: # print('UNEXPECTED RESULT: no message about replicated segment for %d seconds.' 
% max_allowed_time_for_wait) -# -# +# +# # #-------------------------------------------- -# +# # sql_ddl = ''' set bail on; # set list on; # --set blob all; # set blobdisplay 6; -# +# # recreate table test(id int primary key using index test_pk, bindata blob); # commit; -# +# # -- for debug only: # select rdb$get_context('SYSTEM', 'DB_NAME'), rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') as last_generated_repl_segment from rdb$database; -# select +# select # RDB$DESCRIPTOR as fmt_descr # from RDB$FORMATS natural join RDB$RELATIONS # where RDB$RELATION_NAME = 'TEST'; # quit; # ''' % locals() -# -# +# +# # f_sql_ddl = open( os.path.join(context['temp_directory'],'tmp_gh_7070_init.sql'), 'w') # f_sql_ddl.write(sql_ddl) # flush_and_close( f_sql_ddl ) -# +# # f_ddl_log = open( ''.join( (os.path.splitext(f_sql_ddl.name)[0], '.log' ) ), 'w') # f_ddl_err = open( ''.join( (os.path.splitext(f_sql_ddl.name)[0], '.err' ) ), 'w') # subprocess.call( [ context['isql_path'], 'localhost:' + db_main, '-i', f_sql_ddl.name ], stdout = f_ddl_log, stderr = f_ddl_err) # flush_and_close( f_ddl_log ) # flush_and_close( f_ddl_err ) -# +# # ############################################################################## # ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### # ############################################################################## # wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-1A' ) -# +# # con_repl = fdb.connect( dsn = 'localhost:' + db_repl) # cur_repl = con_repl.cursor() # cur_repl.execute('insert into test(id,bindata) values(?, ?)', (2, 'qwerty-on-replica')) # con_repl.commit() # cur_repl.close() # con_repl.close() -# +# # con_main = fdb.connect( dsn = 'localhost:' + db_main) # cur_main = con_main.cursor() # cur_main.execute('insert into test(id,bindata) values(?, ?)', (2, 'qwerty-on-master')) # con_main.commit() # cur_main.close() # con_main.close() -# +# # ############################################################################## # ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### # ############################################################################## # wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-1B' ) -# -# +# +# # runProgram('gfix', ['-replica', 'read_only', 'localhost:' + db_repl]) -# +# # # return initial state of master DB: # # remove all DB objects (tables, views, ...): # # -------------------------------------------- # sql_clean_ddl = os.path.join(context['files_location'],'drop-all-db-objects.sql') -# +# # f_clean_log=open( os.path.join(context['temp_directory'],'drop-all-db-objects-gh_7070.log'), 'w') # f_clean_err=open( ''.join( ( os.path.splitext(f_clean_log.name)[0], '.err') ), 'w') # subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-i', sql_clean_ddl], stdout=f_clean_log, stderr=f_clean_err ) # flush_and_close(f_clean_log) # flush_and_close(f_clean_err) -# +# # with open(f_clean_err.name,'r') as f: # for line in f: # print('UNEXPECTED STDERR in cleanup SQL: ' + line) # MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0 -# +# # with open(f_clean_log.name,'r') as f: # for line in f: # # show number of dropped objects # print(line) -# -# +# +# # ############################################################################## # ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### # ############################################################################## # wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, 
db_main, 'POINT-2' ) -# +# # f_main_meta_sql=open( os.path.join(context['temp_directory'],'db_main_meta_gh_7070.sql'), 'w') # subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_main_meta_sql, stderr=subprocess.STDOUT ) # flush_and_close( f_main_meta_sql ) -# +# # f_repl_meta_sql=open( os.path.join(context['temp_directory'],'db_repl_meta_gh_7070.sql'), 'w') # subprocess.call( [context['isql_path'], 'localhost:' + db_repl, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_repl_meta_sql, stderr=subprocess.STDOUT ) # flush_and_close( f_repl_meta_sql ) -# +# # db_main_meta=open(f_main_meta_sql.name, 'r') # db_repl_meta=open(f_repl_meta_sql.name, 'r') -# +# # diffmeta = ''.join(difflib.unified_diff( -# db_main_meta.readlines(), +# db_main_meta.readlines(), # db_repl_meta.readlines() # )) # db_main_meta.close() # db_repl_meta.close() -# +# # f_meta_diff=open( os.path.join(context['temp_directory'],'db_meta_diff_gh_7070.txt'), 'w', buffering = 0) # f_meta_diff.write(diffmeta) # flush_and_close( f_meta_diff ) -# +# # # Following must issue only TWO rows: # # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_main]' ... */ # # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_repl]' ... */ @@ -374,8 +376,8 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # for line in f: # if line[:1] in ('-', '+') and line[:3] not in ('---','+++'): # print('UNEXPECTED METADATA DIFF.: ' + line) -# -# +# +# # ###################### # ### A C H T U N G ### # ###################### @@ -384,27 +386,10 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # runProgram('gfix', ['-sweep', 'localhost:' + db_repl]) # runProgram('gfix', ['-sweep', 'localhost:' + db_main]) # ####################### -# +# # # cleanup: # ########## # cleanup( (f_sql_ddl, f_ddl_log, f_ddl_err,f_clean_log,f_clean_err,f_main_meta_sql,f_repl_meta_sql,f_meta_diff) ) -# -# +# +# #--- -act_1 = python_act('db_1', substitutions=substitutions_1) - -expected_stdout_1 = """ - POINT-1A FOUND message about replicated segment. - POINT-1B FOUND message about replicated segment. - Start removing objects - Finish. Total objects removed - POINT-2 FOUND message about replicated segment. -""" - -@pytest.mark.version('>=4.0.1') -@pytest.mark.platform('Windows') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - - diff --git a/tests/functional/replication/test_blob_segments_longer_32kb_are_not_replicated.py b/tests/functional/replication/test_blob_segments_longer_32kb_are_not_replicated.py index 9144043a..935cc713 100644 --- a/tests/functional/replication/test_blob_segments_longer_32kb_are_not_replicated.py +++ b/tests/functional/replication/test_blob_segments_longer_32kb_are_not_replicated.py @@ -1,123 +1,126 @@ #coding:utf-8 -# -# id: functional.replication.blob_segments_longer_32kb_are_not_replicated -# title: Problem with replication of BLOB segments longer than 32KB -# decription: -# https://github.com/FirebirdSQL/firebird/issues/6893 -# -# Test creates table with blob column and performs trivial scenario: -# insert into test(id, b) values( 1, ); -# -# After this we do connect and query ID of last generated segment by querying REPLICATION_SEQUENCE variable -# from SYSTEM context namespace. -# -# Then we wait until replica becomes actual to master, and this delay will last no more then threshold -# that is defined by MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG variable (measured in seconds). 
-# During this delay, we check every second for replication log and search there line with number of last generated -# segment (which was replicated and deleting finally). -# We can assume that replication finished OK only when such line is found see ('POINT-1'). -# -# Then we invoke ISQL with executing auxiliary script for drop all DB objects on master (with '-nod' command switch). -# After all objects will be dropped, we have to wait again until replica becomes actual with master (see 'POINT-2'). -# -# Finally, we extract metadata for master and replica and compare them (see 'f_meta_diff'). -# The only difference in metadata must be 'CREATE DATABASE' statement with different DB names - we suppress it, -# thus metadata difference must not be issued. -# -# ################ -# ### N O T E ### -# ################ -# Test assumes that master and replica DB have been created beforehand. -# Also, it assumes that %FB_HOME% -# eplication.conf has been prepared with apropriate parameters for replication. -# Particularly, name of directories and databases must have info about checked FB major version and ServerMode. -# * verbose = true // in order to find out line with message that required segment was replicated -# * section for master database with specified parameters: -# journal_directory = "!fbt_repo! mp -# b-replication.!fb_major!.!server_mode!.journal" -# journal_archive_directory = "!fbt_repo! mp -# b-replication.!fb_major!.!server_mode!.archive" -# journal_archive_command = "copy $(pathname) $(archivepathname)" -# journal_archive_timeout = 10 -# * section for replica database with specified parameter: -# journal_source_directory = "!fbt_repo! mp -# b-replication.!fb_major!.!server_mode!.archive" -# -# Master and replica databases must be created in "%FBT_REPO% mp" directory and have names like these: -# 'fbt-main.fb40.SS.fdb'; 'fbt-repl.fb40.SS.fdb'; - for FB 4.x ('SS' = Super; 'CS' = Classic) -# 'fbt-main.fb50.SS.fdb'; 'fbt-repl.fb50.SS.fdb'; - for FB 5.x ('SS' = Super; 'CS' = Classic) -# NB: fixed numeric value ('40' or '50') must be used for any minor FB version (4.0; 4.0.1; 4.1; 5.0; 5.1 etc) -# -# These two databases must NOT be dropped in any of tests related to replication! -# They are created and dropped in the batch scenario which prepares FB instance to be checked for each ServerMode -# and make cleanup after it, i.e. when all tests will be completed. -# -# NB. Currently this task presents only in Windows batch, thus test has attribute platform = 'Windows'. -# -# Temporary comment. For debug purpoces: -# 1) find out SUFFIX of the name of FB service which is to be tested (e.g. 'DefaultInstance', '40SS' etc); -# 2) copy file %fbt-repo% ests -# unctional abloidatches\\setup-fb-for-replication.bat.txt -# to some place and rename it "*.bat"; -# 3) open this .bat in editor and asjust value of 'fbt_repo' variable; -# 4) run: setup-fb-for-replication.bat [SUFFIX_OF_FB_SERVICE] -# where SUFFIX_OF_FB_SERVICE is ending part of FB service which you want to check: -# DefaultInstance ; 40ss ; 40cs ; 50ss ; 50cs etc -# 5) batch 'setup-fb-for-replication.bat' will: -# * stop selected FB instance -# * create test databases (in !fbt_repo! mp\\); -# * prepare journal/archive sub-folders for replication (also in !fbt_repo! 
mp\\); -# * replace %fb_home% -# eplication.conf with apropriate -# * start selected FB instance -# 6) run this test (FB instance will be already launched by setup-fb-for-replication.bat): -# %fpt_repo% -# bt-run2.bat dblevel-triggers-must-not-fire-on-replica.fbt 50ss, etc -# -# Confirmed bug on 5.0.0.88, 4.0.1.2523: record appears on replica but blob will be NULL. -# Checked on: 5.0.0.120, 4.0.1.2547 -- all OK. -# -# tracker_id: -# min_versions: ['4.0'] -# versions: 4.0 -# qmid: None + +""" +ID: replication.blob_segments_longer_32kb_are_not_replicated +ISSUE: 6893 +TITLE: Problem with replication of BLOB segments longer than 32KB +DESCRIPTION: + Test creates table with blob column and performs trivial scenario: + insert into test(id, b) values( 1, ); + + After this we do connect and query ID of last generated segment by querying REPLICATION_SEQUENCE variable + from SYSTEM context namespace. + + Then we wait until replica becomes actual to master, and this delay will last no more than the threshold + that is defined by MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG variable (measured in seconds). + During this delay, we check every second for replication log and search there line with number of last generated + segment (which was replicated and deleting finally). + We can assume that replication finished OK only when such line is found see ('POINT-1'). + + Then we invoke ISQL with executing auxiliary script for drop all DB objects on master (with '-nod' command switch). + After all objects will be dropped, we have to wait again until replica becomes actual with master (see 'POINT-2'). + + Finally, we extract metadata for master and replica and compare them (see 'f_meta_diff'). + The only difference in metadata must be 'CREATE DATABASE' statement with different DB names - we suppress it, + thus metadata difference must not be issued. + + ################ + ### N O T E ### + ################ + Test assumes that master and replica DB have been created beforehand. + Also, it assumes that %FB_HOME%/replication.conf has been prepared with appropriate parameters for replication. + Particularly, name of directories and databases must have info about checked FB major version and ServerMode. + * verbose = true // in order to find out line with message that required segment was replicated + * section for master database with specified parameters: + journal_directory = "!fbt_repo!/tmp/fb-replication.!fb_major!.!server_mode!.journal" + journal_archive_directory = "!fbt_repo!/tmp/fb-replication.!fb_major!.!server_mode!.archive" + journal_archive_command = "copy $(pathname) $(archivepathname)" + journal_archive_timeout = 10 + * section for replica database with specified parameter: + journal_source_directory = "!fbt_repo!/tmp/fb-replication.!fb_major!.!server_mode!.archive" + + Master and replica databases must be created in "%FBT_REPO%/tmp" directory and have names like these: + 'fbt-main.fb40.SS.fdb'; 'fbt-repl.fb40.SS.fdb'; - for FB 4.x ('SS' = Super; 'CS' = Classic) + 'fbt-main.fb50.SS.fdb'; 'fbt-repl.fb50.SS.fdb'; - for FB 5.x ('SS' = Super; 'CS' = Classic) + NB: fixed numeric value ('40' or '50') must be used for any minor FB version (4.0; 4.0.1; 4.1; 5.0; 5.1 etc) + + These two databases must NOT be dropped in any of tests related to replication! + They are created and dropped in the batch scenario which prepares FB instance to be checked for each ServerMode + and make cleanup after it, i.e. when all tests will be completed. + + NB.
Currently this task is implemented only in Windows batch, thus test has attribute platform = 'Windows'. + + Temporary comment. For debug purposes: + 1) find out SUFFIX of the name of FB service which is to be tested (e.g. 'DefaultInstance', '40SS' etc); + 2) copy file %fbt-repo%/tests/functional/tabloid/batches/setup-fb-for-replication.bat.txt + to some place and rename it "*.bat"; + 3) open this .bat in editor and adjust value of 'fbt_repo' variable; + 4) run: setup-fb-for-replication.bat [SUFFIX_OF_FB_SERVICE] + where SUFFIX_OF_FB_SERVICE is ending part of FB service which you want to check: + DefaultInstance ; 40ss ; 40cs ; 50ss ; 50cs etc + 5) batch 'setup-fb-for-replication.bat' will: + * stop selected FB instance + * create test databases (in !fbt_repo!/tmp); + * prepare journal/archive sub-folders for replication (also in !fbt_repo!/tmp); + * replace %fb_home%/replication.conf with appropriate + * start selected FB instance + 6) run this test (FB instance will be already launched by setup-fb-for-replication.bat): + %fpt_repo%/fbt-run2.bat dblevel-triggers-must-not-fire-on-replica.fbt 50ss, etc + + Confirmed bug on 5.0.0.88, 4.0.1.2523: record appears on replica but blob will be NULL. + Checked on: 5.0.0.120, 4.0.1.2547 -- all OK. +FBTEST: functional.replication.blob_segments_longer_32kb_are_not_replicated +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +substitutions = [('Start removing objects in:.*', 'Start removing objects'), + ('Finish. Total objects removed: [1-9]\\d*', 'Finish. Total objects removed'), + ('.* CREATE DATABASE .*', '')] -substitutions_1 = [('Start removing objects in:.*', 'Start removing objects'), ('Finish. Total objects removed: [1-9]\\d*', 'Finish. Total objects removed'), ('.* CREATE DATABASE .*', '')] +db = db_factory() -init_script_1 = """""" +act = python_act('db', substitutions=substitutions) -db_1 = db_factory(sql_dialect=3, init=init_script_1) +expected_stdout = """ + POINT-1 FOUND message about replicated segment. + REPLICATED_BLOB_OCTET_LEN 65533 + Start removing objects + Finish. Total objects removed + POINT-2 FOUND message about replicated segment. +""" + +@pytest.mark.skip('FIXME: Not IMPLEMENTED') +@pytest.mark.version('>=4.0') +@pytest.mark.platform('Windows') +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") # test_script_1 #--- -# +# # import os # import subprocess # import re # import difflib # import shutil # import time -# +# # os.environ["ISC_USER"] = user_name # os.environ["ISC_PASSWORD"] = user_password -# +# # ##################################### # MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 65 # ##################################### -# +# # svc = fdb.services.connect(host='localhost', user=user_name, password=user_password) # FB_HOME = svc.get_home_directory() # svc.close() -# +# # engine = db_conn.engine_version # 4.0; 4.1; 5.0 etc -- type float # fb_major = 'fb' + str(engine)[:1] + '0' # 'fb40'; 'fb50' -# +# # cur = db_conn.cursor() # cur.execute("select rdb$config_value from rdb$config where upper(rdb$config_name) = upper('ServerMode')") # server_mode = 'XX' @@ -129,34 +132,34 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # elif r[0] == 'Classic': # server_mode = 'CS' # cur.close() -# +# # # 'fbt-main.fb50.ss.fdb' etc: # db_main = os.path.join( context['temp_directory'], 'fbt-main.' + fb_major + '.' + server_mode + '.fdb' ) # db_repl = db_main.replace( 'fbt-main.', 'fbt-repl.') -# +# # # Folders for journalling and archieving segments.
# repl_journal_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.journal' ) # repl_archive_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.archive' ) -# +# # db_conn.close() -# +# # #-------------------------------------------- -# +# # def flush_and_close( file_handle ): # # https://docs.python.org/2/library/os.html#os.fsync # # If you're starting with a Python file object f, # # first do f.flush(), and # # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. # global os -# +# # file_handle.flush() # if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: # # otherwise: "OSError: [Errno 9] Bad file descriptor"! # os.fsync(file_handle.fileno()) # file_handle.close() -# +# # #-------------------------------------------- -# +# # def cleanup( f_names_list ): # global os # for i in range(len( f_names_list )): @@ -167,22 +170,22 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # else: # print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.') # del_name = None -# +# # if del_name and os.path.isfile( del_name ): # os.remove( del_name ) -# +# # #-------------------------------------------- -# +# # def wait_for_data_in_replica( fb_home, max_allowed_time_for_wait, db_main, prefix_msg = '' ): -# +# # global re # global difflib # global time -# +# # replold_lines = [] # with open( os.path.join(fb_home,'replication.log'), 'r') as f: # replold_lines = f.readlines() -# +# # con = fdb.connect( dsn = 'localhost:' + db_main, no_db_triggers = 1) # cur = con.cursor() # cur.execute("select rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') from rdb$database") @@ -190,127 +193,127 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # last_generated_repl_segment = r[0] # cur.close() # con.close() -# +# # #print('last_generated_repl_segment:', last_generated_repl_segment) -# +# # # VERBOSE: Segment 1 (2582 bytes) is replicated in 1 second(s), deleting the file # p=re.compile( '\\+\\s+verbose:\\s+segment\\s+%(last_generated_repl_segment)s\\s+\\(\\d+\\s+bytes\\)\\s+is\\s+replicated.*deleting' % locals(), re.IGNORECASE) -# +# # found_required_message = False # for i in range(0,max_allowed_time_for_wait): # time.sleep(1) -# +# # # Get content of fb_home replication.log _after_ isql finish: # f_repllog_new = open( os.path.join(fb_home,'replication.log'), 'r') # diff_data = difflib.unified_diff( -# replold_lines, +# replold_lines, # f_repllog_new.readlines() # ) # f_repllog_new.close() -# +# # for k,d in enumerate(diff_data): # if p.search(d): # print( (prefix_msg + ' ' if prefix_msg else '') + 'FOUND message about replicated segment.' ) # found_required_message = True # break -# +# # if found_required_message: # break -# +# # if not found_required_message: # print('UNEXPECTED RESULT: no message about replicated segment for %d seconds.' 
% max_allowed_time_for_wait) -# +# # #-------------------------------------------- -# +# # sql_ddl = ''' set bail on; # set list on; -# +# # recreate table test(id int primary key, b blob); # insert into test(id, b) values(1, lpad('',65533,gen_uuid())); # commit; -# +# # select rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') as last_generated_repl_segment from rdb$database; # quit; # ''' % locals() -# -# +# +# # f_sql_chk = open( os.path.join(context['temp_directory'],'tmp_gh_6893_test.sql'), 'w') # f_sql_chk.write(sql_ddl) # flush_and_close( f_sql_chk ) -# +# # f_sql_log = open( ''.join( (os.path.splitext(f_sql_chk.name)[0], '.log' ) ), 'w') # f_sql_err = open( ''.join( (os.path.splitext(f_sql_chk.name)[0], '.err' ) ), 'w') # subprocess.call( [ context['isql_path'], 'localhost:' + db_main, '-i', f_sql_chk.name ], stdout = f_sql_log, stderr = f_sql_err) # flush_and_close( f_sql_log ) # flush_and_close( f_sql_err ) -# +# # last_generated_repl_segment = 0 -# +# # with open(f_sql_err.name,'r') as f: # for line in f: # print('UNEXPECTED STDERR in initial SQL: ' + line) # MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0 -# +# # if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> initial SQL script finished w/o errors -# +# # ############################################################################## # ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### # ############################################################################## # wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-1' ) -# +# # # runProgram('isql', ['localhost:' + db_repl, '-nod'], "set list on; select count(*) as long_blob_on_replica from test where octet_length(b) > 32768 and id = 1;") # runProgram('isql', ['localhost:' + db_repl, '-nod'], "set list on; select octet_length(t.b) as replicated_blob_octet_len from rdb$database r left join test t on t.id = 1;") -# +# # # return initial state of master DB: # # remove all DB objects (tables, views, ...): # # -------------------------------------------- # sql_clean_ddl = os.path.join(context['files_location'],'drop-all-db-objects.sql') -# +# # f_clean_log=open( os.path.join(context['temp_directory'],'tmp_gh_6893_drop-all-db-objects.log'), 'w') # f_clean_err=open( ''.join( ( os.path.splitext(f_clean_log.name)[0], '.err') ), 'w') # subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-i', sql_clean_ddl], stdout=f_clean_log, stderr=f_clean_err ) # flush_and_close(f_clean_log) # flush_and_close(f_clean_err) -# +# # with open(f_clean_err.name,'r') as f: # for line in f: # print('UNEXPECTED STDERR in cleanup SQL: ' + line) # MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0 -# +# # with open(f_clean_log.name,'r') as f: # for line in f: # # show number of dropped objects # print(line) -# +# # if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> initial SQL script finished w/o errors -# +# # ############################################################################## # ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### # ############################################################################## # wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-2' ) -# +# # f_main_meta_sql=open( os.path.join(context['temp_directory'],'tmp_gh_6893_db_main_meta.sql'), 'w') # subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_main_meta_sql, stderr=subprocess.STDOUT ) # flush_and_close( f_main_meta_sql ) -# +# # f_repl_meta_sql=open( 
os.path.join(context['temp_directory'],'tmp_gh_6893_db_repl_meta.sql'), 'w') # subprocess.call( [context['isql_path'], 'localhost:' + db_repl, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_repl_meta_sql, stderr=subprocess.STDOUT ) # flush_and_close( f_repl_meta_sql ) -# +# # db_main_meta=open(f_main_meta_sql.name, 'r') # db_repl_meta=open(f_repl_meta_sql.name, 'r') -# +# # diffmeta = ''.join(difflib.unified_diff( -# db_main_meta.readlines(), +# db_main_meta.readlines(), # db_repl_meta.readlines() # )) # db_main_meta.close() # db_repl_meta.close() -# +# # f_meta_diff=open( os.path.join(context['temp_directory'],'tmp_gh_6893_db_meta_diff.txt'), 'w', buffering = 0) # f_meta_diff.write(diffmeta) # flush_and_close( f_meta_diff ) -# +# # # Following must issue only TWO rows: # # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_main]' ... */ # # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_repl]' ... */ @@ -319,8 +322,8 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # for line in f: # if line[:1] in ('-', '+') and line[:3] not in ('---','+++'): # print('UNEXPECTED METADATA DIFF.: ' + line) -# -# +# +# # ###################### # ### A C H T U N G ### # ###################### @@ -329,26 +332,9 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # runProgram('gfix', ['-sweep', 'localhost:' + db_repl]) # runProgram('gfix', ['-sweep', 'localhost:' + db_main]) # ####################### -# +# # # cleanup: # ########## # cleanup( (f_sql_chk, f_sql_log, f_sql_err,f_clean_log,f_clean_err,f_main_meta_sql,f_repl_meta_sql,f_meta_diff) ) -# +# #--- -act_1 = python_act('db_1', substitutions=substitutions_1) - -expected_stdout_1 = """ - POINT-1 FOUND message about replicated segment. - REPLICATED_BLOB_OCTET_LEN 65533 - Start removing objects - Finish. Total objects removed - POINT-2 FOUND message about replicated segment. -""" - -@pytest.mark.version('>=4.0') -@pytest.mark.platform('Windows') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - - diff --git a/tests/functional/replication/test_dblevel_triggers_must_not_fire_on_replica.py b/tests/functional/replication/test_dblevel_triggers_must_not_fire_on_replica.py index 0e266d37..76b96665 100644 --- a/tests/functional/replication/test_dblevel_triggers_must_not_fire_on_replica.py +++ b/tests/functional/replication/test_dblevel_triggers_must_not_fire_on_replica.py @@ -1,457 +1,93 @@ #coding:utf-8 -# -# id: tests.functional.replication.dblevel_triggers_must_not_fire_on_replica -# title: Replica DB must not fire DB-level triggers but their activity on master must be eventually seen in replica. -# decription: -# https://github.com/FirebirdSQL/firebird/issues/6850 -# -# Test creates five DB-level triggers in the master DB (on connect/disconnect; on tx start/commit/rollback). -# Each of them registers apropriate event in the table with name 'log_db_triggers_activity'. -# This table must eventually have five records in BOTH databases (i.e. not only in master, but in replica also). -# After creating metadata we make test connect to master DB to fire these triggers. -# -# Then we wait until replica becomes actual to master, and this delay will last no more then threshold -# that is defined by MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG variable (measured in seconds). -# During this delay, we check every second for replication log and search there line with number of last generated -# segment (which was replicated and deleting finally). 
-# We can assume that replication finished OK only when such line is found see ('POINT-1'). -# -# After this, we do query master and replica databases and obtain data from 'log_db_triggers_activity' table: it must -# have records about every fired trigger. Content of this table must be identical on master and replica, see queries -# to v_log_db_triggers_activity (both on master and replica DB). -# -# Then we invoke ISQL with executing auxiliary script for drop all DB objects on master (with '-nod' command switch). -# After all objects will be dropped, we have to wait again until replica becomes actual with master (see 'POINT-2'). -# -# Finally, we extract metadata for master and replica and compare them (see 'f_meta_diff'). -# The only difference in metadata must be 'CREATE DATABASE' statement with different DB names - we suppress it, -# thus metadata difference must not be issued. -# -# ################ -# ### N O T E ### -# ################ -# Test assumes that master and replica DB have been created beforehand. -# Also, it assumes that %FB_HOME% -# eplication.conf has been prepared with apropriate parameters for replication. -# Particularly, name of directories and databases must have info about checked FB major version and ServerMode. -# * verbose = true // in order to find out line with message that required segment was replicated -# * section for master database with specified parameters: -# journal_directory = "!fbt_repo! mp -# b-replication.!fb_major!.!server_mode!.journal" -# journal_archive_directory = "!fbt_repo! mp -# b-replication.!fb_major!.!server_mode!.archive" -# journal_archive_command = "copy $(pathname) $(archivepathname)" -# journal_archive_timeout = 10 -# * section for replica database with specified parameter: -# journal_source_directory = "!fbt_repo! mp -# b-replication.!fb_major!.!server_mode!.archive" -# -# Master and replica databases must be created in "!fbt_repo! mp" directory and have names like these: -# 'fbt-main.fb40.SS.fdb'; 'fbt-repl.fb40.SS.fdb'; - for FB 4.x ('SS' = Super; 'CS' = Classic) -# 'fbt-main.fb50.SS.fdb'; 'fbt-repl.fb50.SS.fdb'; - for FB 5.x ('SS' = Super; 'CS' = Classic) -# NB: fixed numeric value ('40' or '50') must be used for any minor FB version (4.0; 4.0.1; 4.1; 5.0; 5.1 etc) -# -# These two databases must NOT be dropped in any of tests related to replication! -# They are created and dropped in the batch scenario which prepares FB instance to be checked for each ServerMode -# and make cleanup after it, i.e. when all tests will be completed. -# -# NB. Currently this task was implemented only in Windows batch, thus test has attribute platform = 'Windows'. -# -# Temporary comment. For debug purpoces: -# 1) find out SUFFIX of the name of FB service which is to be tested (e.g. 'DefaultInstance', '40SS' etc); -# 2) copy file %fbt-repo% ests -# unctional abloidatches\\setup-fb-for-replication.bat.txt -# to some place and rename it "*.bat"; -# 3) open this .bat in editor and asjust value of 'fbt_repo' variable; -# 4) run: setup-fb-for-replication.bat [SUFFIX_OF_FB_SERVICE] -# where SUFFIX_OF_FB_SERVICE is ending part of FB service which you want to check: -# DefaultInstance ; 40ss ; 40cs ; 50ss ; 50cs etc -# 5) batch 'setup-fb-for-replication.bat' will: -# * stop selected FB instance -# * create test databases (in !fbt_repo! mp\\); -# * prepare journal/archive sub-folders for replication (also in !fbt_repo! 
mp\\); -# * replace %fb_home% -# eplication.conf with apropriate -# * start selected FB instance -# 6) run this test (FB instance will be already launched by setup-fb-for-replication.bat): -# %fpt_repo% -# bt-run2.bat dblevel-triggers-must-not-fire-on-replica.fbt 50ss, etc -# -# Checked on: -# 4.0.1.2519 SS: 56.48s, CS: 99.31s -# 5.0.0.82 SS: 20.63s, CS: 21.39s -# -# tracker_id: -# min_versions: ['4.0'] -# versions: 4.0 -# qmid: None + +""" +ID: replication.dblevel_triggers_must_not_fire_on_replica +ISSUE: 6850 +TITLE: Replica DB must not fire DB-level triggers but their activity on master must be eventually seen in replica +DESCRIPTION: + Test creates five DB-level triggers in the master DB (on connect/disconnect; on tx start/commit/rollback). + Each of them registers appropriate event in the table with name 'log_db_triggers_activity'. + This table must eventually have five records in BOTH databases (i.e. not only in master, but in replica also). + After creating metadata we make test connect to master DB to fire these triggers. + + Then we wait until replica becomes actual to master, and this delay will last no more than the threshold + that is defined by MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG variable (measured in seconds). + During this delay, we check every second for replication log and search there line with number of last generated + segment (which was replicated and deleting finally). + We can assume that replication finished OK only when such line is found see ('POINT-1'). + + After this, we do query master and replica databases and obtain data from 'log_db_triggers_activity' table: it must + have records about every fired trigger. Content of this table must be identical on master and replica, see queries + to v_log_db_triggers_activity (both on master and replica DB). + + Then we invoke ISQL with executing auxiliary script for drop all DB objects on master (with '-nod' command switch). + After all objects will be dropped, we have to wait again until replica becomes actual with master (see 'POINT-2'). + + Finally, we extract metadata for master and replica and compare them (see 'f_meta_diff'). + The only difference in metadata must be 'CREATE DATABASE' statement with different DB names - we suppress it, + thus metadata difference must not be issued. + + ################ + ### N O T E ### + ################ + Test assumes that master and replica DB have been created beforehand. + Also, it assumes that %FB_HOME%/replication.conf has been prepared with appropriate parameters for replication. + Particularly, name of directories and databases must have info about checked FB major version and ServerMode. + * verbose = true // in order to find out line with message that required segment was replicated + * section for master database with specified parameters: + journal_directory = "!fbt_repo!/tmp/fb-replication.!fb_major!.!server_mode!.journal" + journal_archive_directory = "!fbt_repo!/tmp/fb-replication.!fb_major!.!server_mode!.archive" + journal_archive_command = "copy $(pathname) $(archivepathname)" + journal_archive_timeout = 10 + * section for replica database with specified parameter: + journal_source_directory = "!fbt_repo!/tmp/fb-replication.!fb_major!.!server_mode!.archive" + + Master and replica databases must be created in "!fbt_repo!/tmp" directory and have names like these:
+ 'fbt-main.fb40.SS.fdb'; 'fbt-repl.fb40.SS.fdb'; - for FB 4.x ('SS' = Super; 'CS' = Classic) + 'fbt-main.fb50.SS.fdb'; 'fbt-repl.fb50.SS.fdb'; - for FB 5.x ('SS' = Super; 'CS' = Classic) + NB: fixed numeric value ('40' or '50') must be used for any minor FB version (4.0; 4.0.1; 4.1; 5.0; 5.1 etc) + + These two databases must NOT be dropped in any of tests related to replication! + They are created and dropped in the batch scenario which prepares FB instance to be checked for each ServerMode + and make cleanup after it, i.e. when all tests will be completed. + + NB. Currently this task was implemented only in Windows batch, thus test has attribute platform = 'Windows'. + + Temporary comment. For debug purposes: + 1) find out SUFFIX of the name of FB service which is to be tested (e.g. 'DefaultInstance', '40SS' etc); + 2) copy file %fbt-repo%/tests/functional/tabloid/batches/setup-fb-for-replication.bat.txt + to some place and rename it "*.bat"; + 3) open this .bat in editor and adjust value of 'fbt_repo' variable; + 4) run: setup-fb-for-replication.bat [SUFFIX_OF_FB_SERVICE] + where SUFFIX_OF_FB_SERVICE is ending part of FB service which you want to check: + DefaultInstance ; 40ss ; 40cs ; 50ss ; 50cs etc + 5) batch 'setup-fb-for-replication.bat' will: + * stop selected FB instance + * create test databases (in !fbt_repo!/tmp); + * prepare journal/archive sub-folders for replication (also in !fbt_repo!/tmp); + * replace %fb_home%/replication.conf with appropriate + * start selected FB instance + 6) run this test (FB instance will be already launched by setup-fb-for-replication.bat): + %fpt_repo%/fbt-run2.bat dblevel-triggers-must-not-fire-on-replica.fbt 50ss, etc + + Checked on: + 4.0.1.2519 SS: 56.48s, CS: 99.31s + 5.0.0.82 SS: 20.63s, CS: 21.39s +FBTEST: tests.functional.replication.dblevel_triggers_must_not_fire_on_replica +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +substitutions = [('Start removing objects in:.*', 'Start removing objects'), + ('Finish. Total objects removed: [1-9]\\d*', 'Finish. Total objects removed'), + ('.* CREATE DATABASE .*', '')] -substitutions_1 = [('Start removing objects in:.*', 'Start removing objects'), ('Finish. Total objects removed: [1-9]\\d*', 'Finish.
Total objects removed'), ('.* CREATE DATABASE .*', '')] +db = db_factory() -init_script_1 = """""" +act = python_act('db', substitutions=substitutions) -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -# test_script_1 -#--- -# -# import os -# import subprocess -# import re -# import difflib -# import shutil -# import time -# -# os.environ["ISC_USER"] = user_name -# os.environ["ISC_PASSWORD"] = user_password -# -# ##################################### -# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 65 -# ##################################### -# -# svc = fdb.services.connect(host='localhost', user=user_name, password=user_password) -# FB_HOME = svc.get_home_directory() -# svc.close() -# -# engine = db_conn.engine_version # 4.0; 4.1; 5.0 etc -- type float -# fb_major = 'fb' + str(engine)[:1] + '0' # 'fb40'; 'fb50' -# -# cur = db_conn.cursor() -# cur.execute("select rdb$config_value from rdb$config where upper(rdb$config_name) = upper('ServerMode')") -# server_mode = 'XX' -# for r in cur: -# if r[0] == 'Super': -# server_mode = 'SS' -# elif r[0] == 'SuperClassic': -# server_mode = 'SC' -# elif r[0] == 'Classic': -# server_mode = 'CS' -# cur.close() -# -# # 'fbt-main.fb50.ss.fdb' etc: -# db_main = os.path.join( context['temp_directory'], 'fbt-main.' + fb_major + '.' + server_mode + '.fdb' ) -# db_repl = db_main.replace( 'fbt-main.', 'fbt-repl.') -# -# # Folders for journalling and archieving segments. -# repl_journal_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.journal' ) -# repl_archive_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.archive' ) -# -# db_conn.close() -# -# #-------------------------------------------- -# -# def flush_and_close( file_handle ): -# # https://docs.python.org/2/library/os.html#os.fsync -# # If you're starting with a Python file object f, -# # first do f.flush(), and -# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. -# global os -# -# file_handle.flush() -# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: -# # otherwise: "OSError: [Errno 9] Bad file descriptor"! 
-# os.fsync(file_handle.fileno()) -# file_handle.close() -# -# #-------------------------------------------- -# -# def cleanup( f_names_list ): -# global os -# for i in range(len( f_names_list )): -# if type(f_names_list[i]) == file: -# del_name = f_names_list[i].name -# elif type(f_names_list[i]) == str: -# del_name = f_names_list[i] -# else: -# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.') -# del_name = None -# -# if del_name and os.path.isfile( del_name ): -# os.remove( del_name ) -# -# #-------------------------------------------- -# -# def wait_for_data_in_replica( fb_home, max_allowed_time_for_wait, db_main, prefix_msg = '' ): -# -# global re -# global difflib -# global time -# -# replold_lines = [] -# with open( os.path.join(fb_home,'replication.log'), 'r') as f: -# replold_lines = f.readlines() -# -# con = fdb.connect( dsn = 'localhost:' + db_main, no_db_triggers = 1) -# cur = con.cursor() -# cur.execute("select rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') from rdb$database") -# for r in cur: -# last_generated_repl_segment = r[0] -# cur.close() -# con.close() -# -# #print('last_generated_repl_segment:', last_generated_repl_segment) -# -# -# # +IMAGE-PC1 (replica) Fri Jun 11 17:57:01 2021 -# # + Database: C:\\FBTESTING\\QA\\FBT-REPO\\TMP\\FBT-REPL.FB50.FDB -# # + VERBOSE: Added 1 segment(s) to the processing queue -# # + -# # +IMAGE-PC1 (replica) Fri Jun 11 17:57:04 2021 -# # + Database: C:\\FBTESTING\\QA\\FBT-REPO\\TMP\\FBT-REPL.FB50.FDB -# # + VERBOSE: Segment 1 (2582 bytes) is replicated in 1 second(s), deleting the file -# -# p=re.compile( '\\+\\s+verbose:\\s+segment\\s+%(last_generated_repl_segment)s\\s+\\(\\d+\\s+bytes\\)\\s+is\\s+replicated.*deleting' % locals(), re.IGNORECASE) -# -# found_required_message = False -# for i in range(0,max_allowed_time_for_wait): -# time.sleep(1) -# -# # Get content of fb_home replication.log _after_ isql finish: -# f_repllog_new = open( os.path.join(fb_home,'replication.log'), 'r') -# diff_data = difflib.unified_diff( -# replold_lines, -# f_repllog_new.readlines() -# ) -# f_repllog_new.close() -# -# for k,d in enumerate(diff_data): -# if p.search(d): -# print( (prefix_msg + ' ' if prefix_msg else '') + 'FOUND message about replicated segment.' ) -# found_required_message = True -# break -# -# if found_required_message: -# break -# -# if not found_required_message: -# print('UNEXPECTED RESULT: no message about replicated segment for %d seconds.' % max_allowed_time_for_wait) -# -# #-------------------------------------------- -# -# sql_ddl = ''' set bail on; -# set list on; -# -# select mon$database_name from mon$database; -# -# set term ^; -# execute block as -# begin -# -- Define context variable in order to prevent -# -- DB-level triggers from firing during this execution: -# rdb$set_context('USER_SESSION', 'SKIP_DBLEVEL_TRG','1'); -# end -# ^ -# set term ;^ -# -# -- ::: NB ::: -# -- We can not start this script from 'zero-point', i.e. 'create table ...; create view ... ;' etc, -# -- because it will fail if master or replica DB contain some objects which could remain there -# -- due to fail of some previous test which also had deal with replication and used these databases. 
-# -- Here we must remove all dependencies and only after this table can be recreated: -# create or alter trigger trg_tx_start on transaction start as begin end; -# create or alter trigger trg_tx_commit on transaction commit as begin end; -# create or alter trigger trg_tx_rollback on transaction rollback as begin end; -# create or alter trigger trg_connect active on connect as begin end; -# create or alter trigger trg_disconnect active on disconnect as begin end; -# create or alter procedure sp_log_dblevel_trg_event as begin end; -# create or alter view v_log_db_triggers_activity as select 1 x from rdb$database; -# -# -- result: no more objects that depend on table 'log_db_triggers_activity', now we can recreate it. -# -# recreate table log_db_triggers_activity ( -# id int generated by default as identity constraint pk_log_db_triggers_activity primary key -# ,dts timestamp default 'now' -# ,att integer default current_connection -# ,trn integer default current_transaction -# ,app varchar(80) -# ,evt varchar(80) -# ); -# -# create or alter view v_log_db_triggers_activity as select * from log_db_triggers_activity; -# -# set term ^; -# create or alter procedure sp_log_dblevel_trg_event ( -# a_event_type varchar(80) -- type of column log_db_triggers_activity.evt -# ,a_working_tx int default null -# ) -# as -# declare v_app varchar(255); -# declare p smallint; -# declare back_slash char(1); -# begin -# v_app = reverse( right(rdb$get_context('SYSTEM','CLIENT_PROCESS'), 80) ); -# back_slash = ascii_char(92); -- backward slash; do NOT specify it literally otherwise Python will handle it as empty string! -# p = maxvalue(position(back_slash in v_app ), position('/' in v_app )); -# v_app = reverse(substring(v_app from 1 for p-1)); -# execute statement( 'insert into v_log_db_triggers_activity( trn, app, evt) values( ?, ?, ? 
)' ) ( coalesce(:a_working_tx, current_transaction), :v_app, :a_event_type) ; -# -# end -# ^ -# -# create or alter trigger trg_tx_start on transaction start as -# begin -# if (rdb$get_context('USER_SESSION', 'SKIP_DBLEVEL_TRG') is null ) then -# -- execute procedure sp_log_dblevel_trg_event( 'TX_START, TIL=' || coalesce( rdb$get_context('SYSTEM', 'ISOLATION_LEVEL'), '[null]' ) ); -# execute procedure sp_log_dblevel_trg_event( 'TX_START' ); -# end -# ^ -# -# create or alter trigger trg_tx_commit on transaction commit as -# begin -# if (rdb$get_context('USER_SESSION', 'SKIP_DBLEVEL_TRG') is null ) then -# -- execute procedure sp_log_dblevel_trg_event( 'TX_COMMIT, TIL=' || coalesce( rdb$get_context('SYSTEM', 'ISOLATION_LEVEL'), '[null]' ) ); -# execute procedure sp_log_dblevel_trg_event( 'TX_COMMIT' ); -# end -# ^ -# -# create or alter trigger trg_tx_rollback on transaction rollback as -# declare v_current_tx int; -# begin -# v_current_tx = current_transaction; -# if (rdb$get_context('USER_SESSION', 'SKIP_DBLEVEL_TRG') is null ) then -# in autonomous transaction do -# -- execute procedure sp_log_dblevel_trg_event( 'TX_ROLLBACK, TIL=' || coalesce( rdb$get_context('SYSTEM', 'ISOLATION_LEVEL'), '[null]' ), v_current_tx ); -# execute procedure sp_log_dblevel_trg_event( 'TX_ROLLBACK' ); -# end -# ^ -# -# create or alter trigger trg_connect active on connect position 0 as -# begin -# if (rdb$get_context('USER_SESSION', 'SKIP_DBLEVEL_TRG') is null ) then -# execute procedure sp_log_dblevel_trg_event( 'DB_ATTACH' ); -# end -# ^ -# -# create or alter trigger trg_disconnect active on disconnect position 0 as -# declare v_current_tx int; -# begin -# if (rdb$get_context('USER_SESSION', 'SKIP_DBLEVEL_TRG') is null ) then -# execute procedure sp_log_dblevel_trg_event( 'DB_DETACH'); -# end -# ^ -# set term ;^ -# commit; -# -# select rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') as last_generated_repl_segment from rdb$database; -# quit; -# ''' % locals() -# -# -# f_sql_chk = open( os.path.join(context['temp_directory'],'tmp_repltest_skip_db_trg.sql'), 'w') -# f_sql_chk.write(sql_ddl) -# flush_and_close( f_sql_chk ) -# -# # Get content of FB_HOME replication.log _before_ launching ISQL: -# ############# -# -# replold_lines = [] -# with open( os.path.join(FB_HOME,'replication.log'), 'r') as f: -# replold_lines = f.readlines() -# -# f_sql_log = open( ''.join( (os.path.splitext(f_sql_chk.name)[0], '.log' ) ), 'w') -# f_sql_err = open( ''.join( (os.path.splitext(f_sql_chk.name)[0], '.err' ) ), 'w') -# subprocess.call( [ context['isql_path'], 'localhost:' + db_main, '-i', f_sql_chk.name ], stdout = f_sql_log, stderr = f_sql_err) -# flush_and_close( f_sql_log ) -# flush_and_close( f_sql_err ) -# -# last_generated_repl_segment = 0 -# -# with open(f_sql_err.name,'r') as f: -# for line in f: -# print('UNEXPECTED STDERR in initial SQL: ' + line) -# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0 -# -# if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> initial SQL script finished w/o errors -# -# # Test connect to master DB, just to fire DB-level triggers: -# ########################### -# con1 = fdb.connect( dsn = 'localhost:' + db_main) -# con1.execute_immediate('recreate table test(id int)') -# con1.close() -# -# ############################################################################## -# ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### -# ############################################################################## -# wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-1' ) -# 
-# runProgram('isql', ['localhost:' + db_main, '-nod'], 'set count on; set list on; select evt as main_db_trigger_fired from v_log_db_triggers_activity order by id;') -# runProgram('isql', ['localhost:' + db_repl, '-nod'], 'set count on; set list on; select evt as repl_db_trigger_fired from v_log_db_triggers_activity order by id;') -# -# -# # return initial state of master DB: -# # remove all DB objects (tables, views, ...): -# # -------------------------------------------- -# sql_clean_ddl = os.path.join(context['files_location'],'drop-all-db-objects.sql') -# -# f_clean_log=open( os.path.join(context['temp_directory'],'drop-all-db-objects.log'), 'w') -# f_clean_err=open( ''.join( ( os.path.splitext(f_clean_log.name)[0], '.err') ), 'w') -# subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-i', sql_clean_ddl], stdout=f_clean_log, stderr=f_clean_err ) -# flush_and_close(f_clean_log) -# flush_and_close(f_clean_err) -# -# with open(f_clean_err.name,'r') as f: -# for line in f: -# print('UNEXPECTED STDERR in cleanup SQL: ' + line) -# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0 -# -# with open(f_clean_log.name,'r') as f: -# for line in f: -# # show number of dropped objects -# print(line) -# -# if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> initial SQL script finished w/o errors -# -# ############################################################################## -# ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### -# ############################################################################## -# wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-2' ) -# -# f_main_meta_sql=open( os.path.join(context['temp_directory'],'db_main_meta.sql'), 'w') -# subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_main_meta_sql, stderr=subprocess.STDOUT ) -# flush_and_close( f_main_meta_sql ) -# -# f_repl_meta_sql=open( os.path.join(context['temp_directory'],'db_repl_meta.sql'), 'w') -# subprocess.call( [context['isql_path'], 'localhost:' + db_repl, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_repl_meta_sql, stderr=subprocess.STDOUT ) -# flush_and_close( f_repl_meta_sql ) -# -# db_main_meta=open(f_main_meta_sql.name, 'r') -# db_repl_meta=open(f_repl_meta_sql.name, 'r') -# -# diffmeta = ''.join(difflib.unified_diff( -# db_main_meta.readlines(), -# db_repl_meta.readlines() -# )) -# db_main_meta.close() -# db_repl_meta.close() -# -# f_meta_diff=open( os.path.join(context['temp_directory'],'db_meta_diff.txt'), 'w', buffering = 0) -# f_meta_diff.write(diffmeta) -# flush_and_close( f_meta_diff ) -# -# # Following must issue only TWO rows: -# # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_main]' ... */ -# # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_repl]' ... */ -# # Only thes lines will be suppressed further (see subst. section): -# with open(f_meta_diff.name, 'r') as f: -# for line in f: -# if line[:1] in ('-', '+') and line[:3] not in ('---','+++'): -# print('UNEXPECTED METADATA DIFF.: ' + line) -# -# -# # cleanup: -# ########## -# cleanup( (f_sql_chk, f_sql_log, f_sql_err,f_clean_log,f_clean_err,f_main_meta_sql,f_repl_meta_sql,f_meta_diff) ) -# -#--- -act_1 = python_act('db_1', substitutions=substitutions_1) - -expected_stdout_1 = """ +expected_stdout = """ POINT-1 FOUND message about replicated segment. MAIN_DB_TRIGGER_FIRED DB_ATTACH @@ -474,10 +110,360 @@ expected_stdout_1 = """ POINT-2 FOUND message about replicated segment. 
""" +@pytest.mark.skip('FIXME: Not IMPLEMENTED') @pytest.mark.version('>=4.0') @pytest.mark.platform('Windows') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") +# test_script_1 +#--- +# +# import os +# import subprocess +# import re +# import difflib +# import shutil +# import time +# +# os.environ["ISC_USER"] = user_name +# os.environ["ISC_PASSWORD"] = user_password +# +# ##################################### +# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 65 +# ##################################### +# +# svc = fdb.services.connect(host='localhost', user=user_name, password=user_password) +# FB_HOME = svc.get_home_directory() +# svc.close() +# +# engine = db_conn.engine_version # 4.0; 4.1; 5.0 etc -- type float +# fb_major = 'fb' + str(engine)[:1] + '0' # 'fb40'; 'fb50' +# +# cur = db_conn.cursor() +# cur.execute("select rdb$config_value from rdb$config where upper(rdb$config_name) = upper('ServerMode')") +# server_mode = 'XX' +# for r in cur: +# if r[0] == 'Super': +# server_mode = 'SS' +# elif r[0] == 'SuperClassic': +# server_mode = 'SC' +# elif r[0] == 'Classic': +# server_mode = 'CS' +# cur.close() +# +# # 'fbt-main.fb50.ss.fdb' etc: +# db_main = os.path.join( context['temp_directory'], 'fbt-main.' + fb_major + '.' + server_mode + '.fdb' ) +# db_repl = db_main.replace( 'fbt-main.', 'fbt-repl.') +# +# # Folders for journalling and archieving segments. +# repl_journal_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.journal' ) +# repl_archive_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.archive' ) +# +# db_conn.close() +# +# #-------------------------------------------- +# +# def flush_and_close( file_handle ): +# # https://docs.python.org/2/library/os.html#os.fsync +# # If you're starting with a Python file object f, +# # first do f.flush(), and +# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. +# global os +# +# file_handle.flush() +# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: +# # otherwise: "OSError: [Errno 9] Bad file descriptor"! 
+# os.fsync(file_handle.fileno()) +# file_handle.close() +# +# #-------------------------------------------- +# +# def cleanup( f_names_list ): +# global os +# for i in range(len( f_names_list )): +# if type(f_names_list[i]) == file: +# del_name = f_names_list[i].name +# elif type(f_names_list[i]) == str: +# del_name = f_names_list[i] +# else: +# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.') +# del_name = None +# +# if del_name and os.path.isfile( del_name ): +# os.remove( del_name ) +# +# #-------------------------------------------- +# +# def wait_for_data_in_replica( fb_home, max_allowed_time_for_wait, db_main, prefix_msg = '' ): +# +# global re +# global difflib +# global time +# +# replold_lines = [] +# with open( os.path.join(fb_home,'replication.log'), 'r') as f: +# replold_lines = f.readlines() +# +# con = fdb.connect( dsn = 'localhost:' + db_main, no_db_triggers = 1) +# cur = con.cursor() +# cur.execute("select rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') from rdb$database") +# for r in cur: +# last_generated_repl_segment = r[0] +# cur.close() +# con.close() +# +# #print('last_generated_repl_segment:', last_generated_repl_segment) +# +# +# # +IMAGE-PC1 (replica) Fri Jun 11 17:57:01 2021 +# # + Database: C:\\FBTESTING\\QA\\FBT-REPO\\TMP\\FBT-REPL.FB50.FDB +# # + VERBOSE: Added 1 segment(s) to the processing queue +# # + +# # +IMAGE-PC1 (replica) Fri Jun 11 17:57:04 2021 +# # + Database: C:\\FBTESTING\\QA\\FBT-REPO\\TMP\\FBT-REPL.FB50.FDB +# # + VERBOSE: Segment 1 (2582 bytes) is replicated in 1 second(s), deleting the file +# +# p=re.compile( '\\+\\s+verbose:\\s+segment\\s+%(last_generated_repl_segment)s\\s+\\(\\d+\\s+bytes\\)\\s+is\\s+replicated.*deleting' % locals(), re.IGNORECASE) +# +# found_required_message = False +# for i in range(0,max_allowed_time_for_wait): +# time.sleep(1) +# +# # Get content of fb_home replication.log _after_ isql finish: +# f_repllog_new = open( os.path.join(fb_home,'replication.log'), 'r') +# diff_data = difflib.unified_diff( +# replold_lines, +# f_repllog_new.readlines() +# ) +# f_repllog_new.close() +# +# for k,d in enumerate(diff_data): +# if p.search(d): +# print( (prefix_msg + ' ' if prefix_msg else '') + 'FOUND message about replicated segment.' ) +# found_required_message = True +# break +# +# if found_required_message: +# break +# +# if not found_required_message: +# print('UNEXPECTED RESULT: no message about replicated segment for %d seconds.' % max_allowed_time_for_wait) +# +# #-------------------------------------------- +# +# sql_ddl = ''' set bail on; +# set list on; +# +# select mon$database_name from mon$database; +# +# set term ^; +# execute block as +# begin +# -- Define context variable in order to prevent +# -- DB-level triggers from firing during this execution: +# rdb$set_context('USER_SESSION', 'SKIP_DBLEVEL_TRG','1'); +# end +# ^ +# set term ;^ +# +# -- ::: NB ::: +# -- We can not start this script from 'zero-point', i.e. 'create table ...; create view ... ;' etc, +# -- because it will fail if master or replica DB contain some objects which could remain there +# -- due to fail of some previous test which also had deal with replication and used these databases. 
+# -- Here we must remove all dependencies and only after this table can be recreated: +# create or alter trigger trg_tx_start on transaction start as begin end; +# create or alter trigger trg_tx_commit on transaction commit as begin end; +# create or alter trigger trg_tx_rollback on transaction rollback as begin end; +# create or alter trigger trg_connect active on connect as begin end; +# create or alter trigger trg_disconnect active on disconnect as begin end; +# create or alter procedure sp_log_dblevel_trg_event as begin end; +# create or alter view v_log_db_triggers_activity as select 1 x from rdb$database; +# +# -- result: no more objects that depend on table 'log_db_triggers_activity', now we can recreate it. +# +# recreate table log_db_triggers_activity ( +# id int generated by default as identity constraint pk_log_db_triggers_activity primary key +# ,dts timestamp default 'now' +# ,att integer default current_connection +# ,trn integer default current_transaction +# ,app varchar(80) +# ,evt varchar(80) +# ); +# +# create or alter view v_log_db_triggers_activity as select * from log_db_triggers_activity; +# +# set term ^; +# create or alter procedure sp_log_dblevel_trg_event ( +# a_event_type varchar(80) -- type of column log_db_triggers_activity.evt +# ,a_working_tx int default null +# ) +# as +# declare v_app varchar(255); +# declare p smallint; +# declare back_slash char(1); +# begin +# v_app = reverse( right(rdb$get_context('SYSTEM','CLIENT_PROCESS'), 80) ); +# back_slash = ascii_char(92); -- backward slash; do NOT specify it literally otherwise Python will handle it as empty string! +# p = maxvalue(position(back_slash in v_app ), position('/' in v_app )); +# v_app = reverse(substring(v_app from 1 for p-1)); +# execute statement( 'insert into v_log_db_triggers_activity( trn, app, evt) values( ?, ?, ? 
)' ) ( coalesce(:a_working_tx, current_transaction), :v_app, :a_event_type) ; +# +# end +# ^ +# +# create or alter trigger trg_tx_start on transaction start as +# begin +# if (rdb$get_context('USER_SESSION', 'SKIP_DBLEVEL_TRG') is null ) then +# -- execute procedure sp_log_dblevel_trg_event( 'TX_START, TIL=' || coalesce( rdb$get_context('SYSTEM', 'ISOLATION_LEVEL'), '[null]' ) ); +# execute procedure sp_log_dblevel_trg_event( 'TX_START' ); +# end +# ^ +# +# create or alter trigger trg_tx_commit on transaction commit as +# begin +# if (rdb$get_context('USER_SESSION', 'SKIP_DBLEVEL_TRG') is null ) then +# -- execute procedure sp_log_dblevel_trg_event( 'TX_COMMIT, TIL=' || coalesce( rdb$get_context('SYSTEM', 'ISOLATION_LEVEL'), '[null]' ) ); +# execute procedure sp_log_dblevel_trg_event( 'TX_COMMIT' ); +# end +# ^ +# +# create or alter trigger trg_tx_rollback on transaction rollback as +# declare v_current_tx int; +# begin +# v_current_tx = current_transaction; +# if (rdb$get_context('USER_SESSION', 'SKIP_DBLEVEL_TRG') is null ) then +# in autonomous transaction do +# -- execute procedure sp_log_dblevel_trg_event( 'TX_ROLLBACK, TIL=' || coalesce( rdb$get_context('SYSTEM', 'ISOLATION_LEVEL'), '[null]' ), v_current_tx ); +# execute procedure sp_log_dblevel_trg_event( 'TX_ROLLBACK' ); +# end +# ^ +# +# create or alter trigger trg_connect active on connect position 0 as +# begin +# if (rdb$get_context('USER_SESSION', 'SKIP_DBLEVEL_TRG') is null ) then +# execute procedure sp_log_dblevel_trg_event( 'DB_ATTACH' ); +# end +# ^ +# +# create or alter trigger trg_disconnect active on disconnect position 0 as +# declare v_current_tx int; +# begin +# if (rdb$get_context('USER_SESSION', 'SKIP_DBLEVEL_TRG') is null ) then +# execute procedure sp_log_dblevel_trg_event( 'DB_DETACH'); +# end +# ^ +# set term ;^ +# commit; +# +# select rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') as last_generated_repl_segment from rdb$database; +# quit; +# ''' % locals() +# +# +# f_sql_chk = open( os.path.join(context['temp_directory'],'tmp_repltest_skip_db_trg.sql'), 'w') +# f_sql_chk.write(sql_ddl) +# flush_and_close( f_sql_chk ) +# +# # Get content of FB_HOME replication.log _before_ launching ISQL: +# ############# +# +# replold_lines = [] +# with open( os.path.join(FB_HOME,'replication.log'), 'r') as f: +# replold_lines = f.readlines() +# +# f_sql_log = open( ''.join( (os.path.splitext(f_sql_chk.name)[0], '.log' ) ), 'w') +# f_sql_err = open( ''.join( (os.path.splitext(f_sql_chk.name)[0], '.err' ) ), 'w') +# subprocess.call( [ context['isql_path'], 'localhost:' + db_main, '-i', f_sql_chk.name ], stdout = f_sql_log, stderr = f_sql_err) +# flush_and_close( f_sql_log ) +# flush_and_close( f_sql_err ) +# +# last_generated_repl_segment = 0 +# +# with open(f_sql_err.name,'r') as f: +# for line in f: +# print('UNEXPECTED STDERR in initial SQL: ' + line) +# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0 +# +# if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> initial SQL script finished w/o errors +# +# # Test connect to master DB, just to fire DB-level triggers: +# ########################### +# con1 = fdb.connect( dsn = 'localhost:' + db_main) +# con1.execute_immediate('recreate table test(id int)') +# con1.close() +# +# ############################################################################## +# ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### +# ############################################################################## +# wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-1' ) +# 
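+# # The call above illustrates the core waiting technique of this test: poll the server-wide
+# # replication.log once per second and stop as soon as the "segment N ... is replicated"
+# # message for the last generated segment appears, or give up after the allowed timeout.
+# # A minimal standalone sketch of that idea; the helper name, timeout and arguments below
+# # are purely illustrative assumptions, not part of the original script:
+# #
+# #   import re, time
+# #
+# #   def wait_for_replicated_segment(log_path, segment_no, timeout_sec = 60):
+# #       p = re.compile(r'segment\s+%s\s+\(\d+\s+bytes\)\s+is\s+replicated' % segment_no, re.IGNORECASE)
+# #       for _ in range(timeout_sec):
+# #           with open(log_path, 'r') as f:
+# #               if any(p.search(line) for line in f):   # message found => segment applied on replica
+# #                   return True
+# #           time.sleep(1)                               # not there yet: wait and re-read the log
+# #       return False
+#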
+# runProgram('isql', ['localhost:' + db_main, '-nod'], 'set count on; set list on; select evt as main_db_trigger_fired from v_log_db_triggers_activity order by id;') +# runProgram('isql', ['localhost:' + db_repl, '-nod'], 'set count on; set list on; select evt as repl_db_trigger_fired from v_log_db_triggers_activity order by id;') +# +# +# # return initial state of master DB: +# # remove all DB objects (tables, views, ...): +# # -------------------------------------------- +# sql_clean_ddl = os.path.join(context['files_location'],'drop-all-db-objects.sql') +# +# f_clean_log=open( os.path.join(context['temp_directory'],'drop-all-db-objects.log'), 'w') +# f_clean_err=open( ''.join( ( os.path.splitext(f_clean_log.name)[0], '.err') ), 'w') +# subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-i', sql_clean_ddl], stdout=f_clean_log, stderr=f_clean_err ) +# flush_and_close(f_clean_log) +# flush_and_close(f_clean_err) +# +# with open(f_clean_err.name,'r') as f: +# for line in f: +# print('UNEXPECTED STDERR in cleanup SQL: ' + line) +# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0 +# +# with open(f_clean_log.name,'r') as f: +# for line in f: +# # show number of dropped objects +# print(line) +# +# if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> initial SQL script finished w/o errors +# +# ############################################################################## +# ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### +# ############################################################################## +# wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-2' ) +# +# f_main_meta_sql=open( os.path.join(context['temp_directory'],'db_main_meta.sql'), 'w') +# subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_main_meta_sql, stderr=subprocess.STDOUT ) +# flush_and_close( f_main_meta_sql ) +# +# f_repl_meta_sql=open( os.path.join(context['temp_directory'],'db_repl_meta.sql'), 'w') +# subprocess.call( [context['isql_path'], 'localhost:' + db_repl, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_repl_meta_sql, stderr=subprocess.STDOUT ) +# flush_and_close( f_repl_meta_sql ) +# +# db_main_meta=open(f_main_meta_sql.name, 'r') +# db_repl_meta=open(f_repl_meta_sql.name, 'r') +# +# diffmeta = ''.join(difflib.unified_diff( +# db_main_meta.readlines(), +# db_repl_meta.readlines() +# )) +# db_main_meta.close() +# db_repl_meta.close() +# +# f_meta_diff=open( os.path.join(context['temp_directory'],'db_meta_diff.txt'), 'w', buffering = 0) +# f_meta_diff.write(diffmeta) +# flush_and_close( f_meta_diff ) +# +# # Following must issue only TWO rows: +# # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_main]' ... */ +# # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_repl]' ... */ +# # Only thes lines will be suppressed further (see subst. 
section): +# with open(f_meta_diff.name, 'r') as f: +# for line in f: +# if line[:1] in ('-', '+') and line[:3] not in ('---','+++'): +# print('UNEXPECTED METADATA DIFF.: ' + line) +# +# +# # cleanup: +# ########## +# cleanup( (f_sql_chk, f_sql_log, f_sql_err,f_clean_log,f_clean_err,f_main_meta_sql,f_repl_meta_sql,f_meta_diff) ) +# +#--- diff --git a/tests/functional/replication/test_ddl_triggers_must_not_fire_on_replica.py b/tests/functional/replication/test_ddl_triggers_must_not_fire_on_replica.py index 503239bf..62122e83 100644 --- a/tests/functional/replication/test_ddl_triggers_must_not_fire_on_replica.py +++ b/tests/functional/replication/test_ddl_triggers_must_not_fire_on_replica.py @@ -2,62 +2,57 @@ # # id: tests.functional.replication.ddl_triggers_must_not_fire_on_replica # title: DDL-triggers must fire only on master DB. -# decription: +# decription: # Test creates all kinds of DDL triggers in the master DB. # Each of them registers apropriate event in the table with name 'log_ddl_triggers_activity'. # After this we create all kinds of DB objects (tables, procedure, function, etc) in master DB to fire these triggers. -# +# # Then we wait until replica becomes actual to master, and this delay will last no more then threshold # that is defined by MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG variable (measured in seconds). # During this delay, we check every second for replication log and search there line with number of last generated # segment (which was replicated and deleting finally). # We can assume that replication finished OK only when such line is found see ('POINT-1'). -# +# # After this, we do following: # 1) compare metadata of master and replica DB, they must be equal (except file names); # 2) obtain data from 'log_ddl_triggers_activity' table: # 2.1) on master it must have record about every DDL-trigger that fired; # 2.2) on replica this table must be EMPTY (bacause DDL triggers must not fire on replica). -# +# # Then we invoke ISQL with executing auxiliary script for drop all DB objects on master (with '-nod' command switch). # After all objects will be dropped, we have to wait again until replica becomes actual with master (see 'POINT-2'). -# +# # Finally, we extract metadata for master and replica after this cleanup and compare them. -# +# # ################ # ### N O T E ### # ################ # Test assumes that master and replica DB have been created beforehand. -# Also, it assumes that %FB_HOME% -# eplication.conf has been prepared with apropriate parameters for replication. +# Also, it assumes that %FB_HOME%/replication.conf has been prepared with apropriate parameters for replication. # Particularly, name of directories and databases must have info about checked FB major version and ServerMode. # * verbose = true // in order to find out line with message that required segment was replicated # * section for master database with specified parameters: -# journal_directory = "!fbt_repo! mp -# b-replication.!fb_major!.!server_mode!.journal" -# journal_archive_directory = "!fbt_repo! mp -# b-replication.!fb_major!.!server_mode!.archive" +# journal_directory = "!fbt_repo!/tmp/fb-replication.!fb_major!.!server_mode!.journal" +# journal_archive_directory = "!fbt_repo!/tmp/fb-replication.!fb_major!.!server_mode!.archive" # journal_archive_command = "copy $(pathname) $(archivepathname)" # journal_archive_timeout = 10 # * section for replica database with specified parameter: -# journal_source_directory = "!fbt_repo! 
mp -# b-replication.!fb_major!.!server_mode!.archive" -# +# journal_source_directory = "!fbt_repo!/tmp/fb-replication.!fb_major!.!server_mode!.archive" +# # Master and replica databases must be created in "!fbt_repo! mp" directory and have names like these: # 'fbt-main.fb40.SS.fdb'; 'fbt-repl.fb40.SS.fdb'; - for FB 4.x ('SS' = Super; 'CS' = Classic) # 'fbt-main.fb50.SS.fdb'; 'fbt-repl.fb50.SS.fdb'; - for FB 5.x ('SS' = Super; 'CS' = Classic) # NB: fixed numeric value ('40' or '50') must be used for any minor FB version (4.0; 4.0.1; 4.1; 5.0; 5.1 etc) -# +# # These two databases must NOT be dropped in any of tests related to replication! # They are created and dropped in the batch scenario which prepares FB instance to be checked for each ServerMode # and make cleanup after it, i.e. when all tests will be completed. -# +# # NB. Currently this task was implemented only in Windows batch, thus test has attribute platform = 'Windows'. -# +# # Temporary comment. For debug purpoces: # 1) find out SUFFIX of the name of FB service which is to be tested (e.g. 'DefaultInstance', '40SS' etc); -# 2) copy file %fbt-repo% ests -# unctional abloidatches\\setup-fb-for-replication.bat.txt +# 2) copy file %fbt-repo%/tests/functional/tabloid/batches/setup-fb-for-replication.bat.txt # to some place and rename it "*.bat"; # 3) open this .bat in editor and asjust value of 'fbt_repo' variable; # 4) run: setup-fb-for-replication.bat [SUFFIX_OF_FB_SERVICE] @@ -65,517 +60,106 @@ # DefaultInstance ; 40ss ; 40cs ; 50ss ; 50cs etc # 5) batch 'setup-fb-for-replication.bat' will: # * stop selected FB instance -# * create test databases (in !fbt_repo! mp\\); -# * prepare journal/archive sub-folders for replication (also in !fbt_repo! mp\\); -# * replace %fb_home% -# eplication.conf with apropriate +# * create test databases (in !fbt_repo!/tmp); +# * prepare journal/archive sub-folders for replication (also in !fbt_repo!/tmp); +# * replace %fb_home%/replication.conf with apropriate # * start selected FB instance # 6) run this test (FB instance will be already launched by setup-fb-for-replication.bat): -# %fpt_repo% -# bt-run2.bat ddl-triggers-must-not-fire-on-replica.fbt 50ss, etc -# +# %fpt_repo%/fbt-run2.bat ddl-triggers-must-not-fire-on-replica.fbt 50ss, etc +# # Checked on: # 4.0.1.2547 (SS/CS), 5.0.0.120 (SS/CS). -# -# tracker_id: +# +# tracker_id: # min_versions: ['4.0'] # versions: 4.0 # qmid: None +""" +ID: replication.ddl_triggers_must_not_fire_on_replica +TITLE: DDL-triggers must fire only on master DB +DESCRIPTION: + Test creates all kinds of DDL triggers in the master DB. + Each of them registers apropriate event in the table with name 'log_ddl_triggers_activity'. + After this we create all kinds of DB objects (tables, procedure, function, etc) in master DB to fire these triggers. + + Then we wait until replica becomes actual to master, and this delay will last no more then threshold + that is defined by MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG variable (measured in seconds). + During this delay, we check every second for replication log and search there line with number of last generated + segment (which was replicated and deleting finally). + We can assume that replication finished OK only when such line is found see ('POINT-1'). 
+
+    After this, we do the following:
+    1) compare metadata of master and replica DB; they must be equal (except file names);
+    2) obtain data from 'log_ddl_triggers_activity' table:
+    2.1) on master it must have a record for every DDL trigger that fired;
+    2.2) on replica this table must be EMPTY (because DDL triggers must not fire on replica).
+
+    Then we invoke ISQL to execute an auxiliary script that drops all DB objects on master (with the '-nod' command switch).
+    After all objects are dropped, we have to wait again until the replica becomes actual with master (see 'POINT-2').
+
+    Finally, we extract metadata for master and replica after this cleanup and compare them.
+
+    ################
+    ### N O T E ###
+    ################
+    Test assumes that master and replica DB have been created beforehand.
+    Also, it assumes that %FB_HOME%/replication.conf has been prepared with appropriate parameters for replication.
+    Particularly, names of directories and databases must contain info about the checked FB major version and ServerMode.
+    * verbose = true // in order to find the line with the message that the required segment was replicated
+    * section for master database with specified parameters:
+        journal_directory = "!fbt_repo!/tmp/fb-replication.!fb_major!.!server_mode!.journal"
+        journal_archive_directory = "!fbt_repo!/tmp/fb-replication.!fb_major!.!server_mode!.archive"
+        journal_archive_command = "copy $(pathname) $(archivepathname)"
+        journal_archive_timeout = 10
+    * section for replica database with specified parameter:
+        journal_source_directory = "!fbt_repo!/tmp/fb-replication.!fb_major!.!server_mode!.archive"
+
+    Master and replica databases must be created in the "!fbt_repo!/tmp" directory and have names like these:
+    'fbt-main.fb40.SS.fdb'; 'fbt-repl.fb40.SS.fdb'; - for FB 4.x ('SS' = Super; 'CS' = Classic)
+    'fbt-main.fb50.SS.fdb'; 'fbt-repl.fb50.SS.fdb'; - for FB 5.x ('SS' = Super; 'CS' = Classic)
+    NB: fixed numeric value ('40' or '50') must be used for any minor FB version (4.0; 4.0.1; 4.1; 5.0; 5.1 etc)
+
+    These two databases must NOT be dropped in any of the tests related to replication!
+    They are created and dropped in the batch scenario which prepares the FB instance to be checked for each ServerMode
+    and makes cleanup after it, i.e. when all tests have been completed.
+
+    NB. Currently this task is implemented only as a Windows batch, thus the test has attribute platform = 'Windows'.
+
+    Temporary comment. For debug purposes:
+    1) find out the SUFFIX of the name of the FB service which is to be tested (e.g. 'DefaultInstance', '40SS' etc);
+    2) copy file %fbt-repo%/tests/functional/tabloid/batches/setup-fb-for-replication.bat.txt
+       to some place and rename it "*.bat";
+    3) open this .bat in an editor and adjust the value of the 'fbt_repo' variable;
+    4) run: setup-fb-for-replication.bat [SUFFIX_OF_FB_SERVICE]
+       where SUFFIX_OF_FB_SERVICE is the ending part of the FB service name which you want to check:
+       DefaultInstance ; 40ss ; 40cs ; 50ss ; 50cs etc
+    5) batch 'setup-fb-for-replication.bat' will:
+       * stop the selected FB instance
+       * create test databases (in !fbt_repo!/tmp);
+       * prepare journal/archive sub-folders for replication (also in !fbt_repo!/tmp);
+       * replace %fb_home%/replication.conf with the appropriate one
+       * start the selected FB instance
+    6) run this test (the FB instance will already be launched by setup-fb-for-replication.bat):
+       %fpt_repo%/fbt-run2.bat ddl-triggers-must-not-fire-on-replica.fbt 50ss, etc
+
+    Checked on:
+    4.0.1.2547 (SS/CS), 5.0.0.120 (SS/CS).
+FBTEST: tests.functional.replication.ddl_triggers_must_not_fire_on_replica +""" + import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +substitutions = [('Start removing objects in:.*', 'Start removing objects'), + ('Finish. Total objects removed: [1-9]\\d*', 'Finish. Total objects removed'), + ('.* CREATE DATABASE .*', '')] -substitutions_1 = [('Start removing objects in:.*', 'Start removing objects'), ('Finish. Total objects removed: [1-9]\\d*', 'Finish. Total objects removed'), ('.* CREATE DATABASE .*', '')] +db = db_factory(charset='UTF8') -init_script_1 = """""" +act = python_act('db', substitutions=substitutions) -db_1 = db_factory(charset='UTF8', sql_dialect=3, init=init_script_1) - -# test_script_1 -#--- -# -# import os -# import subprocess -# import re -# import difflib -# import shutil -# import time -# -# os.environ["ISC_USER"] = user_name -# os.environ["ISC_PASSWORD"] = user_password -# -# ##################################### -# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 75 -# KEEP_LOGS_FOR_DEBUG = 0 -# ##################################### -# -# svc = fdb.services.connect(host='localhost', user=user_name, password=user_password) -# FB_HOME = svc.get_home_directory() -# svc.close() -# -# engine = db_conn.engine_version # 4.0; 4.1; 5.0 etc -- type float -# fb_major = 'fb' + str(engine)[:1] + '0' # 'fb40'; 'fb50' -# -# cur = db_conn.cursor() -# cur.execute("select rdb$config_value from rdb$config where upper(rdb$config_name) = upper('ServerMode')") -# server_mode = 'XX' -# for r in cur: -# if r[0] == 'Super': -# server_mode = 'SS' -# elif r[0] == 'SuperClassic': -# server_mode = 'SC' -# elif r[0] == 'Classic': -# server_mode = 'CS' -# cur.close() -# -# # 'fbt-main.fb50.ss.fdb' etc: -# db_main = os.path.join( context['temp_directory'], 'fbt-main.' + fb_major + '.' + server_mode + '.fdb' ) -# db_repl = db_main.replace( 'fbt-main.', 'fbt-repl.') -# -# # Folders for journalling and archieving segments. -# repl_journal_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.journal' ) -# repl_archive_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.archive' ) -# -# db_conn.close() -# -# #-------------------------------------------- -# -# def flush_and_close( file_handle ): -# # https://docs.python.org/2/library/os.html#os.fsync -# # If you're starting with a Python file object f, -# # first do f.flush(), and -# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. -# global os -# -# file_handle.flush() -# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: -# # otherwise: "OSError: [Errno 9] Bad file descriptor"! 
-# os.fsync(file_handle.fileno()) -# file_handle.close() -# -# #-------------------------------------------- -# -# def cleanup( f_names_list ): -# global os -# for i in range(len( f_names_list )): -# if type(f_names_list[i]) == file: -# del_name = f_names_list[i].name -# elif type(f_names_list[i]) == str: -# del_name = f_names_list[i] -# else: -# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.') -# del_name = None -# -# if del_name and os.path.isfile( del_name ): -# os.remove( del_name ) -# -# #-------------------------------------------- -# -# def wait_for_data_in_replica( fb_home, max_allowed_time_for_wait, db_main, prefix_msg = '' ): -# -# global re -# global difflib -# global time -# -# replold_lines = [] -# with open( os.path.join(fb_home,'replication.log'), 'r') as f: -# replold_lines = f.readlines() -# -# con = fdb.connect( dsn = 'localhost:' + db_main, no_db_triggers = 1) -# cur = con.cursor() -# cur.execute("select rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') from rdb$database") -# for r in cur: -# last_generated_repl_segment = r[0] -# cur.close() -# con.close() -# -# #print('last_generated_repl_segment:', last_generated_repl_segment) -# -# # VERBOSE: Segment 1 (2582 bytes) is replicated in 1 second(s), deleting the file -# p=re.compile( '\\+\\s+verbose:\\s+segment\\s+%(last_generated_repl_segment)s\\s+\\(\\d+\\s+bytes\\)\\s+is\\s+replicated.*deleting' % locals(), re.IGNORECASE) -# -# found_required_message = False -# for i in range(0,max_allowed_time_for_wait): -# time.sleep(1) -# -# # Get content of fb_home replication.log _after_ isql finish: -# f_repllog_new = open( os.path.join(fb_home,'replication.log'), 'r') -# diff_data = difflib.unified_diff( -# replold_lines, -# f_repllog_new.readlines() -# ) -# f_repllog_new.close() -# -# for k,d in enumerate(diff_data): -# if p.search(d): -# print( (prefix_msg + ' ' if prefix_msg else '') + 'FOUND message about replicated segment.' ) -# found_required_message = True -# break -# -# if found_required_message: -# break -# -# if not found_required_message: -# print('UNEXPECTED RESULT: no message about replicated segment for %d seconds.' % max_allowed_time_for_wait) -# -# #-------------------------------------------- -# -# def compare_metadata(db_main, db_repl, nm_suffix = '', keep4dbg = 0): -# -# global subprocess, difflib, flush_and_close, cleanup -# -# f_main_meta_sql=open( os.path.join(context['temp_directory'],'db_main_meta'+nm_suffix+'.sql'), 'w') -# subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_main_meta_sql, stderr=subprocess.STDOUT ) -# flush_and_close( f_main_meta_sql ) -# -# f_repl_meta_sql=open( os.path.join(context['temp_directory'],'db_repl_meta'+nm_suffix+'.sql'), 'w') -# subprocess.call( [context['isql_path'], 'localhost:' + db_repl, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_repl_meta_sql, stderr=subprocess.STDOUT ) -# flush_and_close( f_repl_meta_sql ) -# -# db_main_meta=open(f_main_meta_sql.name, 'r') -# db_repl_meta=open(f_repl_meta_sql.name, 'r') -# -# diffmeta = ''.join(difflib.unified_diff( -# db_main_meta.readlines(), -# db_repl_meta.readlines() -# )) -# db_main_meta.close() -# db_repl_meta.close() -# -# f_meta_diff=open( os.path.join(context['temp_directory'],'db_meta_diff'+nm_suffix+'.txt'), 'w') -# f_meta_diff.write(diffmeta) -# flush_and_close( f_meta_diff ) -# -# # Following must issue only TWO rows: -# # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_main]' ... 
*/ -# # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_repl]' ... */ -# # Only thes lines will be suppressed further (see subst. section): -# with open(f_meta_diff.name, 'r') as f: -# for line in f: -# if line[:1] in ('-', '+') and line[:3] not in ('---','+++'): -# print('UNEXPECTED METADATA DIFF.: ' + line) -# -# if not keep4dbg: -# cleanup( (f_main_meta_sql,f_repl_meta_sql,f_meta_diff ) ) -# -# #-------------------------------------------- -# -# sql_ddl = ''' set bail on; -# set list on; -# -# select mon$database_name from mon$database; -# -# recreate table log_ddl_triggers_activity ( -# id int generated by default as identity constraint pk_log_ddl_triggers_activity primary key -# ,ddl_trigger_name varchar(64) -# ,event_type varchar(25) not null -# ,object_type varchar(25) not null -# ,ddl_event varchar(25) not null -# ,object_name varchar(64) not null -# ); -# -# -# set term ^; -# execute block as -# declare v_lf char(1) = x'0A'; -# begin -# rdb$set_context('USER_SESSION', 'SKIP_DDL_TRIGGER', '1'); -# -# for -# with -# a as ( -# select 'ANY DDL STATEMENT' x from rdb$database union all -# select 'CREATE TABLE' from rdb$database union all -# select 'ALTER TABLE' from rdb$database union all -# select 'DROP TABLE' from rdb$database union all -# select 'CREATE PROCEDURE' from rdb$database union all -# select 'ALTER PROCEDURE' from rdb$database union all -# select 'DROP PROCEDURE' from rdb$database union all -# select 'CREATE FUNCTION' from rdb$database union all -# select 'ALTER FUNCTION' from rdb$database union all -# select 'DROP FUNCTION' from rdb$database union all -# select 'CREATE TRIGGER' from rdb$database union all -# select 'ALTER TRIGGER' from rdb$database union all -# select 'DROP TRIGGER' from rdb$database union all -# select 'CREATE EXCEPTION' from rdb$database union all -# select 'ALTER EXCEPTION' from rdb$database union all -# select 'DROP EXCEPTION' from rdb$database union all -# select 'CREATE VIEW' from rdb$database union all -# select 'ALTER VIEW' from rdb$database union all -# select 'DROP VIEW' from rdb$database union all -# select 'CREATE DOMAIN' from rdb$database union all -# select 'ALTER DOMAIN' from rdb$database union all -# select 'DROP DOMAIN' from rdb$database union all -# select 'CREATE ROLE' from rdb$database union all -# select 'ALTER ROLE' from rdb$database union all -# select 'DROP ROLE' from rdb$database union all -# select 'CREATE SEQUENCE' from rdb$database union all -# select 'ALTER SEQUENCE' from rdb$database union all -# select 'DROP SEQUENCE' from rdb$database union all -# select 'CREATE USER' from rdb$database union all -# select 'ALTER USER' from rdb$database union all -# select 'DROP USER' from rdb$database union all -# select 'CREATE INDEX' from rdb$database union all -# select 'ALTER INDEX' from rdb$database union all -# select 'DROP INDEX' from rdb$database union all -# select 'CREATE COLLATION' from rdb$database union all -# select 'DROP COLLATION' from rdb$database union all -# select 'ALTER CHARACTER SET' from rdb$database union all -# select 'CREATE PACKAGE' from rdb$database union all -# select 'ALTER PACKAGE' from rdb$database union all -# select 'DROP PACKAGE' from rdb$database union all -# select 'CREATE PACKAGE BODY' from rdb$database union all -# select 'DROP PACKAGE BODY' from rdb$database -# ) -# ,e as ( -# select 'before' w from rdb$database union all select 'after' from rdb$database -# ) -# ,t as ( -# select upper(trim(replace(trim(a.x),' ','_')) || iif(e.w='before', '_before', '_after')) as trg_name, a.x, e.w -# 
from e, a -# ) -# -# select -# 'create or alter trigger trg_' || t.trg_name -# || ' active ' || t.w || ' ' || trim(t.x) || ' as ' -# || :v_lf -# || 'begin' -# || :v_lf -# || q'{ if (rdb$get_context('USER_SESSION', 'SKIP_DDL_TRIGGER') is null) then}' -# || :v_lf -# || ' insert into log_ddl_triggers_activity(ddl_trigger_name, event_type, object_type, ddl_event, object_name) values(' -# || :v_lf -# || q'{'}' || trim(t.trg_name) || q'{'}' -# || :v_lf -# || q'{, rdb$get_context('DDL_TRIGGER', 'EVENT_TYPE')}' -# || :v_lf -# || q'{, rdb$get_context('DDL_TRIGGER', 'OBJECT_TYPE')}' -# || :v_lf -# || q'{, rdb$get_context('DDL_TRIGGER', 'DDL_EVENT')}' -# || :v_lf -# || q'{, rdb$get_context('DDL_TRIGGER', 'OBJECT_NAME')}' -# || :v_lf -# || ');' -# || :v_lf -# || ' end' -# as sttm -# from t -# as cursor c -# do begin -# execute statement(c.sttm) with autonomous transaction; -# end -# -# rdb$set_context('USER_SESSION', 'SKIP_DDL_TRIGGER', null); -# end -# ^ -# set term ;^ -# commit; -# -# /* -# select rt.rdb$trigger_name,rt.rdb$relation_name,rt.rdb$trigger_type,rt.rdb$trigger_source -# from rdb$triggers rt -# where -# rt.rdb$system_flag is distinct from 1 -# and rt.rdb$trigger_inactive is distinct from 1; -# -# select * from log_ddl_triggers_activity; -# */ -# -# -- set count on; -# -- set echo on; -# -# set term ^; -# -# create table test(id int not null, name varchar(10)) -# ^ -# alter table test add constraint test_pk primary key(id) -# ^ -# ---------- -# create procedure sp_test as begin end -# ^ -# alter procedure sp_test as declare x int; begin x=1; end -# ^ -# ---------- -# create function fn_test(a_id int) returns bigint as -# begin -# return a_id * a_id; -# end -# ^ -# alter function fn_test(a_id int) returns int128 as -# begin -# return a_id * a_id * a_id; -# end -# ^ -# ---------- -# create trigger trg_connect_test on connect as -# begin -# end -# ^ -# alter trigger trg_connect_test as -# declare x int; -# begin -# x = 1; -# end -# ^ -# ---------- -# create exception exc_test 'Invalud value: @1' -# ^ -# alter exception exc_test 'Bad values: @1 and @2' -# ^ -# ---------- -# create view v_test as select 1 x from rdb$database -# ^ -# alter view v_test as select 1 x, 2 y from rdb$database -# ^ -# ---------- -# create domain dm_test int -# ^ -# alter domain dm_test set not null -# ^ -# ---------- -# create role r_test -# ^ -# alter role r_test set system privileges to use_gstat_utility, ignore_db_triggers -# ^ -# ---------- -# create sequence g_test -# ^ -# alter sequence g_test restart with 123 -# ^ -# ---------- -# /* -# create or alter user u_test password '123' using plugin Srp -# ^ -# alter user u_test password '456' -# ^ -# */ -# ---------- -# create index test_name on test(name) -# ^ -# alter index test_name inactive -# ^ -# ---------- -# create collation name_coll for utf8 from unicode case insensitive -# ^ -# ---------- -# alter character set iso8859_1 set default collation pt_br -# ^ -# ---------- -# create or alter package pg_test as -# begin -# function pg_fn1 returns int; -# end -# ^ -# alter package pg_test as -# begin -# function pg_fn1(a_x int) returns int128; -# end -# ^ -# -# create package body pg_test as -# begin -# function pg_fn1(a_x int) returns int128 as -# begin -# return a_x * a_x * a_x; -# end -# end -# ^ -# set term ;^ -# commit; -# -# -# select rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') as last_generated_repl_segment from rdb$database; -# quit; -# ''' % locals() -# -# -# f_sql_chk = open( os.path.join(context['temp_directory'],'tmp_repltest_skip_ddl_trg.sql'), 
'w') -# f_sql_chk.write(sql_ddl) -# flush_and_close( f_sql_chk ) -# -# f_sql_log = open( ''.join( (os.path.splitext(f_sql_chk.name)[0], '.log' ) ), 'w') -# f_sql_err = open( ''.join( (os.path.splitext(f_sql_chk.name)[0], '.err' ) ), 'w') -# subprocess.call( [ context['isql_path'], 'localhost:' + db_main, '-i', f_sql_chk.name ], stdout = f_sql_log, stderr = f_sql_err) -# flush_and_close( f_sql_log ) -# flush_and_close( f_sql_err ) -# -# with open(f_sql_err.name,'r') as f: -# for line in f: -# print('UNEXPECTED STDERR in initial SQL: ' + line) -# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0 -# -# -# if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> initial SQL script finished w/o errors -# -# ############################################################################## -# ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### -# ############################################################################## -# wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-1' ) -# -# compare_metadata(db_main, db_repl, '.1', KEEP_LOGS_FOR_DEBUG) -# -# sql_get_result = ''' -# set list on; -# set count on; -# select -# iif(coalesce(rdb$get_context('SYSTEM','REPLICA_MODE'),'') = '', 'MASTER', 'REPLICA') as replication_mode -# ,a.id -# ,a.ddl_trigger_name -# ,a.event_type -# ,a.object_type -# ,a.ddl_event -# ,a.object_name -# from rdb$database r -# left join log_ddl_triggers_activity a on 1=1 -# order by a.id; -# ''' -# -# runProgram('isql', ['localhost:' + db_main, '-nod'], sql_get_result) -# runProgram('isql', ['localhost:' + db_repl, '-nod'], sql_get_result) -# -# -# # return initial state of master DB: -# # remove all DB objects (tables, views, ...): -# # -------------------------------------------- -# sql_clean_ddl = os.path.join(context['files_location'],'drop-all-db-objects.sql') -# -# f_clean_log=open( os.path.join(context['temp_directory'],'drop-all-db-objects.log'), 'w') -# f_clean_err=open( ''.join( ( os.path.splitext(f_clean_log.name)[0], '.err') ), 'w') -# subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-i', sql_clean_ddl], stdout=f_clean_log, stderr=f_clean_err ) -# flush_and_close(f_clean_log) -# flush_and_close(f_clean_err) -# -# with open(f_clean_err.name,'r') as f: -# for line in f: -# print('UNEXPECTED STDERR in cleanup SQL: ' + line) -# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0 -# -# with open(f_clean_log.name,'r') as f: -# for line in f: -# # show number of dropped objects -# print(line) -# -# if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> initial SQL script finished w/o errors -# -# ############################################################################## -# ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### -# ############################################################################## -# wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-2' ) -# -# compare_metadata(db_main, db_repl, '.2', KEEP_LOGS_FOR_DEBUG) -# -# # cleanup: -# ########## -# if not KEEP_LOGS_FOR_DEBUG: -# cleanup( (f_sql_chk, f_sql_log, f_sql_err, f_clean_log, f_clean_err) ) -# -#--- -act_1 = python_act('db_1', substitutions=substitutions_1) - -expected_stdout_1 = """ +expected_stdout = """ POINT-1 FOUND message about replicated segment. 
REPLICATION_MODE MASTER @@ -1280,7 +864,7 @@ expected_stdout_1 = """ OBJECT_NAME PG_TEST Records affected: 100 - + REPLICATION_MODE REPLICA ID DDL_TRIGGER_NAME @@ -1297,10 +881,488 @@ expected_stdout_1 = """ """ +@pytest.mark.skip('FIXME: Not IMPLEMENTED') @pytest.mark.version('>=4.0') @pytest.mark.platform('Windows') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") +# test_script_1 +#--- +# +# import os +# import subprocess +# import re +# import difflib +# import shutil +# import time +# +# os.environ["ISC_USER"] = user_name +# os.environ["ISC_PASSWORD"] = user_password +# +# ##################################### +# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 75 +# KEEP_LOGS_FOR_DEBUG = 0 +# ##################################### +# +# svc = fdb.services.connect(host='localhost', user=user_name, password=user_password) +# FB_HOME = svc.get_home_directory() +# svc.close() +# +# engine = db_conn.engine_version # 4.0; 4.1; 5.0 etc -- type float +# fb_major = 'fb' + str(engine)[:1] + '0' # 'fb40'; 'fb50' +# +# cur = db_conn.cursor() +# cur.execute("select rdb$config_value from rdb$config where upper(rdb$config_name) = upper('ServerMode')") +# server_mode = 'XX' +# for r in cur: +# if r[0] == 'Super': +# server_mode = 'SS' +# elif r[0] == 'SuperClassic': +# server_mode = 'SC' +# elif r[0] == 'Classic': +# server_mode = 'CS' +# cur.close() +# +# # 'fbt-main.fb50.ss.fdb' etc: +# db_main = os.path.join( context['temp_directory'], 'fbt-main.' + fb_major + '.' + server_mode + '.fdb' ) +# db_repl = db_main.replace( 'fbt-main.', 'fbt-repl.') +# +# # Folders for journalling and archieving segments. +# repl_journal_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.journal' ) +# repl_archive_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.archive' ) +# +# db_conn.close() +# +# #-------------------------------------------- +# +# def flush_and_close( file_handle ): +# # https://docs.python.org/2/library/os.html#os.fsync +# # If you're starting with a Python file object f, +# # first do f.flush(), and +# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. +# global os +# +# file_handle.flush() +# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: +# # otherwise: "OSError: [Errno 9] Bad file descriptor"! 
+# os.fsync(file_handle.fileno()) +# file_handle.close() +# +# #-------------------------------------------- +# +# def cleanup( f_names_list ): +# global os +# for i in range(len( f_names_list )): +# if type(f_names_list[i]) == file: +# del_name = f_names_list[i].name +# elif type(f_names_list[i]) == str: +# del_name = f_names_list[i] +# else: +# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.') +# del_name = None +# +# if del_name and os.path.isfile( del_name ): +# os.remove( del_name ) +# +# #-------------------------------------------- +# +# def wait_for_data_in_replica( fb_home, max_allowed_time_for_wait, db_main, prefix_msg = '' ): +# +# global re +# global difflib +# global time +# +# replold_lines = [] +# with open( os.path.join(fb_home,'replication.log'), 'r') as f: +# replold_lines = f.readlines() +# +# con = fdb.connect( dsn = 'localhost:' + db_main, no_db_triggers = 1) +# cur = con.cursor() +# cur.execute("select rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') from rdb$database") +# for r in cur: +# last_generated_repl_segment = r[0] +# cur.close() +# con.close() +# +# #print('last_generated_repl_segment:', last_generated_repl_segment) +# +# # VERBOSE: Segment 1 (2582 bytes) is replicated in 1 second(s), deleting the file +# p=re.compile( '\\+\\s+verbose:\\s+segment\\s+%(last_generated_repl_segment)s\\s+\\(\\d+\\s+bytes\\)\\s+is\\s+replicated.*deleting' % locals(), re.IGNORECASE) +# +# found_required_message = False +# for i in range(0,max_allowed_time_for_wait): +# time.sleep(1) +# +# # Get content of fb_home replication.log _after_ isql finish: +# f_repllog_new = open( os.path.join(fb_home,'replication.log'), 'r') +# diff_data = difflib.unified_diff( +# replold_lines, +# f_repllog_new.readlines() +# ) +# f_repllog_new.close() +# +# for k,d in enumerate(diff_data): +# if p.search(d): +# print( (prefix_msg + ' ' if prefix_msg else '') + 'FOUND message about replicated segment.' ) +# found_required_message = True +# break +# +# if found_required_message: +# break +# +# if not found_required_message: +# print('UNEXPECTED RESULT: no message about replicated segment for %d seconds.' % max_allowed_time_for_wait) +# +# #-------------------------------------------- +# +# def compare_metadata(db_main, db_repl, nm_suffix = '', keep4dbg = 0): +# +# global subprocess, difflib, flush_and_close, cleanup +# +# f_main_meta_sql=open( os.path.join(context['temp_directory'],'db_main_meta'+nm_suffix+'.sql'), 'w') +# subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_main_meta_sql, stderr=subprocess.STDOUT ) +# flush_and_close( f_main_meta_sql ) +# +# f_repl_meta_sql=open( os.path.join(context['temp_directory'],'db_repl_meta'+nm_suffix+'.sql'), 'w') +# subprocess.call( [context['isql_path'], 'localhost:' + db_repl, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_repl_meta_sql, stderr=subprocess.STDOUT ) +# flush_and_close( f_repl_meta_sql ) +# +# db_main_meta=open(f_main_meta_sql.name, 'r') +# db_repl_meta=open(f_repl_meta_sql.name, 'r') +# +# diffmeta = ''.join(difflib.unified_diff( +# db_main_meta.readlines(), +# db_repl_meta.readlines() +# )) +# db_main_meta.close() +# db_repl_meta.close() +# +# f_meta_diff=open( os.path.join(context['temp_directory'],'db_meta_diff'+nm_suffix+'.txt'), 'w') +# f_meta_diff.write(diffmeta) +# flush_and_close( f_meta_diff ) +# +# # Following must issue only TWO rows: +# # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_main]' ... 
*/ +# # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_repl]' ... */ +# # Only thes lines will be suppressed further (see subst. section): +# with open(f_meta_diff.name, 'r') as f: +# for line in f: +# if line[:1] in ('-', '+') and line[:3] not in ('---','+++'): +# print('UNEXPECTED METADATA DIFF.: ' + line) +# +# if not keep4dbg: +# cleanup( (f_main_meta_sql,f_repl_meta_sql,f_meta_diff ) ) +# +# #-------------------------------------------- +# +# sql_ddl = ''' set bail on; +# set list on; +# +# select mon$database_name from mon$database; +# +# recreate table log_ddl_triggers_activity ( +# id int generated by default as identity constraint pk_log_ddl_triggers_activity primary key +# ,ddl_trigger_name varchar(64) +# ,event_type varchar(25) not null +# ,object_type varchar(25) not null +# ,ddl_event varchar(25) not null +# ,object_name varchar(64) not null +# ); +# +# +# set term ^; +# execute block as +# declare v_lf char(1) = x'0A'; +# begin +# rdb$set_context('USER_SESSION', 'SKIP_DDL_TRIGGER', '1'); +# +# for +# with +# a as ( +# select 'ANY DDL STATEMENT' x from rdb$database union all +# select 'CREATE TABLE' from rdb$database union all +# select 'ALTER TABLE' from rdb$database union all +# select 'DROP TABLE' from rdb$database union all +# select 'CREATE PROCEDURE' from rdb$database union all +# select 'ALTER PROCEDURE' from rdb$database union all +# select 'DROP PROCEDURE' from rdb$database union all +# select 'CREATE FUNCTION' from rdb$database union all +# select 'ALTER FUNCTION' from rdb$database union all +# select 'DROP FUNCTION' from rdb$database union all +# select 'CREATE TRIGGER' from rdb$database union all +# select 'ALTER TRIGGER' from rdb$database union all +# select 'DROP TRIGGER' from rdb$database union all +# select 'CREATE EXCEPTION' from rdb$database union all +# select 'ALTER EXCEPTION' from rdb$database union all +# select 'DROP EXCEPTION' from rdb$database union all +# select 'CREATE VIEW' from rdb$database union all +# select 'ALTER VIEW' from rdb$database union all +# select 'DROP VIEW' from rdb$database union all +# select 'CREATE DOMAIN' from rdb$database union all +# select 'ALTER DOMAIN' from rdb$database union all +# select 'DROP DOMAIN' from rdb$database union all +# select 'CREATE ROLE' from rdb$database union all +# select 'ALTER ROLE' from rdb$database union all +# select 'DROP ROLE' from rdb$database union all +# select 'CREATE SEQUENCE' from rdb$database union all +# select 'ALTER SEQUENCE' from rdb$database union all +# select 'DROP SEQUENCE' from rdb$database union all +# select 'CREATE USER' from rdb$database union all +# select 'ALTER USER' from rdb$database union all +# select 'DROP USER' from rdb$database union all +# select 'CREATE INDEX' from rdb$database union all +# select 'ALTER INDEX' from rdb$database union all +# select 'DROP INDEX' from rdb$database union all +# select 'CREATE COLLATION' from rdb$database union all +# select 'DROP COLLATION' from rdb$database union all +# select 'ALTER CHARACTER SET' from rdb$database union all +# select 'CREATE PACKAGE' from rdb$database union all +# select 'ALTER PACKAGE' from rdb$database union all +# select 'DROP PACKAGE' from rdb$database union all +# select 'CREATE PACKAGE BODY' from rdb$database union all +# select 'DROP PACKAGE BODY' from rdb$database +# ) +# ,e as ( +# select 'before' w from rdb$database union all select 'after' from rdb$database +# ) +# ,t as ( +# select upper(trim(replace(trim(a.x),' ','_')) || iif(e.w='before', '_before', '_after')) as trg_name, a.x, e.w +# 
from e, a +# ) +# +# select +# 'create or alter trigger trg_' || t.trg_name +# || ' active ' || t.w || ' ' || trim(t.x) || ' as ' +# || :v_lf +# || 'begin' +# || :v_lf +# || q'{ if (rdb$get_context('USER_SESSION', 'SKIP_DDL_TRIGGER') is null) then}' +# || :v_lf +# || ' insert into log_ddl_triggers_activity(ddl_trigger_name, event_type, object_type, ddl_event, object_name) values(' +# || :v_lf +# || q'{'}' || trim(t.trg_name) || q'{'}' +# || :v_lf +# || q'{, rdb$get_context('DDL_TRIGGER', 'EVENT_TYPE')}' +# || :v_lf +# || q'{, rdb$get_context('DDL_TRIGGER', 'OBJECT_TYPE')}' +# || :v_lf +# || q'{, rdb$get_context('DDL_TRIGGER', 'DDL_EVENT')}' +# || :v_lf +# || q'{, rdb$get_context('DDL_TRIGGER', 'OBJECT_NAME')}' +# || :v_lf +# || ');' +# || :v_lf +# || ' end' +# as sttm +# from t +# as cursor c +# do begin +# execute statement(c.sttm) with autonomous transaction; +# end +# +# rdb$set_context('USER_SESSION', 'SKIP_DDL_TRIGGER', null); +# end +# ^ +# set term ;^ +# commit; +# +# /* +# select rt.rdb$trigger_name,rt.rdb$relation_name,rt.rdb$trigger_type,rt.rdb$trigger_source +# from rdb$triggers rt +# where +# rt.rdb$system_flag is distinct from 1 +# and rt.rdb$trigger_inactive is distinct from 1; +# +# select * from log_ddl_triggers_activity; +# */ +# +# -- set count on; +# -- set echo on; +# +# set term ^; +# +# create table test(id int not null, name varchar(10)) +# ^ +# alter table test add constraint test_pk primary key(id) +# ^ +# ---------- +# create procedure sp_test as begin end +# ^ +# alter procedure sp_test as declare x int; begin x=1; end +# ^ +# ---------- +# create function fn_test(a_id int) returns bigint as +# begin +# return a_id * a_id; +# end +# ^ +# alter function fn_test(a_id int) returns int128 as +# begin +# return a_id * a_id * a_id; +# end +# ^ +# ---------- +# create trigger trg_connect_test on connect as +# begin +# end +# ^ +# alter trigger trg_connect_test as +# declare x int; +# begin +# x = 1; +# end +# ^ +# ---------- +# create exception exc_test 'Invalud value: @1' +# ^ +# alter exception exc_test 'Bad values: @1 and @2' +# ^ +# ---------- +# create view v_test as select 1 x from rdb$database +# ^ +# alter view v_test as select 1 x, 2 y from rdb$database +# ^ +# ---------- +# create domain dm_test int +# ^ +# alter domain dm_test set not null +# ^ +# ---------- +# create role r_test +# ^ +# alter role r_test set system privileges to use_gstat_utility, ignore_db_triggers +# ^ +# ---------- +# create sequence g_test +# ^ +# alter sequence g_test restart with 123 +# ^ +# ---------- +# /* +# create or alter user u_test password '123' using plugin Srp +# ^ +# alter user u_test password '456' +# ^ +# */ +# ---------- +# create index test_name on test(name) +# ^ +# alter index test_name inactive +# ^ +# ---------- +# create collation name_coll for utf8 from unicode case insensitive +# ^ +# ---------- +# alter character set iso8859_1 set default collation pt_br +# ^ +# ---------- +# create or alter package pg_test as +# begin +# function pg_fn1 returns int; +# end +# ^ +# alter package pg_test as +# begin +# function pg_fn1(a_x int) returns int128; +# end +# ^ +# +# create package body pg_test as +# begin +# function pg_fn1(a_x int) returns int128 as +# begin +# return a_x * a_x * a_x; +# end +# end +# ^ +# set term ;^ +# commit; +# +# +# select rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') as last_generated_repl_segment from rdb$database; +# quit; +# ''' % locals() +# +# +# f_sql_chk = open( os.path.join(context['temp_directory'],'tmp_repltest_skip_ddl_trg.sql'), 
'w') +# f_sql_chk.write(sql_ddl) +# flush_and_close( f_sql_chk ) +# +# f_sql_log = open( ''.join( (os.path.splitext(f_sql_chk.name)[0], '.log' ) ), 'w') +# f_sql_err = open( ''.join( (os.path.splitext(f_sql_chk.name)[0], '.err' ) ), 'w') +# subprocess.call( [ context['isql_path'], 'localhost:' + db_main, '-i', f_sql_chk.name ], stdout = f_sql_log, stderr = f_sql_err) +# flush_and_close( f_sql_log ) +# flush_and_close( f_sql_err ) +# +# with open(f_sql_err.name,'r') as f: +# for line in f: +# print('UNEXPECTED STDERR in initial SQL: ' + line) +# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0 +# +# +# if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> initial SQL script finished w/o errors +# +# ############################################################################## +# ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### +# ############################################################################## +# wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-1' ) +# +# compare_metadata(db_main, db_repl, '.1', KEEP_LOGS_FOR_DEBUG) +# +# sql_get_result = ''' +# set list on; +# set count on; +# select +# iif(coalesce(rdb$get_context('SYSTEM','REPLICA_MODE'),'') = '', 'MASTER', 'REPLICA') as replication_mode +# ,a.id +# ,a.ddl_trigger_name +# ,a.event_type +# ,a.object_type +# ,a.ddl_event +# ,a.object_name +# from rdb$database r +# left join log_ddl_triggers_activity a on 1=1 +# order by a.id; +# ''' +# +# runProgram('isql', ['localhost:' + db_main, '-nod'], sql_get_result) +# runProgram('isql', ['localhost:' + db_repl, '-nod'], sql_get_result) +# +# +# # return initial state of master DB: +# # remove all DB objects (tables, views, ...): +# # -------------------------------------------- +# sql_clean_ddl = os.path.join(context['files_location'],'drop-all-db-objects.sql') +# +# f_clean_log=open( os.path.join(context['temp_directory'],'drop-all-db-objects.log'), 'w') +# f_clean_err=open( ''.join( ( os.path.splitext(f_clean_log.name)[0], '.err') ), 'w') +# subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-i', sql_clean_ddl], stdout=f_clean_log, stderr=f_clean_err ) +# flush_and_close(f_clean_log) +# flush_and_close(f_clean_err) +# +# with open(f_clean_err.name,'r') as f: +# for line in f: +# print('UNEXPECTED STDERR in cleanup SQL: ' + line) +# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0 +# +# with open(f_clean_log.name,'r') as f: +# for line in f: +# # show number of dropped objects +# print(line) +# +# if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> initial SQL script finished w/o errors +# +# ############################################################################## +# ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### +# ############################################################################## +# wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-2' ) +# +# compare_metadata(db_main, db_repl, '.2', KEEP_LOGS_FOR_DEBUG) +# +# # cleanup: +# ########## +# if not KEEP_LOGS_FOR_DEBUG: +# cleanup( (f_sql_chk, f_sql_log, f_sql_err, f_clean_log, f_clean_err) ) +# +#--- diff --git a/tests/functional/replication/test_duplicates_in_rw_replica_after_conflicting_insert.py b/tests/functional/replication/test_duplicates_in_rw_replica_after_conflicting_insert.py index 041c847b..7ca54bf1 100644 --- a/tests/functional/replication/test_duplicates_in_rw_replica_after_conflicting_insert.py +++ b/tests/functional/replication/test_duplicates_in_rw_replica_after_conflicting_insert.py @@ -1,155 +1,165 @@ 
#coding:utf-8 -# -# id: tests.functional.replication.duplicates_in_rw_replica_after_conflicting_insert -# title: Conflicting INSERT propagated into a read-write replica may cause duplicate records to appear -# decription: -# See: https://github.com/FirebirdSQL/firebird/issues/6849 -# -# Test temporary changes mode of replica using external call: gfix -replica read_write ... -# We create table on master with integer column (PK) and text field that allows to see who is "author" of this record. -# Then we add one record (1,'master, initially') and do commit. -# -# After this we wait until replica becomes actual to master, and this delay will last no more then threshold -# that is defined by MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG variable (measured in seconds). -# During this delay, we check every second for replication log and search there line with number of last generated -# segment (which was replicated and deleting finally). -# We can assume that replication finished OK only when such line is found see ('POINT-1'). -# -# Then we open two FDB-connections and add records: -# 1) first, in replica: (2, 'RW-replica') + commit; -# 2) second, in master with ID that conflicts with just added record in replica: (2, 'master, finally') + commit. -# -# Message "Record being inserted into table TEST already exists, updating instead" will appear after this in replication log. -# We have to wait again until replica becomes actual to master (see above). -# After this we query data from table 'TEST' on *replica* DB. This table must have onl two records: -# (ID = 1, WHO_MADE = 'master, initially') and (ID = 2, WHO_MADE = 'master, finally'). -# Record (2, 'RW-replica') must be overwritten! -# -# Further, we invoke ISQL with executing auxiliary script for drop all DB objects on master (with '-nod' command switch). -# After all objects will be dropped, we have to wait again until replica becomes actual with master (see 'POINT-2'). -# -# Finally, we extract metadata for master and replica and compare them (see 'f_meta_diff'). -# The only difference in metadata must be 'CREATE DATABASE' statement with different DB names - we suppress it, -# thus metadata difference must not be issued. -# -# #################### -# ### CRUCIAL NOTE ### -# #################### -# Currently, 25.06.2021, there is bug in FB 4.x and 5.x which can be seen on SECOND run of this test: message with text -# "ERROR: Record format with length 68 is not found for table TEST" will appear in it after inserting 1st record in master. -# The reason of that is "dirty" pages that remain in RDB$RELATION_FIELDS both on master and replica after dropping table. -# Following query show different data that appear in replica DB on 1st and 2nd run (just after table was created on master): -# ======= -# set blobdisplay 6; -# select rdb$descriptor as fmt_descr -# from rdb$formats natural join rdb$relations where rdb$relation_name = 'TEST'; -# ======= -# This bug was explained by dimitr, see letters 25.06.2021 11:49 and 25.06.2021 16:56. -# It will be fixed later. -# -# The only workaround to solve this problem is to make SWEEP after all DB objects have been dropped. -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -# !NB! BOTH master and replica must be cleaned up by sweep! -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -# -# ################ -# ### N O T E ### -# ################ -# Test assumes that master and replica DB have been created beforehand. 
-# Also, it assumes that %FB_HOME% -# eplication.conf has been prepared with apropriate parameters for replication. -# Particularly, name of directories and databases must have info about checked FB major version and ServerMode. -# * verbose = true // in order to find out line with message that required segment was replicated -# * section for master database with specified parameters: -# journal_directory = "!fbt_repo! mp -# b-replication.!fb_major!.!server_mode!.journal" -# journal_archive_directory = "!fbt_repo! mp -# b-replication.!fb_major!.!server_mode!.archive" -# journal_archive_command = "copy $(pathname) $(archivepathname)" -# journal_archive_timeout = 10 -# * section for replica database with specified parameter: -# journal_source_directory = "!fbt_repo! mp -# b-replication.!fb_major!.!server_mode!.archive" -# -# Master and replica databases must be created in "!fbt_repo! mp" directory and have names like these: -# 'fbt-main.fb40.SS.fdb'; 'fbt-repl.fb40.SS.fdb'; - for FB 4.x ('SS' = Super; 'CS' = Classic) -# 'fbt-main.fb50.SS.fdb'; 'fbt-repl.fb50.SS.fdb'; - for FB 5.x ('SS' = Super; 'CS' = Classic) -# NB: fixed numeric value ('40' or '50') must be used for any minor FB version (4.0; 4.0.1; 4.1; 5.0; 5.1 etc) -# -# These two databases must NOT be dropped in any of tests related to replication! -# They are created and dropped in the batch scenario which prepares FB instance to be checked for each ServerMode -# and make cleanup after it, i.e. when all tests will be completed. -# -# NB. Currently this task was implemented only in Windows batch, thus test has attribute platform = 'Windows'. -# -# Temporary comment. For debug purpoces: -# 1) find out SUFFIX of the name of FB service which is to be tested (e.g. 'DefaultInstance', '40SS' etc); -# 2) copy file %fbt-repo% ests -# unctional abloidatches\\setup-fb-for-replication.bat.txt -# to some place and rename it "*.bat"; -# 3) open this .bat in editor and asjust value of 'fbt_repo' variable; -# 4) run: setup-fb-for-replication.bat [SUFFIX_OF_FB_SERVICE] -# where SUFFIX_OF_FB_SERVICE is ending part of FB service which you want to check: -# DefaultInstance ; 40ss ; 40cs ; 50ss ; 50cs etc -# 5) batch 'setup-fb-for-replication.bat' will: -# * stop selected FB instance -# * create test databases (in !fbt_repo! mp\\); -# * prepare journal/archive sub-folders for replication (also in !fbt_repo! mp\\); -# * replace %fb_home% -# eplication.conf with apropriate -# * start selected FB instance -# 6) run this test (FB instance will be already launched by setup-fb-for-replication.bat): -# %fpt_repo% -# bt-run2.bat dblevel-triggers-must-not-fire-on-replica.fbt 50ss, etc -# -# Confirmed bug on 5.0.0.63, 4.0.0.2508 (date of both snapshots: 08-jun-2021, i.e. just before fix). -# Checked on: -# 5.0.0.85 SS: 34.951s. -# 5.0.0.85 CS: 36.813s. -# 4.0.1.2520 SS: 38.939s. -# 4.0.1.2519 CS: 32.376s. -# -# tracker_id: -# min_versions: ['4.0'] -# versions: 4.0 -# qmid: None + +""" +ID: replication.duplicates_in_rw_replica_after_conflicting_insert +ISSUE: 6849 +TITLE: Conflicting INSERT propagated into a read-write replica may cause duplicate records to appear +DESCRIPTION: + Test temporary changes mode of replica using external call: gfix -replica read_write ... + We create table on master with integer column (PK) and text field that allows to see who is "author" of this record. + Then we add one record (1,'master, initially') and do commit. 
+
+    After this we wait until replica becomes actual to master, and this delay will last no more than the threshold
+    that is defined by the MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG variable (measured in seconds).
+    During this delay, we check the replication log every second and search it for a line with the number of the last
+    generated segment (which was replicated and finally deleted).
+    We can assume that replication finished OK only when such a line is found (see 'POINT-1').
+
+    Then we open two FDB connections and add records:
+    1) first, in replica: (2, 'RW-replica') + commit;
+    2) second, in master, with an ID that conflicts with the record just added in replica: (2, 'master, finally') + commit.
+
+    Message "Record being inserted into table TEST already exists, updating instead" will appear after this in the replication log.
+    We have to wait again until replica becomes actual to master (see above).
+    After this we query data from table 'TEST' on the *replica* DB. This table must have only two records:
+    (ID = 1, WHO_MADE = 'master, initially') and (ID = 2, WHO_MADE = 'master, finally').
+    Record (2, 'RW-replica') must be overwritten!
+
+    Further, we invoke ISQL to execute an auxiliary script that drops all DB objects on master (with the '-nod' command switch).
+    After all objects have been dropped, we have to wait again until replica becomes actual with master (see 'POINT-2').
+
+    Finally, we extract metadata for master and replica and compare them (see 'f_meta_diff').
+    The only difference in metadata must be the 'CREATE DATABASE' statement with different DB names - we suppress it,
+    thus no metadata difference must be issued.
+
+    ####################
+    ### CRUCIAL NOTE ###
+    ####################
+    Currently, 25.06.2021, there is a bug in FB 4.x and 5.x which can be seen on the SECOND run of this test: a message with text
+    "ERROR: Record format with length 68 is not found for table TEST" will appear in it after inserting the 1st record in master.
+    The reason is "dirty" pages that remain in RDB$RELATION_FIELDS both on master and replica after dropping the table.
+    The following query shows different data in the replica DB on the 1st and 2nd run (just after the table was created on master):
+    =======
+    set blobdisplay 6;
+    select rdb$descriptor as fmt_descr
+    from rdb$formats natural join rdb$relations where rdb$relation_name = 'TEST';
+    =======
+    This bug was explained by dimitr, see letters 25.06.2021 11:49 and 25.06.2021 16:56.
+    It will be fixed later.
+
+    The only workaround to solve this problem is to make SWEEP after all DB objects have been dropped.
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+    !NB! BOTH master and replica must be cleaned up by sweep!
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    ################
+    ### N O T E ###
+    ################
+    Test assumes that master and replica DB have been created beforehand.
+    Also, it assumes that %FB_HOME%/replication.conf has been prepared with appropriate parameters for replication.
+    Particularly, names of directories and databases must carry info about the checked FB major version and ServerMode.
+    * verbose = true // in order to find out the line with the message that the required segment was replicated
+    * section for master database with specified parameters:
+        journal_directory = "!fbt_repo!/tmp/fb-replication.!fb_major!.!server_mode!.journal"
+        journal_archive_directory = "!fbt_repo!/tmp/fb-replication.!fb_major!.!server_mode!.archive"
+        journal_archive_command = "copy $(pathname) $(archivepathname)"
+        journal_archive_timeout = 10
+    * section for replica database with specified parameter:
+        journal_source_directory = "!fbt_repo!/tmp/fb-replication.!fb_major!.!server_mode!.archive"
+
+    Master and replica databases must be created in the "!fbt_repo!/tmp" directory and have names like these:
+        'fbt-main.fb40.SS.fdb'; 'fbt-repl.fb40.SS.fdb'; - for FB 4.x ('SS' = Super; 'CS' = Classic)
+        'fbt-main.fb50.SS.fdb'; 'fbt-repl.fb50.SS.fdb'; - for FB 5.x ('SS' = Super; 'CS' = Classic)
+    NB: fixed numeric value ('40' or '50') must be used for any minor FB version (4.0; 4.0.1; 4.1; 5.0; 5.1 etc)
+
+    These two databases must NOT be dropped in any of the tests related to replication!
+    They are created and dropped in the batch scenario which prepares the FB instance to be checked for each ServerMode
+    and makes cleanup after it, i.e. when all tests are completed.
+
+    NB. Currently this task was implemented only in Windows batch, thus the test has attribute platform = 'Windows'.
+
+    Temporary comment. For debug purposes:
+    1) find out the SUFFIX of the name of the FB service which is to be tested (e.g. 'DefaultInstance', '40SS' etc);
+    2) copy file %fbt-repo%/tests/functional/tabloid/batches/setup-fb-for-replication.bat.txt
+       to some place and rename it "*.bat";
+    3) open this .bat in an editor and adjust the value of the 'fbt_repo' variable;
+    4) run: setup-fb-for-replication.bat [SUFFIX_OF_FB_SERVICE]
+       where SUFFIX_OF_FB_SERVICE is the ending part of the FB service which you want to check:
+       DefaultInstance ; 40ss ; 40cs ; 50ss ; 50cs etc
+    5) batch 'setup-fb-for-replication.bat' will:
+       * stop selected FB instance
+       * create test databases (in !fbt_repo!/tmp);
+       * prepare journal/archive sub-folders for replication (also in !fbt_repo!/tmp);
+       * replace %fb_home%/replication.conf with the appropriate one
+       * start selected FB instance
+    6) run this test (FB instance will already be launched by setup-fb-for-replication.bat):
+       %fbt_repo%/fbt-run2.bat dblevel-triggers-must-not-fire-on-replica.fbt 50ss, etc
+
+    Confirmed bug on 5.0.0.63, 4.0.0.2508 (date of both snapshots: 08-jun-2021, i.e. just before fix).
+    Checked on:
+        5.0.0.85 SS: 34.951s.
+        5.0.0.85 CS: 36.813s.
+        4.0.1.2520 SS: 38.939s.
+        4.0.1.2519 CS: 32.376s.
+FBTEST: tests.functional.replication.duplicates_in_rw_replica_after_conflicting_insert
+"""

 import pytest
-from firebird.qa import db_factory, python_act, Action
+from firebird.qa import *

-# version: 4.0
-# resources: None
+substitutions = [('Start removing objects in:.*', 'Start removing objects'),
+                 ('Finish. Total objects removed: [1-9]\\d*', 'Finish. Total objects removed'),
+                 ('.* CREATE DATABASE .*', ''), ('FMT_DESCR .*', 'FMT_DESCR')]

-substitutions_1 = [('Start removing objects in:.*', 'Start removing objects'), ('Finish. Total objects removed: [1-9]\\d*', 'Finish. Total objects removed'), ('.* CREATE DATABASE .*', ''), ('FMT_DESCR .*', 'FMT_DESCR')]
+db = db_factory()

-init_script_1 = """"""
+act = python_act('db', substitutions=substitutions)

-db_1 = db_factory(sql_dialect=3, init=init_script_1)
+expected_stdout = """
+    POINT-1A FOUND message about replicated segment.
+    POINT-1B FOUND message about replicated segment.
+ + ID 1 + WHO_MADE master, initially + ID 2 + WHO_MADE master, finally + + Start removing objects + Finish. Total objects removed + + POINT-2 FOUND message about replicated segment. +""" + +@pytest.mark.skip('FIXME: Not IMPLEMENTED') +@pytest.mark.version('>=4.0') +@pytest.mark.platform('Windows') +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") # test_script_1 #--- -# +# # import os # import subprocess # import re # import difflib # import shutil # import time -# +# # os.environ["ISC_USER"] = user_name # os.environ["ISC_PASSWORD"] = user_password -# +# # ##################################### # MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 65 # ##################################### -# +# # svc = fdb.services.connect(host='localhost', user=user_name, password=user_password) # FB_HOME = svc.get_home_directory() # svc.close() -# +# # engine = db_conn.engine_version # 4.0; 4.1; 5.0 etc -- type float # fb_major = 'fb' + str(engine)[:1] + '0' # 'fb40'; 'fb50' -# +# # cur = db_conn.cursor() # cur.execute("select rdb$config_value from rdb$config where upper(rdb$config_name) = upper('ServerMode')") # server_mode = 'XX' @@ -161,36 +171,36 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # elif r[0] == 'Classic': # server_mode = 'CS' # cur.close() -# +# # # 'fbt-main.fb50.ss.fdb' etc: # db_main = os.path.join( context['temp_directory'], 'fbt-main.' + fb_major + '.' + server_mode + '.fdb' ) # db_repl = db_main.replace( 'fbt-main.', 'fbt-repl.') -# +# # runProgram('gfix', ['-replica', 'read_write', 'localhost:' + db_repl]) -# +# # # Folders for journalling and archieving segments. # repl_journal_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.journal' ) # repl_archive_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.archive' ) -# +# # db_conn.close() -# +# # #-------------------------------------------- -# +# # def flush_and_close( file_handle ): # # https://docs.python.org/2/library/os.html#os.fsync # # If you're starting with a Python file object f, # # first do f.flush(), and # # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. # global os -# +# # file_handle.flush() # if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: # # otherwise: "OSError: [Errno 9] Bad file descriptor"! 
# os.fsync(file_handle.fileno()) # file_handle.close() -# +# # #-------------------------------------------- -# +# # def cleanup( f_names_list ): # global os # for i in range(len( f_names_list )): @@ -201,22 +211,22 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # else: # print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.') # del_name = None -# +# # if del_name and os.path.isfile( del_name ): # os.remove( del_name ) -# +# # #-------------------------------------------- -# +# # def wait_for_data_in_replica( fb_home, max_allowed_time_for_wait, db_main, prefix_msg = '' ): -# +# # global re # global difflib # global time -# +# # replold_lines = [] # with open( os.path.join(fb_home,'replication.log'), 'r') as f: # replold_lines = f.readlines() -# +# # con = fdb.connect( dsn = 'localhost:' + db_main, no_db_triggers = 1) # cur = con.cursor() # cur.execute("select rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') from rdb$database") @@ -224,91 +234,91 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # last_generated_repl_segment = r[0] # cur.close() # con.close() -# +# # #print('last_generated_repl_segment:', last_generated_repl_segment) -# +# # # VERBOSE: Segment 1 (2582 bytes) is replicated in 1 second(s), deleting the file # p=re.compile( '\\+\\s+verbose:\\s+segment\\s+%(last_generated_repl_segment)s\\s+\\(\\d+\\s+bytes\\)\\s+is\\s+replicated.*deleting' % locals(), re.IGNORECASE) -# +# # found_required_message = False # for i in range(0,max_allowed_time_for_wait): # time.sleep(1) -# +# # # Get content of fb_home replication.log _after_ isql finish: # f_repllog_new = open( os.path.join(fb_home,'replication.log'), 'r') # diff_data = difflib.unified_diff( -# replold_lines, +# replold_lines, # f_repllog_new.readlines() # ) # f_repllog_new.close() -# +# # for k,d in enumerate(diff_data): # if p.search(d): # print( (prefix_msg + ' ' if prefix_msg else '') + 'FOUND message about replicated segment.' ) # found_required_message = True # break -# +# # if found_required_message: # break -# +# # if not found_required_message: # print('UNEXPECTED RESULT: no message about replicated segment for %d seconds.' 
% max_allowed_time_for_wait) -# +# # #-------------------------------------------- -# +# # sql_ddl = ''' set bail on; # set list on; # --set blob all; # set blobdisplay 6; -# +# # recreate table test(id int primary key using index test_pk, dts timestamp default 'now', who_made varchar(50)); # commit; # insert into test(id, who_made) values(1,'master, initially'); # commit; -# +# # -- for debug only: # select rdb$get_context('SYSTEM', 'DB_NAME'), rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') as last_generated_repl_segment from rdb$database; -# select +# select # RDB$DESCRIPTOR as fmt_descr # from RDB$FORMATS natural join RDB$RELATIONS # where RDB$RELATION_NAME = 'TEST'; # quit; # ''' % locals() -# -# +# +# # f_sql_chk = open( os.path.join(context['temp_directory'],'tmp_gh_6849_init.sql'), 'w') # f_sql_chk.write(sql_ddl) # flush_and_close( f_sql_chk ) -# +# # f_sql_log = open( ''.join( (os.path.splitext(f_sql_chk.name)[0], '.log' ) ), 'w') # f_sql_err = open( ''.join( (os.path.splitext(f_sql_chk.name)[0], '.err' ) ), 'w') # subprocess.call( [ context['isql_path'], 'localhost:' + db_main, '-i', f_sql_chk.name ], stdout = f_sql_log, stderr = f_sql_err) # flush_and_close( f_sql_log ) # flush_and_close( f_sql_err ) -# +# # last_generated_repl_segment = 0 -# +# # with open(f_sql_err.name,'r') as f: # for line in f: # print('UNEXPECTED STDERR in initial SQL: ' + line) # MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0 -# +# # if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> initial SQL script finished w/o errors -# +# # ############################################################################## # ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### # ############################################################################## # wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-1A' ) -# +# # con_repl = fdb.connect( dsn = 'localhost:' + db_repl) # cur_repl = con_repl.cursor() # cur_repl.execute('insert into test(id,who_made) values(?, ?)', (2, 'RW-replica')) -# +# # # con_repl.execute_immediate( "insert into test(id,who_made) values( 2, )" ) # con_repl.commit() # cur_repl.close() # con_repl.close() -# +# # con_main = fdb.connect( dsn = 'localhost:' + db_main) # cur_main = con_main.cursor() # cur_main.execute('insert into test(id,who_made) values(?, ?)', (2, 'master, finally')) @@ -316,70 +326,70 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # con_main.commit() # cur_main.close() # con_main.close() -# +# # ############################################################################## # ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### # ############################################################################## # wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-1B' ) -# +# # # Here we must check that replica has no duplicates in PK column test.id: # #################### # # for debug only: runProgram('isql', ['localhost:' + db_repl, '-nod'], "set list on; set blobdisplay 6; select id, who_made from test order by dts; select rdb$descriptor as fmt_descr from rdb$formats natural join rdb$relations where rdb$relation_name = 'TEST';") # runProgram('isql', ['localhost:' + db_repl, '-nod'], "set list on; select id, who_made from test order by dts;") -# -# +# +# # runProgram('gfix', ['-replica', 'read_only', 'localhost:' + db_repl]) -# +# # # return initial state of master DB: # # remove all DB objects (tables, views, ...): # # -------------------------------------------- # sql_clean_ddl = 
os.path.join(context['files_location'],'drop-all-db-objects.sql') -# +# # f_clean_log=open( os.path.join(context['temp_directory'],'drop-all-db-objects-gh_6849.log'), 'w') # f_clean_err=open( ''.join( ( os.path.splitext(f_clean_log.name)[0], '.err') ), 'w') # subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-i', sql_clean_ddl], stdout=f_clean_log, stderr=f_clean_err ) # flush_and_close(f_clean_log) # flush_and_close(f_clean_err) -# +# # with open(f_clean_err.name,'r') as f: # for line in f: # print('UNEXPECTED STDERR in cleanup SQL: ' + line) # MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0 -# +# # with open(f_clean_log.name,'r') as f: # for line in f: # # show number of dropped objects # print(line) -# +# # if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> initial SQL script finished w/o errors -# +# # ############################################################################## # ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### # ############################################################################## # wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-2' ) -# +# # f_main_meta_sql=open( os.path.join(context['temp_directory'],'db_main_meta_gh_6849.sql'), 'w') # subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_main_meta_sql, stderr=subprocess.STDOUT ) # flush_and_close( f_main_meta_sql ) -# +# # f_repl_meta_sql=open( os.path.join(context['temp_directory'],'db_repl_meta_gh_6849.sql'), 'w') # subprocess.call( [context['isql_path'], 'localhost:' + db_repl, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_repl_meta_sql, stderr=subprocess.STDOUT ) # flush_and_close( f_repl_meta_sql ) -# +# # db_main_meta=open(f_main_meta_sql.name, 'r') # db_repl_meta=open(f_repl_meta_sql.name, 'r') -# +# # diffmeta = ''.join(difflib.unified_diff( -# db_main_meta.readlines(), +# db_main_meta.readlines(), # db_repl_meta.readlines() # )) # db_main_meta.close() # db_repl_meta.close() -# +# # f_meta_diff=open( os.path.join(context['temp_directory'],'db_meta_diff_gh_6849.txt'), 'w', buffering = 0) # f_meta_diff.write(diffmeta) # flush_and_close( f_meta_diff ) -# +# # # Following must issue only TWO rows: # # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_main]' ... */ # # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_repl]' ... */ @@ -388,7 +398,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # for line in f: # if line[:1] in ('-', '+') and line[:3] not in ('---','+++'): # print('UNEXPECTED METADATA DIFF.: ' + line) -# +# # ###################### # ### A C H T U N G ### # ###################### @@ -397,7 +407,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # runProgram('gfix', ['-sweep', 'localhost:' + db_repl]) # runProgram('gfix', ['-sweep', 'localhost:' + db_main]) # ####################### -# +# # #If we skip 'gfix -sweep' then following will be: # # # #MASTER and REPLICA, after 1st run: @@ -417,35 +427,11 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # # 0 8 16 TIMESTAMP 8 0 0x00 # # 1 16 3 VARCHAR 52 0 0x00 # # 2 68 9 LONG 4 0 0x00 -# -# +# +# # # cleanup: # ########## # cleanup( (f_sql_chk, f_sql_log, f_sql_err,f_clean_log,f_clean_err,f_main_meta_sql,f_repl_meta_sql,f_meta_diff) ) -# -# +# +# #--- -act_1 = python_act('db_1', substitutions=substitutions_1) - -expected_stdout_1 = """ - POINT-1A FOUND message about replicated segment. - POINT-1B FOUND message about replicated segment. 
- - ID 1 - WHO_MADE master, initially - ID 2 - WHO_MADE master, finally - - Start removing objects - Finish. Total objects removed - - POINT-2 FOUND message about replicated segment. -""" - -@pytest.mark.version('>=4.0') -@pytest.mark.platform('Windows') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - - diff --git a/tests/functional/replication/test_failed_DDL_commands_can_be_replicated.py b/tests/functional/replication/test_failed_DDL_commands_can_be_replicated.py index 78da738b..24b4c7a5 100644 --- a/tests/functional/replication/test_failed_DDL_commands_can_be_replicated.py +++ b/tests/functional/replication/test_failed_DDL_commands_can_be_replicated.py @@ -1,426 +1,113 @@ #coding:utf-8 -# -# id: functional.replication.failed_DDL_commands_can_be_replicated -# title: Failed DDL commands can be replicated -# decription: -# https://github.com/FirebirdSQL/firebird/issues/6907 -# -# We create table, insert three rows in it (with null value in one of them) and, according to ticket info, run -# several DDL statements that for sure must fail, namely: -# * add new column with NOT-null requirement for its values (can not be done because nmon-empty table); -# * change DDL of existing column: add NON-null requirement to it (also can not be done because of NULL in one of rows); -# * create domain that initially allows null, then recreate table and add several rows in in (with NULL in one of them), -# and finally - try to change domain DDL by add NOT-null check. It must fail because of existing nulls in the table. -# -# After this we wait until replica becomes actual to master, and this delay will last no more then threshold -# that is defined by MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG variable (measured in seconds). -# During this delay, we check every second for replication log and search there line with number of last generated -# segment (which was replicated and deleting finally). -# We can assume that replication finished OK only when such line is found see ('POINT-1'). -# -# Further, we invoke ISQL with executing auxiliary script for drop all DB objects on master (with '-nod' command switch). -# After all objects will be dropped, we have to wait again until replica becomes actual with master (see 'POINT-2'). -# -# Finally, we extract metadata for master and replica and compare them (see 'f_meta_diff'). -# The only difference in metadata must be 'CREATE DATABASE' statement with different DB names - we suppress it, -# thus metadata difference must not be issued. -# -# #################### -# ### CRUCIAL NOTE ### -# #################### -# Currently, 25.06.2021, there is bug in FB 4.x and 5.x which can be seen on SECOND run of this test: message with text -# "ERROR: Record format with length 68 is not found for table TEST" will appear in it after inserting 1st record in master. -# The reason of that is "dirty" pages that remain in RDB$RELATION_FIELDS both on master and replica after dropping table. -# Following query show different data that appear in replica DB on 1st and 2nd run (just after table was created on master): -# ======= -# set blobdisplay 6; -# select rdb$descriptor as fmt_descr -# from rdb$formats natural join rdb$relations where rdb$relation_name = 'TEST'; -# ======= -# This bug was explained by dimitr, see letters 25.06.2021 11:49 and 25.06.2021 16:56. -# It will be fixed later. -# -# The only workaround to solve this problem is to make SWEEP after all DB objects have been dropped. 
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -# !NB! BOTH master and replica must be cleaned up by sweep! -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -# -# ################ -# ### N O T E ### -# ################ -# Test assumes that master and replica DB have been created beforehand. -# Also, it assumes that %FB_HOME% -# eplication.conf has been prepared with apropriate parameters for replication. -# Particularly, name of directories and databases must have info about checked FB major version and ServerMode. -# * verbose = true // in order to find out line with message that required segment was replicated -# * section for master database with specified parameters: -# journal_directory = "!fbt_repo! mp -# b-replication.!fb_major!.!server_mode!.journal" -# journal_archive_directory = "!fbt_repo! mp -# b-replication.!fb_major!.!server_mode!.archive" -# journal_archive_command = "copy $(pathname) $(archivepathname)" -# journal_archive_timeout = 10 -# * section for replica database with specified parameter: -# journal_source_directory = "!fbt_repo! mp -# b-replication.!fb_major!.!server_mode!.archive" -# -# Master and replica databases must be created in "!fbt_repo! mp" directory and have names like these: -# 'fbt-main.fb40.SS.fdb'; 'fbt-repl.fb40.SS.fdb'; - for FB 4.x ('SS' = Super; 'CS' = Classic) -# 'fbt-main.fb50.SS.fdb'; 'fbt-repl.fb50.SS.fdb'; - for FB 5.x ('SS' = Super; 'CS' = Classic) -# NB: fixed numeric value ('40' or '50') must be used for any minor FB version (4.0; 4.0.1; 4.1; 5.0; 5.1 etc) -# -# These two databases must NOT be dropped in any of tests related to replication! -# They are created and dropped in the batch scenario which prepares FB instance to be checked for each ServerMode -# and make cleanup after it, i.e. when all tests will be completed. -# -# NB. Currently this task was implemented only in Windows batch, thus test has attribute platform = 'Windows'. -# -# Temporary comment. For debug purpoces: -# 1) find out SUFFIX of the name of FB service which is to be tested (e.g. 'DefaultInstance', '40SS' etc); -# 2) copy file %fbt-repo% ests -# unctional abloidatches\\setup-fb-for-replication.bat.txt -# to some place and rename it "*.bat"; -# 3) open this .bat in editor and asjust value of 'fbt_repo' variable; -# 4) run: setup-fb-for-replication.bat [SUFFIX_OF_FB_SERVICE] -# where SUFFIX_OF_FB_SERVICE is ending part of FB service which you want to check: -# DefaultInstance ; 40ss ; 40cs ; 50ss ; 50cs etc -# 5) batch 'setup-fb-for-replication.bat' will: -# * stop selected FB instance -# * create test databases (in !fbt_repo! mp\\); -# * prepare journal/archive sub-folders for replication (also in !fbt_repo! mp\\); -# * replace %fb_home% -# eplication.conf with apropriate -# * start selected FB instance -# 6) run this test (FB instance will be already launched by setup-fb-for-replication.bat): -# %fpt_repo% -# bt-run2.bat dblevel-triggers-must-not-fire-on-replica.fbt 50ss, etc -# -# Confirmed bug on 4.0.0.126, 4.0.1.2556: message "Cannot make field Y of table TEST NOT NULL because there are NULLs" -# will be added into replication log and after this replication gets stuck. -# -# Checked on: 5.0.0.131 SS/CS; 4.0.1.2563 SS/CS. 
-#
-#
-# tracker_id:
-# min_versions: ['4.0.1']
-# versions: 4.0.1
-# qmid: None
+
+"""
+ID: replication.failed_DDL_commands_can_be_replicated
+ISSUE: 6907
+TITLE: Failed DDL commands can be replicated
+DESCRIPTION:
+    We create a table, insert three rows into it (with a null value in one of them) and, according to the ticket info, run
+    several DDL statements that for sure must fail, namely:
+    * add a new column with a NOT-null requirement for its values (can not be done because the table is non-empty);
+    * change DDL of an existing column: add a NOT-null requirement to it (also can not be done because of NULL in one of the rows);
+    * create a domain that initially allows null, then recreate the table and add several rows to it (with NULL in one of them),
+      and finally - try to change the domain DDL by adding a NOT-null check. It must fail because of existing nulls in the table.
+
+    After this we wait until replica becomes actual to master, and this delay will last no more than the threshold
+    that is defined by the MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG variable (measured in seconds).
+    During this delay, we check the replication log every second and search it for a line with the number of the last
+    generated segment (which was replicated and finally deleted).
+    We can assume that replication finished OK only when such a line is found (see 'POINT-1').
+
+    Further, we invoke ISQL to execute an auxiliary script that drops all DB objects on master (with the '-nod' command switch).
+    After all objects have been dropped, we have to wait again until replica becomes actual with master (see 'POINT-2').
+
+    Finally, we extract metadata for master and replica and compare them (see 'f_meta_diff').
+    The only difference in metadata must be the 'CREATE DATABASE' statement with different DB names - we suppress it,
+    thus no metadata difference must be issued.
+
+    ####################
+    ### CRUCIAL NOTE ###
+    ####################
+    Currently, 25.06.2021, there is a bug in FB 4.x and 5.x which can be seen on the SECOND run of this test: a message with text
+    "ERROR: Record format with length 68 is not found for table TEST" will appear in it after inserting the 1st record in master.
+    The reason is "dirty" pages that remain in RDB$RELATION_FIELDS both on master and replica after dropping the table.
+    The following query shows different data in the replica DB on the 1st and 2nd run (just after the table was created on master):
+    =======
+    set blobdisplay 6;
+    select rdb$descriptor as fmt_descr
+    from rdb$formats natural join rdb$relations where rdb$relation_name = 'TEST';
+    =======
+    This bug was explained by dimitr, see letters 25.06.2021 11:49 and 25.06.2021 16:56.
+    It will be fixed later.
+
+    The only workaround to solve this problem is to make SWEEP after all DB objects have been dropped.
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+    !NB! BOTH master and replica must be cleaned up by sweep!
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    ################
+    ### N O T E ###
+    ################
+    Test assumes that master and replica DB have been created beforehand.
+    Also, it assumes that %FB_HOME%/replication.conf has been prepared with appropriate parameters for replication.
+    Particularly, names of directories and databases must carry info about the checked FB major version and ServerMode.
+    * verbose = true // in order to find out the line with the message that the required segment was replicated
+    * section for master database with specified parameters:
+        journal_directory = "!fbt_repo!/tmp/fb-replication.!fb_major!.!server_mode!.journal"
+        journal_archive_directory = "!fbt_repo!/tmp/fb-replication.!fb_major!.!server_mode!.archive"
+        journal_archive_command = "copy $(pathname) $(archivepathname)"
+        journal_archive_timeout = 10
+    * section for replica database with specified parameter:
+        journal_source_directory = "!fbt_repo!/tmp/fb-replication.!fb_major!.!server_mode!.archive"
+
+    Master and replica databases must be created in the "!fbt_repo!/tmp" directory and have names like these:
+        'fbt-main.fb40.SS.fdb'; 'fbt-repl.fb40.SS.fdb'; - for FB 4.x ('SS' = Super; 'CS' = Classic)
+        'fbt-main.fb50.SS.fdb'; 'fbt-repl.fb50.SS.fdb'; - for FB 5.x ('SS' = Super; 'CS' = Classic)
+    NB: fixed numeric value ('40' or '50') must be used for any minor FB version (4.0; 4.0.1; 4.1; 5.0; 5.1 etc)
+
+    These two databases must NOT be dropped in any of the tests related to replication!
+    They are created and dropped in the batch scenario which prepares the FB instance to be checked for each ServerMode
+    and makes cleanup after it, i.e. when all tests are completed.
+
+    NB. Currently this task was implemented only in Windows batch, thus the test has attribute platform = 'Windows'.
+
+    Temporary comment. For debug purposes:
+    1) find out the SUFFIX of the name of the FB service which is to be tested (e.g. 'DefaultInstance', '40SS' etc);
+    2) copy file %fbt-repo%/tests/functional/tabloid/batches/setup-fb-for-replication.bat.txt
+       to some place and rename it "*.bat";
+    3) open this .bat in an editor and adjust the value of the 'fbt_repo' variable;
+    4) run: setup-fb-for-replication.bat [SUFFIX_OF_FB_SERVICE]
+       where SUFFIX_OF_FB_SERVICE is the ending part of the FB service which you want to check:
+       DefaultInstance ; 40ss ; 40cs ; 50ss ; 50cs etc
+    5) batch 'setup-fb-for-replication.bat' will:
+       * stop selected FB instance
+       * create test databases (in !fbt_repo!/tmp);
+       * prepare journal/archive sub-folders for replication (also in !fbt_repo!/tmp);
+       * replace %fb_home%/replication.conf with the appropriate one
+       * start selected FB instance
+    6) run this test (FB instance will already be launched by setup-fb-for-replication.bat):
+       %fbt_repo%/fbt-run2.bat dblevel-triggers-must-not-fire-on-replica.fbt 50ss, etc
+
+    Confirmed bug on 4.0.0.126, 4.0.1.2556: message "Cannot make field Y of table TEST NOT NULL because there are NULLs"
+    will be added into replication log and after this replication gets stuck.
+
+    Checked on: 5.0.0.131 SS/CS; 4.0.1.2563 SS/CS.
+FBTEST: functional.replication.failed_DDL_commands_can_be_replicated
+"""

 import pytest
-from firebird.qa import db_factory, python_act, Action
+from firebird.qa import *

-# version: 4.0.1
-# resources: None
+substitutions = [('Start removing objects in:.*', 'Start removing objects'),
+                 ('Finish. Total objects removed: [1-9]\\d*', 'Finish.
Total objects removed'), ('.* CREATE DATABASE .*', ''), ('FOUND message about replicated segment N .*', 'FOUND message about replicated segment')] +db = db_factory() -init_script_1 = """""" +act = python_act('db', substitutions=substitutions) -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -# test_script_1 -#--- -# -# import os -# import subprocess -# import re -# import difflib -# import shutil -# import time -# -# os.environ["ISC_USER"] = user_name -# os.environ["ISC_PASSWORD"] = user_password -# -# ##################################### -# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 65 -# ##################################### -# -# svc = fdb.services.connect(host='localhost', user=user_name, password=user_password) -# FB_HOME = svc.get_home_directory() -# svc.close() -# -# engine = db_conn.engine_version # 4.0; 4.1; 5.0 etc -- type float -# fb_major = 'fb' + str(engine)[:1] + '0' # 'fb40'; 'fb50' -# -# cur = db_conn.cursor() -# cur.execute("select rdb$config_value from rdb$config where upper(rdb$config_name) = upper('ServerMode')") -# server_mode = 'XX' -# for r in cur: -# if r[0] == 'Super': -# server_mode = 'SS' -# elif r[0] == 'SuperClassic': -# server_mode = 'SC' -# elif r[0] == 'Classic': -# server_mode = 'CS' -# cur.close() -# -# # 'fbt-main.fb50.ss.fdb' etc: -# db_main = os.path.join( context['temp_directory'], 'fbt-main.' + fb_major + '.' + server_mode + '.fdb' ) -# db_repl = db_main.replace( 'fbt-main.', 'fbt-repl.') -# -# # Folders for journalling and archieving segments. -# repl_journal_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.journal' ) -# repl_archive_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.archive' ) -# -# db_conn.close() -# -# #-------------------------------------------- -# -# def flush_and_close( file_handle ): -# # https://docs.python.org/2/library/os.html#os.fsync -# # If you're starting with a Python file object f, -# # first do f.flush(), and -# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. -# global os -# -# file_handle.flush() -# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: -# # otherwise: "OSError: [Errno 9] Bad file descriptor"! 
-# os.fsync(file_handle.fileno()) -# file_handle.close() -# -# #-------------------------------------------- -# -# def cleanup( f_names_list ): -# global os -# for i in range(len( f_names_list )): -# if type(f_names_list[i]) == file: -# del_name = f_names_list[i].name -# elif type(f_names_list[i]) == str: -# del_name = f_names_list[i] -# else: -# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.') -# del_name = None -# -# if del_name and os.path.isfile( del_name ): -# os.remove( del_name ) -# -# #-------------------------------------------- -# -# def wait_for_data_in_replica( fb_home, max_allowed_time_for_wait, db_main, prefix_msg = '' ): -# -# global re -# global difflib -# global time -# -# replold_lines = [] -# with open( os.path.join(fb_home,'replication.log'), 'r') as f: -# replold_lines = f.readlines() -# -# con = fdb.connect( dsn = 'localhost:' + db_main, no_db_triggers = 1) -# cur = con.cursor() -# cur.execute("select rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') from rdb$database") -# for r in cur: -# last_generated_repl_segment = r[0] -# cur.close() -# con.close() -# -# #print('last_generated_repl_segment:', last_generated_repl_segment) -# -# # VERBOSE: Segment 1 (2582 bytes) is replicated in 1 second(s), deleting the file -# p_successfully_replicated = re.compile( '\\+\\s+verbose:\\s+segment\\s+%(last_generated_repl_segment)s\\s+\\(\\d+\\s+bytes\\)\\s+is\\s+replicated.*deleting' % locals(), re.IGNORECASE) -# -# # VERBOSE: Segment 16 replication failure at offset 33628 -# p_replication_failure = re.compile('segment\\s+\\d+\\s+replication\\s+failure', re.IGNORECASE) -# -# found_required_message = False -# found_replfail_message = False -# found_common_error_msg = False -# -# for i in range(0,max_allowed_time_for_wait): -# -# time.sleep(1) -# -# # Get content of fb_home replication.log _after_ isql finish: -# f_repllog_new = open( os.path.join(fb_home,'replication.log'), 'r') -# diff_data = difflib.unified_diff( -# replold_lines, -# f_repllog_new.readlines() -# ) -# f_repllog_new.close() -# -# ''' -# print('i=%d' % i) -# print('check replold_lines:') -# print('- - - - - - - - - - - -') -# for k in replold_lines: -# print(' ', k) -# print('- - - - - - - - - - - -') -# -# print('check diff_data:') -# print('- - - - - - - - - - - -') -# for k in diff_data: -# print(' ', k) -# print('- - - - - - - - - - - -') -# ''' -# -# for k,d in enumerate(diff_data): -# if p_successfully_replicated.search(d): -# print( (prefix_msg + ' ' if prefix_msg else '') + 'FOUND message about replicated segment N %(last_generated_repl_segment)s.' % locals() ) -# found_required_message = True -# break -# -# if p_replication_failure.search(d): -# print( (prefix_msg + ' ' if prefix_msg else '') + '@@@ SEGMENT REPLICATION FAILURE @@@ ' + d ) -# found_replfail_message = True -# break -# -# if 'ERROR:' in d: -# print( (prefix_msg + ' ' if prefix_msg else '') + '@@@ REPLICATION ERROR @@@ ' + d ) -# found_common_error_msg = True -# break -# -# if found_required_message or found_replfail_message or found_common_error_msg: -# break -# -# if not found_required_message: -# print('UNEXPECTED RESULT: no message about replicating segment N %(last_generated_repl_segment)s for %(max_allowed_time_for_wait)s seconds.' 
% locals()) -# -# #-------------------------------------------- -# -# sql_ddl = ''' -- do NOT use in this test -- set bail on; -# set list on; -# -# recreate table test(id bigint primary key, x int); -# insert into test(id, x) values(9223372036854775807, 1111); -# insert into test(id, x) values(9223372036854775806, null); -# insert into test(id, x) values(9223372036854775805, 3333); -# commit; -# -# -- must fail: -# alter table test add y int not null; -# commit; -# -# -- must fail: -# alter table test alter column x set not null; -# commit; -# -# -# create domain dm_nn int; -# -# recreate table test(id smallint primary key, x dm_nn); -# insert into test(id, x) values(-1, 1111); -# insert into test(id, x) values(-2, null); -# insert into test(id, x) values(-3, 3333); -# commit; -# -# -- must fail: -# alter domain dm_nn set not null; -# commit; -# -# -- connect 'localhost:%(db_main)s'; -# -# select rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') as last_generated_segment from rdb$database; -# commit; -# ''' % locals() -# -# -# f_sql_chk = open( os.path.join(context['temp_directory'],'tmp_gh_6907_test.sql'), 'w') -# f_sql_chk.write(sql_ddl) -# flush_and_close( f_sql_chk ) -# -# f_sql_log = open( ''.join( (os.path.splitext(f_sql_chk.name)[0], '.log' ) ), 'w') -# f_sql_err = open( ''.join( (os.path.splitext(f_sql_chk.name)[0], '.err' ) ), 'w') -# subprocess.call( [ context['isql_path'], 'localhost:' + db_main, '-i', f_sql_chk.name ], stdout = f_sql_log, stderr = subprocess.STDOUT) -# flush_and_close( f_sql_log ) -# flush_and_close( f_sql_err ) -# -# last_generated_repl_segment = 0 -# -# # do NOT check STDERR of initial SQL: it must contain errors -# # because we try to run DDL statement that for sure will FAIL: -# #with open(f_sql_err.name,'r') as f: -# # for line in f: -# # print('UNEXPECTED STDERR in initial SQL: ' + line) -# # MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0 -# -# if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> initial SQL script finished w/o errors -# -# ############################################################################## -# ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### -# ############################################################################## -# wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-1' ) -# -# runProgram('isql', ['localhost:' + db_main, '-nod'], 'set list on; select t.id as main_id, t.x as main_x from rdb$database r left join test t on 1=1 order by t.id;') -# runProgram('isql', ['localhost:' + db_repl, '-nod'], 'set list on; select t.id as repl_id, t.x as repl_x from rdb$database r left join test t on 1=1 order by t.id;') -# -# # return initial state of master DB: -# # remove all DB objects (tables, views, ...): -# # -------------------------------------------- -# sql_clean_ddl = os.path.join(context['files_location'],'drop-all-db-objects.sql') -# -# f_clean_log=open( os.path.join(context['temp_directory'],'tmp_gh_6907_drop-all-db-objects.log'), 'w') -# f_clean_err=open( ''.join( ( os.path.splitext(f_clean_log.name)[0], '.err') ), 'w') -# subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-i', sql_clean_ddl], stdout=f_clean_log, stderr=f_clean_err ) -# flush_and_close(f_clean_log) -# flush_and_close(f_clean_err) -# -# with open(f_clean_err.name,'r') as f: -# for line in f: -# print('UNEXPECTED STDERR in cleanup SQL: ' + line) -# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0 -# -# with open(f_clean_log.name,'r') as f: -# for line in f: -# # show number of dropped objects -# print(line) 
-# -# if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> cleanup SQL script finished w/o errors -# -# ############################################################################## -# ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### -# ############################################################################## -# wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-2' ) -# -# f_main_meta_sql=open( os.path.join(context['temp_directory'],'tmp_gh_6907_db_main_meta.sql'), 'w') -# subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_main_meta_sql, stderr=subprocess.STDOUT ) -# flush_and_close( f_main_meta_sql ) -# -# f_repl_meta_sql=open( os.path.join(context['temp_directory'],'tmp_gh_6907_db_repl_meta.sql'), 'w') -# subprocess.call( [context['isql_path'], 'localhost:' + db_repl, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_repl_meta_sql, stderr=subprocess.STDOUT ) -# flush_and_close( f_repl_meta_sql ) -# -# db_main_meta=open(f_main_meta_sql.name, 'r') -# db_repl_meta=open(f_repl_meta_sql.name, 'r') -# -# diffmeta = ''.join(difflib.unified_diff( -# db_main_meta.readlines(), -# db_repl_meta.readlines() -# )) -# db_main_meta.close() -# db_repl_meta.close() -# -# f_meta_diff=open( os.path.join(context['temp_directory'],'tmp_gh_6907_db_meta_diff.txt'), 'w', buffering = 0) -# f_meta_diff.write(diffmeta) -# flush_and_close( f_meta_diff ) -# -# # Following must issue only TWO rows: -# # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_main]' ... */ -# # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_repl]' ... */ -# # Only thes lines will be suppressed further (see subst. section): -# with open(f_meta_diff.name, 'r') as f: -# for line in f: -# if line[:1] in ('-', '+') and line[:3] not in ('---','+++'): -# print('UNEXPECTED METADATA DIFF.: ' + line) -# -# ###################### -# ### A C H T U N G ### -# ###################### -# # MANDATORY, OTHERWISE REPLICATION GETS STUCK ON SECOND RUN OF THIS TEST -# # WITH 'ERROR: Record format with length NN is not found for table TEST': -# runProgram('gfix', ['-sweep', 'localhost:' + db_repl]) -# runProgram('gfix', ['-sweep', 'localhost:' + db_main]) -# ####################### -# -# -# # cleanup: -# ########## -# cleanup( (f_sql_chk, f_sql_log, f_sql_err,f_clean_log,f_clean_err,f_main_meta_sql,f_repl_meta_sql,f_meta_diff) ) -# -#--- -act_1 = python_act('db_1', substitutions=substitutions_1) - -expected_stdout_1 = """ +expected_stdout = """ POINT-1 FOUND message about replicated segment N 1. MAIN_ID -3 @@ -442,10 +129,309 @@ expected_stdout_1 = """ POINT-2 FOUND message about replicated segment N 2. 
""" +@pytest.mark.skip('FIXME: Not IMPLEMENTED') @pytest.mark.version('>=4.0.1') @pytest.mark.platform('Windows') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") +# test_script_1 +#--- +# +# import os +# import subprocess +# import re +# import difflib +# import shutil +# import time +# +# os.environ["ISC_USER"] = user_name +# os.environ["ISC_PASSWORD"] = user_password +# +# ##################################### +# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 65 +# ##################################### +# +# svc = fdb.services.connect(host='localhost', user=user_name, password=user_password) +# FB_HOME = svc.get_home_directory() +# svc.close() +# +# engine = db_conn.engine_version # 4.0; 4.1; 5.0 etc -- type float +# fb_major = 'fb' + str(engine)[:1] + '0' # 'fb40'; 'fb50' +# +# cur = db_conn.cursor() +# cur.execute("select rdb$config_value from rdb$config where upper(rdb$config_name) = upper('ServerMode')") +# server_mode = 'XX' +# for r in cur: +# if r[0] == 'Super': +# server_mode = 'SS' +# elif r[0] == 'SuperClassic': +# server_mode = 'SC' +# elif r[0] == 'Classic': +# server_mode = 'CS' +# cur.close() +# +# # 'fbt-main.fb50.ss.fdb' etc: +# db_main = os.path.join( context['temp_directory'], 'fbt-main.' + fb_major + '.' + server_mode + '.fdb' ) +# db_repl = db_main.replace( 'fbt-main.', 'fbt-repl.') +# +# # Folders for journalling and archieving segments. +# repl_journal_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.journal' ) +# repl_archive_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.archive' ) +# +# db_conn.close() +# +# #-------------------------------------------- +# +# def flush_and_close( file_handle ): +# # https://docs.python.org/2/library/os.html#os.fsync +# # If you're starting with a Python file object f, +# # first do f.flush(), and +# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. +# global os +# +# file_handle.flush() +# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: +# # otherwise: "OSError: [Errno 9] Bad file descriptor"! 
+# os.fsync(file_handle.fileno()) +# file_handle.close() +# +# #-------------------------------------------- +# +# def cleanup( f_names_list ): +# global os +# for i in range(len( f_names_list )): +# if type(f_names_list[i]) == file: +# del_name = f_names_list[i].name +# elif type(f_names_list[i]) == str: +# del_name = f_names_list[i] +# else: +# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.') +# del_name = None +# +# if del_name and os.path.isfile( del_name ): +# os.remove( del_name ) +# +# #-------------------------------------------- +# +# def wait_for_data_in_replica( fb_home, max_allowed_time_for_wait, db_main, prefix_msg = '' ): +# +# global re +# global difflib +# global time +# +# replold_lines = [] +# with open( os.path.join(fb_home,'replication.log'), 'r') as f: +# replold_lines = f.readlines() +# +# con = fdb.connect( dsn = 'localhost:' + db_main, no_db_triggers = 1) +# cur = con.cursor() +# cur.execute("select rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') from rdb$database") +# for r in cur: +# last_generated_repl_segment = r[0] +# cur.close() +# con.close() +# +# #print('last_generated_repl_segment:', last_generated_repl_segment) +# +# # VERBOSE: Segment 1 (2582 bytes) is replicated in 1 second(s), deleting the file +# p_successfully_replicated = re.compile( '\\+\\s+verbose:\\s+segment\\s+%(last_generated_repl_segment)s\\s+\\(\\d+\\s+bytes\\)\\s+is\\s+replicated.*deleting' % locals(), re.IGNORECASE) +# +# # VERBOSE: Segment 16 replication failure at offset 33628 +# p_replication_failure = re.compile('segment\\s+\\d+\\s+replication\\s+failure', re.IGNORECASE) +# +# found_required_message = False +# found_replfail_message = False +# found_common_error_msg = False +# +# for i in range(0,max_allowed_time_for_wait): +# +# time.sleep(1) +# +# # Get content of fb_home replication.log _after_ isql finish: +# f_repllog_new = open( os.path.join(fb_home,'replication.log'), 'r') +# diff_data = difflib.unified_diff( +# replold_lines, +# f_repllog_new.readlines() +# ) +# f_repllog_new.close() +# +# ''' +# print('i=%d' % i) +# print('check replold_lines:') +# print('- - - - - - - - - - - -') +# for k in replold_lines: +# print(' ', k) +# print('- - - - - - - - - - - -') +# +# print('check diff_data:') +# print('- - - - - - - - - - - -') +# for k in diff_data: +# print(' ', k) +# print('- - - - - - - - - - - -') +# ''' +# +# for k,d in enumerate(diff_data): +# if p_successfully_replicated.search(d): +# print( (prefix_msg + ' ' if prefix_msg else '') + 'FOUND message about replicated segment N %(last_generated_repl_segment)s.' % locals() ) +# found_required_message = True +# break +# +# if p_replication_failure.search(d): +# print( (prefix_msg + ' ' if prefix_msg else '') + '@@@ SEGMENT REPLICATION FAILURE @@@ ' + d ) +# found_replfail_message = True +# break +# +# if 'ERROR:' in d: +# print( (prefix_msg + ' ' if prefix_msg else '') + '@@@ REPLICATION ERROR @@@ ' + d ) +# found_common_error_msg = True +# break +# +# if found_required_message or found_replfail_message or found_common_error_msg: +# break +# +# if not found_required_message: +# print('UNEXPECTED RESULT: no message about replicating segment N %(last_generated_repl_segment)s for %(max_allowed_time_for_wait)s seconds.' 
% locals()) +# +# #-------------------------------------------- +# +# sql_ddl = ''' -- do NOT use in this test -- set bail on; +# set list on; +# +# recreate table test(id bigint primary key, x int); +# insert into test(id, x) values(9223372036854775807, 1111); +# insert into test(id, x) values(9223372036854775806, null); +# insert into test(id, x) values(9223372036854775805, 3333); +# commit; +# +# -- must fail: +# alter table test add y int not null; +# commit; +# +# -- must fail: +# alter table test alter column x set not null; +# commit; +# +# +# create domain dm_nn int; +# +# recreate table test(id smallint primary key, x dm_nn); +# insert into test(id, x) values(-1, 1111); +# insert into test(id, x) values(-2, null); +# insert into test(id, x) values(-3, 3333); +# commit; +# +# -- must fail: +# alter domain dm_nn set not null; +# commit; +# +# -- connect 'localhost:%(db_main)s'; +# +# select rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') as last_generated_segment from rdb$database; +# commit; +# ''' % locals() +# +# +# f_sql_chk = open( os.path.join(context['temp_directory'],'tmp_gh_6907_test.sql'), 'w') +# f_sql_chk.write(sql_ddl) +# flush_and_close( f_sql_chk ) +# +# f_sql_log = open( ''.join( (os.path.splitext(f_sql_chk.name)[0], '.log' ) ), 'w') +# f_sql_err = open( ''.join( (os.path.splitext(f_sql_chk.name)[0], '.err' ) ), 'w') +# subprocess.call( [ context['isql_path'], 'localhost:' + db_main, '-i', f_sql_chk.name ], stdout = f_sql_log, stderr = subprocess.STDOUT) +# flush_and_close( f_sql_log ) +# flush_and_close( f_sql_err ) +# +# last_generated_repl_segment = 0 +# +# # do NOT check STDERR of initial SQL: it must contain errors +# # because we try to run DDL statement that for sure will FAIL: +# #with open(f_sql_err.name,'r') as f: +# # for line in f: +# # print('UNEXPECTED STDERR in initial SQL: ' + line) +# # MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0 +# +# if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> initial SQL script finished w/o errors +# +# ############################################################################## +# ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### +# ############################################################################## +# wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-1' ) +# +# runProgram('isql', ['localhost:' + db_main, '-nod'], 'set list on; select t.id as main_id, t.x as main_x from rdb$database r left join test t on 1=1 order by t.id;') +# runProgram('isql', ['localhost:' + db_repl, '-nod'], 'set list on; select t.id as repl_id, t.x as repl_x from rdb$database r left join test t on 1=1 order by t.id;') +# +# # return initial state of master DB: +# # remove all DB objects (tables, views, ...): +# # -------------------------------------------- +# sql_clean_ddl = os.path.join(context['files_location'],'drop-all-db-objects.sql') +# +# f_clean_log=open( os.path.join(context['temp_directory'],'tmp_gh_6907_drop-all-db-objects.log'), 'w') +# f_clean_err=open( ''.join( ( os.path.splitext(f_clean_log.name)[0], '.err') ), 'w') +# subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-i', sql_clean_ddl], stdout=f_clean_log, stderr=f_clean_err ) +# flush_and_close(f_clean_log) +# flush_and_close(f_clean_err) +# +# with open(f_clean_err.name,'r') as f: +# for line in f: +# print('UNEXPECTED STDERR in cleanup SQL: ' + line) +# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0 +# +# with open(f_clean_log.name,'r') as f: +# for line in f: +# # show number of dropped objects +# print(line) 
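+#
+# #--------------------------------------------
+# # Aside (illustration only, NOT part of the original script): the log-polling idea that
+# # wait_for_data_in_replica() implements above can be sketched as a small stand-alone helper.
+# # The names poll_replication_log() and log_path below are hypothetical, chosen just for
+# # this commented sketch; the regex follows the 'Segment N (NNN bytes) is replicated' lines
+# # quoted earlier from replication.log:
+# #
+# #     import os, re, time
+# #
+# #     def poll_replication_log(log_path, segment_no, timeout_sec):
+# #         # Re-read replication.log once per second until it reports the given segment
+# #         # as replicated, or give up after timeout_sec seconds.
+# #         rx = re.compile(r'segment\s+%d\s+\(\d+\s+bytes\)\s+is\s+replicated' % segment_no, re.IGNORECASE)
+# #         deadline = time.time() + timeout_sec
+# #         while time.time() < deadline:
+# #             with open(log_path, 'r') as f:
+# #                 if any(rx.search(line) for line in f):
+# #                     return True
+# #             time.sleep(1)
+# #         return False
+# #
+# #     # e.g.: poll_replication_log(os.path.join(FB_HOME, 'replication.log'), 2, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG)
+# #--------------------------------------------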
+# +# if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> cleanup SQL script finished w/o errors +# +# ############################################################################## +# ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### +# ############################################################################## +# wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-2' ) +# +# f_main_meta_sql=open( os.path.join(context['temp_directory'],'tmp_gh_6907_db_main_meta.sql'), 'w') +# subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_main_meta_sql, stderr=subprocess.STDOUT ) +# flush_and_close( f_main_meta_sql ) +# +# f_repl_meta_sql=open( os.path.join(context['temp_directory'],'tmp_gh_6907_db_repl_meta.sql'), 'w') +# subprocess.call( [context['isql_path'], 'localhost:' + db_repl, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_repl_meta_sql, stderr=subprocess.STDOUT ) +# flush_and_close( f_repl_meta_sql ) +# +# db_main_meta=open(f_main_meta_sql.name, 'r') +# db_repl_meta=open(f_repl_meta_sql.name, 'r') +# +# diffmeta = ''.join(difflib.unified_diff( +# db_main_meta.readlines(), +# db_repl_meta.readlines() +# )) +# db_main_meta.close() +# db_repl_meta.close() +# +# f_meta_diff=open( os.path.join(context['temp_directory'],'tmp_gh_6907_db_meta_diff.txt'), 'w', buffering = 0) +# f_meta_diff.write(diffmeta) +# flush_and_close( f_meta_diff ) +# +# # Following must issue only TWO rows: +# # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_main]' ... */ +# # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_repl]' ... */ +# # Only thes lines will be suppressed further (see subst. section): +# with open(f_meta_diff.name, 'r') as f: +# for line in f: +# if line[:1] in ('-', '+') and line[:3] not in ('---','+++'): +# print('UNEXPECTED METADATA DIFF.: ' + line) +# +# ###################### +# ### A C H T U N G ### +# ###################### +# # MANDATORY, OTHERWISE REPLICATION GETS STUCK ON SECOND RUN OF THIS TEST +# # WITH 'ERROR: Record format with length NN is not found for table TEST': +# runProgram('gfix', ['-sweep', 'localhost:' + db_repl]) +# runProgram('gfix', ['-sweep', 'localhost:' + db_main]) +# ####################### +# +# +# # cleanup: +# ########## +# cleanup( (f_sql_chk, f_sql_log, f_sql_err,f_clean_log,f_clean_err,f_main_meta_sql,f_repl_meta_sql,f_meta_diff) ) +# +#--- diff --git a/tests/functional/replication/test_generator_could_not_be_transferred_to_replica.py b/tests/functional/replication/test_generator_could_not_be_transferred_to_replica.py index 8cb8331d..f4b1851d 100644 --- a/tests/functional/replication/test_generator_could_not_be_transferred_to_replica.py +++ b/tests/functional/replication/test_generator_could_not_be_transferred_to_replica.py @@ -1,363 +1,89 @@ #coding:utf-8 -# -# id: tests.functional.replication.generator_could_not_be_transferred_to_replica -# title: Generator values may not replicate after commit -# decription: -# https://github.com/FirebirdSQL/firebird/issues/6848 -# -# Test creates table and two sequences: 'g_1' and 'g_2'. -# Then we add record in the table and use 'g_2' (i.e. sequence which was created LAST). -# After this we do update record and use 'g_1' (seq. which was created FIRST). -# -# Then we wait until replica becomes actual to master, and this delay will last no more then threshold -# that is defined by MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG variable (measured in seconds). 
-# During this delay, we check every second for replication log and search there line with number of last generated -# segment (which was replicated and deleting finally). -# We can assume that replication finished OK only when such line is found see ('POINT-1'). -# -# After this, we do query master and replica databases and obtain count() for table and values of sequences. -# Obtained values must be identical on master and replica. -# -# Further, we invoke ISQL with executing auxiliary script for drop all DB objects on master (with '-nod' command switch). -# After all objects will be dropped, we have to wait again until replica becomes actual with master (see 'POINT-2'). -# -# Finally, we extract metadata for master and replica and compare them (see 'f_meta_diff'). -# The only difference in metadata must be 'CREATE DATABASE' statement with different DB names - we suppress it, -# thus metadata difference must not be issued. -# -# ################ -# ### N O T E ### -# ################ -# Test assumes that master and replica DB have been created beforehand. -# Also, it assumes that %FB_HOME% -# eplication.conf has been prepared with apropriate parameters for replication. -# Particularly, name of directories and databases must have info about checked FB major version and ServerMode. -# * verbose = true // in order to find out line with message that required segment was replicated -# * section for master database with specified parameters: -# journal_directory = "!fbt_repo! mp -# b-replication.!fb_major!.!server_mode!.journal" -# journal_archive_directory = "!fbt_repo! mp -# b-replication.!fb_major!.!server_mode!.archive" -# journal_archive_command = "copy $(pathname) $(archivepathname)" -# journal_archive_timeout = 10 -# * section for replica database with specified parameter: -# journal_source_directory = "!fbt_repo! mp -# b-replication.!fb_major!.!server_mode!.archive" -# -# Master and replica databases must be created in "!fbt_repo! mp" directory and have names like these: -# 'fbt-main.fb40.SS.fdb'; 'fbt-repl.fb40.SS.fdb'; - for FB 4.x ('SS' = Super; 'CS' = Classic) -# 'fbt-main.fb50.SS.fdb'; 'fbt-repl.fb50.SS.fdb'; - for FB 5.x ('SS' = Super; 'CS' = Classic) -# NB: fixed numeric value ('40' or '50') must be used for any minor FB version (4.0; 4.0.1; 4.1; 5.0; 5.1 etc) -# -# These two databases must NOT be dropped in any of tests related to replication! -# They are created and dropped in the batch scenario which prepares FB instance to be checked for each ServerMode -# and make cleanup after it, i.e. when all tests will be completed. -# -# NB. Currently this task was implemented only in Windows batch, thus test has attribute platform = 'Windows'. -# -# Temporary comment. For debug purpoces: -# 1) find out SUFFIX of the name of FB service which is to be tested (e.g. 'DefaultInstance', '40SS' etc); -# 2) copy file %fbt-repo% ests -# unctional abloidatches\\setup-fb-for-replication.bat.txt -# to some place and rename it "*.bat"; -# 3) open this .bat in editor and asjust value of 'fbt_repo' variable; -# 4) run: setup-fb-for-replication.bat [SUFFIX_OF_FB_SERVICE] -# where SUFFIX_OF_FB_SERVICE is ending part of FB service which you want to check: -# DefaultInstance ; 40ss ; 40cs ; 50ss ; 50cs etc -# 5) batch 'setup-fb-for-replication.bat' will: -# * stop selected FB instance -# * create test databases (in !fbt_repo! mp\\); -# * prepare journal/archive sub-folders for replication (also in !fbt_repo! 
mp\\); -# * replace %fb_home% -# eplication.conf with apropriate -# * start selected FB instance -# 6) run this test (FB instance will be already launched by setup-fb-for-replication.bat): -# %fpt_repo% -# bt-run2.bat dblevel-triggers-must-not-fire-on-replica.fbt 50ss, etc -# -# Confirmed bug on 4.0.0.2465: sequence with least ID ('g_1') is not replicated, its value on replica remains 0. -# Checked on: -# 4.0.1.2519 SS; 5.0.0.82 SS/CS. -# -# tracker_id: -# min_versions: ['4.0'] -# versions: 4.0 -# qmid: None + +""" +ID: replication.generator_could_not_be_transferred_to_replica +ISSUE: 6848 +TITLE: Generator values may not replicate after commit +DESCRIPTION: + Test creates table and two sequences: 'g_1' and 'g_2'. + Then we add record in the table and use 'g_2' (i.e. sequence which was created LAST). + After this we do update record and use 'g_1' (seq. which was created FIRST). + + Then we wait until replica becomes actual to master, and this delay will last no more then threshold + that is defined by MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG variable (measured in seconds). + During this delay, we check every second for replication log and search there line with number of last generated + segment (which was replicated and deleting finally). + We can assume that replication finished OK only when such line is found see ('POINT-1'). + + After this, we do query master and replica databases and obtain count() for table and values of sequences. + Obtained values must be identical on master and replica. + + Further, we invoke ISQL with executing auxiliary script for drop all DB objects on master (with '-nod' command switch). + After all objects will be dropped, we have to wait again until replica becomes actual with master (see 'POINT-2'). + + Finally, we extract metadata for master and replica and compare them (see 'f_meta_diff'). + The only difference in metadata must be 'CREATE DATABASE' statement with different DB names - we suppress it, + thus metadata difference must not be issued. + + ################ + ### N O T E ### + ################ + Test assumes that master and replica DB have been created beforehand. + Also, it assumes that %FB_HOME%/replication.conf has been prepared with apropriate parameters for replication. + Particularly, name of directories and databases must have info about checked FB major version and ServerMode. + * verbose = true // in order to find out line with message that required segment was replicated + * section for master database with specified parameters: + journal_directory = "!fbt_repo!/tmp/fb-replication.!fb_major!.!server_mode!.journal" + journal_archive_directory = "!fbt_repo!/tmp/fb-replication.!fb_major!.!server_mode!.archive" + journal_archive_command = "copy $(pathname) $(archivepathname)" + journal_archive_timeout = 10 + * section for replica database with specified parameter: + journal_source_directory = "!fbt_repo!/tmp/fb-replication.!fb_major!.!server_mode!.archive" + + Master and replica databases must be created in "!fbt_repo! mp" directory and have names like these: + 'fbt-main.fb40.SS.fdb'; 'fbt-repl.fb40.SS.fdb'; - for FB 4.x ('SS' = Super; 'CS' = Classic) + 'fbt-main.fb50.SS.fdb'; 'fbt-repl.fb50.SS.fdb'; - for FB 5.x ('SS' = Super; 'CS' = Classic) + NB: fixed numeric value ('40' or '50') must be used for any minor FB version (4.0; 4.0.1; 4.1; 5.0; 5.1 etc) + + These two databases must NOT be dropped in any of tests related to replication! 
+ They are created and dropped in the batch scenario which prepares FB instance to be checked for each ServerMode + and make cleanup after it, i.e. when all tests will be completed. + + NB. Currently this task was implemented only in Windows batch, thus test has attribute platform = 'Windows'. + + Temporary comment. For debug purpoces: + 1) find out SUFFIX of the name of FB service which is to be tested (e.g. 'DefaultInstance', '40SS' etc); + 2) copy file %fbt-repo%/tests/functional/tabloid/batches/setup-fb-for-replication.bat.txt + to some place and rename it "*.bat"; + 3) open this .bat in editor and asjust value of 'fbt_repo' variable; + 4) run: setup-fb-for-replication.bat [SUFFIX_OF_FB_SERVICE] + where SUFFIX_OF_FB_SERVICE is ending part of FB service which you want to check: + DefaultInstance ; 40ss ; 40cs ; 50ss ; 50cs etc + 5) batch 'setup-fb-for-replication.bat' will: + * stop selected FB instance + * create test databases (in !fbt_repo!/tmp); + * prepare journal/archive sub-folders for replication (also in !fbt_repo!/tmp); + * replace %fb_home%/replication.conf with apropriate + * start selected FB instance + 6) run this test (FB instance will be already launched by setup-fb-for-replication.bat): + %fpt_repo%/fbt-run2.bat dblevel-triggers-must-not-fire-on-replica.fbt 50ss, etc + + Confirmed bug on 4.0.0.2465: sequence with least ID ('g_1') is not replicated, its value on replica remains 0. + Checked on: + 4.0.1.2519 SS; 5.0.0.82 SS/CS. +FBTEST: tests.functional.replication.generator_could_not_be_transferred_to_replica +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +substitutions = [('Start removing objects in:.*', 'Start removing objects'), + ('Finish. Total objects removed: [1-9]\\d*', 'Finish. Total objects removed'), + ('.* CREATE DATABASE .*', '')] -substitutions_1 = [('Start removing objects in:.*', 'Start removing objects'), ('Finish. Total objects removed: [1-9]\\d*', 'Finish. Total objects removed'), ('.* CREATE DATABASE .*', '')] +db = db_factory() -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -# test_script_1 -#--- -# -# import os -# import subprocess -# import re -# import difflib -# import shutil -# import time -# -# os.environ["ISC_USER"] = user_name -# os.environ["ISC_PASSWORD"] = user_password -# -# ##################################### -# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 65 -# ##################################### -# -# svc = fdb.services.connect(host='localhost', user=user_name, password=user_password) -# FB_HOME = svc.get_home_directory() -# svc.close() -# -# engine = db_conn.engine_version # 4.0; 4.1; 5.0 etc -- type float -# fb_major = 'fb' + str(engine)[:1] + '0' # 'fb40'; 'fb50' -# -# cur = db_conn.cursor() -# cur.execute("select rdb$config_value from rdb$config where upper(rdb$config_name) = upper('ServerMode')") -# server_mode = 'XX' -# for r in cur: -# if r[0] == 'Super': -# server_mode = 'SS' -# elif r[0] == 'SuperClassic': -# server_mode = 'SC' -# elif r[0] == 'Classic': -# server_mode = 'CS' -# cur.close() -# -# # 'fbt-main.fb50.ss.fdb' etc: -# db_main = os.path.join( context['temp_directory'], 'fbt-main.' + fb_major + '.' + server_mode + '.fdb' ) -# db_repl = db_main.replace( 'fbt-main.', 'fbt-repl.') -# -# # Folders for journalling and archieving segments. -# repl_journal_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' 
+ server_mode + '.journal' ) -# repl_archive_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.archive' ) -# -# db_conn.close() -# -# #-------------------------------------------- -# -# def flush_and_close( file_handle ): -# # https://docs.python.org/2/library/os.html#os.fsync -# # If you're starting with a Python file object f, -# # first do f.flush(), and -# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. -# global os -# -# file_handle.flush() -# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: -# # otherwise: "OSError: [Errno 9] Bad file descriptor"! -# os.fsync(file_handle.fileno()) -# file_handle.close() -# -# #-------------------------------------------- -# -# def cleanup( f_names_list ): -# global os -# for i in range(len( f_names_list )): -# if type(f_names_list[i]) == file: -# del_name = f_names_list[i].name -# elif type(f_names_list[i]) == str: -# del_name = f_names_list[i] -# else: -# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.') -# del_name = None -# -# if del_name and os.path.isfile( del_name ): -# os.remove( del_name ) -# -# #-------------------------------------------- -# -# def wait_for_data_in_replica( fb_home, max_allowed_time_for_wait, db_main, prefix_msg = '' ): -# -# global re -# global difflib -# global time -# -# replold_lines = [] -# with open( os.path.join(fb_home,'replication.log'), 'r') as f: -# replold_lines = f.readlines() -# -# con = fdb.connect( dsn = 'localhost:' + db_main, no_db_triggers = 1) -# cur = con.cursor() -# cur.execute("select rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') from rdb$database") -# for r in cur: -# last_generated_repl_segment = r[0] -# cur.close() -# con.close() -# -# #print('last_generated_repl_segment:', last_generated_repl_segment) -# -# -# # +IMAGE-PC1 (replica) Fri Jun 11 17:57:01 2021 -# # + Database: C:\\FBTESTING\\QA\\FBT-REPO\\TMP\\FBT-REPL.FB50.FDB -# # + VERBOSE: Added 1 segment(s) to the processing queue -# # + -# # +IMAGE-PC1 (replica) Fri Jun 11 17:57:04 2021 -# # + Database: C:\\FBTESTING\\QA\\FBT-REPO\\TMP\\FBT-REPL.FB50.FDB -# # + VERBOSE: Segment 1 (2582 bytes) is replicated in 1 second(s), deleting the file -# -# p=re.compile( '\\+\\s+verbose:\\s+segment\\s+%(last_generated_repl_segment)s\\s+\\(\\d+\\s+bytes\\)\\s+is\\s+replicated.*deleting' % locals(), re.IGNORECASE) -# -# found_required_message = False -# for i in range(0,max_allowed_time_for_wait): -# time.sleep(1) -# -# # Get content of fb_home replication.log _after_ isql finish: -# f_repllog_new = open( os.path.join(fb_home,'replication.log'), 'r') -# diff_data = difflib.unified_diff( -# replold_lines, -# f_repllog_new.readlines() -# ) -# f_repllog_new.close() -# -# for k,d in enumerate(diff_data): -# if p.search(d): -# print( (prefix_msg + ' ' if prefix_msg else '') + 'FOUND message about replicated segment.' ) -# found_required_message = True -# break -# -# if found_required_message: -# break -# -# if not found_required_message: -# print('UNEXPECTED RESULT: no message about replicated segment for %d seconds.' 
% max_allowed_time_for_wait) -# -# #-------------------------------------------- -# -# sql_ddl = ''' set bail on; -# set list on; -# -# select mon$database_name from mon$database; -# recreate sequence g_1; -# recreate sequence g_2; -# recreate table test(id int primary key); -# commit; -# -# select rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') as last_generated_repl_segment from rdb$database; -# quit; -# ''' % locals() -# -# -# f_sql_chk = open( os.path.join(context['temp_directory'],'tmp_chk_skip_gen_repl.sql'), 'w') -# f_sql_chk.write(sql_ddl) -# flush_and_close( f_sql_chk ) -# -# f_sql_log = open( ''.join( (os.path.splitext(f_sql_chk.name)[0], '.log' ) ), 'w') -# f_sql_err = open( ''.join( (os.path.splitext(f_sql_chk.name)[0], '.err' ) ), 'w') -# subprocess.call( [ context['isql_path'], 'localhost:' + db_main, '-i', f_sql_chk.name ], stdout = f_sql_log, stderr = f_sql_err) -# flush_and_close( f_sql_log ) -# flush_and_close( f_sql_err ) -# -# last_generated_repl_segment = 0 -# -# with open(f_sql_err.name,'r') as f: -# for line in f: -# print('UNEXPECTED STDERR in initial SQL: ' + line) -# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0 -# -# if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> initial SQL script finished w/o errors -# -# # Test connect to master DB, just to fire DB-level triggers: -# ########################### -# con1 = fdb.connect( dsn = 'localhost:' + db_main) -# con1.execute_immediate( 'insert into test(id) values( gen_id(g_2, 1) )' ) -# con1.execute_immediate( 'update test set id = id + gen_id(g_1, 1)' ) -# con1.commit() -# con1.close() -# -# ############################################################################## -# ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### -# ############################################################################## -# wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-1' ) -# -# runProgram('isql', ['localhost:' + db_main, '-nod'], 'set list on; select count(*) test_rows_on_main, gen_id(g_1,0) as g1_curr_val_on_main, gen_id(g_2,0) as g2_curr_val_on_main from test;') -# runProgram('isql', ['localhost:' + db_repl, '-nod'], 'set list on; select count(*) test_rows_on_repl, gen_id(g_1,0) as g1_curr_val_on_repl, gen_id(g_2,0) as g2_curr_val_on_repl from test;') -# -# -# # return initial state of master DB: -# # remove all DB objects (tables, views, ...): -# # -------------------------------------------- -# sql_clean_ddl = os.path.join(context['files_location'],'drop-all-db-objects.sql') -# -# f_clean_log=open( os.path.join(context['temp_directory'],'drop-all-db-objects.log'), 'w') -# f_clean_err=open( ''.join( ( os.path.splitext(f_clean_log.name)[0], '.err') ), 'w') -# subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-i', sql_clean_ddl], stdout=f_clean_log, stderr=f_clean_err ) -# flush_and_close(f_clean_log) -# flush_and_close(f_clean_err) -# -# with open(f_clean_err.name,'r') as f: -# for line in f: -# print('UNEXPECTED STDERR in cleanup SQL: ' + line) -# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0 -# -# with open(f_clean_log.name,'r') as f: -# for line in f: -# # show number of dropped objects -# print(line) -# -# if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> initial SQL script finished w/o errors -# -# ############################################################################## -# ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### -# ############################################################################## -# wait_for_data_in_replica( FB_HOME, 
MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-2' ) -# -# f_main_meta_sql=open( os.path.join(context['temp_directory'],'db_main_meta_skip_gen_repl.sql'), 'w') -# subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_main_meta_sql, stderr=subprocess.STDOUT ) -# flush_and_close( f_main_meta_sql ) -# -# f_repl_meta_sql=open( os.path.join(context['temp_directory'],'db_repl_meta_skip_gen_repl.sql'), 'w') -# subprocess.call( [context['isql_path'], 'localhost:' + db_repl, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_repl_meta_sql, stderr=subprocess.STDOUT ) -# flush_and_close( f_repl_meta_sql ) -# -# db_main_meta=open(f_main_meta_sql.name, 'r') -# db_repl_meta=open(f_repl_meta_sql.name, 'r') -# -# diffmeta = ''.join(difflib.unified_diff( -# db_main_meta.readlines(), -# db_repl_meta.readlines() -# )) -# db_main_meta.close() -# db_repl_meta.close() -# -# f_meta_diff=open( os.path.join(context['temp_directory'],'db_meta_diff_skip_gen_repl.txt'), 'w', buffering = 0) -# f_meta_diff.write(diffmeta) -# flush_and_close( f_meta_diff ) -# -# # Following must issue only TWO rows: -# # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_main]' ... */ -# # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_repl]' ... */ -# # Only thes lines will be suppressed further (see subst. section): -# with open(f_meta_diff.name, 'r') as f: -# for line in f: -# if line[:1] in ('-', '+') and line[:3] not in ('---','+++'): -# print('UNEXPECTED METADATA DIFF.: ' + line) -# -# ###################### -# ### A C H T U N G ### -# ###################### -# # MANDATORY, OTHERWISE REPLICATION MAY GET STUCK ON TESTS THAT ARE EXECUTED AFTER THIS -# # (e.g. on "updating-blob-with-empty-string-stops-replication") -# # with 'ERROR: Record format with length is not found for table TEST': -# runProgram('gfix', ['-sweep', 'localhost:' + db_repl]) -# runProgram('gfix', ['-sweep', 'localhost:' + db_main]) -# ####################### -# -# # cleanup: -# ########## -# cleanup( (f_sql_chk, f_sql_log, f_sql_err,f_clean_log,f_clean_err,f_main_meta_sql,f_repl_meta_sql,f_meta_diff) ) -# -# -#--- -act_1 = python_act('db_1', substitutions=substitutions_1) +act = python_act('db', substitutions=substitutions) expected_stdout_1 = """ POINT-1 FOUND message about replicated segment. @@ -375,10 +101,270 @@ expected_stdout_1 = """ POINT-2 FOUND message about replicated segment. 
""" +@pytest.mark.skip('FIXME: Not IMPLEMENTED') @pytest.mark.version('>=4.0') @pytest.mark.platform('Windows') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") +# test_script_1 +#--- +# +# import os +# import subprocess +# import re +# import difflib +# import shutil +# import time +# +# os.environ["ISC_USER"] = user_name +# os.environ["ISC_PASSWORD"] = user_password +# +# ##################################### +# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 65 +# ##################################### +# +# svc = fdb.services.connect(host='localhost', user=user_name, password=user_password) +# FB_HOME = svc.get_home_directory() +# svc.close() +# +# engine = db_conn.engine_version # 4.0; 4.1; 5.0 etc -- type float +# fb_major = 'fb' + str(engine)[:1] + '0' # 'fb40'; 'fb50' +# +# cur = db_conn.cursor() +# cur.execute("select rdb$config_value from rdb$config where upper(rdb$config_name) = upper('ServerMode')") +# server_mode = 'XX' +# for r in cur: +# if r[0] == 'Super': +# server_mode = 'SS' +# elif r[0] == 'SuperClassic': +# server_mode = 'SC' +# elif r[0] == 'Classic': +# server_mode = 'CS' +# cur.close() +# +# # 'fbt-main.fb50.ss.fdb' etc: +# db_main = os.path.join( context['temp_directory'], 'fbt-main.' + fb_major + '.' + server_mode + '.fdb' ) +# db_repl = db_main.replace( 'fbt-main.', 'fbt-repl.') +# +# # Folders for journalling and archieving segments. +# repl_journal_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.journal' ) +# repl_archive_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.archive' ) +# +# db_conn.close() +# +# #-------------------------------------------- +# +# def flush_and_close( file_handle ): +# # https://docs.python.org/2/library/os.html#os.fsync +# # If you're starting with a Python file object f, +# # first do f.flush(), and +# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. +# global os +# +# file_handle.flush() +# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: +# # otherwise: "OSError: [Errno 9] Bad file descriptor"! 
+# os.fsync(file_handle.fileno()) +# file_handle.close() +# +# #-------------------------------------------- +# +# def cleanup( f_names_list ): +# global os +# for i in range(len( f_names_list )): +# if type(f_names_list[i]) == file: +# del_name = f_names_list[i].name +# elif type(f_names_list[i]) == str: +# del_name = f_names_list[i] +# else: +# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.') +# del_name = None +# +# if del_name and os.path.isfile( del_name ): +# os.remove( del_name ) +# +# #-------------------------------------------- +# +# def wait_for_data_in_replica( fb_home, max_allowed_time_for_wait, db_main, prefix_msg = '' ): +# +# global re +# global difflib +# global time +# +# replold_lines = [] +# with open( os.path.join(fb_home,'replication.log'), 'r') as f: +# replold_lines = f.readlines() +# +# con = fdb.connect( dsn = 'localhost:' + db_main, no_db_triggers = 1) +# cur = con.cursor() +# cur.execute("select rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') from rdb$database") +# for r in cur: +# last_generated_repl_segment = r[0] +# cur.close() +# con.close() +# +# #print('last_generated_repl_segment:', last_generated_repl_segment) +# +# +# # +IMAGE-PC1 (replica) Fri Jun 11 17:57:01 2021 +# # + Database: C:\\FBTESTING\\QA\\FBT-REPO\\TMP\\FBT-REPL.FB50.FDB +# # + VERBOSE: Added 1 segment(s) to the processing queue +# # + +# # +IMAGE-PC1 (replica) Fri Jun 11 17:57:04 2021 +# # + Database: C:\\FBTESTING\\QA\\FBT-REPO\\TMP\\FBT-REPL.FB50.FDB +# # + VERBOSE: Segment 1 (2582 bytes) is replicated in 1 second(s), deleting the file +# +# p=re.compile( '\\+\\s+verbose:\\s+segment\\s+%(last_generated_repl_segment)s\\s+\\(\\d+\\s+bytes\\)\\s+is\\s+replicated.*deleting' % locals(), re.IGNORECASE) +# +# found_required_message = False +# for i in range(0,max_allowed_time_for_wait): +# time.sleep(1) +# +# # Get content of fb_home replication.log _after_ isql finish: +# f_repllog_new = open( os.path.join(fb_home,'replication.log'), 'r') +# diff_data = difflib.unified_diff( +# replold_lines, +# f_repllog_new.readlines() +# ) +# f_repllog_new.close() +# +# for k,d in enumerate(diff_data): +# if p.search(d): +# print( (prefix_msg + ' ' if prefix_msg else '') + 'FOUND message about replicated segment.' ) +# found_required_message = True +# break +# +# if found_required_message: +# break +# +# if not found_required_message: +# print('UNEXPECTED RESULT: no message about replicated segment for %d seconds.' 
% max_allowed_time_for_wait) +# +# #-------------------------------------------- +# +# sql_ddl = ''' set bail on; +# set list on; +# +# select mon$database_name from mon$database; +# recreate sequence g_1; +# recreate sequence g_2; +# recreate table test(id int primary key); +# commit; +# +# select rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') as last_generated_repl_segment from rdb$database; +# quit; +# ''' % locals() +# +# +# f_sql_chk = open( os.path.join(context['temp_directory'],'tmp_chk_skip_gen_repl.sql'), 'w') +# f_sql_chk.write(sql_ddl) +# flush_and_close( f_sql_chk ) +# +# f_sql_log = open( ''.join( (os.path.splitext(f_sql_chk.name)[0], '.log' ) ), 'w') +# f_sql_err = open( ''.join( (os.path.splitext(f_sql_chk.name)[0], '.err' ) ), 'w') +# subprocess.call( [ context['isql_path'], 'localhost:' + db_main, '-i', f_sql_chk.name ], stdout = f_sql_log, stderr = f_sql_err) +# flush_and_close( f_sql_log ) +# flush_and_close( f_sql_err ) +# +# last_generated_repl_segment = 0 +# +# with open(f_sql_err.name,'r') as f: +# for line in f: +# print('UNEXPECTED STDERR in initial SQL: ' + line) +# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0 +# +# if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> initial SQL script finished w/o errors +# +# # Test connect to master DB, just to fire DB-level triggers: +# ########################### +# con1 = fdb.connect( dsn = 'localhost:' + db_main) +# con1.execute_immediate( 'insert into test(id) values( gen_id(g_2, 1) )' ) +# con1.execute_immediate( 'update test set id = id + gen_id(g_1, 1)' ) +# con1.commit() +# con1.close() +# +# ############################################################################## +# ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### +# ############################################################################## +# wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-1' ) +# +# runProgram('isql', ['localhost:' + db_main, '-nod'], 'set list on; select count(*) test_rows_on_main, gen_id(g_1,0) as g1_curr_val_on_main, gen_id(g_2,0) as g2_curr_val_on_main from test;') +# runProgram('isql', ['localhost:' + db_repl, '-nod'], 'set list on; select count(*) test_rows_on_repl, gen_id(g_1,0) as g1_curr_val_on_repl, gen_id(g_2,0) as g2_curr_val_on_repl from test;') +# +# +# # return initial state of master DB: +# # remove all DB objects (tables, views, ...): +# # -------------------------------------------- +# sql_clean_ddl = os.path.join(context['files_location'],'drop-all-db-objects.sql') +# +# f_clean_log=open( os.path.join(context['temp_directory'],'drop-all-db-objects.log'), 'w') +# f_clean_err=open( ''.join( ( os.path.splitext(f_clean_log.name)[0], '.err') ), 'w') +# subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-i', sql_clean_ddl], stdout=f_clean_log, stderr=f_clean_err ) +# flush_and_close(f_clean_log) +# flush_and_close(f_clean_err) +# +# with open(f_clean_err.name,'r') as f: +# for line in f: +# print('UNEXPECTED STDERR in cleanup SQL: ' + line) +# MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0 +# +# with open(f_clean_log.name,'r') as f: +# for line in f: +# # show number of dropped objects +# print(line) +# +# if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> initial SQL script finished w/o errors +# +# ############################################################################## +# ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### +# ############################################################################## +# wait_for_data_in_replica( FB_HOME, 
MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-2' ) +# +# f_main_meta_sql=open( os.path.join(context['temp_directory'],'db_main_meta_skip_gen_repl.sql'), 'w') +# subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_main_meta_sql, stderr=subprocess.STDOUT ) +# flush_and_close( f_main_meta_sql ) +# +# f_repl_meta_sql=open( os.path.join(context['temp_directory'],'db_repl_meta_skip_gen_repl.sql'), 'w') +# subprocess.call( [context['isql_path'], 'localhost:' + db_repl, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_repl_meta_sql, stderr=subprocess.STDOUT ) +# flush_and_close( f_repl_meta_sql ) +# +# db_main_meta=open(f_main_meta_sql.name, 'r') +# db_repl_meta=open(f_repl_meta_sql.name, 'r') +# +# diffmeta = ''.join(difflib.unified_diff( +# db_main_meta.readlines(), +# db_repl_meta.readlines() +# )) +# db_main_meta.close() +# db_repl_meta.close() +# +# f_meta_diff=open( os.path.join(context['temp_directory'],'db_meta_diff_skip_gen_repl.txt'), 'w', buffering = 0) +# f_meta_diff.write(diffmeta) +# flush_and_close( f_meta_diff ) +# +# # Following must issue only TWO rows: +# # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_main]' ... */ +# # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_repl]' ... */ +# # Only thes lines will be suppressed further (see subst. section): +# with open(f_meta_diff.name, 'r') as f: +# for line in f: +# if line[:1] in ('-', '+') and line[:3] not in ('---','+++'): +# print('UNEXPECTED METADATA DIFF.: ' + line) +# +# ###################### +# ### A C H T U N G ### +# ###################### +# # MANDATORY, OTHERWISE REPLICATION MAY GET STUCK ON TESTS THAT ARE EXECUTED AFTER THIS +# # (e.g. on "updating-blob-with-empty-string-stops-replication") +# # with 'ERROR: Record format with length is not found for table TEST': +# runProgram('gfix', ['-sweep', 'localhost:' + db_repl]) +# runProgram('gfix', ['-sweep', 'localhost:' + db_main]) +# ####################### +# +# # cleanup: +# ########## +# cleanup( (f_sql_chk, f_sql_log, f_sql_err,f_clean_log,f_clean_err,f_main_meta_sql,f_repl_meta_sql,f_meta_diff) ) +# +# +#--- diff --git a/tests/functional/replication/test_invalid_msg_if_target_db_has_no_replica_flag.py b/tests/functional/replication/test_invalid_msg_if_target_db_has_no_replica_flag.py index 84aeae84..464d68ff 100644 --- a/tests/functional/replication/test_invalid_msg_if_target_db_has_no_replica_flag.py +++ b/tests/functional/replication/test_invalid_msg_if_target_db_has_no_replica_flag.py @@ -1,120 +1,123 @@ #coding:utf-8 -# -# id: tests.functional.replication.invalid_msg_if_target_db_has_no_replica_flag -# title: Invalid message in replication.log (and possibly crash in the case of synchronous replication) when the target DB has no its "replica" flag set -# decription: -# See: https://github.com/FirebirdSQL/firebird/issues/6989 -# -# Test changes replica DB attribute (removes 'replica' flag). Then we do some trivial DDL on master (create and drop table). -# Log of replication must soon contain *two* phrases: -# 1. VERBOSE: Added 1 segment(s) to the processing queue -# 2. ERROR: Database is not in the replica mode -# If any of these phrases absent then we have bug. -# -# Otherwise we continue and return attribute 'replica' to the target DB. After this replication log must contain phrase: -# VERBOSE: Segment ( bytes) is replicated in ms, deleting the file. -# We can assume that replication finished OK only when such line is found see ('POINT-1'). 
-# -# Further, we invoke ISQL with executing auxiliary script for drop all DB objects on master (with '-nod' command switch). -# After all objects will be dropped, we have to wait again until replica becomes actual with master (see 'POINT-2'). -# -# Finally, we extract metadata for master and replica and compare them (see 'f_meta_diff'). -# The only difference in metadata must be 'CREATE DATABASE' statement with different DB names - we suppress it, -# thus metadata difference must not be issued. -# -# #################### -# ### CRUCIAL NOTE ### -# #################### -# Currently there is bug in FB 4.x and 5.x which can be seen on SECOND run of this test: message with text -# "ERROR: Record format with length 68 is not found for table TEST" will appear in it after inserting 1st record in master. -# The reason of that is "dirty" pages that remain in RDB$RELATION_FIELDS both on master and replica after dropping table. -# Following query show different data that appear in replica DB on 1st and 2nd run (just after table was created on master): -# ======= -# set blobdisplay 6; -# select rdb$descriptor as fmt_descr -# from rdb$formats natural join rdb$relations where rdb$relation_name = 'TEST'; -# ======= -# This bug was explained by dimitr, see letters 25.06.2021 11:49 and 25.06.2021 16:56. -# It will be fixed later. -# -# The only workaround to solve this problem is to make SWEEP after all DB objects have been dropped. -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -# !NB! BOTH master and replica must be cleaned up by sweep! -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -# -# ################ -# ### N O T E ### -# ################ -# Test assumes that master and replica DB have been created beforehand. -# Also, it assumes that %FB_HOME% -# eplication.conf has been prepared with apropriate parameters for replication. -# Particularly, name of directories and databases must have info about checked FB major version and ServerMode. -# * verbose = true // in order to find out line with message that required segment was replicated -# * section for master database with specified parameters: -# journal_directory = "!fbt_repo! mp -# b-replication.!fb_major!.!server_mode!.journal" -# journal_archive_directory = "!fbt_repo! mp -# b-replication.!fb_major!.!server_mode!.archive" -# journal_archive_command = "copy $(pathname) $(archivepathname)" -# journal_archive_timeout = 10 -# * section for replica database with specified parameter: -# journal_source_directory = "!fbt_repo! mp -# b-replication.!fb_major!.!server_mode!.archive" -# -# Master and replica databases must be created in "!fbt_repo! mp" directory and have names like these: -# 'fbt-main.fb40.SS.fdb'; 'fbt-repl.fb40.SS.fdb'; - for FB 4.x ('SS' = Super; 'CS' = Classic) -# 'fbt-main.fb50.SS.fdb'; 'fbt-repl.fb50.SS.fdb'; - for FB 5.x ('SS' = Super; 'CS' = Classic) -# NB: fixed numeric value ('40' or '50') must be used for any minor FB version (4.0; 4.0.1; 4.1; 5.0; 5.1 etc) -# -# These two databases must NOT be dropped in any of tests related to replication! -# They are created and dropped in the batch scenario which prepares FB instance to be checked for each ServerMode -# and make cleanup after it, i.e. when all tests will be completed. -# -# NB. Currently this task was implemented only in Windows batch, thus test has attribute platform = 'Windows'. -# -# Temporary comment. For debug purpoces: -# 1) find out SUFFIX of the name of FB service which is to be tested (e.g. 
'DefaultInstance', '40SS' etc); -# 2) copy file %fbt-repo% ests -# unctional abloidatches\\setup-fb-for-replication.bat.txt -# to some place and rename it "*.bat"; -# 3) open this .bat in editor and asjust value of 'fbt_repo' variable; -# 4) run: setup-fb-for-replication.bat [SUFFIX_OF_FB_SERVICE] -# where SUFFIX_OF_FB_SERVICE is ending part of FB service which you want to check: -# DefaultInstance ; 40ss ; 40cs ; 50ss ; 50cs etc -# 5) batch 'setup-fb-for-replication.bat' will: -# * stop selected FB instance -# * create test databases (in !fbt_repo! mp\\); -# * prepare journal/archive sub-folders for replication (also in !fbt_repo! mp\\); -# * replace %fb_home% -# eplication.conf with apropriate -# * start selected FB instance -# 6) run this test (FB instance will be already launched by setup-fb-for-replication.bat): -# %fpt_repo% -# bt-run2.bat dblevel-triggers-must-not-fire-on-replica.fbt 50ss, etc -# -# Checked on: WI-T5.0.0.257; WI-V4.0.1.2631 (both SS/CS). -# -# -# tracker_id: -# min_versions: ['4.0.1'] -# versions: 4.0.1 -# qmid: None + +""" +ID: replication.invalid_msg_if_target_db_has_no_replica_flag +ISSUE: 6989 +TITLE: Invalid message in replication.log (and possibly crash in the case of synchronous + replication) when the target DB has no its "replica" flag set +DESCRIPTION: + Test changes replica DB attribute (removes 'replica' flag). Then we do some trivial DDL on master (create and drop table). + Log of replication must soon contain *two* phrases: + 1. VERBOSE: Added 1 segment(s) to the processing queue + 2. ERROR: Database is not in the replica mode + If any of these phrases absent then we have bug. + + Otherwise we continue and return attribute 'replica' to the target DB. After this replication log must contain phrase: + VERBOSE: Segment ( bytes) is replicated in ms, deleting the file. + We can assume that replication finished OK only when such line is found see ('POINT-1'). + + Further, we invoke ISQL with executing auxiliary script for drop all DB objects on master (with '-nod' command switch). + After all objects will be dropped, we have to wait again until replica becomes actual with master (see 'POINT-2'). + + Finally, we extract metadata for master and replica and compare them (see 'f_meta_diff'). + The only difference in metadata must be 'CREATE DATABASE' statement with different DB names - we suppress it, + thus metadata difference must not be issued. + + #################### + ### CRUCIAL NOTE ### + #################### + Currently there is bug in FB 4.x and 5.x which can be seen on SECOND run of this test: message with text + "ERROR: Record format with length 68 is not found for table TEST" will appear in it after inserting 1st record in master. + The reason of that is "dirty" pages that remain in RDB$RELATION_FIELDS both on master and replica after dropping table. + Following query show different data that appear in replica DB on 1st and 2nd run (just after table was created on master): + ======= + set blobdisplay 6; + select rdb$descriptor as fmt_descr + from rdb$formats natural join rdb$relations where rdb$relation_name = 'TEST'; + ======= + This bug was explained by dimitr, see letters 25.06.2021 11:49 and 25.06.2021 16:56. + It will be fixed later. + + The only workaround to solve this problem is to make SWEEP after all DB objects have been dropped. + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + !NB! BOTH master and replica must be cleaned up by sweep! 
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + ################ + ### N O T E ### + ################ + Test assumes that master and replica DB have been created beforehand. + Also, it assumes that %FB_HOME%/replication.conf has been prepared with apropriate parameters for replication. + Particularly, name of directories and databases must have info about checked FB major version and ServerMode. + * verbose = true // in order to find out line with message that required segment was replicated + * section for master database with specified parameters: + journal_directory = "!fbt_repo!/tmp/fb-replication.!fb_major!.!server_mode!.journal" + journal_archive_directory = "!fbt_repo!/tmp/fb-replication.!fb_major!.!server_mode!.archive" + journal_archive_command = "copy $(pathname) $(archivepathname)" + journal_archive_timeout = 10 + * section for replica database with specified parameter: + journal_source_directory = "!fbt_repo!/tmp/fb-replication.!fb_major!.!server_mode!.archive" + + Master and replica databases must be created in "!fbt_repo! mp" directory and have names like these: + 'fbt-main.fb40.SS.fdb'; 'fbt-repl.fb40.SS.fdb'; - for FB 4.x ('SS' = Super; 'CS' = Classic) + 'fbt-main.fb50.SS.fdb'; 'fbt-repl.fb50.SS.fdb'; - for FB 5.x ('SS' = Super; 'CS' = Classic) + NB: fixed numeric value ('40' or '50') must be used for any minor FB version (4.0; 4.0.1; 4.1; 5.0; 5.1 etc) + + These two databases must NOT be dropped in any of tests related to replication! + They are created and dropped in the batch scenario which prepares FB instance to be checked for each ServerMode + and make cleanup after it, i.e. when all tests will be completed. + + NB. Currently this task was implemented only in Windows batch, thus test has attribute platform = 'Windows'. + + Temporary comment. For debug purpoces: + 1) find out SUFFIX of the name of FB service which is to be tested (e.g. 'DefaultInstance', '40SS' etc); + 2) copy file %fbt-repo%/tests/functional/tabloid/batches/setup-fb-for-replication.bat.txt + to some place and rename it "*.bat"; + 3) open this .bat in editor and asjust value of 'fbt_repo' variable; + 4) run: setup-fb-for-replication.bat [SUFFIX_OF_FB_SERVICE] + where SUFFIX_OF_FB_SERVICE is ending part of FB service which you want to check: + DefaultInstance ; 40ss ; 40cs ; 50ss ; 50cs etc + 5) batch 'setup-fb-for-replication.bat' will: + * stop selected FB instance + * create test databases (in !fbt_repo!/tmp); + * prepare journal/archive sub-folders for replication (also in !fbt_repo!/tmp); + * replace %fb_home%/replication.conf with apropriate + * start selected FB instance + 6) run this test (FB instance will be already launched by setup-fb-for-replication.bat): + %fpt_repo%/fbt-run2.bat dblevel-triggers-must-not-fire-on-replica.fbt 50ss, etc + + Checked on: WI-T5.0.0.257; WI-V4.0.1.2631 (both SS/CS). +FBTEST: tests.functional.replication.invalid_msg_if_target_db_has_no_replica_flag +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 4.0.1 -# resources: None +substitutions = [('Start removing objects in:.*', 'Start removing objects'), + ('Finish. Total objects removed: [1-9]\\d*', 'Finish. Total objects removed'), + ('.* CREATE DATABASE .*', ''), ('FMT_DESCR .*', 'FMT_DESCR'), ('[ \t]+', ' ')] -substitutions_1 = [('Start removing objects in:.*', 'Start removing objects'), ('Finish. Total objects removed: [1-9]\\d*', 'Finish. 
Total objects removed'), ('.* CREATE DATABASE .*', ''), ('FMT_DESCR .*', 'FMT_DESCR'), ('[ \t]+', ' ')] +db = db_factory() -init_script_1 = """""" +act = python_act('db', substitutions=substitutions) -db_1 = db_factory(sql_dialect=3, init=init_script_1) +expected_stdout = """ + POINT-A FOUND required message after given timestamp. + POINT-1 FOUND message about replicated segment. + Start removing objects + Finish. Total objects removed: 0 + POINT-2 FOUND message about replicated segment. +""" + +@pytest.mark.skip('FIXME: Not IMPLEMENTED') +@pytest.mark.version('>=4.0.1') +@pytest.mark.platform('Windows') +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") # test_script_1 #--- -# +# # import os # import subprocess # import re @@ -123,25 +126,25 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # import time # from datetime import datetime # from datetime import timedelta -# -# +# +# # os.environ["ISC_USER"] = user_name # os.environ["ISC_PASSWORD"] = user_password -# +# # # NB: with default values of 'apply_idle_timeout' and 'apply_error_timeout' (10 and 60 s) # # total time of this test is about 130...132s. # ##################################### # MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 65 # MAX_TIME_FOR_WAIT_ERR_MSG_IN_LOG = 65 # ##################################### -# +# # svc = fdb.services.connect(host='localhost', user=user_name, password=user_password) # FB_HOME = svc.get_home_directory() # svc.close() -# +# # engine = db_conn.engine_version # 4.0; 4.1; 5.0 etc -- type float # fb_major = 'fb' + str(engine)[:1] + '0' # 'fb40'; 'fb50' -# +# # cur = db_conn.cursor() # cur.execute("select rdb$config_value from rdb$config where upper(rdb$config_name) = upper('ServerMode')") # server_mode = 'XX' @@ -153,36 +156,36 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # elif r[0] == 'Classic': # server_mode = 'CS' # cur.close() -# +# # # 'fbt-main.fb50.ss.fdb' etc: # db_main = os.path.join( context['temp_directory'], 'fbt-main.' + fb_major + '.' + server_mode + '.fdb' ) # db_repl = db_main.replace( 'fbt-main.', 'fbt-repl.') -# +# # # Folders for journalling and archieving segments. # repl_journal_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.journal' ) # repl_archive_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.archive' ) -# +# # runProgram('gfix', ['-replica', 'none', 'localhost:' + db_repl]) -# +# # db_conn.close() -# +# # #-------------------------------------------- -# +# # def flush_and_close( file_handle ): # # https://docs.python.org/2/library/os.html#os.fsync # # If you're starting with a Python file object f, # # first do f.flush(), and # # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. # global os -# +# # file_handle.flush() # if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: # # otherwise: "OSError: [Errno 9] Bad file descriptor"! 
# os.fsync(file_handle.fileno()) # file_handle.close() -# +# # #-------------------------------------------- -# +# # def cleanup( f_names_list ): # global os # for i in range(len( f_names_list )): @@ -193,17 +196,17 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # else: # print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.') # del_name = None -# +# # if del_name and os.path.isfile( del_name ): # os.remove( del_name ) -# +# # #-------------------------------------------- -# +# # def wait_for_data_in_replica( fb_home, max_allowed_time_for_wait, db_main, prefix_msg = '' ): # global re # global difflib # global time -# +# # # -:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:- # def check_pattern_in_log( log_lines, pattern, prefix_msg = '' ): # found_required_message = False @@ -214,11 +217,11 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # break # return found_required_message # # -:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:- -# +# # replold_lines = [] # with open( os.path.join(fb_home,'replication.log'), 'r') as f: # replold_lines = f.readlines() -# +# # con = fdb.connect( dsn = 'localhost:' + db_main, no_db_triggers = 1) # cur = con.cursor() # cur.execute("select rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') from rdb$database") @@ -226,47 +229,47 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # last_generated_repl_segment = r[0] # cur.close() # con.close() -# +# # # VERBOSE: Segment 1 (2582 bytes) is replicated in 1 second(s), deleting the file # segment_replicated_pattern=re.compile( 'verbose:\\s+segment\\s+%(last_generated_repl_segment)s\\s+\\(\\d+\\s+bytes\\)\\s+is\\s+replicated.*deleting' % locals(), re.IGNORECASE) -# +# # # 08.09.2021: replication content can remain unchanged if there was no user-defined object in DB that must be dropped! # # Because of this, it is crucial to check OLD content of replication log before loop. # # Also, segment_replicated_pattern must NOT start from '\\+' because it can occur only for diff_data (within loop): # # # found_required_message = check_pattern_in_log( replold_lines, segment_replicated_pattern, prefix_msg ) -# +# # if not found_required_message: -# +# # for i in range(0,max_allowed_time_for_wait): # time.sleep(1) -# +# # # Get content of fb_home replication.log _after_ isql finish: # f_repllog_new = open( os.path.join(fb_home,'replication.log'), 'r') # diff_data = difflib.unified_diff( -# replold_lines, +# replold_lines, # f_repllog_new.readlines() # ) # f_repllog_new.close() -# +# # found_required_message = check_pattern_in_log( diff_data, segment_replicated_pattern, prefix_msg ) # if found_required_message: # break -# +# # if not found_required_message: # print('UNEXPECTED RESULT: no message about replicated segment No. %d for %d seconds.' 
% (int(last_generated_repl_segment), max_allowed_time_for_wait) ) -# -# +# +# # #-------------------------------------------- -# +# # def wait_for_required_msg_in_log( fb_home, required_pattern, db_main, max_allowed_time_for_wait, prefix_msg = '' ): -# +# # global re # global difflib # global time # global datetime -# -# +# +# # # -:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:- # def check_pattern_in_log( log_lines, pattern, min_timestamp, prefix_msg = '' ): # found_required_message = False @@ -286,14 +289,14 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # found_required_message = True # break # return found_required_message -# +# # # -:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:- -# +# # replold_lines = [] # with open( os.path.join(fb_home,'replication.log'), 'r') as f: # replold_lines = f.readlines() -# -# +# +# # ################################## # ### A.C.H.T.U.N.G ### # ### do NOT use datetime.now() ### @@ -302,31 +305,31 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # ### (it is HH:MM:SS only) ### # ################################## # current_date_with_hhmmss = datetime.today().replace(microsecond=0) -# +# # runProgram('isql', ['localhost:' + db_main], 'create table test(id int primary key); drop table test;') -# +# # for i in range(0,max_allowed_time_for_wait): # time.sleep(1) -# +# # # Get content of fb_home replication.log _after_ isql finish: # f_repllog_new = open( os.path.join(fb_home,'replication.log'), 'r') # diff_data = difflib.unified_diff( -# replold_lines, +# replold_lines, # f_repllog_new.readlines() # ) # f_repllog_new.close() -# +# # found_required_message = check_pattern_in_log( list(diff_data), required_pattern, current_date_with_hhmmss, prefix_msg ) # if found_required_message: # break -# +# # if not found_required_message: # print('UNEXPECTED RESULT: required message NOT found after %s for %d seconds.' 
% (current_date_with_hhmmss, max_allowed_time_for_wait)) -# +# # return found_required_message # #-------------------------------------------- -# -# +# +# # ####################################################################### # ### Make trivial changes in the master (CREATE / DROP table) and # ### # ### check that "ERROR: Database is not in the replica mode" appears ### @@ -334,7 +337,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # ####################################################################### # not_in_replica_mode_pattern=re.compile( 'ERROR: Database is not in the replica mode', re.IGNORECASE) # found_expected_err_msg = wait_for_required_msg_in_log( FB_HOME, not_in_replica_mode_pattern, db_main, MAX_TIME_FOR_WAIT_ERR_MSG_IN_LOG, 'POINT-A' ) -# +# # ''' # # temp, 4debug only: try this if framework will not able to drop database (Classic only): # fdb_tmp=os.path.join(context['temp_directory'],'tmp_gh_6989.tmp.fdb') @@ -344,66 +347,66 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # runProgram('gfix', ['-replica', 'read_only', 'localhost:' + fdb_tmp]) # shutil.move(fdb_tmp, db_repl) # ''' -# +# # runProgram('gfix', ['-replica', 'read_only', 'localhost:' + db_repl]) -# +# # if found_expected_err_msg: # ############################################################################## # ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### # ############################################################################## # wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-1' ) -# -# +# +# # # return initial state of master DB: # # remove all DB objects (tables, views, ...): # # -------------------------------------------- # sql_clean_ddl = os.path.join(context['files_location'],'drop-all-db-objects.sql') -# +# # f_clean_log=open( os.path.join(context['temp_directory'],'drop-all-db-objects-gh_6989.log'), 'w') # f_clean_err=open( ''.join( ( os.path.splitext(f_clean_log.name)[0], '.err') ), 'w') # subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-i', sql_clean_ddl], stdout=f_clean_log, stderr=f_clean_err ) # flush_and_close(f_clean_log) # flush_and_close(f_clean_err) -# +# # with open(f_clean_err.name,'r') as f: # for line in f: # print('UNEXPECTED STDERR in cleanup SQL: ' + line) # MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0 -# +# # with open(f_clean_log.name,'r') as f: # for line in f: # # show number of dropped objects # print(line) -# +# # if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> previous SQL script finished w/o errors -# +# # ############################################################################## # ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### # ############################################################################## # wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-2' ) -# +# # f_main_meta_sql=open( os.path.join(context['temp_directory'],'db_main_meta_gh_6989.sql'), 'w') # subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_main_meta_sql, stderr=subprocess.STDOUT ) # flush_and_close( f_main_meta_sql ) -# +# # f_repl_meta_sql=open( os.path.join(context['temp_directory'],'db_repl_meta_gh_6989.sql'), 'w') # subprocess.call( [context['isql_path'], 'localhost:' + db_repl, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_repl_meta_sql, stderr=subprocess.STDOUT ) # flush_and_close( f_repl_meta_sql ) -# +# # db_main_meta=open(f_main_meta_sql.name, 'r') # 
db_repl_meta=open(f_repl_meta_sql.name, 'r') -# +# # diffmeta = ''.join(difflib.unified_diff( -# db_main_meta.readlines(), +# db_main_meta.readlines(), # db_repl_meta.readlines() # )) # db_main_meta.close() # db_repl_meta.close() -# +# # f_meta_diff=open( os.path.join(context['temp_directory'],'db_meta_diff_gh_6989.txt'), 'w', buffering = 0) # f_meta_diff.write(diffmeta) # flush_and_close( f_meta_diff ) -# +# # # Following must issue only TWO rows: # # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_main]' ... */ # # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_repl]' ... */ @@ -412,7 +415,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # for line in f: # if line[:1] in ('-', '+') and line[:3] not in ('---','+++'): # print('UNEXPECTED METADATA DIFF.: ' + line) -# +# # ###################### # ### A C H T U N G ### # ###################### @@ -421,26 +424,9 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # runProgram('gfix', ['-sweep', 'localhost:' + db_repl]) # runProgram('gfix', ['-sweep', 'localhost:' + db_main]) # ####################### -# +# # # cleanup: # ########## # cleanup( (f_clean_log,f_clean_err,f_main_meta_sql,f_repl_meta_sql,f_meta_diff) ) -# +# #--- -act_1 = python_act('db_1', substitutions=substitutions_1) - -expected_stdout_1 = """ - POINT-A FOUND required message after given timestamp. - POINT-1 FOUND message about replicated segment. - Start removing objects - Finish. Total objects removed: 0 - POINT-2 FOUND message about replicated segment. -""" - -@pytest.mark.version('>=4.0.1') -@pytest.mark.platform('Windows') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - - diff --git a/tests/functional/replication/test_oltp_emul_ddl.py b/tests/functional/replication/test_oltp_emul_ddl.py index 6f427361..e395a043 100644 --- a/tests/functional/replication/test_oltp_emul_ddl.py +++ b/tests/functional/replication/test_oltp_emul_ddl.py @@ -1,80 +1,82 @@ #coding:utf-8 -# -# id: tests.functional.replication.oltp_emul_ddl -# title: Applying full DDL from OLTP-EMUL test on master with further check replica -# decription: -# -# ################ -# ### N O T E ### -# ################ -# Test assumes that master and replica DB have been created beforehand. -# Also, it assumes that %FB_HOME% -# eplication.conf has been prepared with apropriate parameters for replication. -# Particularly, name of directories and databases must have info about checked FB major version and ServerMode. -# * verbose = true // in order to find out line with message that required segment was replicated -# * section for master database with specified parameters: -# journal_directory = "!fbt_repo! mp -# b-replication.!fb_major!.!server_mode!.journal" -# journal_archive_directory = "!fbt_repo! mp -# b-replication.!fb_major!.!server_mode!.archive" -# journal_archive_command = "copy $(pathname) $(archivepathname)" -# journal_archive_timeout = 10 -# * section for replica database with specified parameter: -# journal_source_directory = "!fbt_repo! mp -# b-replication.!fb_major!.!server_mode!.archive" -# -# Master and replica databases must be created in "!fbt_repo! 
mp" directory and have names like these: -# 'fbt-main.fb40.SS.fdb'; 'fbt-repl.fb40.SS.fdb'; - for FB 4.x ('SS' = Super; 'CS' = Classic) -# 'fbt-main.fb50.SS.fdb'; 'fbt-repl.fb50.SS.fdb'; - for FB 5.x ('SS' = Super; 'CS' = Classic) -# NB: fixed numeric value ('40' or '50') must be used for any minor FB version (4.0; 4.0.1; 4.1; 5.0; 5.1 etc) -# -# These two databases must NOT be dropped in any of tests related to replication! -# They are created and dropped in the batch scenario which prepares FB instance to be checked for each ServerMode -# and make cleanup after it, i.e. when all tests will be completed. -# -# NB. Currently this task was implemented only in Windows batch, thus test has attribute platform = 'Windows'. -# -# Temporary comment. For debug purpoces: -# 1) find out SUFFIX of the name of FB service which is to be tested (e.g. 'DefaultInstance', '40SS' etc); -# 2) copy file %fbt-repo% ests -# unctional abloidatches\\setup-fb-for-replication.bat.txt -# to some place and rename it "*.bat"; -# 3) open this .bat in editor and asjust value of 'fbt_repo' variable; -# 4) run: setup-fb-for-replication.bat [SUFFIX_OF_FB_SERVICE] -# where SUFFIX_OF_FB_SERVICE is ending part of FB service which you want to check: -# DefaultInstance ; 40ss ; 40cs ; 50ss ; 50cs etc -# 5) batch 'setup-fb-for-replication.bat' will: -# * stop selected FB instance -# * create test databases (in !fbt_repo! mp\\); -# * prepare journal/archive sub-folders for replication (also in !fbt_repo! mp\\); -# * replace %fb_home% -# eplication.conf with apropriate -# * start selected FB instance -# 6) run this test (FB instance will be already launched by setup-fb-for-replication.bat): -# %fpt_repo% -# bt-run2.bat dblevel-triggers-must-not-fire-on-replica.fbt 50ss, etc -# -# -# tracker_id: -# min_versions: ['4.0'] -# versions: 4.0 -# qmid: None + +""" +ID: replication.oltp_emul_ddl +TITLE: Applying full DDL from OLTP-EMUL test on master with further check replica +DESCRIPTION: + ################ + ### N O T E ### + ################ + Test assumes that master and replica DB have been created beforehand. + Also, it assumes that %FB_HOME%/replication.conf has been prepared with apropriate parameters for replication. + Particularly, name of directories and databases must have info about checked FB major version and ServerMode. + * verbose = true // in order to find out line with message that required segment was replicated + * section for master database with specified parameters: + journal_directory = "!fbt_repo!/tmp/fb-replication.!fb_major!.!server_mode!.journal" + journal_archive_directory = "!fbt_repo!/tmp/fb-replication.!fb_major!.!server_mode!.archive" + journal_archive_command = "copy $(pathname) $(archivepathname)" + journal_archive_timeout = 10 + * section for replica database with specified parameter: + journal_source_directory = "!fbt_repo!/tmp/fb-replication.!fb_major!.!server_mode!.archive" + + Master and replica databases must be created in "!fbt_repo! mp" directory and have names like these: + 'fbt-main.fb40.SS.fdb'; 'fbt-repl.fb40.SS.fdb'; - for FB 4.x ('SS' = Super; 'CS' = Classic) + 'fbt-main.fb50.SS.fdb'; 'fbt-repl.fb50.SS.fdb'; - for FB 5.x ('SS' = Super; 'CS' = Classic) + NB: fixed numeric value ('40' or '50') must be used for any minor FB version (4.0; 4.0.1; 4.1; 5.0; 5.1 etc) + + These two databases must NOT be dropped in any of tests related to replication! + They are created and dropped in the batch scenario which prepares FB instance to be checked for each ServerMode + and make cleanup after it, i.e. 
when all tests will be completed. + + NB. Currently this task was implemented only in Windows batch, thus test has attribute platform = 'Windows'. + + Temporary comment. For debug purpoces: + 1) find out SUFFIX of the name of FB service which is to be tested (e.g. 'DefaultInstance', '40SS' etc); + 2) copy file %fbt-repo%/tests/functional/tabloid/batches/setup-fb-for-replication.bat.txt + to some place and rename it "*.bat"; + 3) open this .bat in editor and asjust value of 'fbt_repo' variable; + 4) run: setup-fb-for-replication.bat [SUFFIX_OF_FB_SERVICE] + where SUFFIX_OF_FB_SERVICE is ending part of FB service which you want to check: + DefaultInstance ; 40ss ; 40cs ; 50ss ; 50cs etc + 5) batch 'setup-fb-for-replication.bat' will: + * stop selected FB instance + * create test databases (in !fbt_repo!/tmp); + * prepare journal/archive sub-folders for replication (also in !fbt_repo!/tmp); + * replace %fb_home%/replication.conf with apropriate + * start selected FB instance + 6) run this test (FB instance will be already launched by setup-fb-for-replication.bat): + %fpt_repo%/fbt-run2.bat dblevel-triggers-must-not-fire-on-replica.fbt 50ss, etc +FBTEST: tests.functional.replication.oltp_emul_ddl +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +substitutions = [('Start removing objects in:.*', 'Start removing objects'), + ('Finish. Total objects removed: [1-9]\\d*', 'Finish. Total objects removed'), + ('.* CREATE DATABASE .*', '')] -substitutions_1 = [('Start removing objects in:.*', 'Start removing objects'), ('Finish. Total objects removed: [1-9]\\d*', 'Finish. Total objects removed'), ('.* CREATE DATABASE .*', '')] +db = db_factory() -init_script_1 = """""" +act = python_act('db', substitutions=substitutions) -db_1 = db_factory(sql_dialect=3, init=init_script_1) +expected_stdout = """ + POINT-1 FOUND message about replicated segment. + Master and replica data: THE SAME. + Start removing objects + Finish. Total objects removed + POINT-2 FOUND message about replicated segment. +""" + +@pytest.mark.skip('FIXME: Not IMPLEMENTED') +@pytest.mark.version('>=4.0') +@pytest.mark.platform('Windows') +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") # test_script_1 #--- -# +# # import os # import subprocess # import re @@ -82,21 +84,21 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # import difflib # import shutil # import time -# +# # os.environ["ISC_USER"] = user_name # os.environ["ISC_PASSWORD"] = user_password -# +# # ##################################### # MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 65 # ##################################### -# +# # svc = fdb.services.connect(host='localhost', user=user_name, password=user_password) # FB_HOME = svc.get_home_directory() # svc.close() -# +# # engine = db_conn.engine_version # 4.0; 4.1; 5.0 etc -- type float # fb_major = 'fb' + str(engine)[:1] + '0' # 'fb40'; 'fb50' -# +# # cur = db_conn.cursor() # cur.execute("select rdb$config_value from rdb$config where upper(rdb$config_name) = upper('ServerMode')") # server_mode = 'XX' @@ -108,43 +110,43 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # elif r[0] == 'Classic': # server_mode = 'CS' # cur.close() -# +# # # 'fbt-main.fb50.ss.fdb' etc: # db_main = os.path.join( context['temp_directory'], 'fbt-main.' + fb_major + '.' + server_mode + '.fdb' ) # db_repl = db_main.replace( 'fbt-main.', 'fbt-repl.') -# +# # # Folders for journalling and archieving segments. 
# repl_journal_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.journal' ) # repl_archive_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.archive' ) -# +# # fb_port = 0 # cur = db_conn.cursor() # cur.execute("select rdb$config_value from rdb$config where rdb$config_name = 'RemoteServicePort'") # for r in cur: # fb_port = int(r[0]) # cur.close() -# +# # db_conn.close() -# +# # runProgram('gfix', ['-w', 'async', 'localhost:' + db_main]) -# +# # #-------------------------------------------- -# +# # def flush_and_close( file_handle ): # # https://docs.python.org/2/library/os.html#os.fsync # # If you're starting with a Python file object f, # # first do f.flush(), and # # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. # global os -# +# # file_handle.flush() # if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: # # otherwise: "OSError: [Errno 9] Bad file descriptor"! # os.fsync(file_handle.fileno()) # file_handle.close() -# +# # #-------------------------------------------- -# +# # def cleanup( f_names_list ): # global os # for i in range(len( f_names_list )): @@ -155,22 +157,22 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # else: # print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.') # del_name = None -# +# # if del_name and os.path.isfile( del_name ): # os.remove( del_name ) -# +# # #-------------------------------------------- -# +# # def wait_for_data_in_replica( fb_home, max_allowed_time_for_wait, db_main, prefix_msg = '' ): -# +# # global re # global difflib # global time -# +# # replold_lines = [] # with open( os.path.join(fb_home,'replication.log'), 'r') as f: # replold_lines = f.readlines() -# +# # con = fdb.connect( dsn = 'localhost:' + db_main, no_db_triggers = 1) # cur = con.cursor() # cur.execute("select rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') from rdb$database") @@ -178,67 +180,67 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # last_generated_repl_segment = r[0] # cur.close() # con.close() -# +# # #print('last_generated_repl_segment:', last_generated_repl_segment) -# +# # # VERBOSE: Segment 1 (2582 bytes) is replicated in 1 second(s), deleting the file # p_successfully_replicated = re.compile( '\\+\\s+verbose:\\s+segment\\s+%(last_generated_repl_segment)s\\s+\\(\\d+\\s+bytes\\)\\s+is\\s+replicated.*deleting' % locals(), re.IGNORECASE) -# +# # # VERBOSE: Segment 16 replication failure at offset 33628 # p_replication_failure = re.compile('segment\\s+\\d+\\s+replication\\s+failure', re.IGNORECASE) -# +# # # ERROR: Record format with length 56 is not found for table PERF_ESTIMATED # p_rec_format_not_found = re.compile('record\\s+format\\s+with\\s+length\\s+\\d+\\s+is\\s+not\\s+found', re.IGNORECASE) -# +# # found_required_message = False -# +# # found_replfail_message = False # found_recformat_message = False # found_common_error_msg = False -# +# # for i in range(0,max_allowed_time_for_wait): # time.sleep(1) -# +# # # Get content of fb_home replication.log _after_ isql finish: # f_repllog_new = open( os.path.join(fb_home,'replication.log'), 'r') # diff_data = difflib.unified_diff( -# replold_lines, +# replold_lines, # f_repllog_new.readlines() # ) # f_repllog_new.close() -# +# # for k,d in enumerate(diff_data): -# +# # if p_replication_failure.search(d): # print( (prefix_msg + ' ' if prefix_msg else '') + '@@@ SEGMENT REPLICATION FAILURE 
@@@ ' + d ) # found_replfail_message = True # break -# +# # if p_rec_format_not_found.search(d): # print( (prefix_msg + ' ' if prefix_msg else '') + '@@@ RECORD FORMAT NOT FOUND @@@ ' + d ) # found_recformat_message = True # break -# +# # if 'ERROR:' in d: # print( (prefix_msg + ' ' if prefix_msg else '') + '@@@ REPLICATION ERROR @@@ ' + d ) # found_common_error_msg = True # break -# +# # if p_successfully_replicated.search(d): # print( (prefix_msg + ' ' if prefix_msg else '') + 'FOUND message about replicated segment.' ) # found_required_message = True # break -# +# # if found_required_message or found_replfail_message or found_recformat_message or found_common_error_msg: # break -# +# # if not found_required_message: # print('UNEXPECTED RESULT: no message about replicated segment for %d seconds.' % max_allowed_time_for_wait) -# +# # #-------------------------------------------- -# +# # def generate_sync_settings_sql(db_main, fb_port): -# +# # def generate_inject_setting_sql(working_mode, mcode, new_value, allow_insert_if_eof = 0): # sql_inject_setting = '' # if allow_insert_if_eof == 0: @@ -254,10 +256,10 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # values( upper('%(working_mode)s'), upper('%(mcode)s'), %(new_value)s ) # matching (working_mode, mcode); # ''' % locals() -# +# # return sql_inject_setting -# -# +# +# # sql_adjust_settings_table = ''' # set list on; # select 'Adjust settings: start at ' || cast('now' as timestamp) as msg from rdb$database; @@ -265,42 +267,42 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # execute block as # begin # ''' -# +# # sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'init', 'working_mode', "upper('small_03')" ) ) ) # sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'common', 'enable_mon_query', "'0'" ) ) ) # sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'common', 'unit_selection_method', "'random'" ) ) ) # sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'common', 'build_with_split_heavy_tabs', "'1'" ) ) ) # sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'common', 'build_with_qd_compound_ordr', "lower('most_selective_first')" ) ) ) -# +# # sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'common', 'build_with_separ_qdistr_idx', "'0'" ) ) ) # sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'common', 'used_in_replication', "'1'" ) ) ) # sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'common', 'separate_workers', "'1'" ) ) ) # sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'common', 'workers_count', "'100'" ) ) ) # sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'common', 'update_conflict_percent', "'0'" ) ) ) # sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'init', 'connect_str', "'connect ''localhost:%(db_main)s'' user ''SYSDBA'' password ''masterkey'';'" % locals(), 1 ) ) ) -# +# # sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'common', 'mon_unit_list', "'//'" ) ) ) # sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'common', 
'halt_test_on_errors', "'/CK/'" ) ) ) # sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'common', 'qmism_verify_bitset', "'1'" ) ) ) # sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'common', 'recalc_idx_min_interval', "'9999999'" ) ) ) -# +# # sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'common', 'warm_time', "'0'", 1 ) ) ) # sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'common', 'test_intervals', "'10'", 1 ) ) ) -# +# # sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'init', 'tmp_worker_role_name', "upper('tmp$oemul$worker')", 1 ) ) ) # sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'init', 'tmp_worker_user_prefix', "upper('tmp$oemul$user_')", 1 ) ) ) -# -# +# +# # sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'common', 'use_es', "'2'", 1 ) ) ) # sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'init', 'host', "'localhost'", 1 ) ) ) # sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'init', 'port', "'%(fb_port)s'" % locals(), 1 ) ) ) # sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'init', 'usr', "'SYSDBA'", 1 ) ) ) # sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'init', 'pwd', "'masterkey'", 1 ) ) ) # sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'init', 'tmp_worker_user_pswd', "'0Ltp-Emu1'", 1 ) ) ) -# +# # sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'init', 'conn_pool_support', "'1'", 1 ) ) ) # sql_adjust_settings_table = ''.join( (sql_adjust_settings_table, generate_inject_setting_sql( 'init', 'resetting_support', "'1'", 1 ) ) ) -# +# # sql_adjust_settings_table += ''' # end ^ # set term ;^ @@ -308,20 +310,20 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # select 'Adjust settings: finish at ' || cast('now' as timestamp) as msg from rdb$database; # set list off; # ''' -# +# # return sql_adjust_settings_table -# +# # #-------------------------------------------- -# +# # # Extract .sql files with OLTP-EMUL DDL for applying # # ZipFile.extractall(path=None, members=None, pwd=None) # zf = zipfile.ZipFile( os.path.join(context['files_location'],'oltp-emul-ddl.zip') ) # src_files = zf.namelist() # zf.extractall(path = context['temp_directory']) # zf.close() -# +# # #-------------------------------------------- -# +# # oltp_emul_initial_scripts = [ # 'oltp-emul-01_initial_DDL' # ,'oltp-emul-02_business_units' @@ -329,47 +331,47 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # ,'oltp-emul-04_misc_debug_code' # ,'oltp-emul-05_main_tabs_filling' # ] -# -# +# +# # #src_dir = context['files_location'] # src_dir = context['temp_directory'] -# +# # sql_apply = '\\n' # for f in oltp_emul_initial_scripts: # sql_apply += ' in ' + os.path.join(src_dir, f+'.sql;\\n') -# +# # sql_ddl = ''' %(sql_apply)s # ''' % locals() -# +# # #-------------------------------------------- -# +# # f_run_initial_ddl = open( os.path.join(context['temp_directory'],'tmp-oltp-emul-ddl.sql'), 'w') # f_run_initial_ddl.write(sql_ddl) -# +# # # Add SQL code for adjust SETTINGS table with values which are 
commonly used in OLTP-EMUL config: # f_run_initial_ddl.write( generate_sync_settings_sql(db_main, fb_port) ) # flush_and_close( f_run_initial_ddl ) -# +# # f_run_initial_log = open( ''.join( (os.path.splitext(f_run_initial_ddl.name)[0], '.log' ) ), 'w') # f_run_initial_err = open( ''.join( (os.path.splitext(f_run_initial_ddl.name)[0], '.err' ) ), 'w') # subprocess.call( [ context['isql_path'], 'localhost:' + db_main, '-i', f_run_initial_ddl.name ], stdout = f_run_initial_log, stderr = f_run_initial_err) -# +# # flush_and_close( f_run_initial_log ) # flush_and_close( f_run_initial_err ) -# +# # with open(f_run_initial_err.name,'r') as f: # for line in f: # print('UNEXPECTED STDERR in initial SQL: ' + line) # MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0 -# +# # if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> initial SQL script finished w/o errors -# +# # post_handling_out = os.path.join( context['temp_directory'],'oltp_split_heavy_tabs.tmp' ) # post_adjust_sep_wrk_out = os.path.join( context['temp_directory'], 'oltp_adjust_sep_wrk.tmp' ) # post_adjust_replication = os.path.join( context['temp_directory'], 'post_adjust_repl_pk.tmp' ) # post_adjust_ext_pool = os.path.join( context['temp_directory'], 'post_adjust_ext_pool.tmp' ) # post_adjust_eds_perf = os.path.join( context['temp_directory'], 'post_adjust_eds_perf.tmp' ) -# +# # oltp_emul_post_handling_scripts = [ # 'oltp-emul-06_split_heavy_tabs' # ,'oltp-emul-07_adjust_perf_tabs' @@ -377,8 +379,8 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # ,'oltp-emul-09_adjust_eds_calls' # ,'oltp-emul-10_adjust_eds_perf' # ] -# -# +# +# # sql_post_handling = '\\n' # for f in oltp_emul_post_handling_scripts: # run_post_handling_sql = os.path.join( src_dir, f+'.sql' ) @@ -391,26 +393,26 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # in %(tmp_post_handling_sql)s; # ----------------------------- # ''' % locals() -# +# # if 'adjust_eds_calls' in f: # # We have to make RECONNECT here, otherwise get: # # Statement failed, SQLSTATE = 2F000 # # Error while parsing procedure SP_PERF_EDS_LOGGING's BLR # # -attempted update of read-only column # # After line 49 in file ... 
oltp-emul-10_adjust_eds_perf.sql -# +# # sql_post_handling += " commit; connect 'localhost:%(db_main)s' user 'SYSDBA' password 'masterkey';" % locals() -# +# # oltp_emul_final_actions_scripts = [ # 'oltp-emul-11_ref_data_filling' # ,'oltp-emul-12_activate_db_triggers' # ] # for f in oltp_emul_final_actions_scripts: # sql_post_handling += ' in ' + os.path.join(src_dir, f+'.sql;\\n') -# +# # f_post_handling_sql = open( os.path.join(context['temp_directory'],'tmp-oltp-emul-post.sql'), 'w') # f_post_handling_sql.write(sql_post_handling) -# +# # sql4debug_only = ''' # set echo off; # set list on; @@ -423,49 +425,49 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # quit; # ''' # f_post_handling_sql.write( sql4debug_only ) -# +# # flush_and_close( f_post_handling_sql ) -# +# # f_post_handling_log = open( ''.join( (os.path.splitext(f_post_handling_sql.name)[0], '.log' ) ), 'w') # f_post_handling_err = open( ''.join( (os.path.splitext(f_post_handling_sql.name)[0], '.err' ) ), 'w') # subprocess.call( [ context['isql_path'], 'localhost:' + db_main, '-i', f_post_handling_sql.name ], stdout = f_post_handling_log, stderr = f_post_handling_err) # flush_and_close( f_post_handling_log ) # flush_and_close( f_post_handling_err ) -# +# # with open(f_post_handling_err.name,'r') as f: # for line in f: # print('UNEXPECTED STDERR in post-handling SQL: ' + line) # MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0 -# +# # cleanup( (f_post_handling_sql, f_post_handling_log, f_post_handling_err ) ) # for f in oltp_emul_post_handling_scripts: # tmp_post_handling_sql = os.path.join( context['temp_directory'], f + '.tmp' ) # cleanup( (tmp_post_handling_sql,) ) -# +# # if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> initial SQL script finished w/o errors -# +# # # Test connect to master DB, call initial business operation: create client order # ########################### # custom_tpb = fdb.TPB() # custom_tpb.lock_resolution = fdb.isc_tpb_nowait # custom_tpb.isolation_level = fdb.isc_tpb_concurrency -# +# # con1 = fdb.connect( dsn = 'localhost:' + db_main, isolation_level = custom_tpb) # cur1 = con1.cursor() # cur1.execute( 'select ware_id from sp_client_order order by ware_id' ) -# +# # client_order_wares_main = [] # for r in cur1: # client_order_wares_main.append(r[0]) # cur1.close() # con1.commit() # con1.close() -# +# # ############################################################################## # ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### # ############################################################################## # wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-1' ) -# +# # con2 = fdb.connect( dsn = 'localhost:' + db_repl, no_db_triggers = 1) # cur2 = con2.cursor() # cur2.execute( 'select d.ware_id from doc_data d order by d.ware_id' ) @@ -475,62 +477,62 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # cur2.close() # con2.commit() # con2.close() -# +# # print('Master and replica data: %s ' % ( 'THE SAME.' 
if client_order_wares_main and sorted(client_order_wares_main) == sorted(client_order_wares_repl) else '### FAIL: DIFFERS ###' ) ) -# +# # #print('client_order_wares_main=',client_order_wares_main) # #print('client_order_wares_repl=',client_order_wares_repl) -# +# # # return initial state of master DB: # # remove all DB objects (tables, views, ...): # # -------------------------------------------- # sql_clean_ddl = os.path.join(context['files_location'],'drop-all-db-objects.sql') -# +# # f_clean_log=open( os.path.join(context['temp_directory'],'drop-all-db-objects.log'), 'w') # f_clean_err=open( ''.join( ( os.path.splitext(f_clean_log.name)[0], '.err') ), 'w') # subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-i', sql_clean_ddl], stdout=f_clean_log, stderr=f_clean_err ) # flush_and_close(f_clean_log) # flush_and_close(f_clean_err) -# +# # with open(f_clean_err.name,'r') as f: # for line in f: # print('UNEXPECTED STDERR in cleanup SQL: ' + line) # MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0 -# +# # with open(f_clean_log.name,'r') as f: # for line in f: # # show number of dropped objects # print(line) -# +# # if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> initial SQL script finished w/o errors -# +# # ############################################################################## # ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### # ############################################################################## # wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-2' ) -# +# # f_main_meta_sql=open( os.path.join(context['temp_directory'],'db_main_meta_skip_gen_repl.sql'), 'w') # subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_main_meta_sql, stderr=subprocess.STDOUT ) # flush_and_close( f_main_meta_sql ) -# +# # f_repl_meta_sql=open( os.path.join(context['temp_directory'],'db_repl_meta_skip_gen_repl.sql'), 'w') # subprocess.call( [context['isql_path'], 'localhost:' + db_repl, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_repl_meta_sql, stderr=subprocess.STDOUT ) # flush_and_close( f_repl_meta_sql ) -# +# # db_main_meta=open(f_main_meta_sql.name, 'r') # db_repl_meta=open(f_repl_meta_sql.name, 'r') -# +# # diffmeta = ''.join(difflib.unified_diff( -# db_main_meta.readlines(), +# db_main_meta.readlines(), # db_repl_meta.readlines() # )) # db_main_meta.close() # db_repl_meta.close() -# +# # f_meta_diff=open( os.path.join(context['temp_directory'],'db_meta_diff_skip_gen_repl.txt'), 'w', buffering = 0) # f_meta_diff.write(diffmeta) # flush_and_close( f_meta_diff ) -# +# # # Following must issue only TWO rows: # # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_main]' ... */ # # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_repl]' ... 
*/ @@ -539,10 +541,10 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # for line in f: # if line[:1] in ('-', '+') and line[:3] not in ('---','+++'): # print('UNEXPECTED METADATA DIFF.: ' + line) -# -# +# +# # cleanup( (f_main_meta_sql, f_repl_meta_sql, f_meta_diff) ) -# +# # ###################### # ### A C H T U N G ### # ###################### @@ -551,31 +553,14 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # runProgram('gfix', ['-sweep', 'localhost:' + db_repl]) # runProgram('gfix', ['-sweep', 'localhost:' + db_main]) # ####################### -# +# # # cleanup: # ########## # cleanup( (f_run_initial_ddl, f_run_initial_log, f_run_initial_err, f_clean_log, f_clean_err) ) -# +# # # src_files - list of .sql files which were applied; got from zf.namelist(). # # We have to remove all of them: # cleanup( [ os.path.join(context['temp_directory'],f) for f in src_files ] ) -# -# +# +# #--- -act_1 = python_act('db_1', substitutions=substitutions_1) - -expected_stdout_1 = """ - POINT-1 FOUND message about replicated segment. - Master and replica data: THE SAME. - Start removing objects - Finish. Total objects removed - POINT-2 FOUND message about replicated segment. -""" - -@pytest.mark.version('>=4.0') -@pytest.mark.platform('Windows') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - - diff --git a/tests/functional/replication/test_permission_error_on_ddl_issued_by_non_sysdba.py b/tests/functional/replication/test_permission_error_on_ddl_issued_by_non_sysdba.py index 769d1f1a..14a1ac51 100644 --- a/tests/functional/replication/test_permission_error_on_ddl_issued_by_non_sysdba.py +++ b/tests/functional/replication/test_permission_error_on_ddl_issued_by_non_sysdba.py @@ -1,122 +1,122 @@ #coding:utf-8 -# -# id: tests.functional.replication.permission_error_on_ddl_issued_by_non_sysdba -# title: Permission error with replication -# decription: -# See: https://github.com/FirebirdSQL/firebird/issues/6856 -# -# Test run actions which are specified in the ticket (create user with granting admin access to him, etc). -# After this we wait until replica becomes actual to master, and this delay will last no more then threshold -# that is defined by MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG variable (measured in seconds). -# During this delay, we check every second for replication log and search there line with number of last generated -# segment (which was replicated and deleting finally). -# We can assume that replication finished OK only when such line is found see ('POINT-1'). -# -# Further, we invoke ISQL with executing auxiliary script for drop all DB objects on master (with '-nod' command switch). -# After all objects will be dropped, we have to wait again until replica becomes actual with master (see 'POINT-2'). -# -# Finally, we extract metadata for master and replica and compare them (see 'f_meta_diff'). -# The only difference in metadata must be 'CREATE DATABASE' statement with different DB names - we suppress it, -# thus metadata difference must not be issued. -# -# ################ -# ### N O T E ### -# ################ -# Test assumes that master and replica DB have been created beforehand. -# Also, it assumes that %FB_HOME% -# eplication.conf has been prepared with apropriate parameters for replication. -# Particularly, name of directories and databases must have info about checked FB major version and ServerMode. 
-# * verbose = true // in order to find out line with message that required segment was replicated -# * section for master database with specified parameters: -# journal_directory = "!fbt_repo! mp -# b-replication.!fb_major!.!server_mode!.journal" -# journal_archive_directory = "!fbt_repo! mp -# b-replication.!fb_major!.!server_mode!.archive" -# journal_archive_command = "copy $(pathname) $(archivepathname)" -# journal_archive_timeout = 10 -# * section for replica database with specified parameter: -# journal_source_directory = "!fbt_repo! mp -# b-replication.!fb_major!.!server_mode!.archive" -# -# Master and replica databases must be created in "!fbt_repo! mp" directory and have names like these: -# 'fbt-main.fb40.SS.fdb'; 'fbt-repl.fb40.SS.fdb'; - for FB 4.x ('SS' = Super; 'CS' = Classic) -# 'fbt-main.fb50.SS.fdb'; 'fbt-repl.fb50.SS.fdb'; - for FB 5.x ('SS' = Super; 'CS' = Classic) -# NB: fixed numeric value ('40' or '50') must be used for any minor FB version (4.0; 4.0.1; 4.1; 5.0; 5.1 etc) -# -# These two databases must NOT be dropped in any of tests related to replication! -# They are created and dropped in the batch scenario which prepares FB instance to be checked for each ServerMode -# and make cleanup after it, i.e. when all tests will be completed. -# -# NB. Currently this task was implemented only in Windows batch, thus test has attribute platform = 'Windows'. -# -# Temporary comment. For debug purpoces: -# 1) find out SUFFIX of the name of FB service which is to be tested (e.g. 'DefaultInstance', '40SS' etc); -# 2) copy file %fbt-repo% ests -# unctional abloidatches\\setup-fb-for-replication.bat.txt -# to some place and rename it "*.bat"; -# 3) open this .bat in editor and asjust value of 'fbt_repo' variable; -# 4) run: setup-fb-for-replication.bat [SUFFIX_OF_FB_SERVICE] -# where SUFFIX_OF_FB_SERVICE is ending part of FB service which you want to check: -# DefaultInstance ; 40ss ; 40cs ; 50ss ; 50cs etc -# 5) batch 'setup-fb-for-replication.bat' will: -# * stop selected FB instance -# * create test databases (in !fbt_repo! mp\\); -# * prepare journal/archive sub-folders for replication (also in !fbt_repo! mp\\); -# * replace %fb_home% -# eplication.conf with apropriate -# * start selected FB instance -# 6) run this test (FB instance will be already launched by setup-fb-for-replication.bat): -# %fpt_repo% -# bt-run2.bat dblevel-triggers-must-not-fire-on-replica.fbt 50ss, etc -# -# Confirmed bug on 4.0.1.2578 and 5.0.0.169: messages "ERROR: unsuccessful metadata update / CREATE OR ALTER VIEW V1 failed" -# will be added into replication log and after this replication gets stuck. -# -# Checked on: 4.0.1.2585 (SS/CS); 5.0.0.192 (SS/CS) -# -# -# -# tracker_id: -# min_versions: ['4.0.1'] -# versions: 4.0.1 -# qmid: None + +""" +ID: replication.permission_error_on_ddl_issued_by_non_sysdba +ISSUE: 6856 +TITLE: Permission error with replication +DESCRIPTION: + Test run actions which are specified in the ticket (create user with granting admin access to him, etc). + After this we wait until replica becomes actual to master, and this delay will last no more then threshold + that is defined by MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG variable (measured in seconds). + During this delay, we check every second for replication log and search there line with number of last generated + segment (which was replicated and deleting finally). + We can assume that replication finished OK only when such line is found see ('POINT-1'). 
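(Editor's sketch, not part of the patch: the wait scheme described above - poll replication.log once per second until a line about the replicated segment appears or a timeout expires - can be outlined roughly as follows. The message pattern is taken from the commented-out legacy code kept below; the file path, the function name and the append-only-log simplification are assumptions.)

import re
import time
from pathlib import Path

def wait_for_replicated_segment(repl_log: Path, segment_no: int, max_wait_sec: int = 65) -> bool:
    # Line expected in replication.log (quoted in the legacy code below):
    # "VERBOSE: Segment 1 (2582 bytes) is replicated in 1 second(s), deleting the file"
    pattern = re.compile(
        rf'verbose:\s+segment\s+{segment_no}\s+\(\d+\s+bytes\)\s+is\s+replicated.*deleting',
        re.IGNORECASE)
    old_lines = repl_log.read_text().splitlines()
    # The required line may already be present before we start waiting:
    if any(pattern.search(line) for line in old_lines):
        return True
    for _ in range(max_wait_sec):
        time.sleep(1)
        # Simplification: assume the log only grows, so only newly appended lines are checked
        # (the legacy code achieves the same with difflib.unified_diff of old vs. new content).
        new_lines = repl_log.read_text().splitlines()[len(old_lines):]
        if any(pattern.search(line) for line in new_lines):
            return True
    return False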
+
+    Further, we invoke ISQL, executing an auxiliary script that drops all DB objects on master (with the '-nod' command switch).
+    After all objects have been dropped, we have to wait again until the replica becomes actual with the master (see 'POINT-2').
+
+    Finally, we extract metadata for master and replica and compare them (see 'f_meta_diff').
+    The only difference in metadata must be the 'CREATE DATABASE' statement with different DB names - we suppress it,
+    thus no metadata difference must be issued (see the sketch after this note).
+
+    ################
+    ### N O T E ###
+    ################
+    Test assumes that master and replica DB have been created beforehand.
+    Also, it assumes that %FB_HOME%/replication.conf has been prepared with appropriate parameters for replication.
+    Particularly, the names of directories and databases must carry info about the checked FB major version and ServerMode.
+      * verbose = true // in order to find out the line with the message that the required segment was replicated
+      * section for master database with specified parameters:
+          journal_directory = "!fbt_repo!/tmp/fb-replication.!fb_major!.!server_mode!.journal"
+          journal_archive_directory = "!fbt_repo!/tmp/fb-replication.!fb_major!.!server_mode!.archive"
+          journal_archive_command = "copy $(pathname) $(archivepathname)"
+          journal_archive_timeout = 10
+      * section for replica database with specified parameter:
+          journal_source_directory = "!fbt_repo!/tmp/fb-replication.!fb_major!.!server_mode!.archive"
+
+    Master and replica databases must be created in the "!fbt_repo!/tmp" directory and have names like these:
+        'fbt-main.fb40.SS.fdb'; 'fbt-repl.fb40.SS.fdb'; - for FB 4.x ('SS' = Super; 'CS' = Classic)
+        'fbt-main.fb50.SS.fdb'; 'fbt-repl.fb50.SS.fdb'; - for FB 5.x ('SS' = Super; 'CS' = Classic)
+    NB: the fixed numeric value ('40' or '50') must be used for any minor FB version (4.0; 4.0.1; 4.1; 5.0; 5.1 etc)
+
+    These two databases must NOT be dropped in any of the tests related to replication!
+    They are created and dropped in the batch scenario which prepares the FB instance to be checked for each ServerMode
+    and makes cleanup after it, i.e. when all tests have been completed.
+
+    NB. Currently this task is implemented only as a Windows batch, thus the test has the attribute platform = 'Windows'.
+
+    Temporary comment. For debug purposes:
+    1) find out the SUFFIX of the name of the FB service which is to be tested (e.g. 'DefaultInstance', '40SS' etc);
+    2) copy the file %fbt-repo%/tests/functional/tabloid/batches/setup-fb-for-replication.bat.txt
+       to some place and rename it "*.bat";
+    3) open this .bat in an editor and adjust the value of the 'fbt_repo' variable;
+    4) run: setup-fb-for-replication.bat [SUFFIX_OF_FB_SERVICE]
+       where SUFFIX_OF_FB_SERVICE is the ending part of the FB service name which you want to check:
+       DefaultInstance ; 40ss ; 40cs ; 50ss ; 50cs etc
+    5) the batch 'setup-fb-for-replication.bat' will:
+       * stop the selected FB instance
+       * create test databases (in !fbt_repo!/tmp);
+       * prepare journal/archive sub-folders for replication (also in !fbt_repo!/tmp);
+       * replace %fb_home%/replication.conf with the appropriate one
+       * start the selected FB instance
+    6) run this test (the FB instance will already be launched by setup-fb-for-replication.bat):
+       %fbt-repo%/fbt-run2.bat dblevel-triggers-must-not-fire-on-replica.fbt 50ss, etc
+
+    Confirmed bug on 4.0.1.2578 and 5.0.0.169: messages "ERROR: unsuccessful metadata update / CREATE OR ALTER VIEW V1 failed"
+    will be added into the replication log, after which replication gets stuck.
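(Editor's sketch, not part of the patch: the metadata comparison mentioned above - extract the metadata of master and replica with 'isql -x', diff it, and report every difference except the always-differing CREATE DATABASE comment - could look roughly like this. isql_path and the DSN strings are placeholders; the filtering mirrors the commented-out legacy code kept below.)

import difflib
import subprocess

def metadata_diff(isql_path: str, dsn_main: str, dsn_repl: str) -> list:
    def extract(dsn: str) -> list:
        # '-x' makes isql print the DDL (metadata) of the database
        out = subprocess.run([isql_path, dsn, '-q', '-nod', '-ch', 'utf8', '-x'],
                             capture_output=True, text=True).stdout
        return out.splitlines(keepends=True)
    diff = difflib.unified_diff(extract(dsn_main), extract(dsn_repl))
    # Keep only real differences; the '/* CREATE DATABASE ... */' comment always differs
    # because the two databases have different names, so it is suppressed:
    return [line for line in diff
            if line[:1] in ('-', '+')
            and line[:3] not in ('---', '+++')
            and 'CREATE DATABASE' not in line]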
+ + Checked on: 4.0.1.2585 (SS/CS); 5.0.0.192 (SS/CS) +FBTEST: tests.functional.replication.permission_error_on_ddl_issued_by_non_sysdba +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 4.0.1 -# resources: None +substitutions = [('Start removing objects in:.*', 'Start removing objects'), + ('Finish. Total objects removed: \\d+', 'Finish. Total objects removed'), + ('.* CREATE DATABASE .*', ''), ('FMT_DESCR .*', 'FMT_DESCR')] -substitutions_1 = [('Start removing objects in:.*', 'Start removing objects'), ('Finish. Total objects removed: \\d+', 'Finish. Total objects removed'), ('.* CREATE DATABASE .*', ''), ('FMT_DESCR .*', 'FMT_DESCR')] +db = db_factory() -init_script_1 = """""" +act = python_act('db', substitutions=substitutions) -db_1 = db_factory(sql_dialect=3, init=init_script_1) +expected_stdout = """ + POINT-1 FOUND message about replicated segment. + Start removing objects + Finish. Total objects removed + POINT-2 FOUND message about replicated segment. +""" + +@pytest.mark.skip('FIXME: Not IMPLEMENTED') +@pytest.mark.version('>=4.0.1') +@pytest.mark.platform('Windows') +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") # test_script_1 #--- -# +# # import os # import subprocess # import re # import difflib # import shutil # import time -# +# # os.environ["ISC_USER"] = user_name # os.environ["ISC_PASSWORD"] = user_password -# +# # ##################################### # MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 65 # ##################################### -# +# # svc = fdb.services.connect(host='localhost', user=user_name, password=user_password) # FB_HOME = svc.get_home_directory() # svc.close() -# +# # engine = db_conn.engine_version # 4.0; 4.1; 5.0 etc -- type float # fb_major = 'fb' + str(engine)[:1] + '0' # 'fb40'; 'fb50' -# +# # cur = db_conn.cursor() # cur.execute("select rdb$config_value from rdb$config where upper(rdb$config_name) = upper('ServerMode')") # server_mode = 'XX' @@ -128,34 +128,34 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # elif r[0] == 'Classic': # server_mode = 'CS' # cur.close() -# +# # # 'fbt-main.fb50.ss.fdb' etc: # db_main = os.path.join( context['temp_directory'], 'fbt-main.' + fb_major + '.' + server_mode + '.fdb' ) # db_repl = db_main.replace( 'fbt-main.', 'fbt-repl.') -# +# # # Folders for journalling and archieving segments. # repl_journal_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.journal' ) # repl_archive_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.archive' ) -# +# # db_conn.close() -# +# # #-------------------------------------------- -# +# # def flush_and_close( file_handle ): # # https://docs.python.org/2/library/os.html#os.fsync # # If you're starting with a Python file object f, # # first do f.flush(), and # # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. # global os -# +# # file_handle.flush() # if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: # # otherwise: "OSError: [Errno 9] Bad file descriptor"! 
# os.fsync(file_handle.fileno()) # file_handle.close() -# +# # #-------------------------------------------- -# +# # def cleanup( f_names_list ): # global os # for i in range(len( f_names_list )): @@ -166,18 +166,18 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # else: # print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.') # del_name = None -# +# # if del_name and os.path.isfile( del_name ): # os.remove( del_name ) -# +# # #-------------------------------------------- -# +# # def wait_for_data_in_replica( fb_home, max_allowed_time_for_wait, db_main, prefix_msg = '' ): -# +# # global re # global difflib # global time -# +# # # -:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:- # def check_pattern_in_log( log_lines, pattern, prefix_msg = '' ): # found_required_message = False @@ -188,11 +188,11 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # break # return found_required_message # # -:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:- -# +# # replold_lines = [] # with open( os.path.join(fb_home,'replication.log'), 'r') as f: # replold_lines = f.readlines() -# +# # con = fdb.connect( dsn = 'localhost:' + db_main, no_db_triggers = 1) # cur = con.cursor() # cur.execute("select rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') from rdb$database") @@ -200,42 +200,42 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # last_generated_repl_segment = r[0] # cur.close() # con.close() -# +# # # VERBOSE: Segment 1 (2582 bytes) is replicated in 1 second(s), deleting the file # segment_replicated_pattern=re.compile( 'verbose:\\s+segment\\s+%(last_generated_repl_segment)s\\s+\\(\\d+\\s+bytes\\)\\s+is\\s+replicated.*deleting' % locals(), re.IGNORECASE) -# +# # # 08.09.2021: replication content can remain unchanged if there was no user-defined object in DB that must be dropped! # # Because of this, it is crucial to check OLD content of replication log before loop. # # Also, segment_replicated_pattern must NOT start from '\\+' because it can occur only for diff_data (within loop): # # # found_required_message = check_pattern_in_log( replold_lines, segment_replicated_pattern, prefix_msg ) -# +# # if not found_required_message: -# +# # for i in range(0,max_allowed_time_for_wait): # time.sleep(1) -# +# # # Get content of fb_home replication.log _after_ isql finish: # f_repllog_new = open( os.path.join(fb_home,'replication.log'), 'r') # diff_data = difflib.unified_diff( -# replold_lines, +# replold_lines, # f_repllog_new.readlines() # ) # f_repllog_new.close() -# +# # found_required_message = check_pattern_in_log( diff_data, segment_replicated_pattern, prefix_msg ) # if found_required_message: # break -# +# # if not found_required_message: # print('UNEXPECTED RESULT: no message about replicated segment No. %d for %d seconds.' 
% (int(last_generated_repl_segment), max_allowed_time_for_wait) ) -# +# # #-------------------------------------------- -# +# # sql_ddl = ''' create or alter user dba_helper password '123' using plugin Srp grant admin role; # grant rdb$admin to user dba_helper; # set term ^; -# execute block as +# execute block as # begin # execute statement 'create or alter view v1 as select 1 x from rdb$database' # with autonomous transaction @@ -249,82 +249,82 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # drop view v1; # commit; # ''' % locals() -# -# +# +# # f_sql_chk = open( os.path.join(context['temp_directory'],'tmp_gh_6856_init.sql'), 'w') # f_sql_chk.write(sql_ddl) # flush_and_close( f_sql_chk ) -# +# # f_sql_log = open( ''.join( (os.path.splitext(f_sql_chk.name)[0], '.log' ) ), 'w') # f_sql_err = open( ''.join( (os.path.splitext(f_sql_chk.name)[0], '.err' ) ), 'w') # subprocess.call( [ context['isql_path'], 'localhost:' + db_main, '-i', f_sql_chk.name ], stdout = f_sql_log, stderr = f_sql_err) # flush_and_close( f_sql_log ) # flush_and_close( f_sql_err ) -# +# # last_generated_repl_segment = 0 -# +# # with open(f_sql_err.name,'r') as f: # for line in f: # print('UNEXPECTED STDERR in initial SQL: ' + line) # MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0 -# +# # if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> initial SQL script finished w/o errors -# +# # ############################################################################## # ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### # ############################################################################## # wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-1' ) -# +# # # return initial state of master DB: # # remove all DB objects (tables, views, ...): # # -------------------------------------------- # sql_clean_ddl = os.path.join(context['files_location'],'drop-all-db-objects.sql') -# +# # f_clean_log=open( os.path.join(context['temp_directory'],'drop-all-db-objects-gh_6856.log'), 'w') # f_clean_err=open( ''.join( ( os.path.splitext(f_clean_log.name)[0], '.err') ), 'w') # subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-i', sql_clean_ddl], stdout=f_clean_log, stderr=f_clean_err ) # flush_and_close(f_clean_log) # flush_and_close(f_clean_err) -# +# # with open(f_clean_err.name,'r') as f: # for line in f: # print('UNEXPECTED STDERR in cleanup SQL: ' + line) # MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0 -# +# # with open(f_clean_log.name,'r') as f: # for line in f: # # show number of dropped objects # print(line) -# +# # if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> previous SQL script finished w/o errors -# +# # ############################################################################## # ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### # ############################################################################## # wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-2' ) -# +# # f_main_meta_sql=open( os.path.join(context['temp_directory'],'db_main_meta_gh_6856.sql'), 'w') # subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_main_meta_sql, stderr=subprocess.STDOUT ) # flush_and_close( f_main_meta_sql ) -# +# # f_repl_meta_sql=open( os.path.join(context['temp_directory'],'db_repl_meta_gh_6856.sql'), 'w') # subprocess.call( [context['isql_path'], 'localhost:' + db_repl, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_repl_meta_sql, stderr=subprocess.STDOUT ) # 
flush_and_close( f_repl_meta_sql ) -# +# # db_main_meta=open(f_main_meta_sql.name, 'r') # db_repl_meta=open(f_repl_meta_sql.name, 'r') -# +# # diffmeta = ''.join(difflib.unified_diff( -# db_main_meta.readlines(), +# db_main_meta.readlines(), # db_repl_meta.readlines() # )) # db_main_meta.close() # db_repl_meta.close() -# +# # f_meta_diff=open( os.path.join(context['temp_directory'],'db_meta_diff_gh_6856.txt'), 'w', buffering = 0) # f_meta_diff.write(diffmeta) # flush_and_close( f_meta_diff ) -# +# # # Following must issue only TWO rows: # # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_main]' ... */ # # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_repl]' ... */ @@ -333,7 +333,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # for line in f: # if line[:1] in ('-', '+') and line[:3] not in ('---','+++'): # print('UNEXPECTED METADATA DIFF.: ' + line) -# +# # ###################### # ### A C H T U N G ### # ###################### @@ -342,25 +342,9 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # runProgram('gfix', ['-sweep', 'localhost:' + db_repl]) # runProgram('gfix', ['-sweep', 'localhost:' + db_main]) # ####################### -# +# # # cleanup: # ########## # cleanup( (f_sql_chk, f_sql_log, f_sql_err,f_clean_log,f_clean_err,f_main_meta_sql,f_repl_meta_sql,f_meta_diff) ) -# +# #--- -act_1 = python_act('db_1', substitutions=substitutions_1) - -expected_stdout_1 = """ - POINT-1 FOUND message about replicated segment. - Start removing objects - Finish. Total objects removed - POINT-2 FOUND message about replicated segment. -""" - -@pytest.mark.version('>=4.0.1') -@pytest.mark.platform('Windows') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - - diff --git a/tests/functional/replication/test_shutdown_during_applying_segments_leads_to_crash.py b/tests/functional/replication/test_shutdown_during_applying_segments_leads_to_crash.py index 8e2f157b..04c426bb 100644 --- a/tests/functional/replication/test_shutdown_during_applying_segments_leads_to_crash.py +++ b/tests/functional/replication/test_shutdown_during_applying_segments_leads_to_crash.py @@ -1,114 +1,118 @@ #coding:utf-8 -# -# id: tests.functional.replication.shutdown_during_applying_segments_leads_to_crash -# title: Crash or hang while shutting down the replica database if segments are being applied -# decription: -# See: https://github.com/FirebirdSQL/firebird/issues/6975 -# -# Bug initially was found during heavy test of replication performed by OLTP-EMUL, for FB 4.x -# (see letters to dimitr 13.09.2021; reply from dimitr, 18.09.2021 08:42 - all in mailbox: pz at ibase.ru). -# -# It *can* be reproduced without heavy/concurrent workload, but we have to operate with data that are written -# into database 'slowly'. Such data can be wide INDEXED column which has GUID-based values. -# -# Test creates a table with 'wide' indexed field and adds data to it. -# Then we save current timestamp (with accuracy up to SECONDS, i.e. cut off milli- or microseconds) to variable. -# After this we start check replicationb.log for appearance of phrase 'Added segment(s) to the processing queue'. -# After founding each such phrase we skip two lines above and parse timestamp when this occurred. -# If timestamp in log less than saved timestamp of our DML action then we go on to the next such phrase. -# Otherwise we can assume that replication BEGINS to apply just generated segment. -# See function wait_for_add_queue_in_replica() which does this parsing of replication.log. 
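In plain terms, wait_for_add_queue_in_replica() has to pair every 'Added N segment(s) to the processing queue' line with the timestamp header that sits a couple of lines above it and ignore entries older than the moment the DML was issued. A minimal sketch of that idea, assuming the log layout and the '%a %b %d %H:%M:%S %Y' timestamp format shown in the commented-out helper further below (this is an illustration only, not the helper's actual code):

    import re
    from datetime import datetime

    queue_pattern = re.compile(r'added\s+\d+\s+segment.*to.*queue', re.IGNORECASE)

    def found_fresh_queue_message(log_lines, min_timestamp):
        # Accept only "Added N segment(s) to the processing queue" entries whose header
        # timestamp, e.g. "(replica) Tue Sep 21 20:24:57 2021", is not older than min_timestamp.
        for i, line in enumerate(log_lines):
            if queue_pattern.search(line):
                header = log_lines[i - 2] if i >= 2 else ''  # timestamp is assumed to sit two lines above
                m = re.search(r'\w{3}\s+\w{3}\s+\d+\s+\d{2}:\d{2}:\d{2}\s+\d{4}', header)
                if m and datetime.strptime(m.group(), '%a %b %d %H:%M:%S %Y') >= min_timestamp:
                    return True
        return False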
-# -# Because we operate with table which have very 'wide' index and, moreover, data in this index are GUID-generated -# text strings, we can safely assume that applying of segment will take at least 5...10 seconds (actually this -# can take done for 30...35 seconds). -# -# During this time we change replica mode to full shutdown and (immediately after that) return to online. -# NO message like 'error reading / writing from/to connection' must appear at this step. -# -# After this, we have to wait for replica finish applying segment and when this occur we drop the table. -# -# Finally, we extract metadata for master and replica and compare them (see 'f_meta_diff'). -# The only difference in metadata must be 'CREATE DATABASE' statement with different DB names - we suppress it, -# thus metadata difference must not be issued. -# -# ################ -# ### N O T E ### -# ################ -# Test assumes that master and replica DB have been created beforehand. -# Also, it assumes that %FB_HOME% -# eplication.conf has been prepared with apropriate parameters for replication. -# Particularly, name of directories and databases must have info about checked FB major version and ServerMode. -# * verbose = true // in order to find out line with message that required segment was replicated -# * section for master database with specified parameters: -# journal_directory = "!fbt_repo! mp -# b-replication.!fb_major!.!server_mode!.journal" -# journal_archive_directory = "!fbt_repo! mp -# b-replication.!fb_major!.!server_mode!.archive" -# journal_archive_command = "copy $(pathname) $(archivepathname)" -# journal_archive_timeout = 10 -# * section for replica database with specified parameter: -# journal_source_directory = "!fbt_repo! mp -# b-replication.!fb_major!.!server_mode!.archive" -# -# Master and replica databases must be created in "!fbt_repo! mp" directory and have names like these: -# 'fbt-main.fb40.SS.fdb'; 'fbt-repl.fb40.SS.fdb'; - for FB 4.x ('SS' = Super; 'CS' = Classic) -# 'fbt-main.fb50.SS.fdb'; 'fbt-repl.fb50.SS.fdb'; - for FB 5.x ('SS' = Super; 'CS' = Classic) -# NB: fixed numeric value ('40' or '50') must be used for any minor FB version (4.0; 4.0.1; 4.1; 5.0; 5.1 etc) -# -# These two databases must NOT be dropped in any of tests related to replication! -# They are created and dropped in the batch scenario which prepares FB instance to be checked for each ServerMode -# and make cleanup after it, i.e. when all tests will be completed. -# -# NB. Currently this task was implemented only in Windows batch, thus test has attribute platform = 'Windows'. -# -# Temporary comment. For debug purpoces: -# 1) find out SUFFIX of the name of FB service which is to be tested (e.g. 'DefaultInstance', '40SS' etc); -# 2) copy file %fbt-repo% ests -# unctional abloidatches\\setup-fb-for-replication.bat.txt -# to some place and rename it "*.bat"; -# 3) open this .bat in editor and asjust value of 'fbt_repo' variable; -# 4) run: setup-fb-for-replication.bat [SUFFIX_OF_FB_SERVICE] -# where SUFFIX_OF_FB_SERVICE is ending part of FB service which you want to check: -# DefaultInstance ; 40ss ; 40cs ; 50ss ; 50cs etc -# 5) batch 'setup-fb-for-replication.bat' will: -# * stop selected FB instance -# * create test databases (in !fbt_repo! mp\\); -# * prepare journal/archive sub-folders for replication (also in !fbt_repo! 
mp\\); -# * replace %fb_home% -# eplication.conf with apropriate -# * start selected FB instance -# 6) run this test (FB instance will be already launched by setup-fb-for-replication.bat): -# %fpt_repo% -# bt-run2.bat dblevel-triggers-must-not-fire-on-replica.fbt 50ss, etc -# -# Confirmed bug on 5.0.0.215: server crashed when segment was applied to replica and at the same time we issued -# 'gfix -shut full -force 0 ...'. Regardless of that command, replica DB remained in NORMAL mode, not in shutdown. -# If this command was issued after this again - FB process hanged (gfix could not return control to OS). -# This is the same bug as described in the ticked (discussed with dimitr, letters 22.09.2021). -# -# Checked on: 4.0.1.2613 (SS/CS); 5.0.0.219 (SS/CS) -# -# tracker_id: -# min_versions: ['4.0.1'] -# versions: 4.0.1 -# qmid: None + +""" +ID: replication.shutdown_during_applying_segments_leads_to_crash +ISSUE: 6975 +TITLE: Crash or hang while shutting down the replica database if segments are being applied +DESCRIPTION: + Bug was initially found during heavy test of replication performed by OLTP-EMUL, for FB 4.x + (see letters to dimitr 13.09.2021; reply from dimitr, 18.09.2021 08:42 - all in mailbox: pz at ibase.ru). + + It *can* be reproduced without heavy/concurrent workload, but we have to operate with data that are written + into the database 'slowly'. Such data can be a wide INDEXED column which has GUID-based values. + + Test creates a table with a 'wide' indexed field and adds data to it. + Then we save the current timestamp (with accuracy up to SECONDS, i.e. cut off milli- or microseconds) to a variable. + After this we start checking replication.log for appearance of the phrase 'Added segment(s) to the processing queue'. + After finding each such phrase we skip two lines above and parse the timestamp when this occurred. + If the timestamp in the log is less than the saved timestamp of our DML action, we go on to the next such phrase. + Otherwise we can assume that replication BEGINS to apply the just generated segment. + See function wait_for_add_queue_in_replica() which does this parsing of replication.log. + + Because we operate with a table which has a very 'wide' index and, moreover, data in this index are GUID-generated + text strings, we can safely assume that applying of the segment will take at least 5...10 seconds (actually this + can take up to 30...35 seconds). + + During this time we change replica mode to full shutdown and (immediately after that) return it to online. + NO message like 'error reading / writing from/to connection' must appear at this step. + + After this, we have to wait for the replica to finish applying the segment, and when this occurs we drop the table. + + Finally, we extract metadata for master and replica and compare them (see 'f_meta_diff'). + The only difference in metadata must be 'CREATE DATABASE' statement with different DB names - we suppress it, + thus metadata difference must not be issued. + + ################ + ### N O T E ### + ################ + Test assumes that master and replica DB have been created beforehand. + Also, it assumes that %FB_HOME%/replication.conf has been prepared with appropriate parameters for replication. + Particularly, name of directories and databases must have info about checked FB major version and ServerMode.
+ * verbose = true // in order to find out line with message that required segment was replicated + * section for master database with specified parameters: + journal_directory = "!fbt_repo!/tmp/fb-replication.!fb_major!.!server_mode!.journal" + journal_archive_directory = "!fbt_repo!/tmp/fb-replication.!fb_major!.!server_mode!.archive" + journal_archive_command = "copy $(pathname) $(archivepathname)" + journal_archive_timeout = 10 + * section for replica database with specified parameter: + journal_source_directory = "!fbt_repo!/tmp/fb-replication.!fb_major!.!server_mode!.archive" + + Master and replica databases must be created in "!fbt_repo!/tmp" directory and have names like these: + 'fbt-main.fb40.SS.fdb'; 'fbt-repl.fb40.SS.fdb'; - for FB 4.x ('SS' = Super; 'CS' = Classic) + 'fbt-main.fb50.SS.fdb'; 'fbt-repl.fb50.SS.fdb'; - for FB 5.x ('SS' = Super; 'CS' = Classic) + NB: fixed numeric value ('40' or '50') must be used for any minor FB version (4.0; 4.0.1; 4.1; 5.0; 5.1 etc) + + These two databases must NOT be dropped in any of tests related to replication! + They are created and dropped in the batch scenario which prepares the FB instance to be checked for each ServerMode + and cleans up after it, i.e. when all tests are completed. + + NB. Currently this task was implemented only in Windows batch, thus the test has attribute platform = 'Windows'. + + Temporary comment. For debug purposes: + 1) find out SUFFIX of the name of FB service which is to be tested (e.g. 'DefaultInstance', '40SS' etc); + 2) copy file %fbt-repo%/tests/functional/tabloid/batches/setup-fb-for-replication.bat.txt + to some place and rename it "*.bat"; + 3) open this .bat in editor and adjust value of 'fbt_repo' variable; + 4) run: setup-fb-for-replication.bat [SUFFIX_OF_FB_SERVICE] + where SUFFIX_OF_FB_SERVICE is the ending part of the FB service which you want to check: + DefaultInstance ; 40ss ; 40cs ; 50ss ; 50cs etc + 5) batch 'setup-fb-for-replication.bat' will: + * stop selected FB instance + * create test databases (in !fbt_repo!/tmp); + * prepare journal/archive sub-folders for replication (also in !fbt_repo!/tmp); + * replace %fb_home%/replication.conf with appropriate + * start selected FB instance + 6) run this test (FB instance will be already launched by setup-fb-for-replication.bat): + %fpt_repo%/fbt-run2.bat dblevel-triggers-must-not-fire-on-replica.fbt 50ss, etc + + Confirmed bug on 5.0.0.215: server crashed when segment was applied to replica and at the same time we issued + 'gfix -shut full -force 0 ...'. Regardless of that command, replica DB remained in NORMAL mode, not in shutdown. + If this command was issued after this again - FB process hung (gfix could not return control to OS). + This is the same bug as described in the ticket (discussed with dimitr, letters 22.09.2021). + + Checked on: 4.0.1.2613 (SS/CS); 5.0.0.219 (SS/CS) +FBTEST: tests.functional.replication.shutdown_during_applying_segments_leads_to_crash +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 4.0.1 -# resources: None +substitutions = [('Start removing objects in:.*', 'Start removing objects'), + ('Finish. Total objects removed: [1-9]\\d*', 'Finish. Total objects removed'), + ('.* CREATE DATABASE .*', ''), ('FMT_DESCR .*', 'FMT_DESCR'), ('[ \t]+', ' ')] -substitutions_1 = [('Start removing objects in:.*', 'Start removing objects'), ('Finish. Total objects removed: [1-9]\\d*', 'Finish.
Total objects removed'), ('.* CREATE DATABASE .*', ''), ('FMT_DESCR .*', 'FMT_DESCR'), ('[ \t]+', ' ')] +db = db_factory() -init_script_1 = """""" +act = python_act('db', substitutions=substitutions) -db_1 = db_factory(sql_dialect=3, init=init_script_1) +expected_stdout = """ + POINT-A FOUND message about segments added to queue after given timestamp. + POINT-B Attributes force write, full shutdown, read-only replica + POINT-1 FOUND message about replicated segment. + Start removing objects + Finish. Total objects removed + POINT-2 FOUND message about replicated segment. +""" + +@pytest.mark.skip('FIXME: Not IMPLEMENTED') +@pytest.mark.version('>=4.0.1') +@pytest.mark.platform('Windows') +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") # test_script_1 #--- -# +# # import os # import subprocess # import re @@ -117,25 +121,25 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # import time # from datetime import datetime # from datetime import timedelta -# -# +# +# # os.environ["ISC_USER"] = user_name # os.environ["ISC_PASSWORD"] = user_password -# +# # # NB: with default values of 'apply_idle_timeout' and 'apply_error_timeout' (10 and 60 s) # # total time of this test is about 130...132s. # ##################################### # MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 135 # MAX_TIME_FOR_WAIT_ADDED_TO_QUEUE = 135 # ##################################### -# +# # svc = fdb.services.connect(host='localhost', user=user_name, password=user_password) # FB_HOME = svc.get_home_directory() # svc.close() -# +# # engine = db_conn.engine_version # 4.0; 4.1; 5.0 etc -- type float # fb_major = 'fb' + str(engine)[:1] + '0' # 'fb40'; 'fb50' -# +# # cur = db_conn.cursor() # cur.execute("select rdb$config_value from rdb$config where upper(rdb$config_name) = upper('ServerMode')") # server_mode = 'XX' @@ -147,36 +151,36 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # elif r[0] == 'Classic': # server_mode = 'CS' # cur.close() -# +# # # 'fbt-main.fb50.ss.fdb' etc: # db_main = os.path.join( context['temp_directory'], 'fbt-main.' + fb_major + '.' + server_mode + '.fdb' ) # db_repl = db_main.replace( 'fbt-main.', 'fbt-repl.') -# +# # # Folders for journalling and archieving segments. # repl_journal_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.journal' ) # repl_archive_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.archive' ) -# +# # runProgram('gfix', ['-w', 'async', 'localhost:' + db_main]) -# +# # db_conn.close() -# +# # #-------------------------------------------- -# +# # def flush_and_close( file_handle ): # # https://docs.python.org/2/library/os.html#os.fsync # # If you're starting with a Python file object f, # # first do f.flush(), and # # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. # global os -# +# # file_handle.flush() # if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: # # otherwise: "OSError: [Errno 9] Bad file descriptor"! 
# os.fsync(file_handle.fileno()) # file_handle.close() -# +# # #-------------------------------------------- -# +# # def cleanup( f_names_list ): # global os # for i in range(len( f_names_list )): @@ -187,17 +191,17 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # else: # print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.') # del_name = None -# +# # if del_name and os.path.isfile( del_name ): # os.remove( del_name ) -# +# # #-------------------------------------------- -# +# # def wait_for_data_in_replica( fb_home, max_allowed_time_for_wait, db_main, prefix_msg = '' ): # global re # global difflib # global time -# +# # # -:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:- # def check_pattern_in_log( log_lines, pattern, prefix_msg = '' ): # found_required_message = False @@ -208,11 +212,11 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # break # return found_required_message # # -:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:- -# +# # replold_lines = [] # with open( os.path.join(fb_home,'replication.log'), 'r') as f: # replold_lines = f.readlines() -# +# # con = fdb.connect( dsn = 'localhost:' + db_main, no_db_triggers = 1) # cur = con.cursor() # cur.execute("select rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') from rdb$database") @@ -220,50 +224,50 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # last_generated_repl_segment = r[0] # cur.close() # con.close() -# +# # # VERBOSE: Segment 1 (2582 bytes) is replicated in 1 second(s), deleting the file # segment_replicated_pattern=re.compile( 'verbose:\\s+segment\\s+%(last_generated_repl_segment)s\\s+\\(\\d+\\s+bytes\\)\\s+is\\s+replicated.*deleting' % locals(), re.IGNORECASE) -# +# # # 08.09.2021: replication content can remain unchanged if there was no user-defined object in DB that must be dropped! # # Because of this, it is crucial to check OLD content of replication log before loop. # # Also, segment_replicated_pattern must NOT start from '\\+' because it can occur only for diff_data (within loop): # # # found_required_message = check_pattern_in_log( replold_lines, segment_replicated_pattern, prefix_msg ) -# +# # if not found_required_message: -# +# # for i in range(0,max_allowed_time_for_wait): # time.sleep(1) -# +# # # Get content of fb_home replication.log _after_ isql finish: # f_repllog_new = open( os.path.join(fb_home,'replication.log'), 'r') # diff_data = difflib.unified_diff( -# replold_lines, +# replold_lines, # f_repllog_new.readlines() # ) # f_repllog_new.close() -# +# # found_required_message = check_pattern_in_log( diff_data, segment_replicated_pattern, prefix_msg ) # if found_required_message: # break -# +# # if not found_required_message: # print('UNEXPECTED RESULT: no message about replicated segment No. %d for %d seconds.' % (int(last_generated_repl_segment), max_allowed_time_for_wait) ) -# -# +# +# # #-------------------------------------------- -# +# # def wait_for_add_queue_in_replica( fb_home, max_allowed_time_for_wait, min_timestamp, prefix_msg = '' ): -# +# # global re # global difflib # global time # global datetime -# +# # # (replica) Tue Sep 21 20:24:57 2021 # # Database: ... 
# # Added 3 segment(s) to the processing queue -# +# # # -:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:- # def check_pattern_in_log( log_lines, pattern, min_timestamp, prefix_msg = '' ): # found_required_message = False @@ -283,48 +287,48 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # found_required_message = True # break # return found_required_message -# +# # # -:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:- -# +# # replold_lines = [] # with open( os.path.join(fb_home,'replication.log'), 'r') as f: # replold_lines = f.readlines() -# -# +# +# # segments_to_queue_pattern=re.compile( 'verbose:\\s+added\\s+\\d+\\s+segment.*to.*queue', re.IGNORECASE) -# +# # # 08.09.2021: replication content can remain unchanged if there was no user-defined object in DB that must be dropped! # # Because of this, it is crucial to check OLD content of replication log before loop. # # Also, segments_to_queue_pattern must NOT start from '\\+' because it can occur only for diff_data (within loop): # # # found_required_message = check_pattern_in_log( replold_lines, segments_to_queue_pattern, min_timestamp, prefix_msg ) -# +# # if not found_required_message: -# +# # for i in range(0,max_allowed_time_for_wait): # time.sleep(1) -# +# # # Get content of fb_home replication.log _after_ isql finish: # f_repllog_new = open( os.path.join(fb_home,'replication.log'), 'r') # diff_data = difflib.unified_diff( -# replold_lines, +# replold_lines, # f_repllog_new.readlines() # ) # f_repllog_new.close() -# +# # found_required_message = check_pattern_in_log( list(diff_data), segments_to_queue_pattern, min_timestamp, prefix_msg ) # if found_required_message: # break -# +# # if not found_required_message: # print('UNEXPECTED RESULT: no message about segments added to queue after %s.' 
% min_timestamp) -# +# # #-------------------------------------------- -# +# # sql_ddl = ''' set bail on; # recreate table test(s varchar(700), constraint test_s_unq unique(s)); # commit; -# +# # set term ^; # execute block as # declare fld_len int; @@ -335,42 +339,42 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # join rdb$fields ff on rf.rdb$field_source = ff.rdb$field_name # where upper(rf.rdb$relation_name) = upper('test') and upper(rf.rdb$field_name) = upper('s') # into fld_len; -# -# +# +# # n = 10000; # while (n > 0) do # begin # insert into test(s) values( lpad('', :fld_len, uuid_to_char(gen_uuid())) ); # n = n - 1; # end -# +# # end # ^ # set term ;^ # commit; # ''' % locals() -# -# +# +# # f_sql_chk = open( os.path.join(context['temp_directory'],'tmp_gh_6975_init.sql'), 'w') # f_sql_chk.write(sql_ddl) # flush_and_close( f_sql_chk ) -# +# # f_sql_log = open( ''.join( (os.path.splitext(f_sql_chk.name)[0], '.log' ) ), 'w') # f_sql_err = open( ''.join( (os.path.splitext(f_sql_chk.name)[0], '.err' ) ), 'w') # subprocess.call( [ context['isql_path'], 'localhost:' + db_main, '-i', f_sql_chk.name ], stdout = f_sql_log, stderr = f_sql_err) # flush_and_close( f_sql_log ) # flush_and_close( f_sql_err ) -# +# # last_generated_repl_segment = 0 -# +# # with open(f_sql_err.name,'r') as f: # for line in f: # print('UNEXPECTED STDERR in initial SQL: ' + line) # MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0 -# +# # if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> initial SQL script finished w/o errors -# -# +# +# # ################################## # ### A.C.H.T.U.N.G ### # ### do NOT use datetime.now() ### @@ -379,89 +383,89 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # ### (it is HH:MM:SS only) ### # ################################## # current_date_with_hhmmss = datetime.today().replace(microsecond=0) -# -# +# +# # ############################################################################## # ### W A I T F O R S E G M E N T S A D D E D T O Q U E U E ### # ############################################################################## # wait_for_add_queue_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_ADDED_TO_QUEUE, current_date_with_hhmmss, 'POINT-A' ) -# +# # # This led to crash and appearance of message: # # "Fatal lock manager error: invalid lock id (0), errno: 0" in firebird.log: # # # runProgram('gfix', ['-shut', 'full', '-force', '0', 'localhost:' + db_repl]) -# +# # f_repl_hdr_log=open( os.path.join(context['temp_directory'],'db_repl_hdr.log'), 'w') # subprocess.call( [context['gstat_path'], db_repl, '-h'], stdout=f_repl_hdr_log, stderr=subprocess.STDOUT ) # flush_and_close( f_repl_hdr_log ) -# +# # with open(f_repl_hdr_log.name,'r') as f: # for line in f: # if 'Attributes' in line: # print('POINT-B ' + line.strip()) -# -# +# +# # # This (issuing 'gfix -shu ...' second time) led FB process to hang! 
# # runProgram('gfix', ['-shut', 'full', '-force', '0', 'localhost:' + db_repl]) -# +# # runProgram('gfix', ['-online', 'localhost:' + db_repl]) -# -# +# +# # ############################################################################## # ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### # ############################################################################## # wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-1' ) -# +# # # return initial state of master DB: # # remove all DB objects (tables, views, ...): # # -------------------------------------------- # sql_clean_ddl = os.path.join(context['files_location'],'drop-all-db-objects.sql') -# +# # f_clean_log=open( os.path.join(context['temp_directory'],'drop-all-db-objects-gh_6975.log'), 'w') # f_clean_err=open( ''.join( ( os.path.splitext(f_clean_log.name)[0], '.err') ), 'w') # subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-i', sql_clean_ddl], stdout=f_clean_log, stderr=f_clean_err ) # flush_and_close(f_clean_log) # flush_and_close(f_clean_err) -# +# # with open(f_clean_err.name,'r') as f: # for line in f: # print('UNEXPECTED STDERR in cleanup SQL: ' + line) # MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0 -# +# # with open(f_clean_log.name,'r') as f: # for line in f: # # show number of dropped objects # print(line) -# +# # if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> previous SQL script finished w/o errors -# +# # ############################################################################## # ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### # ############################################################################## # wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-2' ) -# +# # f_main_meta_sql=open( os.path.join(context['temp_directory'],'db_main_meta_gh_6975.sql'), 'w') # subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_main_meta_sql, stderr=subprocess.STDOUT ) # flush_and_close( f_main_meta_sql ) -# +# # f_repl_meta_sql=open( os.path.join(context['temp_directory'],'db_repl_meta_gh_6975.sql'), 'w') # subprocess.call( [context['isql_path'], 'localhost:' + db_repl, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_repl_meta_sql, stderr=subprocess.STDOUT ) # flush_and_close( f_repl_meta_sql ) -# +# # db_main_meta=open(f_main_meta_sql.name, 'r') # db_repl_meta=open(f_repl_meta_sql.name, 'r') -# +# # diffmeta = ''.join(difflib.unified_diff( -# db_main_meta.readlines(), +# db_main_meta.readlines(), # db_repl_meta.readlines() # )) # db_main_meta.close() # db_repl_meta.close() -# +# # f_meta_diff=open( os.path.join(context['temp_directory'],'db_meta_diff_gh_6975.txt'), 'w', buffering = 0) # f_meta_diff.write(diffmeta) # flush_and_close( f_meta_diff ) -# +# # # Following must issue only TWO rows: # # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_main]' ... */ # # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_repl]' ... 
*/ @@ -470,9 +474,9 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # for line in f: # if line[:1] in ('-', '+') and line[:3] not in ('---','+++'): # print('UNEXPECTED METADATA DIFF.: ' + line) -# +# # runProgram('gfix', ['-w', 'sync', 'localhost:' + db_main]) -# +# # ###################### # ### A C H T U N G ### # ###################### @@ -481,27 +485,9 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # runProgram('gfix', ['-sweep', 'localhost:' + db_repl]) # runProgram('gfix', ['-sweep', 'localhost:' + db_main]) # ####################### -# +# # # cleanup: # ########## # #cleanup( (f_sql_chk, f_sql_log, f_sql_err,f_clean_log,f_clean_err,f_main_meta_sql,f_repl_meta_sql,f_meta_diff) ) -# +# #--- -act_1 = python_act('db_1', substitutions=substitutions_1) - -expected_stdout_1 = """ - POINT-A FOUND message about segments added to queue after given timestamp. - POINT-B Attributes force write, full shutdown, read-only replica - POINT-1 FOUND message about replicated segment. - Start removing objects - Finish. Total objects removed - POINT-2 FOUND message about replicated segment. -""" - -@pytest.mark.version('>=4.0.1') -@pytest.mark.platform('Windows') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - - diff --git a/tests/functional/replication/test_some_updates_crash_server_on_replica_side.py b/tests/functional/replication/test_some_updates_crash_server_on_replica_side.py index ba641e09..1235b54d 100644 --- a/tests/functional/replication/test_some_updates_crash_server_on_replica_side.py +++ b/tests/functional/replication/test_some_updates_crash_server_on_replica_side.py @@ -1,147 +1,149 @@ #coding:utf-8 -# -# id: tests.functional.replication.some_updates_crash_server_on_replica_side -# title: Some updates can crash Firebird server on replica side -# decription: -# See: https://github.com/FirebirdSQL/firebird/issues/6909 -# -# We create table and add data in it according to ticket info. -# -# After this we wait until replica becomes actual to master, and this delay will last no more then threshold -# that is defined by MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG variable (measured in seconds). -# During this delay, we check every second for replication log and search there line with number of last generated -# segment (which was replicated and deleting finally). -# We can assume that replication finished OK only when such line is found see ('POINT-1'). -# -# Further, we invoke ISQL with executing auxiliary script for drop all DB objects on master (with '-nod' command switch). -# After all objects will be dropped, we have to wait again until replica becomes actual with master (see 'POINT-2'). -# -# Finally, we extract metadata for master and replica and compare them (see 'f_meta_diff'). -# The only difference in metadata must be 'CREATE DATABASE' statement with different DB names - we suppress it, -# thus metadata difference must not be issued. -# -# #################### -# ### CRUCIAL NOTE ### -# #################### -# Currently, 25.06.2021, there is bug in FB 4.x and 5.x which can be seen on SECOND run of this test: message with text -# "ERROR: Record format with length 68 is not found for table TEST" will appear in it after inserting 1st record in master. -# The reason of that is "dirty" pages that remain in RDB$RELATION_FIELDS both on master and replica after dropping table. 
-# Following query show different data that appear in replica DB on 1st and 2nd run (just after table was created on master): -# ======= -# set blobdisplay 6; -# select rdb$descriptor as fmt_descr -# from rdb$formats natural join rdb$relations where rdb$relation_name = 'TEST'; -# ======= -# This bug was explained by dimitr, see letters 25.06.2021 11:49 and 25.06.2021 16:56. -# It will be fixed later. -# -# The only workaround to solve this problem is to make SWEEP after all DB objects have been dropped. -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -# !NB! BOTH master and replica must be cleaned up by sweep! -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -# -# ################ -# ### N O T E ### -# ################ -# Test assumes that master and replica DB have been created beforehand. -# Also, it assumes that %FB_HOME% -# eplication.conf has been prepared with apropriate parameters for replication. -# Particularly, name of directories and databases must have info about checked FB major version and ServerMode. -# * verbose = true // in order to find out line with message that required segment was replicated -# * section for master database with specified parameters: -# journal_directory = "!fbt_repo! mp -# b-replication.!fb_major!.!server_mode!.journal" -# journal_archive_directory = "!fbt_repo! mp -# b-replication.!fb_major!.!server_mode!.archive" -# journal_archive_command = "copy $(pathname) $(archivepathname)" -# journal_archive_timeout = 10 -# * section for replica database with specified parameter: -# journal_source_directory = "!fbt_repo! mp -# b-replication.!fb_major!.!server_mode!.archive" -# -# Master and replica databases must be created in "!fbt_repo! mp" directory and have names like these: -# 'fbt-main.fb40.SS.fdb'; 'fbt-repl.fb40.SS.fdb'; - for FB 4.x ('SS' = Super; 'CS' = Classic) -# 'fbt-main.fb50.SS.fdb'; 'fbt-repl.fb50.SS.fdb'; - for FB 5.x ('SS' = Super; 'CS' = Classic) -# NB: fixed numeric value ('40' or '50') must be used for any minor FB version (4.0; 4.0.1; 4.1; 5.0; 5.1 etc) -# -# These two databases must NOT be dropped in any of tests related to replication! -# They are created and dropped in the batch scenario which prepares FB instance to be checked for each ServerMode -# and make cleanup after it, i.e. when all tests will be completed. -# -# NB. Currently this task was implemented only in Windows batch, thus test has attribute platform = 'Windows'. -# -# Temporary comment. For debug purpoces: -# 1) find out SUFFIX of the name of FB service which is to be tested (e.g. 'DefaultInstance', '40SS' etc); -# 2) copy file %fbt-repo% ests -# unctional abloidatches\\setup-fb-for-replication.bat.txt -# to some place and rename it "*.bat"; -# 3) open this .bat in editor and asjust value of 'fbt_repo' variable; -# 4) run: setup-fb-for-replication.bat [SUFFIX_OF_FB_SERVICE] -# where SUFFIX_OF_FB_SERVICE is ending part of FB service which you want to check: -# DefaultInstance ; 40ss ; 40cs ; 50ss ; 50cs etc -# 5) batch 'setup-fb-for-replication.bat' will: -# * stop selected FB instance -# * create test databases (in !fbt_repo! mp\\); -# * prepare journal/archive sub-folders for replication (also in !fbt_repo! 
mp\\); -# * replace %fb_home% -# eplication.conf with apropriate -# * start selected FB instance -# 6) run this test (FB instance will be already launched by setup-fb-for-replication.bat): -# %fpt_repo% -# bt-run2.bat dblevel-triggers-must-not-fire-on-replica.fbt 50ss, etc -# -# Confirmed bug on 5.0.0.126 (31.07.2021), 4.0.1.2547 (30.07.2021) -# FB crashes, segment is not delivered on replica. -# Initial fix was for FB 4.x 30-jul-2021 16:28 (44f48955c250193096c244bee9e5cd7ddf9a099b), -# frontported to FB 5.x 04-aug-2021 12:48 (220ca99b85289fdd7a5257e576499a1b9c345cd9) -# -# Checked on: -# 5.0.0.126 SS: 27.290s // intermediate build, timestamp: 04-aug-2021 12:08. -# 5.0.0.126 CS: 26.239s. -# 4.0.1.2556 SS: 29.956s. -# 4.0.1.2556 CS: 26.108s. -# -# tracker_id: -# min_versions: ['4.0.1'] -# versions: 4.0.1 -# qmid: None + +""" +ID: replication.some_updates_crash_server_on_replica_side +ISSUE: 6909 +TITLE: Some updates can crash Firebird server on replica side +DESCRIPTION: + We create a table and add data to it according to the ticket info. + + After this we wait until replica becomes actual to master, and this delay will last no more than the threshold + that is defined by the MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG variable (measured in seconds). + During this delay, we check the replication log every second and search there for a line with the number of the last generated + segment (which was replicated and finally deleted). + We can assume that replication finished OK only when such line is found (see 'POINT-1'). + + Further, we invoke ISQL to execute an auxiliary script that drops all DB objects on master (with '-nod' command switch). + After all objects are dropped, we have to wait again until replica becomes actual with master (see 'POINT-2'). + + Finally, we extract metadata for master and replica and compare them (see 'f_meta_diff'). + The only difference in metadata must be 'CREATE DATABASE' statement with different DB names - we suppress it, + thus metadata difference must not be issued. + + #################### + ### CRUCIAL NOTE ### + #################### + Currently, 25.06.2021, there is a bug in FB 4.x and 5.x which can be seen on the SECOND run of this test: a message with the text + "ERROR: Record format with length 68 is not found for table TEST" will appear in it after inserting the 1st record in master. + The reason for that is "dirty" pages that remain in RDB$RELATION_FIELDS both on master and replica after dropping the table. + The following query shows different data that appear in the replica DB on the 1st and 2nd run (just after the table was created on master): + ======= + set blobdisplay 6; + select rdb$descriptor as fmt_descr + from rdb$formats natural join rdb$relations where rdb$relation_name = 'TEST'; + ======= + This bug was explained by dimitr, see letters 25.06.2021 11:49 and 25.06.2021 16:56. + It will be fixed later. + + The only workaround to solve this problem is to run SWEEP after all DB objects have been dropped. + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + !NB! BOTH master and replica must be cleaned up by sweep! + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + ################ + ### N O T E ### + ################ + Test assumes that master and replica DB have been created beforehand. + Also, it assumes that %FB_HOME%/replication.conf has been prepared with appropriate parameters for replication. + Particularly, name of directories and databases must have info about checked FB major version and ServerMode.
+ * verbose = true // in order to find out line with message that required segment was replicated + * section for master database with specified parameters: + journal_directory = "!fbt_repo!/tmp/fb-replication.!fb_major!.!server_mode!.journal" + journal_archive_directory = "!fbt_repo!/tmp/fb-replication.!fb_major!.!server_mode!.archive" + journal_archive_command = "copy $(pathname) $(archivepathname)" + journal_archive_timeout = 10 + * section for replica database with specified parameter: + journal_source_directory = "!fbt_repo!/tmp/fb-replication.!fb_major!.!server_mode!.archive" + + Master and replica databases must be created in "!fbt_repo!/tmp" directory and have names like these: + 'fbt-main.fb40.SS.fdb'; 'fbt-repl.fb40.SS.fdb'; - for FB 4.x ('SS' = Super; 'CS' = Classic) + 'fbt-main.fb50.SS.fdb'; 'fbt-repl.fb50.SS.fdb'; - for FB 5.x ('SS' = Super; 'CS' = Classic) + NB: fixed numeric value ('40' or '50') must be used for any minor FB version (4.0; 4.0.1; 4.1; 5.0; 5.1 etc) + + These two databases must NOT be dropped in any of tests related to replication! + They are created and dropped in the batch scenario which prepares the FB instance to be checked for each ServerMode + and cleans up after it, i.e. when all tests are completed. + + NB. Currently this task was implemented only in Windows batch, thus the test has attribute platform = 'Windows'. + + Temporary comment. For debug purposes: + 1) find out SUFFIX of the name of FB service which is to be tested (e.g. 'DefaultInstance', '40SS' etc); + 2) copy file %fbt-repo%/tests/functional/tabloid/batches/setup-fb-for-replication.bat.txt + to some place and rename it "*.bat"; + 3) open this .bat in editor and adjust value of 'fbt_repo' variable; + 4) run: setup-fb-for-replication.bat [SUFFIX_OF_FB_SERVICE] + where SUFFIX_OF_FB_SERVICE is the ending part of the FB service which you want to check: + DefaultInstance ; 40ss ; 40cs ; 50ss ; 50cs etc + 5) batch 'setup-fb-for-replication.bat' will: + * stop selected FB instance + * create test databases (in !fbt_repo!/tmp); + * prepare journal/archive sub-folders for replication (also in !fbt_repo!/tmp); + * replace %fb_home%/replication.conf with appropriate + * start selected FB instance + 6) run this test (FB instance will be already launched by setup-fb-for-replication.bat): + %fpt_repo%/fbt-run2.bat dblevel-triggers-must-not-fire-on-replica.fbt 50ss, etc + + Confirmed bug on 5.0.0.126 (31.07.2021), 4.0.1.2547 (30.07.2021) + FB crashes, segment is not delivered on replica. + Initial fix was for FB 4.x 30-jul-2021 16:28 (44f48955c250193096c244bee9e5cd7ddf9a099b), + front-ported to FB 5.x 04-aug-2021 12:48 (220ca99b85289fdd7a5257e576499a1b9c345cd9) + + Checked on: + 5.0.0.126 SS: 27.290s // intermediate build, timestamp: 04-aug-2021 12:08. + 5.0.0.126 CS: 26.239s. + 4.0.1.2556 SS: 29.956s. + 4.0.1.2556 CS: 26.108s. +FBTEST: tests.functional.replication.some_updates_crash_server_on_replica_side +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 4.0.1 -# resources: None +substitutions = [('Start removing objects in:.*', 'Start removing objects'), + ('Finish. Total objects removed: [1-9]\\d*', 'Finish. Total objects removed'), + ('.* CREATE DATABASE .*', ''), ('FMT_DESCR .*', 'FMT_DESCR')] -substitutions_1 = [('Start removing objects in:.*', 'Start removing objects'), ('Finish. Total objects removed: [1-9]\\d*', 'Finish.
Total objects removed'), ('.* CREATE DATABASE .*', ''), ('FMT_DESCR .*', 'FMT_DESCR')] +db = db_factory() -init_script_1 = """""" +act = python_act('db', substitutions=substitutions) -db_1 = db_factory(sql_dialect=3, init=init_script_1) +expected_stdout = """ + POINT-1 FOUND message about replicated segment. + Start removing objects + Finish. Total objects removed + POINT-2 FOUND message about replicated segment. +""" + +@pytest.mark.skip('FIXME: Not IMPLEMENTED') +@pytest.mark.version('>=4.0.1') +@pytest.mark.platform('Windows') +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") # test_script_1 #--- -# +# # import os # import subprocess # import re # import difflib # import shutil # import time -# +# # os.environ["ISC_USER"] = user_name # os.environ["ISC_PASSWORD"] = user_password -# +# # ##################################### # MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 65 # ##################################### -# +# # svc = fdb.services.connect(host='localhost', user=user_name, password=user_password) # FB_HOME = svc.get_home_directory() # svc.close() -# +# # engine = db_conn.engine_version # 4.0; 4.1; 5.0 etc -- type float # fb_major = 'fb' + str(engine)[:1] + '0' # 'fb40'; 'fb50' -# +# # cur = db_conn.cursor() # cur.execute("select rdb$config_value from rdb$config where upper(rdb$config_name) = upper('ServerMode')") # server_mode = 'XX' @@ -153,34 +155,34 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # elif r[0] == 'Classic': # server_mode = 'CS' # cur.close() -# +# # # 'fbt-main.fb50.ss.fdb' etc: # db_main = os.path.join( context['temp_directory'], 'fbt-main.' + fb_major + '.' + server_mode + '.fdb' ) # db_repl = db_main.replace( 'fbt-main.', 'fbt-repl.') -# +# # # Folders for journalling and archieving segments. # repl_journal_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.journal' ) # repl_archive_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.archive' ) -# +# # db_conn.close() -# +# # #-------------------------------------------- -# +# # def flush_and_close( file_handle ): # # https://docs.python.org/2/library/os.html#os.fsync # # If you're starting with a Python file object f, # # first do f.flush(), and # # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. # global os -# +# # file_handle.flush() # if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: # # otherwise: "OSError: [Errno 9] Bad file descriptor"! 
# os.fsync(file_handle.fileno()) # file_handle.close() -# +# # #-------------------------------------------- -# +# # def cleanup( f_names_list ): # global os # for i in range(len( f_names_list )): @@ -191,22 +193,22 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # else: # print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.') # del_name = None -# +# # if del_name and os.path.isfile( del_name ): # os.remove( del_name ) -# +# # #-------------------------------------------- -# +# # def wait_for_data_in_replica( fb_home, max_allowed_time_for_wait, db_main, prefix_msg = '' ): -# +# # global re # global difflib # global time -# +# # replold_lines = [] # with open( os.path.join(fb_home,'replication.log'), 'r') as f: # replold_lines = f.readlines() -# +# # con = fdb.connect( dsn = 'localhost:' + db_main, no_db_triggers = 1) # cur = con.cursor() # cur.execute("select rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') from rdb$database") @@ -214,38 +216,38 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # last_generated_repl_segment = r[0] # cur.close() # con.close() -# +# # #print('last_generated_repl_segment:', last_generated_repl_segment) -# +# # # VERBOSE: Segment 1 (2582 bytes) is replicated in 1 second(s), deleting the file # p=re.compile( '\\+\\s+verbose:\\s+segment\\s+%(last_generated_repl_segment)s\\s+\\(\\d+\\s+bytes\\)\\s+is\\s+replicated.*deleting' % locals(), re.IGNORECASE) -# +# # found_required_message = False # for i in range(0,max_allowed_time_for_wait): # time.sleep(1) -# +# # # Get content of fb_home replication.log _after_ isql finish: # f_repllog_new = open( os.path.join(fb_home,'replication.log'), 'r') # diff_data = difflib.unified_diff( -# replold_lines, +# replold_lines, # f_repllog_new.readlines() # ) # f_repllog_new.close() -# +# # for k,d in enumerate(diff_data): # if p.search(d): # print( (prefix_msg + ' ' if prefix_msg else '') + 'FOUND message about replicated segment.' ) # found_required_message = True # break -# +# # if found_required_message: # break -# +# # if not found_required_message: # print('UNEXPECTED RESULT: no message about replicated segment for %d seconds.' 
% max_allowed_time_for_wait) -# +# # #-------------------------------------------- -# +# # sql_ddl = ''' set bail on; # create table rep ( # id_report integer not null, @@ -255,93 +257,93 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # stav char(1) default 'a' not null, # constraint rep_pk primary key (id_report) # ); -# +# # insert into rep (id_report, report_data, zvyrazneni_radku, kriteria_dotazu, stav) # values (0, 'report_data', 'n', 'n', 'a'); -# +# # commit; -# +# # update rep set zvyrazneni_radku='a' where id_report =0; # update rep set kriteria_dotazu='a' where id_report =0; -# +# # commit; # ''' % locals() -# -# +# +# # f_sql_chk = open( os.path.join(context['temp_directory'],'tmp_gh_6909_init.sql'), 'w') # f_sql_chk.write(sql_ddl) # flush_and_close( f_sql_chk ) -# +# # f_sql_log = open( ''.join( (os.path.splitext(f_sql_chk.name)[0], '.log' ) ), 'w') # f_sql_err = open( ''.join( (os.path.splitext(f_sql_chk.name)[0], '.err' ) ), 'w') # subprocess.call( [ context['isql_path'], 'localhost:' + db_main, '-i', f_sql_chk.name ], stdout = f_sql_log, stderr = f_sql_err) # flush_and_close( f_sql_log ) # flush_and_close( f_sql_err ) -# +# # last_generated_repl_segment = 0 -# +# # with open(f_sql_err.name,'r') as f: # for line in f: # print('UNEXPECTED STDERR in initial SQL: ' + line) # MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0 -# +# # if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> initial SQL script finished w/o errors -# +# # ############################################################################## # ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### # ############################################################################## # wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-1' ) -# +# # # return initial state of master DB: # # remove all DB objects (tables, views, ...): # # -------------------------------------------- # sql_clean_ddl = os.path.join(context['files_location'],'drop-all-db-objects.sql') -# +# # f_clean_log=open( os.path.join(context['temp_directory'],'drop-all-db-objects-gh_6909.log'), 'w') # f_clean_err=open( ''.join( ( os.path.splitext(f_clean_log.name)[0], '.err') ), 'w') # subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-i', sql_clean_ddl], stdout=f_clean_log, stderr=f_clean_err ) # flush_and_close(f_clean_log) # flush_and_close(f_clean_err) -# +# # with open(f_clean_err.name,'r') as f: # for line in f: # print('UNEXPECTED STDERR in cleanup SQL: ' + line) # MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0 -# +# # with open(f_clean_log.name,'r') as f: # for line in f: # # show number of dropped objects # print(line) -# +# # if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> previous SQL script finished w/o errors -# +# # ############################################################################## # ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### # ############################################################################## # wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-2' ) -# +# # f_main_meta_sql=open( os.path.join(context['temp_directory'],'db_main_meta_gh_6909.sql'), 'w') # subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_main_meta_sql, stderr=subprocess.STDOUT ) # flush_and_close( f_main_meta_sql ) -# +# # f_repl_meta_sql=open( os.path.join(context['temp_directory'],'db_repl_meta_gh_6909.sql'), 'w') # subprocess.call( [context['isql_path'], 'localhost:' + db_repl, '-q', 
'-nod', '-ch', 'utf8', '-x'], stdout=f_repl_meta_sql, stderr=subprocess.STDOUT ) # flush_and_close( f_repl_meta_sql ) -# +# # db_main_meta=open(f_main_meta_sql.name, 'r') # db_repl_meta=open(f_repl_meta_sql.name, 'r') -# +# # diffmeta = ''.join(difflib.unified_diff( -# db_main_meta.readlines(), +# db_main_meta.readlines(), # db_repl_meta.readlines() # )) # db_main_meta.close() # db_repl_meta.close() -# +# # f_meta_diff=open( os.path.join(context['temp_directory'],'db_meta_diff_gh_6909.txt'), 'w', buffering = 0) # f_meta_diff.write(diffmeta) # flush_and_close( f_meta_diff ) -# +# # # Following must issue only TWO rows: # # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_main]' ... */ # # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_repl]' ... */ @@ -350,7 +352,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # for line in f: # if line[:1] in ('-', '+') and line[:3] not in ('---','+++'): # print('UNEXPECTED METADATA DIFF.: ' + line) -# +# # ###################### # ### A C H T U N G ### # ###################### @@ -359,25 +361,9 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # runProgram('gfix', ['-sweep', 'localhost:' + db_repl]) # runProgram('gfix', ['-sweep', 'localhost:' + db_main]) # ####################### -# +# # # cleanup: # ########## # cleanup( (f_sql_chk, f_sql_log, f_sql_err,f_clean_log,f_clean_err,f_main_meta_sql,f_repl_meta_sql,f_meta_diff) ) -# +# #--- -act_1 = python_act('db_1', substitutions=substitutions_1) - -expected_stdout_1 = """ - POINT-1 FOUND message about replicated segment. - Start removing objects - Finish. Total objects removed - POINT-2 FOUND message about replicated segment. -""" - -@pytest.mark.version('>=4.0.1') -@pytest.mark.platform('Windows') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - - diff --git a/tests/functional/replication/test_updating_blob_with_empty_string_stops_replication.py b/tests/functional/replication/test_updating_blob_with_empty_string_stops_replication.py index ef52ba37..2624fca8 100644 --- a/tests/functional/replication/test_updating_blob_with_empty_string_stops_replication.py +++ b/tests/functional/replication/test_updating_blob_with_empty_string_stops_replication.py @@ -1,129 +1,132 @@ #coding:utf-8 -# -# id: functional.replication.updating_blob_with_empty_string_stops_replication -# title: Replication gets stuck due to "Blob xxx.xx is not found for table xxx" error -# decription: -# https://github.com/FirebirdSQL/firebird/issues/6795 -# -# Test creates table with blob column and performs trivial scenario: -# insert into test(id, b) values(1, null); -# update test set b = '' where id = 1; -# After this we do connect and query ID of last generated segment by querying REPLICATION_SEQUENCE variable -# from SYSTEM context namespace. -# -# Then we wait until replica becomes actual to master, and this delay will last no more then threshold -# that is defined by MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG variable (measured in seconds). -# During this delay, we check every second for replication log and search there line with number of last generated -# segment (which was replicated and deleting finally). -# We can assume that replication finished OK only when such line is found see ('POINT-1'). -# -# Then we invoke ISQL with executing auxiliary script for drop all DB objects on master (with '-nod' command switch). -# After all objects will be dropped, we have to wait again until replica becomes actual with master (see 'POINT-2'). 
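For orientation, the 'drop all DB objects' step boils down to a single isql call that runs the shared drop-all-db-objects.sql script with the '-nod' switch (do not fire database-level triggers), as the commented-out code below does; a minimal sketch with placeholder paths rather than the harness' real context values:

    import subprocess

    def drop_all_db_objects(isql_path, db_main, drop_script, log_path, err_path):
        # Run the auxiliary cleanup script on the master; -q = quiet, -nod = no database triggers.
        with open(log_path, 'w') as log, open(err_path, 'w') as err:
            subprocess.call([isql_path, 'localhost:' + db_main, '-q', '-nod', '-i', drop_script],
                            stdout=log, stderr=err)
        # Any stderr output means the cleanup failed and waiting for the replica makes no sense.
        with open(err_path) as err:
            return [line for line in err if line.strip()]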
-# -# Finally, we extract metadata for master and replica and compare them (see 'f_meta_diff'). -# The only difference in metadata must be 'CREATE DATABASE' statement with different DB names - we suppress it, -# thus metadata difference must not be issued. -# -# ################ -# ### N O T E ### -# ################ -# Test assumes that master and replica DB have been created beforehand. -# Also, it assumes that %FB_HOME% -# eplication.conf has been prepared with apropriate parameters for replication. -# Particularly, name of directories and databases must have info about checked FB major version and ServerMode. -# * verbose = true // in order to find out line with message that required segment was replicated -# * section for master database with specified parameters: -# journal_directory = "!fbt_repo! mp -# b-replication.!fb_major!.!server_mode!.journal" -# journal_archive_directory = "!fbt_repo! mp -# b-replication.!fb_major!.!server_mode!.archive" -# journal_archive_command = "copy $(pathname) $(archivepathname)" -# journal_archive_timeout = 10 -# * section for replica database with specified parameter: -# journal_source_directory = "!fbt_repo! mp -# b-replication.!fb_major!.!server_mode!.archive" -# -# Master and replica databases must be created in "%FBT_REPO% mp" directory and have names like these: -# 'fbt-main.fb40.SS.fdb'; 'fbt-repl.fb40.SS.fdb'; - for FB 4.x ('SS' = Super; 'CS' = Classic) -# 'fbt-main.fb50.SS.fdb'; 'fbt-repl.fb50.SS.fdb'; - for FB 5.x ('SS' = Super; 'CS' = Classic) -# NB: fixed numeric value ('40' or '50') must be used for any minor FB version (4.0; 4.0.1; 4.1; 5.0; 5.1 etc) -# -# These two databases must NOT be dropped in any of tests related to replication! -# They are created and dropped in the batch scenario which prepares FB instance to be checked for each ServerMode -# and make cleanup after it, i.e. when all tests will be completed. -# -# NB. Currently this task presents only in Windows batch, thus test has attribute platform = 'Windows'. -# -# Temporary comment. For debug purpoces: -# 1) find out SUFFIX of the name of FB service which is to be tested (e.g. 'DefaultInstance', '40SS' etc); -# 2) copy file %fbt-repo% ests -# unctional abloidatches\\setup-fb-for-replication.bat.txt -# to some place and rename it "*.bat"; -# 3) open this .bat in editor and asjust value of 'fbt_repo' variable; -# 4) run: setup-fb-for-replication.bat [SUFFIX_OF_FB_SERVICE] -# where SUFFIX_OF_FB_SERVICE is ending part of FB service which you want to check: -# DefaultInstance ; 40ss ; 40cs ; 50ss ; 50cs etc -# 5) batch 'setup-fb-for-replication.bat' will: -# * stop selected FB instance -# * create test databases (in !fbt_repo! mp\\); -# * prepare journal/archive sub-folders for replication (also in !fbt_repo! mp\\); -# * replace %fb_home% -# eplication.conf with apropriate -# * start selected FB instance -# 6) run this test (FB instance will be already launched by setup-fb-for-replication.bat): -# %fpt_repo% -# bt-run2.bat dblevel-triggers-must-not-fire-on-replica.fbt 50ss, etc -# -# Confirmed bug on 4.0.0.2465, got in replication.log: -# * Added 1 segment(s) to the processing queue -# * Segment 1 replication failure at offset 150 -# * Blob 128.480 is not found for table TEST -# After this replication of segment 1 unable to continue and issues repeating messages about added segments. -# -# Checked on: -# 4.0.0.2479 SS; 4.0.1.2519 CS; 5.0.0.82 SS/CS. 
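The 'f_meta_diff' comparison can be pictured roughly as follows: extract the DDL of both databases with 'isql -x' and keep only the changed unified-diff lines, leaving the differing CREATE DATABASE comments to be filtered out by the test's substitutions. An illustrative sketch only, with placeholder paths, mirroring the commented-out code elsewhere in this diff:

    import difflib
    import subprocess

    def metadata_diff(isql_path, db_main, db_repl):
        def extract_ddl(db):
            # isql -x extracts the full DDL script; -ch utf8 keeps identifiers readable.
            out = subprocess.run([isql_path, 'localhost:' + db, '-q', '-nod', '-ch', 'utf8', '-x'],
                                 capture_output=True, text=True)
            return out.stdout.splitlines(keepends=True)

        diff = difflib.unified_diff(extract_ddl(db_main), extract_ddl(db_repl))
        # Report only real differences, skipping the '---' / '+++' file headers.
        return [line for line in diff if line[:1] in ('-', '+') and line[:3] not in ('---', '+++')]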
-# -# tracker_id: -# min_versions: ['4.0'] -# versions: 4.0 -# qmid: None + +""" +ID: replication.updating_blob_with_empty_string_stops_replication +ISSUE: 6795 +TITLE: Replication gets stuck due to "Blob xxx.xx is not found for table xxx" error +DESCRIPTION: + Test creates a table with a blob column and performs a trivial scenario: + insert into test(id, b) values(1, null); + update test set b = '' where id = 1; + After this we connect and query the ID of the last generated segment by reading the REPLICATION_SEQUENCE variable + from the SYSTEM context namespace. + + Then we wait until replica becomes actual to master, and this delay will last no more than the threshold + that is defined by the MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG variable (measured in seconds). + During this delay, we check the replication log every second and search there for a line with the number of the last generated + segment (which was replicated and finally deleted). + We can assume that replication finished OK only when such line is found (see 'POINT-1'). + + Then we invoke ISQL to execute an auxiliary script that drops all DB objects on master (with '-nod' command switch). + After all objects are dropped, we have to wait again until replica becomes actual with master (see 'POINT-2'). + + Finally, we extract metadata for master and replica and compare them (see 'f_meta_diff'). + The only difference in metadata must be 'CREATE DATABASE' statement with different DB names - we suppress it, + thus metadata difference must not be issued. + + ################ + ### N O T E ### + ################ + Test assumes that master and replica DB have been created beforehand. + Also, it assumes that %FB_HOME%/replication.conf has been prepared with appropriate parameters for replication. + Particularly, name of directories and databases must have info about checked FB major version and ServerMode. + * verbose = true // in order to find out line with message that required segment was replicated + * section for master database with specified parameters: + journal_directory = "!fbt_repo!/tmp/fb-replication.!fb_major!.!server_mode!.journal" + journal_archive_directory = "!fbt_repo!/tmp/fb-replication.!fb_major!.!server_mode!.archive" + journal_archive_command = "copy $(pathname) $(archivepathname)" + journal_archive_timeout = 10 + * section for replica database with specified parameter: + journal_source_directory = "!fbt_repo!/tmp/fb-replication.!fb_major!.!server_mode!.archive" + + Master and replica databases must be created in "%FBT_REPO%/tmp" directory and have names like these: + 'fbt-main.fb40.SS.fdb'; 'fbt-repl.fb40.SS.fdb'; - for FB 4.x ('SS' = Super; 'CS' = Classic) + 'fbt-main.fb50.SS.fdb'; 'fbt-repl.fb50.SS.fdb'; - for FB 5.x ('SS' = Super; 'CS' = Classic) + NB: fixed numeric value ('40' or '50') must be used for any minor FB version (4.0; 4.0.1; 4.1; 5.0; 5.1 etc) + + These two databases must NOT be dropped in any of tests related to replication! + They are created and dropped in the batch scenario which prepares the FB instance to be checked for each ServerMode + and cleans up after it, i.e. when all tests are completed. + + NB. Currently this task is present only in Windows batch, thus the test has attribute platform = 'Windows'. + + Temporary comment. For debug purposes: + 1) find out SUFFIX of the name of FB service which is to be tested (e.g.
'DefaultInstance', '40SS' etc); + 2) copy file %fbt-repo%/tests/functional/tabloid/batches/setup-fb-for-replication.bat.txt + to some place and rename it "*.bat"; + 3) open this .bat in editor and asjust value of 'fbt_repo' variable; + 4) run: setup-fb-for-replication.bat [SUFFIX_OF_FB_SERVICE] + where SUFFIX_OF_FB_SERVICE is ending part of FB service which you want to check: + DefaultInstance ; 40ss ; 40cs ; 50ss ; 50cs etc + 5) batch 'setup-fb-for-replication.bat' will: + * stop selected FB instance + * create test databases (in !fbt_repo!/tmp); + * prepare journal/archive sub-folders for replication (also in !fbt_repo!/tmp); + * replace %fb_home%/replication.conf with apropriate + * start selected FB instance + 6) run this test (FB instance will be already launched by setup-fb-for-replication.bat): + %fpt_repo%/fbt-run2.bat dblevel-triggers-must-not-fire-on-replica.fbt 50ss, etc + + Confirmed bug on 4.0.0.2465, got in replication.log: + * Added 1 segment(s) to the processing queue + * Segment 1 replication failure at offset 150 + * Blob 128.480 is not found for table TEST + After this replication of segment 1 unable to continue and issues repeating messages about added segments. + + Checked on: + 4.0.0.2479 SS; 4.0.1.2519 CS; 5.0.0.82 SS/CS. +FBTEST: functional.replication.updating_blob_with_empty_string_stops_replication +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +substitutions = [('Start removing objects in:.*', 'Start removing objects'), + ('Finish. Total objects removed: [1-9]\\d*', 'Finish. Total objects removed'), + ('.* CREATE DATABASE .*', '')] -substitutions_1 = [('Start removing objects in:.*', 'Start removing objects'), ('Finish. Total objects removed: [1-9]\\d*', 'Finish. Total objects removed'), ('.* CREATE DATABASE .*', '')] +db = db_factory() -init_script_1 = """""" +act = python_act('db', substitutions=substitutions) -db_1 = db_factory(sql_dialect=3, init=init_script_1) +expected_stdout = """ + POINT-1 FOUND message about replicated segment. + EMPTY_STRING_BLOB_ON_REPLICA 1 + Start removing objects + Finish. Total objects removed + POINT-2 FOUND message about replicated segment. +""" + +@pytest.mark.skip('FIXME: Not IMPLEMENTED') +@pytest.mark.version('>=4.0') +@pytest.mark.platform('Windows') +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") # test_script_1 #--- -# +# # import os # import subprocess # import re # import difflib # import shutil # import time -# +# # os.environ["ISC_USER"] = user_name # os.environ["ISC_PASSWORD"] = user_password -# +# # ##################################### # MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 65 # ##################################### -# +# # svc = fdb.services.connect(host='localhost', user=user_name, password=user_password) # FB_HOME = svc.get_home_directory() # svc.close() -# +# # engine = db_conn.engine_version # 4.0; 4.1; 5.0 etc -- type float # fb_major = 'fb' + str(engine)[:1] + '0' # 'fb40'; 'fb50' -# +# # cur = db_conn.cursor() # cur.execute("select rdb$config_value from rdb$config where upper(rdb$config_name) = upper('ServerMode')") # server_mode = 'XX' @@ -135,34 +138,34 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # elif r[0] == 'Classic': # server_mode = 'CS' # cur.close() -# +# # # 'fbt-main.fb50.ss.fdb' etc: # db_main = os.path.join( context['temp_directory'], 'fbt-main.' + fb_major + '.' 
+ server_mode + '.fdb' ) # db_repl = db_main.replace( 'fbt-main.', 'fbt-repl.') -# +# # # Folders for journalling and archieving segments. # repl_journal_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.journal' ) # repl_archive_dir = os.path.join( context['temp_directory'], 'fb-replication.' + fb_major + '.' + server_mode + '.archive' ) -# +# # db_conn.close() -# +# # #-------------------------------------------- -# +# # def flush_and_close( file_handle ): # # https://docs.python.org/2/library/os.html#os.fsync # # If you're starting with a Python file object f, # # first do f.flush(), and # # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. # global os -# +# # file_handle.flush() # if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: # # otherwise: "OSError: [Errno 9] Bad file descriptor"! # os.fsync(file_handle.fileno()) # file_handle.close() -# +# # #-------------------------------------------- -# +# # def cleanup( f_names_list ): # global os # for i in range(len( f_names_list )): @@ -173,22 +176,22 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # else: # print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.') # del_name = None -# +# # if del_name and os.path.isfile( del_name ): # os.remove( del_name ) -# +# # #-------------------------------------------- -# +# # def wait_for_data_in_replica( fb_home, max_allowed_time_for_wait, db_main, prefix_msg = '' ): -# +# # global re # global difflib # global time -# +# # replold_lines = [] # with open( os.path.join(fb_home,'replication.log'), 'r') as f: # replold_lines = f.readlines() -# +# # con = fdb.connect( dsn = 'localhost:' + db_main, no_db_triggers = 1) # cur = con.cursor() # cur.execute("select rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') from rdb$database") @@ -196,127 +199,127 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # last_generated_repl_segment = r[0] # cur.close() # con.close() -# +# # #print('last_generated_repl_segment:', last_generated_repl_segment) -# +# # # VERBOSE: Segment 1 (2582 bytes) is replicated in 1 second(s), deleting the file # p=re.compile( '\\+\\s+verbose:\\s+segment\\s+%(last_generated_repl_segment)s\\s+\\(\\d+\\s+bytes\\)\\s+is\\s+replicated.*deleting' % locals(), re.IGNORECASE) -# +# # found_required_message = False # for i in range(0,max_allowed_time_for_wait): # time.sleep(1) -# +# # # Get content of fb_home replication.log _after_ isql finish: # f_repllog_new = open( os.path.join(fb_home,'replication.log'), 'r') # diff_data = difflib.unified_diff( -# replold_lines, +# replold_lines, # f_repllog_new.readlines() # ) # f_repllog_new.close() -# +# # for k,d in enumerate(diff_data): # if p.search(d): # print( (prefix_msg + ' ' if prefix_msg else '') + 'FOUND message about replicated segment.' ) # found_required_message = True # break -# +# # if found_required_message: # break -# +# # if not found_required_message: # print('UNEXPECTED RESULT: no message about replicated segment for %d seconds.' 
% max_allowed_time_for_wait) -# +# # #-------------------------------------------- -# +# # sql_ddl = ''' set bail on; # set list on; -# +# # recreate table test(id int primary key, b blob); # insert into test(id, b) values(1, null); # update test set b = '' where id = 1; # commit; -# +# # select rdb$get_context('SYSTEM','REPLICATION_SEQUENCE') as last_generated_repl_segment from rdb$database; # quit; # ''' % locals() -# -# +# +# # f_sql_chk = open( os.path.join(context['temp_directory'],'tmp_gh_6795_test.sql'), 'w') # f_sql_chk.write(sql_ddl) # flush_and_close( f_sql_chk ) -# +# # f_sql_log = open( ''.join( (os.path.splitext(f_sql_chk.name)[0], '.log' ) ), 'w') # f_sql_err = open( ''.join( (os.path.splitext(f_sql_chk.name)[0], '.err' ) ), 'w') # subprocess.call( [ context['isql_path'], 'localhost:' + db_main, '-i', f_sql_chk.name ], stdout = f_sql_log, stderr = f_sql_err) # flush_and_close( f_sql_log ) # flush_and_close( f_sql_err ) -# +# # last_generated_repl_segment = 0 -# +# # with open(f_sql_err.name,'r') as f: # for line in f: # print('UNEXPECTED STDERR in initial SQL: ' + line) # MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0 -# +# # if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> initial SQL script finished w/o errors -# +# # ############################################################################## # ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### # ############################################################################## # wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-1' ) -# +# # runProgram('isql', ['localhost:' + db_repl, '-nod'], "set list on; select count(*) as empty_string_blob_on_replica from test where b = '' and id = 1;") -# +# # # return initial state of master DB: # # remove all DB objects (tables, views, ...): # # -------------------------------------------- # sql_clean_ddl = os.path.join(context['files_location'],'drop-all-db-objects.sql') -# +# # f_clean_log=open( os.path.join(context['temp_directory'],'tmp_gh_6795_drop-all-db-objects.log'), 'w') # f_clean_err=open( ''.join( ( os.path.splitext(f_clean_log.name)[0], '.err') ), 'w') # subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-i', sql_clean_ddl], stdout=f_clean_log, stderr=f_clean_err ) # flush_and_close(f_clean_log) # flush_and_close(f_clean_err) -# +# # with open(f_clean_err.name,'r') as f: # for line in f: # print('UNEXPECTED STDERR in cleanup SQL: ' + line) # MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG = 0 -# +# # with open(f_clean_log.name,'r') as f: # for line in f: # # show number of dropped objects # print(line) -# +# # if MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG: # ==> initial SQL script finished w/o errors -# +# # ############################################################################## # ### W A I T U N T I L R E P L I C A B E C O M E S A C T U A L ### # ############################################################################## # wait_for_data_in_replica( FB_HOME, MAX_TIME_FOR_WAIT_SEGMENT_IN_LOG, db_main, 'POINT-2' ) -# +# # f_main_meta_sql=open( os.path.join(context['temp_directory'],'tmp_gh_6795_db_main_meta.sql'), 'w') # subprocess.call( [context['isql_path'], 'localhost:' + db_main, '-q', '-nod', '-ch', 'utf8', '-x'], stdout=f_main_meta_sql, stderr=subprocess.STDOUT ) # flush_and_close( f_main_meta_sql ) -# +# # f_repl_meta_sql=open( os.path.join(context['temp_directory'],'tmp_gh_6795_db_repl_meta.sql'), 'w') # subprocess.call( [context['isql_path'], 'localhost:' + db_repl, '-q', '-nod', '-ch', 'utf8', '-x'], 
stdout=f_repl_meta_sql, stderr=subprocess.STDOUT ) # flush_and_close( f_repl_meta_sql ) -# +# # db_main_meta=open(f_main_meta_sql.name, 'r') # db_repl_meta=open(f_repl_meta_sql.name, 'r') -# +# # diffmeta = ''.join(difflib.unified_diff( -# db_main_meta.readlines(), +# db_main_meta.readlines(), # db_repl_meta.readlines() # )) # db_main_meta.close() # db_repl_meta.close() -# +# # f_meta_diff=open( os.path.join(context['temp_directory'],'tmp_gh_6795_db_meta_diff.txt'), 'w', buffering = 0) # f_meta_diff.write(diffmeta) # flush_and_close( f_meta_diff ) -# +# # # Following must issue only TWO rows: # # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_main]' ... */ # # UNEXPECTED METADATA DIFF.: -/* CREATE DATABASE 'localhost:[db_repl]' ... */ @@ -325,7 +328,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # for line in f: # if line[:1] in ('-', '+') and line[:3] not in ('---','+++'): # print('UNEXPECTED METADATA DIFF.: ' + line) -# +# # ###################### # ### A C H T U N G ### # ###################### @@ -334,27 +337,10 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # runProgram('gfix', ['-sweep', 'localhost:' + db_repl]) # runProgram('gfix', ['-sweep', 'localhost:' + db_main]) # ####################### -# -# +# +# # # cleanup: # ########## # cleanup( (f_sql_chk, f_sql_log, f_sql_err,f_clean_log,f_clean_err,f_main_meta_sql,f_repl_meta_sql,f_meta_diff) ) -# +# #--- -act_1 = python_act('db_1', substitutions=substitutions_1) - -expected_stdout_1 = """ - POINT-1 FOUND message about replicated segment. - EMPTY_STRING_BLOB_ON_REPLICA 1 - Start removing objects - Finish. Total objects removed - POINT-2 FOUND message about replicated segment. -""" - -@pytest.mark.version('>=4.0') -@pytest.mark.platform('Windows') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - - diff --git a/tests/functional/role/create/test_01.py b/tests/functional/role/create/test_01.py index a71e6e33..628a6f2f 100644 --- a/tests/functional/role/create/test_01.py +++ b/tests/functional/role/create/test_01.py @@ -1,38 +1,27 @@ #coding:utf-8 -# -# id: functional.role.create.01 -# title: CREATE ROLE -# decription: -# CREATE ROLE -# Dependencies: -# CREATE DATABASE -# Basic SELECT -# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 3.0, 4.0 -# qmid: functional.role.create.create_role_01 + +""" +ID: role.create-01 +TITLE: CREATE ROLE +DESCRIPTION: +FBTEST: functional.role.create.01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory() -substitutions_1 = [('SQL\\$.*', 'SQLnnnn')] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ create role test; commit; set list on; select * from rdb$roles order by rdb$role_name; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('SQL\\$.*', 'SQLnnnn')]) + +# version: 3.0 expected_stdout_1 = """ RDB$ROLE_NAME RDB$ADMIN @@ -40,7 +29,7 @@ expected_stdout_1 = """ RDB$DESCRIPTION RDB$SYSTEM_FLAG 1 RDB$SECURITY_CLASS SQLnnnn - + RDB$ROLE_NAME TEST RDB$OWNER_NAME SYSDBA RDB$DESCRIPTION @@ -49,28 +38,12 @@ expected_stdout_1 = """ """ @pytest.mark.version('>=3.0,<4.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +def test_1(act: Action): + act.expected_stdout = expected_stdout_1 + 
act.execute() + assert act.clean_stdout == act.clean_expected_stdout # version: 4.0 -# resources: None - -substitutions_2 = [('SQL\\$.*', 'SQLnnnn')] - -init_script_2 = """""" - -db_2 = db_factory(sql_dialect=3, init=init_script_2) - -test_script_2 = """ - create role test; - commit; - set list on; - select * from rdb$roles order by rdb$role_name; -""" - -act_2 = isql_act('db_2', test_script_2, substitutions=substitutions_2) expected_stdout_2 = """ RDB$ROLE_NAME RDB$ADMIN @@ -89,8 +62,7 @@ expected_stdout_2 = """ """ @pytest.mark.version('>=4.0') -def test_2(act_2: Action): - act_2.expected_stdout = expected_stdout_2 - act_2.execute() - assert act_2.clean_stdout == act_2.clean_expected_stdout - +def test_2(act: Action): + act.expected_stdout = expected_stdout_2 + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/role/create/test_02.py b/tests/functional/role/create/test_02.py index cd4bf854..773416cc 100644 --- a/tests/functional/role/create/test_02.py +++ b/tests/functional/role/create/test_02.py @@ -1,42 +1,28 @@ #coding:utf-8 -# -# id: functional.role.create.02 -# title: CREATE ROLE - try create role with same name -# decription: CREATE ROLE - try create role with same name -# -# Dependencies: -# CREATE DATABASE -# tracker_id: -# min_versions: [] -# versions: 3.0 -# qmid: functional.role.create.create_role_02 + +""" +ID: role.create-02 +TITLE: CREATE ROLE - try create role with same name +DESCRIPTION: +FBTEST: functional.role.create.02 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory() +test_role = role_factory('db', name='test') -substitutions_1 = [] +act = isql_act('db', "CREATE ROLE test;") -init_script_1 = """CREATE ROLE test; -commit;""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """CREATE ROLE test;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stderr_1 = """Statement failed, SQLSTATE = 42000 +expected_stderr = """Statement failed, SQLSTATE = 42000 unsuccessful metadata update -CREATE ROLE TEST failed -SQL role TEST already exists """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - +def test_1(act: Action, test_role): + act.expected_stderr = expected_stderr + act.execute() + assert act.clean_stderr == act.clean_expected_stderr diff --git a/tests/functional/services/test_role_in_service_attachment.py b/tests/functional/services/test_role_in_service_attachment.py index 13f89f8a..03c70518 100644 --- a/tests/functional/services/test_role_in_service_attachment.py +++ b/tests/functional/services/test_role_in_service_attachment.py @@ -1,31 +1,32 @@ #coding:utf-8 -# -# id: functional.services.role_in_service_attachment -# title: Check that trace plugin shows ROLE used in service attachment. Format: USER[:ROLE]. -# decription: -# See: https://github.com/FirebirdSQL/firebird/commit/dd241208f203e54a9c5e9b8b24c0ef24a4298713 -# Checked on 4.0.0.2070 SS/CS. -# -# tracker_id: -# min_versions: ['4.0'] -# versions: 4.0 -# qmid: + +""" +ID: services.role-in-service-attachment +TITLE: Check that trace plugin shows ROLE used in service attachment. 
Format: USER[:ROLE] +DESCRIPTION: + See: https://github.com/FirebirdSQL/firebird/commit/dd241208f203e54a9c5e9b8b24c0ef24a4298713 +FBTEST: functional.services.role_in_service_attachment +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() -substitutions_1 = [] +act = python_act('db') -init_script_1 = """""" +expected_stdout = """ + EXPECTED output found in the trace log +""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +@pytest.mark.skip('FIXME: Not IMPLEMENTED') +@pytest.mark.version('>=4.0') +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") # test_script_1 #--- -# +# # import sys # import os # import re @@ -33,28 +34,28 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # import time # from fdb import services # from subprocess import Popen -# +# # os.environ["ISC_USER"] = user_name # os.environ["ISC_PASSWORD"] = user_password # db_conn.close() -# +# # #-------------------------------------------- -# +# # def flush_and_close( file_handle ): # # https://docs.python.org/2/library/os.html#os.fsync -# # If you're starting with a Python file object f, -# # first do f.flush(), and +# # If you're starting with a Python file object f, +# # first do f.flush(), and # # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. # global os -# +# # file_handle.flush() # if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: # # otherwise: "OSError: [Errno 9] Bad file descriptor"! # os.fsync(file_handle.fileno()) # file_handle.close() -# +# # #-------------------------------------------- -# +# # def cleanup( f_names_list ): # global os # for f in f_names_list: @@ -65,12 +66,12 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # else: # print('Unrecognized type of element:', f, ' - can not be treated as file.') # del_name = None -# +# # if del_name and os.path.isfile( del_name ): # os.remove( del_name ) -# +# # #-------------------------------------------- -# +# # f_sql_cmd = open( os.path.join(context['temp_directory'],'tmp_trace_prepare.sql'), 'w', buffering=0) # f_sql_txt=''' # set wng off; @@ -95,17 +96,17 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # ''' # f_sql_cmd.write(f_sql_txt) # flush_and_close(f_sql_cmd) -# +# # f_prepare_log=open( os.path.join(context['temp_directory'],'tmp_trace_prepare.log'), 'w', buffering=0) # subprocess.call( [ context['isql_path'], dsn, "-i", f_sql_cmd.name ], stdout=f_prepare_log, stderr=subprocess.STDOUT ) # flush_and_close(f_prepare_log) -# +# # txt = ''' # database= # { # enabled = false # } -# services +# services # { # enabled = true # log_initfini = false @@ -114,37 +115,37 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # log_errors = true # log_warnings = false # } -# +# # ''' -# +# # f_trc_cfg=open( os.path.join(context['temp_directory'],'tmp_trace_test.cfg'), 'w', buffering = 0) # f_trc_cfg.write(txt) # flush_and_close(f_trc_cfg) -# +# # # ############################################################## # # S T A R T T R A C E i n S E P A R A T E P R O C E S S # # ############################################################## -# +# # f_trc_log=open( os.path.join(context['temp_directory'],'tmp_trace_test.log'), "w", buffering = 0) # f_trc_err=open( os.path.join(context['temp_directory'],'tmp_trace_test.err'), "w", buffering = 0) -# +# # # ::: NB ::: DO NOT USE 'localhost:service_mgr' here! 
Use only local protocol: # p_trace = Popen( [ context['fbsvcmgr_path'], 'localhost:service_mgr', 'user', 'tmp$watcher', 'password', '123', 'role', 'r4watch', 'action_trace_start' , 'trc_cfg', f_trc_cfg.name],stdout=f_trc_log,stderr=f_trc_err) -# +# # time.sleep(1) -# +# # # #################################################### # # G E T A C T I V E T R A C E S E S S I O N I D # # #################################################### # # Save active trace session info into file for further parsing it and obtain session_id back (for stop): -# +# # f_trc_lst = open( os.path.join(context['temp_directory'],'tmp_trace_test.lst'), 'w', buffering = 0) # subprocess.call([context['fbsvcmgr_path'], 'localhost:service_mgr', 'action_trace_list'], stdout=f_trc_lst) # flush_and_close(f_trc_lst) -# +# # # !!! DO NOT REMOVE THIS LINE !!! # time.sleep(1) -# +# # trcssn=0 # with open( f_trc_lst.name,'r') as f: # for line in f: @@ -156,10 +157,10 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # i=i+1 # break # # Result: `trcssn` is ID of active trace session. Now we have to terminate it: -# +# # # Here we are waiting for trace log will be fulfilled with data related to SERVICE activity, namely: trace session that was just started. # time.sleep(3) -# +# # # #################################################### # # S E N D R E Q U E S T T R A C E T O S T O P # # #################################################### @@ -169,14 +170,14 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # fn_nul.close() # # DO NOT REMOVE THIS LINE: # time.sleep(1) -# +# # p_trace.terminate() # flush_and_close(f_trc_log) # flush_and_close(f_trc_err) -# +# # p_new = re.compile('service_mgr.*\\s+tmp\\$watcher:r4watch,.*', re.IGNORECASE) # p_old = re.compile('service_mgr.*\\s+tmp\\$watcher,.*', re.IGNORECASE) -# +# # # Check: # ######### # # Log of preparation .sql must be empty: @@ -184,8 +185,8 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # for line in f: # if line.split(): # print('UNEXPECTED output in '+f_prepare_log.name+': '+line) -# -# # Trace STDOUT must contain line like: +# +# # Trace STDOUT must contain line like: # # service_mgr, (Service 0000000004F893C0, TMP$WATCHER:r4watch, TCPv6:::1/55274, C:\\FB SS # bsvcmgr.exe:6752) # with open( f_trc_log.name,'r') as f: @@ -197,28 +198,16 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # print('ERROR: trace output contains only USER, without ROLE.') # else: # print('ERROR: format in the trace log differs from expected.') -# +# # # Trace STDERR must be empty: # with open( f_trc_err.name,'r') as f: # for line in f: # if line.split(): # print('UNEXPECTED STDERR in '+f_trc_err.name+': '+line) -# -# +# +# # # Cleanup: # ########### # cleanup( ( f_sql_cmd, f_prepare_log, f_trc_log, f_trc_err, f_trc_cfg, f_trc_lst ) ) -# +# #--- -act_1 = python_act('db_1', substitutions=substitutions_1) - -expected_stdout_1 = """ - EXPECTED output found in the trace log -""" - -@pytest.mark.version('>=4.0') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - - diff --git a/tests/functional/services/test_user_management.py b/tests/functional/services/test_user_management.py index 32b23d88..e306be3d 100644 --- a/tests/functional/services/test_user_management.py +++ b/tests/functional/services/test_user_management.py @@ -1,101 +1,32 @@ #coding:utf-8 -# -# id: functional.services.user_management -# title: Check ability to make connect to FB services and add/drop user. 
-# decription: -# We check here: -# 1) FB services features which add and remove user; -# 2) Python fdb driver functions (from class Connection): add_user(), get_users() and remove_user() -# -# NB. -# User with name 'tmp$test$user$' must NOT present in security_db. -# Correctness of adding user is verified by establishing TCP-based attachment to test DB using its login/password. -# -# See doc: -# https://firebirdsql.org/file/documentation/drivers_documentation/python/fdb/reference.html#fdb.services.Connection.add_user -# https://firebirdsql.org/file/documentation/drivers_documentation/python/fdb/usage-guide.html#user-maintanance -# -# Checked on: -# FB25Cs, build 2.5.8.27067: OK, 1.015s. -# FB25SC, build 2.5.8.27070: OK, 0.813s. -# fb30Cs, build 3.0.3.32805: OK, 2.297s. -# FB30SS, build 3.0.3.32813: OK, 2.109s. -# FB40CS, build 4.0.0.748: OK, 2.859s. -# FB40SS, build 4.0.0.767: OK, 2.000s. -# -# tracker_id: -# min_versions: ['2.5.8'] -# versions: 2.5.8 -# qmid: + +""" +ID: services.user-management +TITLE: Check ability to make connect to FB services and add/drop user +DESCRIPTION: + We check here: + 1) FB services features which add and remove user; + 2) Python firebird-driver functions (from class Server) + + NB. + User with name 'tmp$test$user$' must NOT present in security_db. + Correctness of adding user is verified by establishing TCP-based attachment to test DB using its login/password. + + See doc: + https://firebird-driver.readthedocs.io/en/latest/usage-guide.html#user-maintenance + https://firebird-driver.readthedocs.io/en/latest/ref-core.html#firebird.driver.core.Server.user + https://firebird-driver.readthedocs.io/en/latest/ref-core.html#serveruserservices +FBTEST: functional.services.user_management +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 2.5.8 -# resources: None +db = db_factory() -substitutions_1 = [] +act = python_act('db') -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -# test_script_1 -#--- -# -# import os -# import subprocess -# from fdb import services -# -# TEST_USER_NAME='tmp$test$user$'.upper() -# db_conn.close() -# -# svc_con = services.connect( host='localhost', user = user_name, password = user_password ) -# -# u01 = services.User(TEST_USER_NAME) -# u01.password = 'QweRty' -# u01.first_name = 'Foo' -# u01.last_name = 'Bar' -# -# -# print('Adding user.') -# svc_con.add_user(u01) -# print('Done.') -# -# usr_list = svc_con.get_users() -# print('Search in users list.') -# for u in usr_list: -# if u.name == TEST_USER_NAME: -# print('Found user:', u.name) -# -# sql=''' -# select -# mon$user as user_connected -# ,iif( upper(mon$remote_protocol) starting with upper('TCP'), 'TCP', coalesce(mon$remote_protocol, '') ) as protocol_info -# from mon$attachments -# where mon$attachment_id = current_connection -# ''' -# -# try: -# print('Trying to establish connection.') -# usr_con = fdb.connect( dsn = dsn, user=TEST_USER_NAME, password='QweRty') -# cur2 = usr_con.cursor() -# cur2.execute( sql ) -# for r in cur2: -# for i in range(0,len(r)): -# print( ''.join( ( r[i] ) ) ) -# print('Done.') -# finally: -# usr_con.close() -# -# -# print('Removing user.') -# svc_con.remove_user(u01) -# print('Done.') -#--- -act_1 = python_act('db_1', substitutions=substitutions_1) - -expected_stdout_1 = """ +expected_stdout = """ Adding user. Done. Search in users list. @@ -108,9 +39,61 @@ expected_stdout_1 = """ Done. 
""" -@pytest.mark.version('>=2.5.8') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - +@pytest.mark.skip('FIXME: Not IMPLEMENTED') +@pytest.mark.version('>=3') +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") +# test_script_1 +#--- +# +# import os +# import subprocess +# from fdb import services +# +# TEST_USER_NAME='tmp$test$user$'.upper() +# db_conn.close() +# +# svc_con = services.connect( host='localhost', user = user_name, password = user_password ) +# +# u01 = services.User(TEST_USER_NAME) +# u01.password = 'QweRty' +# u01.first_name = 'Foo' +# u01.last_name = 'Bar' +# +# +# print('Adding user.') +# svc_con.add_user(u01) +# print('Done.') +# +# usr_list = svc_con.get_users() +# print('Search in users list.') +# for u in usr_list: +# if u.name == TEST_USER_NAME: +# print('Found user:', u.name) +# +# sql=''' +# select +# mon$user as user_connected +# ,iif( upper(mon$remote_protocol) starting with upper('TCP'), 'TCP', coalesce(mon$remote_protocol, '') ) as protocol_info +# from mon$attachments +# where mon$attachment_id = current_connection +# ''' +# +# try: +# print('Trying to establish connection.') +# usr_con = fdb.connect( dsn = dsn, user=TEST_USER_NAME, password='QweRty') +# cur2 = usr_con.cursor() +# cur2.execute( sql ) +# for r in cur2: +# for i in range(0,len(r)): +# print( ''.join( ( r[i] ) ) ) +# print('Done.') +# finally: +# usr_con.close() +# +# +# print('Removing user.') +# svc_con.remove_user(u01) +# print('Done.') +#--- diff --git a/tests/functional/session/test_alter_session_reset.py b/tests/functional/session/test_alter_session_reset.py index 3d244882..9f2199cb 100644 --- a/tests/functional/session/test_alter_session_reset.py +++ b/tests/functional/session/test_alter_session_reset.py @@ -1,38 +1,28 @@ #coding:utf-8 -# -# id: functional.session.alter_session_reset -# title: -# Test results of ALTER SESSION RESET -# -# decription: -# Checked on FB40SS, build 4.0.0.1166: OK, 4.329s. -# 31.10.2019. Refactoring: -# * remove IDs of attachment/transaction from output. -# * replaced mon$isolation_mode with its describing text - take in account that in FB 4.0 -# READ CONSISTENCY is default isolation mode for READ COMMITTED Tx. -# -# Checked on: -# 4.0.0.1635 SS: 2.049s. -# 4.0.0.1633 CS: 2.266s. -# -# tracker_id: -# min_versions: ['4.0.0'] -# versions: 4.0 -# qmid: None + +""" +ID: session.alter-session-reset +TITLE: Test results of ALTER SESSION RESET +DESCRIPTION: +NOTES: +[31.10.2019] + Refactoring: + * remove IDs of attachment/transaction from output. + * replaced mon$isolation_mode with its describing text - take in account that in FB 4.0 + READ CONSISTENCY is default isolation mode for READ COMMITTED Tx. 
+FBTEST: functional.session.alter_session_reset +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +substitutions = [('-At line[:]{0,1}[\\s]+[\\d]+,[\\s]+column[:]{0,1}[\\s]+[\\d]+', ''), + ('line[:]{0,1}[\\s]+[\\d]+,[\\s]+col[:]{0,1}[\\s]+[\\d]+', ''), + ('[-]{0,1}Effective user is.*', 'Effective user')] -substitutions_1 = [('-At line[:]{0,1}[\\s]+[\\d]+,[\\s]+column[:]{0,1}[\\s]+[\\d]+', ''), ('line[:]{0,1}[\\s]+[\\d]+,[\\s]+col[:]{0,1}[\\s]+[\\d]+', ''), ('[-]{0,1}Effective user is.*', 'Effective user')] +db = db_factory() -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ --set bail on; -- set wng off; set autoddl off; @@ -104,7 +94,7 @@ test_script_1 = """ commit; recreate view v_info as - select + select current_user as my_name ,current_role as my_role ,t.mon$lock_timeout as tx_lock_timeout @@ -114,7 +104,7 @@ test_script_1 = """ -- 15.01.2019: removed detailed info about read committed TIL because of read consistency TIL that 4.0 introduces. -- Any record with t.mon$isolation_mode = 4 now is considered just as read committed, w/o any detalization (this not much needed here). ,decode( t.mon$isolation_mode, 0,'CONSISTENCY', 1,'SNAPSHOT', 2, 'READ_COMMITTED', 3, 'READ_COMMITTED', 4, 'READ_COMMITTED', '??' ) as isol_descr - from mon$transactions t + from mon$transactions t where t.mon$attachment_id = current_connection; commit; @@ -155,11 +145,11 @@ test_script_1 = """ select 'Point before call sp_decfloat_test with trap settings: {Division_by_zero, Invalid_operation, Overflow}' as msg from rdb$database; select * from sp_decfloat_test; - + -- this will CLEAR previous trap settings which were: {Division_by_zero, Invalid_operation, Overflow} set decfloat traps to Inexact; - + select 'Point before call sp_decfloat_test with trap settings: {Inexact}' as msg from rdb$database; -- Should raise: -- Statement failed, SQLSTATE = 22000 @@ -212,9 +202,9 @@ test_script_1 = """ """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=substitutions) -expected_stdout_1 = """ +expected_stdout = """ MSG Point before call sp_decfloat_test with trap settings: {Division_by_zero, Invalid_operation, Overflow} RAISED_GDS 335545142 RAISED_SQLST 22003 @@ -256,7 +246,8 @@ expected_stdout_1 = """ CONTEXT_VAR_VALUE """ -expected_stderr_1 = """ + +expected_stderr = """ Statement failed, SQLSTATE = 22003 Decimal float overflow. The exponent of a result is greater than the magnitude allowed. 
-At procedure 'SP_DECFLOAT_TEST' line: 17, col: 13 @@ -280,11 +271,9 @@ expected_stderr_1 = """ """ @pytest.mark.version('>=4.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.expected_stderr = expected_stderr + act.execute() + assert (act.clean_stderr == act.clean_expected_stderr and + act.clean_stdout == act.clean_expected_stdout) diff --git a/tests/functional/session/test_alter_session_reset_allow_2pc_prepared.py b/tests/functional/session/test_alter_session_reset_allow_2pc_prepared.py index af18e772..f15e459a 100644 --- a/tests/functional/session/test_alter_session_reset_allow_2pc_prepared.py +++ b/tests/functional/session/test_alter_session_reset_allow_2pc_prepared.py @@ -1,161 +1,148 @@ #coding:utf-8 -# -# id: functional.session.alter_session_reset_allow_2pc_prepared -# title: -# ALTER SESSION RESET: do NOT raise error if prepared 2PC transactions present. -# -# decription: -# Test issue from CORE-5832 about ALTER SESSION RESET: -# "throw error (isc_ses_reset_err) if any open transaction exist in current conneciton, -# except of ... prepared 2PC transactions which is allowed and ignored by this check" -# -# We create two databases with table (id int, x int) in each of them. -# Then we create two connections (one per each DB). -# -# These connections are added to the instance of fdb.ConnectionGroup() in order to have -# ability to use 2PC mechanism. -# -# In the first connection we start TWO transactions, in the second it is enough to start one. -# Then we do trivial DML in each of these three transactions: insert one row in a table. -# -# Finally, we run prepare() method in one of pair transactions that belong to 1st connection. -# After this, we must be able to run ALTER SESSION RESET in the *second* Tx of this pair, and -# this statement must NOT raise any error. -# -# NB! Without prepare() such action must lead to exception: -# "SQLCODE: -901 / Cannot reset user session / There are open transactions (2 active)" -# -# Output of this test must remain EMPTY. -# -# Checked on 4.0.0.2307 SS/CS. -# -# tracker_id: -# min_versions: ['4.0.0'] -# versions: 4.0 -# qmid: None + +""" +ID: session.alter-session-reset-allow-2pc-prepared +ISSUE: 6093 +TITLE: ALTER SESSION RESET: do NOT raise error if prepared 2PC transactions present +DESCRIPTION: + Test issue about ALTER SESSION RESET: + "throw error (isc_ses_reset_err) if any open transaction exist in current conneciton, + except of ... prepared 2PC transactions which is allowed and ignored by this check" + + We create two databases with table (id int, x int) in each of them. + Then we create two connections (one per each DB). + + These connections are added to the instance of fdb.ConnectionGroup() in order to have + ability to use 2PC mechanism. + + In the first connection we start TWO transactions, in the second it is enough to start one. + Then we do trivial DML in each of these three transactions: insert one row in a table. + + Finally, we run prepare() method in one of pair transactions that belong to 1st connection. + After this, we must be able to run ALTER SESSION RESET in the *second* Tx of this pair, and + this statement must NOT raise any error. + + NB! 
Without prepare() such action must lead to exception: + "SQLCODE: -901 / Cannot reset user session / There are open transactions (2 active)" + + Output of this test must remain EMPTY. +FBTEST: functional.session.alter_session_reset_allow_2pc_prepared +JIRA: CORE-5832 +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() -substitutions_1 = [] +act = python_act('db') -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) +@pytest.mark.skip('FIXME: Not IMPLEMENTED') +@pytest.mark.version('>=4.0') +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") # test_script_1 #--- -# +# # import os # import sys # import time # import subprocess # import re # from fdb import services -# +# # os.environ["ISC_USER"] = user_name # os.environ["ISC_PASSWORD"] = user_password -# +# # #-------------------------------------------- -# +# # def cleanup( f_names_list ): # global os # for i in range(len( f_names_list )): # if os.path.isfile( f_names_list[i]): # os.remove( f_names_list[i] ) -# +# # #-------------------------------------------- -# +# # db_conn.close() # svc = services.connect(host='localhost', user=user_name, password=user_password) # fb_home = svc.get_home_directory() -# -# +# +# # DBNAME_A = os.path.join(context['temp_directory'],'tmp_2pc_a.fdb') # DBNAME_B = os.path.join(context['temp_directory'],'tmp_2pc_b.fdb') -# +# # if os.path.isfile(DBNAME_A): # os.remove(DBNAME_A) # if os.path.isfile(DBNAME_B): # os.remove(DBNAME_B) -# +# # con1 = fdb.create_database( dsn = 'localhost:' + DBNAME_A) # con2 = fdb.create_database( dsn = 'localhost:' + DBNAME_B) -# +# # con1.execute_immediate( 'create table test(id int, x int, constraint test_pk primary key(id) using index test_pk)' ) # con1.commit() -# +# # con2.execute_immediate( 'create table test(id int, x int, constraint test_pk primary key(id) using index test_pk)' ) # con2.commit() -# +# # con1.close() # con2.close() -# +# # cgr = fdb.ConnectionGroup() -# +# # con1 = fdb.connect( dsn = 'localhost:' + DBNAME_A) # con2 = fdb.connect( dsn = 'localhost:' + DBNAME_B) -# +# # cgr.add(con1) # cgr.add(con2) -# +# # # https://pythonhosted.org/fdb/reference.html#fdb.TPB # # https://pythonhosted.org/fdb/reference.html#fdb.Connection.trans -# +# # tx1a = con1.trans() # tx2 = con2.trans() -# +# # tx1b = con1.trans() -# +# # tx1a.begin() # tx2.begin() # tx1b.begin() -# +# # cur1a=tx1a.cursor() # cur2=tx2.cursor() # cur1b=tx1b.cursor() -# +# # cur1a.execute( "insert into test(id, x) values( ?, ? )", (1, 111) ) # cur2.execute( "insert into test(id, x) values( ?, ? )", (2, 222) ) # cur1b.execute( "insert into test(id, x) values( ?, ? )", (3, 333) ) -# +# # # ::: NB ::: WITHOUT following prepare() exception will raise: # # Error while executing SQL statement: / SQLCODE: -901 / Cannot reset user session / There are open transactions (2 active); -901; 335545206L # tx1a.prepare() -# +# # cur1b.execute( "alter session reset" ) -# +# # cur1a.close() # cur1b.close() # cur2.close() # tx1a.rollback() # tx1b.rollback() # tx2.rollback() -# +# # # ::: NB ::: # # We can NOT ecplicitly close connections that participate in ConnectionGroup. # # Exception will raise in that case: "Cannot close a connection that is a member of a ConnectionGroup." # #con1.close() # #con2.close() -# +# # cgr.clear() -# +# # # change state of test databases to full shutdown otherwise get "Object in use" (set linger = 0 does not help!) 
# runProgram('gfix',['localhost:' + DBNAME_A,'-shut','full','-force','0']) # runProgram('gfix',['localhost:' + DBNAME_B,'-shut','full','-force','0']) -# +# # cleanup( (DBNAME_A, DBNAME_B) ) -# +# #--- -act_1 = python_act('db_1', substitutions=substitutions_1) - - -@pytest.mark.version('>=4.0') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - - diff --git a/tests/functional/session/test_alter_session_reset_clear_gtt.py b/tests/functional/session/test_alter_session_reset_clear_gtt.py index b9226e99..a75dd5bc 100644 --- a/tests/functional/session/test_alter_session_reset_clear_gtt.py +++ b/tests/functional/session/test_alter_session_reset_clear_gtt.py @@ -1,55 +1,44 @@ #coding:utf-8 -# -# id: functional.session.alter_session_reset_clear_gtt -# title: -# ALTER SESSION RESET: clear contents of all used GTT ON COMMIT PRESERVE ROWS -# -# decription: -# Test issue from CORE-5832 about ALTER SESSION RESET: -# "clear contents of all used GLOBAL TEMPORARY TABLE ... ON COMMIT PRESERVE ROWS". -# -# NOTE. *** SET AUTODDL OFF REQUIRED *** -# Following is detailed explanation of this note: -# ======== -# Default ISQL behaviour is to start always *TWO* transactions (for DML and second for DDL) -# after previous commit / rollback and before *ANY* further satement is to be executed, except -# those which control ISQL itself (e.g. 'SET TERM'; 'IN ...'; 'SET BAIL' etc). -# So, even when statement has nothing to change, ISQL will start TWO transactions -# just before executing . -# This means that these transactions will start even if want to run 'ALTER SESSION RESET'. -# This, in turn, makes one of them (which must perform DDL) be 'active and NOT current' -# from ALTER SESSION point of view (which is run within DML transaction). -# -# According to description given in CORE-5832, ALTER SESSION throws error isc_ses_reset_err -# "if any open transaction exist in current conneciton, *except of current transaction* and -# prepared 2PC transactions which is allowed and ignored by this check". -# -# So, we have to prohibit 'autostart' of DDL-transaction because otherwise ALTER SESSION will -# throw: "SQLSTATE = 01002 / Cannot reset user session / -There are open transactions (2 active)". -# This is done by 'SET AUTODDL OFF' statement at the beginning of this test script. -# ======== -# -# Thanks to Vlad for explanations (discussed 18.01.2021). -# Checked on 4.0.0.2307 SS/CS. -# -# tracker_id: -# min_versions: ['4.0.0'] -# versions: 4.0 -# qmid: None + +""" +ID: session.alter-session-reset-clear-gtt +ISSUE: 6093 +TITLE: ALTER SESSION RESET: clear contents of all used GTT ON COMMIT PRESERVE ROWS +DESCRIPTION: + Test issue about ALTER SESSION RESET: + "clear contents of all used GLOBAL TEMPORARY TABLE ... ON COMMIT PRESERVE ROWS". + + NOTE. *** SET AUTODDL OFF REQUIRED *** + Following is detailed explanation of this note: + + Default ISQL behaviour is to start always *TWO* transactions (for DML and second for DDL) + after previous commit / rollback and before *ANY* further satement is to be executed, except + those which control ISQL itself (e.g. 'SET TERM'; 'IN ...'; 'SET BAIL' etc). + So, even when statement has nothing to change, ISQL will start TWO transactions + just before executing . + This means that these transactions will start even if want to run 'ALTER SESSION RESET'. + This, in turn, makes one of them (which must perform DDL) be 'active and NOT current' + from ALTER SESSION point of view (which is run within DML transaction). 
+ + According to description given in #6093, ALTER SESSION throws error isc_ses_reset_err + "if any open transaction exist in current conneciton, *except of current transaction* and + prepared 2PC transactions which is allowed and ignored by this check". + + So, we have to prohibit 'autostart' of DDL-transaction because otherwise ALTER SESSION will + throw: "SQLSTATE = 01002 / Cannot reset user session / -There are open transactions (2 active)". + This is done by 'SET AUTODDL OFF' statement at the beginning of this test script. + + Thanks to Vlad for explanations (discussed 18.01.2021). +FBTEST: functional.session.alter_session_reset_clear_gtt +JIRA: CORE-5832 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() -substitutions_1 = [('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set list on; set autoddl off; commit; @@ -71,17 +60,16 @@ test_script_1 = """ """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ ID 1 Records affected: 1 Records affected: 0 """ @pytest.mark.version('>=4.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/session/test_alter_session_reset_clear_timeouts.py b/tests/functional/session/test_alter_session_reset_clear_timeouts.py index 4af5a49d..32388819 100644 --- a/tests/functional/session/test_alter_session_reset_clear_timeouts.py +++ b/tests/functional/session/test_alter_session_reset_clear_timeouts.py @@ -1,55 +1,44 @@ #coding:utf-8 -# -# id: functional.session.alter_session_reset_clear_timeouts -# title: -# ALTER SESSION RESET: reset session and statement timeouts to zero -# -# decription: -# Test issue from CORE-5832 about ALTER SESSION RESET: -# "reset session and statement timeouts to zero". -# -# NOTE. *** SET AUTODDL OFF REQUIRED *** -# Following is detailed explanation of this note: -# ======== -# Default ISQL behaviour is to start always *TWO* transactions (for DML and second for DDL) -# after previous commit / rollback and before *ANY* further satement is to be executed, except -# those which control ISQL itself (e.g. 'SET TERM'; 'IN ...'; 'SET BAIL' etc). -# So, even when statement has nothing to change, ISQL will start TWO transactions -# just before executing . -# This means that these transactions will start even if want to run 'ALTER SESSION RESET'. -# This, in turn, makes one of them (which must perform DDL) be 'active and NOT current' -# from ALTER SESSION point of view (which is run within DML transaction). -# -# According to description given in CORE-5832, ALTER SESSION throws error isc_ses_reset_err -# "if any open transaction exist in current conneciton, *except of current transaction* and -# prepared 2PC transactions which is allowed and ignored by this check". -# -# So, we have to prohibit 'autostart' of DDL-transaction because otherwise ALTER SESSION will -# throw: "SQLSTATE = 01002 / Cannot reset user session / -There are open transactions (2 active)". -# This is done by 'SET AUTODDL OFF' statement at the beginning of this test script. 
-# ======== -# -# Thanks to Vlad for explanations (discussed 18.01.2021). -# Checked on 4.0.0.2307 SS/CS. -# -# tracker_id: -# min_versions: ['4.0.0'] -# versions: 4.0 -# qmid: None + +""" +ID: session.alter-session-reset-clear-timeouts +ISSUE: 6093 +TITLE: ALTER SESSION RESET: reset session and statement timeouts to zero +DESCRIPTION: + Test issue about ALTER SESSION RESET: + "reset session and statement timeouts to zero". + + NOTE. *** SET AUTODDL OFF REQUIRED *** + Following is detailed explanation of this note: + + Default ISQL behaviour is to start always *TWO* transactions (for DML and second for DDL) + after previous commit / rollback and before *ANY* further satement is to be executed, except + those which control ISQL itself (e.g. 'SET TERM'; 'IN ...'; 'SET BAIL' etc). + So, even when statement has nothing to change, ISQL will start TWO transactions + just before executing . + This means that these transactions will start even if want to run 'ALTER SESSION RESET'. + This, in turn, makes one of them (which must perform DDL) be 'active and NOT current' + from ALTER SESSION point of view (which is run within DML transaction). + + According to description given in #6093, ALTER SESSION throws error isc_ses_reset_err + "if any open transaction exist in current conneciton, *except of current transaction* and + prepared 2PC transactions which is allowed and ignored by this check". + + So, we have to prohibit 'autostart' of DDL-transaction because otherwise ALTER SESSION will + throw: "SQLSTATE = 01002 / Cannot reset user session / -There are open transactions (2 active)". + This is done by 'SET AUTODDL OFF' statement at the beginning of this test script. + + Thanks to Vlad for explanations (discussed 18.01.2021). +FBTEST: functional.session.alter_session_reset_clear_timeouts +JIRA: CORE-5832 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() -substitutions_1 = [('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set bail on; set list on; set autoddl off; @@ -73,9 +62,9 @@ test_script_1 = """ """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ SESSION_IDLE_TIMEOUT_BEFORE_RESET 54000 STATEMENT_TIMEOUT_BEFORE_RESET 15000 @@ -84,8 +73,7 @@ expected_stdout_1 = """ """ @pytest.mark.version('>=4.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/session/test_alter_session_reset_decfloat.py b/tests/functional/session/test_alter_session_reset_decfloat.py index a042507f..23220608 100644 --- a/tests/functional/session/test_alter_session_reset_decfloat.py +++ b/tests/functional/session/test_alter_session_reset_decfloat.py @@ -1,63 +1,51 @@ #coding:utf-8 -# -# id: functional.session.alter_session_reset_decfloat -# title: -# ALTER SESSION RESET: DECFLOAT parameters must be returned to default values -# -# decription: -# Test issue from CORE-5832 about ALTER SESSION RESET: -# "DECFLOAT parameters (BIND, TRAP and ROUND) must be reset to default values". 
-# -# We change all three specified parameters and evaluate some expressions. -# Then we run RESET SESSION and evaluate the same expressions. -# Results must differ for all of them. -# -# NOTE-1. -# FDB driver 2.0.1 does not support DEFCFLOAT datatype (at least version 2.0.1), -# so test type must be ISQL. -# -# NOTE-2. *** SET AUTODDL OFF REQUIRED *** -# Following is detailed explanation of this note: -# ======== -# Default ISQL behaviour is to start always *TWO* transactions (for DML and second for DDL) -# after previous commit / rollback and before *ANY* further satement is to be executed, except -# those which control ISQL itself (e.g. 'SET TERM'; 'IN ...'; 'SET BAIL' etc). -# So, even when statement has nothing to change, ISQL will start TWO transactions -# just before executing . -# This means that these transactions will start even if want to run 'ALTER SESSION RESET'. -# This, in turn, makes one of them (which must perform DDL) be 'active and NOT current' -# from ALTER SESSION point of view (which is run within DML transaction). -# -# According to description given in CORE-5832, ALTER SESSION throws error isc_ses_reset_err -# "if any open transaction exist in current conneciton, *except of current transaction* and -# prepared 2PC transactions which is allowed and ignored by this check". -# -# So, we have to prohibit 'autostart' of DDL-transaction because otherwise ALTER SESSION will -# throw: "SQLSTATE = 01002 / Cannot reset user session / -There are open transactions (2 active)". -# This is done by 'SET AUTODDL OFF' statement at the beginning of this test script. -# ======== -# -# Thanks to Vlad for explanations (discussed 18.01.2021). -# Checked on 4.0.0.2307 SS/CS. -# -# tracker_id: -# min_versions: ['4.0.0'] -# versions: 4.0 -# qmid: None + +""" +ID: session.alter-session-reset-decfloat +ISSUE: 6093 +TITLE: ALTER SESSION RESET: DECFLOAT parameters must be returned to default values +DESCRIPTION: + Test issue about ALTER SESSION RESET: + "DECFLOAT parameters (BIND, TRAP and ROUND) must be reset to default values". + + We change all three specified parameters and evaluate some expressions. + Then we run RESET SESSION and evaluate the same expressions. + Results must differ for all of them. + + NOTE-1. + firebird-driver does support the DECFLOAT datatype, so the test does *not* need to use only ISQL. + + NOTE-2. *** SET AUTODDL OFF REQUIRED *** + Following is detailed explanation of this note: + + Default ISQL behaviour is to start always *TWO* transactions (for DML and second for DDL) + after previous commit / rollback and before *ANY* further statement is to be executed, except + those which control ISQL itself (e.g. 'SET TERM'; 'IN ...'; 'SET BAIL' etc). + So, even when a statement has nothing to change, ISQL will start TWO transactions + just before executing it. + This means that these transactions will start even if we want to run 'ALTER SESSION RESET'. + This, in turn, makes one of them (which must perform DDL) be 'active and NOT current' + from ALTER SESSION point of view (which is run within DML transaction). + + According to description given in #6093, ALTER SESSION throws error isc_ses_reset_err + "if any open transaction exist in current connection, *except of current transaction* and + prepared 2PC transactions which is allowed and ignored by this check". + + So, we have to prohibit 'autostart' of DDL-transaction because otherwise ALTER SESSION will + throw: "SQLSTATE = 01002 / Cannot reset user session / -There are open transactions (2 active)". 
+ This is done by 'SET AUTODDL OFF' statement at the beginning of this test script. + + Thanks to Vlad for explanations (discussed 18.01.2021). +FBTEST: functional.session.alter_session_reset_decfloat +JIRA: CORE-5832 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() -substitutions_1 = [('^((?!(sqltype|before_reset|after_reset)).)*$', ''), ('[ \t]+', ' '), ('.*alias.*', '')] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ recreate table test(a decfloat, b decfloat, c decfloat); insert into test(a,b,c) values(1608.90, 5.00, 100.00); @@ -98,32 +86,32 @@ test_script_1 = """ """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('^((?!(sqltype|before_reset|after_reset)).)*$', ''), + ('[ \t]+', ' '), ('.*alias.*', '')]) -expected_stdout_1 = """ +expected_stdout = """ 01: sqltype: 500 SHORT scale: 0 subtype: 0 len: 2 before_reset: check datatype 1235 - + before_reset: check division result 0 - + before_reset: check round result 81 - + 01: sqltype: 32760 DECFLOAT(16) scale: 0 subtype: 0 len: 8 after_reset: check datatype 1234.5678 after_reset: check round result 80.445 """ -expected_stderr_1 = """ + +expected_stderr = """ Statement failed, SQLSTATE = 22012 Decimal float divide by zero. The code attempted to divide a DECFLOAT value by zero. """ @pytest.mark.version('>=4.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.expected_stderr = expected_stderr + act.execute() + assert (act.clean_stderr == act.clean_expected_stderr and + act.clean_stdout == act.clean_expected_stdout) diff --git a/tests/functional/session/test_alter_session_reset_raise_if_open_tx.py b/tests/functional/session/test_alter_session_reset_raise_if_open_tx.py index 58105966..1203db28 100644 --- a/tests/functional/session/test_alter_session_reset_raise_if_open_tx.py +++ b/tests/functional/session/test_alter_session_reset_raise_if_open_tx.py @@ -1,50 +1,55 @@ #coding:utf-8 -# -# id: functional.session.alter_session_reset_raise_if_open_tx -# title: -# ALTER SESSION RESET: throw error (isc_ses_reset_err) if any open transaction exist in current connection... -# -# decription: -# Test issue from CORE-5832 about ALTER SESSION RESET: -# "throw error (isc_ses_reset_err) if any open transaction exist in current connection except -# of current transaction..." -# -# We start three transactions within the same connection. First of them runs 'ALTER SESSION RESET'. -# It must fail with error with phrase about active transactions that prevent from doing this action. -# -# NOTE: this test does NOT check admissibility of session reset when prepared 2PC transactions exist. -# It checks only ERROR RAISING when there are several Tx opened within the same connection. -# -# Checked on 4.0.0.2307 SS/CS. 
-# -# tracker_id: -# min_versions: ['4.0.0'] -# versions: 4.0 -# qmid: None + +""" +ID: session.alter-session-reset-raise-if-open-tx +ISSUE: 6093 +TITLE: ALTER SESSION RESET: throw error (isc_ses_reset_err) if any open transaction exist in current connection +DESCRIPTION: + Test issue about ALTER SESSION RESET: + "throw error (isc_ses_reset_err) if any open transaction exist in current connection except + of current transaction..." + + We start three transactions within the same connection. First of them runs 'ALTER SESSION RESET'. + It must fail with error with phrase about active transactions that prevent from doing this action. + + NOTE: this test does NOT check admissibility of session reset when prepared 2PC transactions exist. + It checks only ERROR RAISING when there are several Tx opened within the same connection. +FBTEST: functional.session.alter_session_reset_raise_if_open_tx +JIRA: CORE-5832 +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() -substitutions_1 = [] +act = python_act('db') -init_script_1 = """""" +expected_stdout = """ + Error while executing SQL statement: + - SQLCODE: -901 + - Cannot reset user session + - There are open transactions (3 active) + -901 + 335545206 +""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +@pytest.mark.skip('FIXME: Not IMPLEMENTED') +@pytest.mark.version('>=4.0') +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") # test_script_1 #--- -# +# # tx1 = db_conn.trans() # tx2 = db_conn.trans() # tx3 = db_conn.trans() -# +# # tx1.begin() # tx2.begin() # tx3.begin() -# +# # cur1=tx1.cursor() # try: # cur1.execute('alter session reset') @@ -55,24 +60,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # tx1.commit() # tx2.commit() # tx3.commit() -# +# # db_conn.close() -# +# #--- -act_1 = python_act('db_1', substitutions=substitutions_1) - -expected_stdout_1 = """ - Error while executing SQL statement: - - SQLCODE: -901 - - Cannot reset user session - - There are open transactions (3 active) - -901 - 335545206 -""" - -@pytest.mark.version('>=4.0') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - - diff --git a/tests/functional/session/test_alter_session_reset_remove_context_vars.py b/tests/functional/session/test_alter_session_reset_remove_context_vars.py index b8b86776..f298c9e2 100644 --- a/tests/functional/session/test_alter_session_reset_remove_context_vars.py +++ b/tests/functional/session/test_alter_session_reset_remove_context_vars.py @@ -1,55 +1,44 @@ #coding:utf-8 -# -# id: functional.session.alter_session_reset_remove_context_vars -# title: -# ALTER SESSION RESET: remove all context variables in 'USER_SESSION' namespace -# -# decription: -# Test issue from CORE-5832 about ALTER SESSION RESET: -# "remove all context variables in 'USER_SESSION' namespace ". -# -# NOTE. *** SET AUTODDL OFF REQUIRED *** -# Following is detailed explanation of this note: -# ======== -# Default ISQL behaviour is to start always *TWO* transactions (for DML and second for DDL) -# after previous commit / rollback and before *ANY* further satement is to be executed, except -# those which control ISQL itself (e.g. 'SET TERM'; 'IN ...'; 'SET BAIL' etc). -# So, even when statement has nothing to change, ISQL will start TWO transactions -# just before executing . -# This means that these transactions will start even if want to run 'ALTER SESSION RESET'. 
-# This, in turn, makes one of them (which must perform DDL) be 'active and NOT current'
-# from ALTER SESSION point of view (which is run within DML transaction).
-#
-# According to description given in CORE-5832, ALTER SESSION throws error isc_ses_reset_err
-# "if any open transaction exist in current conneciton, *except of current transaction* and
-# prepared 2PC transactions which is allowed and ignored by this check".
-#
-# So, we have to prohibit 'autostart' of DDL-transaction because otherwise ALTER SESSION will
-# throw: "SQLSTATE = 01002 / Cannot reset user session / -There are open transactions (2 active)".
-# This is done by 'SET AUTODDL OFF' statement at the beginning of this test script.
-# ========
-#
-# Thanks to Vlad for explanations (discussed 18.01.2021).
-# Checked on 4.0.0.2307 SS/CS.
-#
-# tracker_id:
-# min_versions: ['4.0.0']
-# versions: 4.0
-# qmid: None
+
+"""
+ID: session.alter-session-reset-remove-context-vars
+ISSUE: 6093
+TITLE: ALTER SESSION RESET: remove all context variables in 'USER_SESSION' namespace
+DESCRIPTION:
+    Test issue about ALTER SESSION RESET:
+    "remove all context variables in 'USER_SESSION' namespace ".
+
+    NOTE. *** SET AUTODDL OFF REQUIRED ***
+    Following is a detailed explanation of this note:
+
+    Default ISQL behaviour is to always start *TWO* transactions (one for DML and a second for DDL)
+    after a previous commit / rollback and before *ANY* further statement is to be executed, except
+    those which control ISQL itself (e.g. 'SET TERM'; 'IN ...'; 'SET BAIL' etc).
+    So, even when a statement has nothing to change, ISQL will start TWO transactions
+    just before executing it.
+    This means that these transactions will start even if we want to run 'ALTER SESSION RESET'.
+    This, in turn, makes one of them (the one which must perform DDL) 'active and NOT current'
+    from the ALTER SESSION point of view (which is run within the DML transaction).
+
+    According to the description given in #6093, ALTER SESSION throws error isc_ses_reset_err
+    "if any open transaction exist in current connection, *except of current transaction* and
+    prepared 2PC transactions which is allowed and ignored by this check".
+
+    So, we have to prohibit 'autostart' of the DDL transaction, because otherwise ALTER SESSION will
+    throw: "SQLSTATE = 01002 / Cannot reset user session / -There are open transactions (2 active)".
+    This is done by the 'SET AUTODDL OFF' statement at the beginning of this test script.
+
+    Thanks to Vlad for explanations (discussed 18.01.2021).
+FBTEST: functional.session.alter_session_reset_remove_context_vars +JIRA: CORE-5832 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() -substitutions_1 = [('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set list on; set autoddl off; commit; @@ -72,16 +61,15 @@ test_script_1 = """ """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ SESSION_LEVEL_CONTEXT_VAR_BEFORE_RESET 123 SESSION_LEVEL_CONTEXT_VAR_AFTER_RESET """ @pytest.mark.version('>=4.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/session/test_alter_session_reset_restore_role.py b/tests/functional/session/test_alter_session_reset_restore_role.py index c5b80819..40027478 100644 --- a/tests/functional/session/test_alter_session_reset_restore_role.py +++ b/tests/functional/session/test_alter_session_reset_restore_role.py @@ -1,55 +1,44 @@ #coding:utf-8 -# -# id: functional.session.alter_session_reset_restore_role -# title: -# ALTER SESSION RESET: restore ROLE which was passed with DPB and clear all cached security classes (if role was changed) -# -# decription: -# Test issue from CORE-5832 about ALTER SESSION RESET: -# "restore ROLE which was passed with DPB and clear all cached security classes (if role was changed) ". -# -# NOTE. *** SET AUTODDL OFF REQUIRED *** -# Following is detailed explanation of this note: -# ======== -# Default ISQL behaviour is to start always *TWO* transactions (for DML and second for DDL) -# after previous commit / rollback and before *ANY* further satement is to be executed, except -# those which control ISQL itself (e.g. 'SET TERM'; 'IN ...'; 'SET BAIL' etc). -# So, even when statement has nothing to change, ISQL will start TWO transactions -# just before executing . -# This means that these transactions will start even if want to run 'ALTER SESSION RESET'. -# This, in turn, makes one of them (which must perform DDL) be 'active and NOT current' -# from ALTER SESSION point of view (which is run within DML transaction). -# -# According to description given in CORE-5832, ALTER SESSION throws error isc_ses_reset_err -# "if any open transaction exist in current conneciton, *except of current transaction* and -# prepared 2PC transactions which is allowed and ignored by this check". -# -# So, we have to prohibit 'autostart' of DDL-transaction because otherwise ALTER SESSION will -# throw: "SQLSTATE = 01002 / Cannot reset user session / -There are open transactions (2 active)". -# This is done by 'SET AUTODDL OFF' statement at the beginning of this test script. -# ======== -# -# Thanks to Vlad for explanations (discussed 18.01.2021). -# Checked on 4.0.0.2307 SS/CS. 
-# -# tracker_id: -# min_versions: ['4.0.0'] -# versions: 4.0 -# qmid: None + +""" +ID: session.alter-session-reset-restore-role +ISSUE: 6093 +TITLE: ALTER SESSION RESET: restore ROLE which was passed with DPB and clear all cached security classes (if role was changed) +DESCRIPTION: + Test issue about ALTER SESSION RESET: + "restore ROLE which was passed with DPB and clear all cached security classes (if role was changed) ". + + NOTE. *** SET AUTODDL OFF REQUIRED *** + Following is detailed explanation of this note: + + Default ISQL behaviour is to start always *TWO* transactions (for DML and second for DDL) + after previous commit / rollback and before *ANY* further satement is to be executed, except + those which control ISQL itself (e.g. 'SET TERM'; 'IN ...'; 'SET BAIL' etc). + So, even when statement has nothing to change, ISQL will start TWO transactions + just before executing . + This means that these transactions will start even if want to run 'ALTER SESSION RESET'. + This, in turn, makes one of them (which must perform DDL) be 'active and NOT current' + from ALTER SESSION point of view (which is run within DML transaction). + + According to description given in #6093, ALTER SESSION throws error isc_ses_reset_err + "if any open transaction exist in current conneciton, *except of current transaction* and + prepared 2PC transactions which is allowed and ignored by this check". + + So, we have to prohibit 'autostart' of DDL-transaction because otherwise ALTER SESSION will + throw: "SQLSTATE = 01002 / Cannot reset user session / -There are open transactions (2 active)". + This is done by 'SET AUTODDL OFF' statement at the beginning of this test script. + + Thanks to Vlad for explanations (discussed 18.01.2021). +FBTEST: functional.session.alter_session_reset_restore_role +JIRA: CORE-5832 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() -substitutions_1 = [('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set bail on; create role acnt; @@ -84,9 +73,9 @@ test_script_1 = """ """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ WHO_AMI SYSDBA WHATS_MY_ROLE BOSS @@ -98,8 +87,7 @@ expected_stdout_1 = """ """ @pytest.mark.version('>=4.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/session/test_alter_session_reset_rollback.py b/tests/functional/session/test_alter_session_reset_rollback.py index ab48a311..d53a6970 100644 --- a/tests/functional/session/test_alter_session_reset_rollback.py +++ b/tests/functional/session/test_alter_session_reset_rollback.py @@ -1,59 +1,49 @@ #coding:utf-8 -# -# id: functional.session.alter_session_reset_rollback -# title: -# ALTER SESSION RESET: ROLLBACK current user transaction (if present) and issue warning if that transaction changes any table before reset -# -# decription: -# Test issue from CORE-5832 about ALTER SESSION RESET: -# "ROLLBACK current user transaction (if present) and issue warning if that transaction changes any table before reset ". 
-#
-# We create trivial table and insert one row in it.
-# Then, without committing changes, we issue 'ALTER SESSION RESET'.
-# Warning must be thrown after it (in STDERR) and no records must remain in the table as result.
-#
-# NOTE. *** SET AUTODDL OFF REQUIRED ***
-# Following is detailed explanation of this note:
-# ========
-# Default ISQL behaviour is to start always *TWO* transactions (for DML and second for DDL)
-# after previous commit / rollback and before *ANY* further satement is to be executed, except
-# those which control ISQL itself (e.g. 'SET TERM'; 'IN ...'; 'SET BAIL' etc).
-# So, even when statement has nothing to change, ISQL will start TWO transactions
-# just before executing .
-# This means that these transactions will start even if want to run 'ALTER SESSION RESET'.
-# This, in turn, makes one of them (which must perform DDL) be 'active and NOT current'
-# from ALTER SESSION point of view (which is run within DML transaction).
-#
-# According to description given in CORE-5832, ALTER SESSION throws error isc_ses_reset_err
-# "if any open transaction exist in current conneciton, *except of current transaction* and
-# prepared 2PC transactions which is allowed and ignored by this check".
-#
-# So, we have to prohibit 'autostart' of DDL-transaction because otherwise ALTER SESSION will
-# throw: "SQLSTATE = 01002 / Cannot reset user session / -There are open transactions (2 active)".
-# This is done by 'SET AUTODDL OFF' statement at the beginning of this test script.
-# ========
-#
-# Thanks to Vlad for explanations (discussed 18.01.2021).
-# Checked on 4.0.0.2307 SS/CS.
-#
-# tracker_id:
-# min_versions: ['4.0.0']
-# versions: 4.0
-# qmid: None
+
+"""
+ID: session.alter-session-reset-rollback
+ISSUE: 6093
+TITLE: ALTER SESSION RESET: ROLLBACK current user transaction (if present) and issue
+    warning if that transaction changes any table before reset
+DESCRIPTION:
+    Test issue about ALTER SESSION RESET:
+    "ROLLBACK current user transaction (if present) and issue warning if that transaction changes any table before reset ".
+
+    We create a trivial table and insert one row in it.
+    Then, without committing changes, we issue 'ALTER SESSION RESET'.
+    A warning must be thrown after it (in STDERR) and no records must remain in the table as a result.
+
+    NOTE. *** SET AUTODDL OFF REQUIRED ***
+    Following is a detailed explanation of this note:
+
+    Default ISQL behaviour is to always start *TWO* transactions (one for DML and a second for DDL)
+    after a previous commit / rollback and before *ANY* further statement is to be executed, except
+    those which control ISQL itself (e.g. 'SET TERM'; 'IN ...'; 'SET BAIL' etc).
+    So, even when a statement has nothing to change, ISQL will start TWO transactions
+    just before executing it.
+    This means that these transactions will start even if we want to run 'ALTER SESSION RESET'.
+    This, in turn, makes one of them (the one which must perform DDL) 'active and NOT current'
+    from the ALTER SESSION point of view (which is run within the DML transaction).
+
+    According to the description given in #6093, ALTER SESSION throws error isc_ses_reset_err
+    "if any open transaction exist in current connection, *except of current transaction* and
+    prepared 2PC transactions which is allowed and ignored by this check".
+
+    So, we have to prohibit 'autostart' of the DDL transaction, because otherwise ALTER SESSION will
+    throw: "SQLSTATE = 01002 / Cannot reset user session / -There are open transactions (2 active)".
+    This is done by the 'SET AUTODDL OFF' statement at the beginning of this test script.
+ + Thanks to Vlad for explanations (discussed 18.01.2021). +FBTEST: functional.session.alter_session_reset_rollback +JIRA: CORE-5832 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ recreate table test(id int); commit; @@ -70,22 +60,21 @@ test_script_1 = """ select * from test; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ Records affected: 0 """ -expected_stderr_1 = """ + +expected_stderr = """ Session was reset with warning(s) -Transaction is rolled back due to session reset, all changes are lost """ @pytest.mark.version('>=4.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.expected_stderr = expected_stderr + act.execute() + assert (act.clean_stderr == act.clean_expected_stderr and + act.clean_stdout == act.clean_expected_stdout) diff --git a/tests/functional/session/test_alter_session_reset_start_new_tx.py b/tests/functional/session/test_alter_session_reset_start_new_tx.py index a9b82049..5caa664c 100644 --- a/tests/functional/session/test_alter_session_reset_start_new_tx.py +++ b/tests/functional/session/test_alter_session_reset_start_new_tx.py @@ -1,60 +1,52 @@ #coding:utf-8 -# -# id: functional.session.alter_session_reset_start_new_tx -# title: -# ALTER SESSION RESET: START new transaction with the same properties as transaction that was rolled back (if transaction was present before reset) -# -# decription: -# Test issue from CORE-5832 about ALTER SESSION RESET: -# "START new transaction with the same properties as transaction that was rolled back (if transaction was present before reset)". -# -# We create trivial table and insert one row in it with ID = current_transaction - and use AUTONOMOUS tranaction for this (ES is used). -# Then, without committing changes in main Tx, we issue 'ALTER SESSION RESET'. -# Warning must be thrown after it (in STDERR) and no records must remain in the table as result. -# After this, we check that new transaction has ID different than used before and that attributes of this Tx were NOT changed. -# -# NOTE. *** SET AUTODDL OFF REQUIRED *** -# Following is detailed explanation of this note: -# ======== -# Default ISQL behaviour is to start always *TWO* transactions (for DML and second for DDL) -# after previous commit / rollback and before *ANY* further satement is to be executed, except -# those which control ISQL itself (e.g. 'SET TERM'; 'IN ...'; 'SET BAIL' etc). -# So, even when statement has nothing to change, ISQL will start TWO transactions -# just before executing . -# This means that these transactions will start even if want to run 'ALTER SESSION RESET'. -# This, in turn, makes one of them (which must perform DDL) be 'active and NOT current' -# from ALTER SESSION point of view (which is run within DML transaction). 
-#
-# According to description given in CORE-5832, ALTER SESSION throws error isc_ses_reset_err
-# "if any open transaction exist in current conneciton, *except of current transaction* and
-# prepared 2PC transactions which is allowed and ignored by this check".
-#
-# So, we have to prohibit 'autostart' of DDL-transaction because otherwise ALTER SESSION will
-# throw: "SQLSTATE = 01002 / Cannot reset user session / -There are open transactions (2 active)".
-# This is done by 'SET AUTODDL OFF' statement at the beginning of this test script.
-# ========
-#
-# Thanks to Vlad for explanations (discussed 18.01.2021).
-# Checked on 4.0.0.2307 SS/CS.
-#
-# tracker_id:
-# min_versions: ['4.0.0']
-# versions: 4.0
-# qmid: None
+
+"""
+ID: session.alter-session-reset-start-new-tx
+ISSUE: 6093
+TITLE: ALTER SESSION RESET: START new transaction with the same properties as transaction
+    that was rolled back (if transaction was present before reset)
+DESCRIPTION:
+    Test issue about ALTER SESSION RESET:
+    "START new transaction with the same properties as transaction that was rolled back (if
+    transaction was present before reset)".
+
+    We create a trivial table and insert one row in it with ID = current_transaction - and use
+    an AUTONOMOUS transaction for this (ES is used).
+    Then, without committing changes in the main Tx, we issue 'ALTER SESSION RESET'.
+    A warning must be thrown after it (in STDERR) and no records must remain in the table as a result.
+    After this, we check that the new transaction has an ID different from the one used before and that the attributes of this Tx were NOT changed.
+
+    NOTE. *** SET AUTODDL OFF REQUIRED ***
+    Following is a detailed explanation of this note:
+
+    Default ISQL behaviour is to always start *TWO* transactions (one for DML and a second for DDL)
+    after a previous commit / rollback and before *ANY* further statement is to be executed, except
+    those which control ISQL itself (e.g. 'SET TERM'; 'IN ...'; 'SET BAIL' etc).
+    So, even when a statement has nothing to change, ISQL will start TWO transactions
+    just before executing it.
+    This means that these transactions will start even if we want to run 'ALTER SESSION RESET'.
+    This, in turn, makes one of them (the one which must perform DDL) 'active and NOT current'
+    from the ALTER SESSION point of view (which is run within the DML transaction).
+
+    According to the description given in #6093, ALTER SESSION throws error isc_ses_reset_err
+    "if any open transaction exist in current connection, *except of current transaction* and
+    prepared 2PC transactions which is allowed and ignored by this check".
+
+    So, we have to prohibit 'autostart' of the DDL transaction, because otherwise ALTER SESSION will
+    throw: "SQLSTATE = 01002 / Cannot reset user session / -There are open transactions (2 active)".
+    This is done by the 'SET AUTODDL OFF' statement at the beginning of this test script.
+
+    Thanks to Vlad for explanations (discussed 18.01.2021).
+FBTEST: functional.session.alter_session_reset_start_new_tx +JIRA: CORE-5832 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() -substitutions_1 = [('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ -- set echo on; set bail on; recreate table test( @@ -138,9 +130,9 @@ test_script_1 = """ """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ TRN_ID_DISTINCT_CNT 2 TRN_ISOLATION_MODE_DISTINCT_CNT 1 TRN_LOCK_TIMEOUT_DISTINCT_CNT 1 @@ -149,8 +141,7 @@ expected_stdout_1 = """ """ @pytest.mark.version('>=4.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/session/test_ext_conn_pool_01.py b/tests/functional/session/test_ext_conn_pool_01.py index 4ea8fb02..c5ace5bc 100644 --- a/tests/functional/session/test_ext_conn_pool_01.py +++ b/tests/functional/session/test_ext_conn_pool_01.py @@ -1,108 +1,189 @@ #coding:utf-8 -# -# id: functional.session.ext_conn_pool_01 -# title: External Connections Pool, functionality test 01 -# decription: -# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -# ATTENTION! CURRENT FB INSTANCE DOES NOT PARTICIPATE IN THIS TEST WORK! TEMPORARY INSTANCE IS USED! -# RESULT OF THIS TEST HAS NO "LINK: WITH CURRENTLY CHECKED FB SERVERMODE! DIFF OUTPUT MUST BE CHECKED! -# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -# -# Basic check of External Connections Pool. We verify here following: -# * ability to reuse connections from ECP in case running ES/EDS by "frequent" attachments -# * ability to distinguish connect/disconnect from reuse connections within apropriate -# DB-level trigger (system variable RESETTING = faluse | true) -# * ability to get information about ECP state: total number of active and idle connections. -# -# See $FB_HOME/doc/sql.extensions/README.external_connections_pool and CORE-5832. -# ------------------------------------------------------------------------------------------- -# Test retrieves FB_HOME directory and makes copy of firebird.conf and database.conf files. -# Then it searches for free TCP port and overwrites content of firebird.conf: this new port -# will be specified for RemoveServicePort (see 'TMP_FREE_PORT'). -# Also, parameters for working with External Connections Pool (hereafter: ECP) will be added: -# ExtConnPoolSize = -# ExtConnPoolLifeTime = -# -- where can be set to 5...10 s (but not less than 3). -# -# New alias is added to databases.conf for test .fdb which must be created as self-security DB: -# tmp_ecp_01 = { -# SecurityDatabase = tmp_ecp_01 -# RemoteAccess = true -# } -# File $FB_HOME/securityN.fdb is copied to the . -# -# After this test launches new Firebird instance *as application* (see async. call of Popen()) and make -# some actions with just created test DB (alias = tmp_ecp_01). Because this DB is self-secutity, we can -# use it for connect without any conflict with existing FB instance. 
-# -# When all needed actions with this DB complete, this FB temporary instance will be stopped. -# Test does start and stop of this FB instance _two_ times because of checking following ServerMode: -# * Super -# * SuperClassic -# -# ::: NOTE ::: -# Test does NOT check Servermode = Classic because of unpredictable results when ExtConnPoolLifeTime less than 7s. -# In some cases number of IDLE connections can differ from one run to another. The reason is remaining unknown. -# ------------------------------------------------------------------------------------------- -# After FB instance launch, test runs ISQL that connects to using port and creates -# several DB objects: -# * DB-level triggers on CONNECT / DISCONNECT; -# * table 'audit' for logging these events, with specifying detailed info: whether current -# connect/disconnect is caused by SESSION RESET (variable RESETTING is TRUE) or no; -# * two users who which further perform connect and run several ES/EDS statements: -# 'freq' -- will do ES/EDS 'frequently', i.e. with interval LESS than ExtConnPoolLifeTime; -# 'rare' -- will do ES/EDS 'rarely', with interval GREATER than ExtConnPoolLifeTime; -# * role 'cleaner_ext_pool' with granting to it system privilege MODIFY_EXT_CONN_POOL, in order -# to have ability to clear ext pool after final ES/EDS. Grant this role to both 'freq' and 'rare' -# -# Then we create several connections for user 'freq' (appending them into a list) and for each of them -# do ES/EDS. Number of connections is specified by variable ITER_LOOP_CNT. Delay between subsequent -# ES/EDS for 'freq' is minimal: 1 second. -# After this, we repeate the same for user 'rare', and use delay between subsequent ES/EDS GREATER -# than ExtConnPoolLifeTime for seconds. -# After loop we clear ExtConnPool and close all connections from list. -# -# Finally test terminates Firebird application process and queries to the table 'audit' for check results. -# Number of rows (and unique connection IDs) for user 'freq' must be significantly less than for user 'rare', -# despite the fact that both of them did the same work. This is because engine held idle connections in the pool -# and user FREQ could re-use them when ran subsequent ES/EDS. -# -# ::: CAUTION ::: -# Windows Firewall can block attempt to launch FB as application (dialog window appears in this case). -# One may need to configure Firewall so that process -# irebird.exe is enable to operate on any port. -# -# Perhaps, following command will be useful: netsh advfirewall set privateprofile state off -# -# Checked on 4.0.0.2235, FB instances were launched as 'Super' and 'SuperClassic'. Time: ~52s. -# -# 22.05.2021: definition of full path and name to security.db was wrong because it supposed that FB major version -# corresponds to numeric suffix of security database (FB 3.x --> security3.fdb; FB 4.x --> security4.fdb). -# But in major version FB 5.x currently remains to use security4.fdb. -# Proper way is either to use Services API (call to get_security_database_path()) or get this info from fbtest -# built-in context variable context['isc4_path']. -# Checked on 5.0.0.47 (Linux, Windows). -# -# tracker_id: -# min_versions: ['4.0'] -# versions: 4.0 -# qmid: None + +""" +ID: session.ext-conn-pool-01 +TITLE: External Connections Pool, functionality test 01 +DESCRIPTION: + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + ATTENTION! CURRENT FB INSTANCE DOES NOT PARTICIPATE IN THIS TEST WORK! TEMPORARY INSTANCE IS USED! 
+    RESULT OF THIS TEST HAS NO "LINK" WITH CURRENTLY CHECKED FB SERVERMODE! DIFF OUTPUT MUST BE CHECKED!
+    %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+    Basic check of External Connections Pool. We verify here the following:
+    * ability to reuse connections from ECP in case of running ES/EDS by "frequent" attachments
+    * ability to distinguish connect/disconnect from reuse of connections within the appropriate
+      DB-level trigger (system variable RESETTING = false | true)
+    * ability to get information about ECP state: total number of active and idle connections.
+
+    See $FB_HOME/doc/sql.extensions/README.external_connections_pool and CORE-5832.
+    -------------------------------------------------------------------------------------------
+    The test retrieves the FB_HOME directory and makes copies of the firebird.conf and databases.conf files.
+    Then it searches for a free TCP port and overwrites the content of firebird.conf: this new port
+    will be specified for RemoteServicePort (see 'TMP_FREE_PORT').
+    Also, parameters for working with External Connections Pool (hereafter: ECP) will be added:
+    ExtConnPoolSize =
+    ExtConnPoolLifeTime =
+    -- where can be set to 5...10 s (but not less than 3).
+
+    A new alias is added to databases.conf for the test .fdb which must be created as a self-security DB:
+    tmp_ecp_01 = {
+        SecurityDatabase = tmp_ecp_01
+        RemoteAccess = true
+    }
+    File $FB_HOME/securityN.fdb is copied to the .
+
+    After this, the test launches a new Firebird instance *as application* (see async. call of Popen()) and performs
+    some actions with the just created test DB (alias = tmp_ecp_01). Because this DB is self-security, we can
+    connect to it without any conflict with the existing FB instance.
+
+    When all needed actions with this DB are complete, this temporary FB instance will be stopped.
+    The test starts and stops this FB instance _two_ times in order to check the following ServerMode values:
+    * Super
+    * SuperClassic
+
+    ::: NOTE :::
+    The test does NOT check ServerMode = Classic because of unpredictable results when ExtConnPoolLifeTime is less than 7s.
+    In some cases the number of IDLE connections can differ from one run to another. The reason remains unknown.
+    -------------------------------------------------------------------------------------------
+    After the FB instance launch, the test runs ISQL that connects to using port and creates
+    several DB objects:
+    * DB-level triggers on CONNECT / DISCONNECT;
+    * table 'audit' for logging these events, with detailed info specifying whether the current
+      connect/disconnect is caused by SESSION RESET (variable RESETTING is TRUE) or not;
+    * two users which further perform connect and run several ES/EDS statements:
+      'freq' -- will do ES/EDS 'frequently', i.e. with an interval LESS than ExtConnPoolLifeTime;
+      'rare' -- will do ES/EDS 'rarely', with an interval GREATER than ExtConnPoolLifeTime;
+    * role 'cleaner_ext_pool' with the system privilege MODIFY_EXT_CONN_POOL granted to it, in order
+      to have the ability to clear the ext pool after the final ES/EDS. Grant this role to both 'freq' and 'rare'.
+
+    Then we create several connections for user 'freq' (appending them into a list) and for each of them
+    do ES/EDS. The number of connections is specified by the variable ITER_LOOP_CNT. The delay between subsequent
+    ES/EDS for 'freq' is minimal: 1 second.
+    After this, we repeat the same for user 'rare', and use a delay between subsequent ES/EDS GREATER
+    than ExtConnPoolLifeTime for seconds.
+    After the loop we clear ExtConnPool and close all connections from the list.
+ + Finally test terminates Firebird application process and queries to the table 'audit' for check results. + Number of rows (and unique connection IDs) for user 'freq' must be significantly less than for user 'rare', + despite the fact that both of them did the same work. This is because engine held idle connections in the pool + and user FREQ could re-use them when ran subsequent ES/EDS. + + ::: CAUTION ::: + Windows Firewall can block attempt to launch FB as application (dialog window appears in this case). + One may need to configure Firewall so that process /firebird.exe is enable to operate on any port. + + Perhaps, following command will be useful: netsh advfirewall set privateprofile state off + + Checked on 4.0.0.2235, FB instances were launched as 'Super' and 'SuperClassic'. Time: ~52s. +NOTES: +[22.05.2021] + definition of full path and name to security.db was wrong because it supposed that FB major version + corresponds to numeric suffix of security database (FB 3.x --> security3.fdb; FB 4.x --> security4.fdb). + But in major version FB 5.x currently remains to use security4.fdb. + Proper way is either to use Services API (call to get_security_database_path()) or get this info from fbtest + built-in context variable context['isc4_path']. + Checked on 5.0.0.47 (Linux, Windows). +FBTEST: functional.session.ext_conn_pool_01 +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() -substitutions_1 = [] +act = python_act('db') -init_script_1 = """""" +expected_stdout = """ + SRVMODE WHO ATT ID EVT ACTIVE_CNT IDLE_CNT + ============ ========== ======= ======= ======================================== ========== ======== + Super FREQ 1 1 NEW 1 0 + Super FREQ 2 2 NEW 1 1 + Super FREQ 3 3 NEW 1 2 + Super FREQ 4 4 NEW 1 3 + Super FREQ 4 5 RUN DML 1 4 + Super FREQ 4 6 MOVE INTO POOL: ACTIVE -> IDLE 1 4 + Super FREQ 4 7 TAKE FROM POOL: IDLE -> ACTIVE 2 4 + Super FREQ 1 8 BYE 0 6 + Super FREQ 4 9 RUN DML 1 5 + Super FREQ 4 10 MOVE INTO POOL: ACTIVE -> IDLE 1 5 + Super FREQ 4 11 TAKE FROM POOL: IDLE -> ACTIVE 2 5 + Super FREQ 2 12 BYE 0 7 + Super FREQ 4 13 RUN DML 1 6 + Super FREQ 4 14 MOVE INTO POOL: ACTIVE -> IDLE 1 6 + Super FREQ 4 15 TAKE FROM POOL: IDLE -> ACTIVE 2 6 + Super FREQ 4 16 BYE 0 0 + Super FREQ 3 17 BYE 0 0 + Super RARE 1 1 NEW 1 0 + Super RARE 2 2 NEW 1 1 + Super RARE 3 3 NEW 1 2 + Super RARE 4 4 NEW 1 3 + Super RARE 4 5 RUN DML 1 4 + Super RARE 4 6 MOVE INTO POOL: ACTIVE -> IDLE 1 4 + Super RARE 4 7 TAKE FROM POOL: IDLE -> ACTIVE 2 4 + Super RARE 4 8 BYE 0 0 + Super RARE 1 9 BYE 0 0 + Super RARE 5 10 NEW 1 0 + Super RARE 5 11 RUN DML 1 1 + Super RARE 5 12 MOVE INTO POOL: ACTIVE -> IDLE 1 1 + Super RARE 5 13 TAKE FROM POOL: IDLE -> ACTIVE 2 1 + Super RARE 5 14 BYE 0 0 + Super RARE 2 15 BYE 0 0 + Super RARE 6 16 NEW 1 0 + Super RARE 6 17 RUN DML 1 1 + Super RARE 6 18 MOVE INTO POOL: ACTIVE -> IDLE 1 1 + Super RARE 6 19 TAKE FROM POOL: IDLE -> ACTIVE 2 1 + Super RARE 6 20 BYE 0 0 + Super RARE 3 21 BYE 0 0 + SuperClassic FREQ 1 1 NEW 1 0 + SuperClassic FREQ 2 2 NEW 1 1 + SuperClassic FREQ 3 3 NEW 1 2 + SuperClassic FREQ 4 4 NEW 1 3 + SuperClassic FREQ 4 5 RUN DML 1 4 + SuperClassic FREQ 4 6 MOVE INTO POOL: ACTIVE -> IDLE 1 4 + SuperClassic FREQ 4 7 TAKE FROM POOL: IDLE -> ACTIVE 2 4 + SuperClassic FREQ 1 8 BYE 0 6 + SuperClassic FREQ 4 9 RUN DML 1 5 + SuperClassic FREQ 4 10 MOVE INTO POOL: ACTIVE -> IDLE 1 5 + SuperClassic FREQ 4 11 TAKE FROM POOL: IDLE -> ACTIVE 2 5 + 
SuperClassic FREQ 2 12 BYE 0 7 + SuperClassic FREQ 4 13 RUN DML 1 6 + SuperClassic FREQ 4 14 MOVE INTO POOL: ACTIVE -> IDLE 1 6 + SuperClassic FREQ 4 15 TAKE FROM POOL: IDLE -> ACTIVE 2 6 + SuperClassic FREQ 4 16 BYE 0 0 + SuperClassic FREQ 3 17 BYE 0 0 + SuperClassic RARE 1 1 NEW 1 0 + SuperClassic RARE 2 2 NEW 1 1 + SuperClassic RARE 3 3 NEW 1 2 + SuperClassic RARE 4 4 NEW 1 3 + SuperClassic RARE 4 5 RUN DML 1 4 + SuperClassic RARE 4 6 MOVE INTO POOL: ACTIVE -> IDLE 1 4 + SuperClassic RARE 4 7 TAKE FROM POOL: IDLE -> ACTIVE 2 4 + SuperClassic RARE 4 8 BYE 0 0 + SuperClassic RARE 1 9 BYE 0 0 + SuperClassic RARE 5 10 NEW 1 0 + SuperClassic RARE 5 11 RUN DML 1 1 + SuperClassic RARE 5 12 MOVE INTO POOL: ACTIVE -> IDLE 1 1 + SuperClassic RARE 5 13 TAKE FROM POOL: IDLE -> ACTIVE 2 1 + SuperClassic RARE 5 14 BYE 0 0 + SuperClassic RARE 2 15 BYE 0 0 + SuperClassic RARE 6 16 NEW 1 0 + SuperClassic RARE 6 17 RUN DML 1 1 + SuperClassic RARE 6 18 MOVE INTO POOL: ACTIVE -> IDLE 1 1 + SuperClassic RARE 6 19 TAKE FROM POOL: IDLE -> ACTIVE 2 1 + SuperClassic RARE 6 20 BYE 0 0 + SuperClassic RARE 3 21 BYE 0 0 -db_1 = db_factory(sql_dialect=3, init=init_script_1) + Records affected: 76 +""" + +@pytest.mark.skip('FIXME: Not IMPLEMENTED') +@pytest.mark.version('>=4.0') +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") # test_script_1 #--- -# +# # import sys # import os # import socket @@ -115,22 +196,22 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # import subprocess # import fdb # from fdb import services -# +# # # POSIX: '/opt/fb40/lib/libfbclient.so' # # Windows: r'C:\\FB SS # bclient.dll' # #FB_CLNT = r'C:\\FB SS # bclient.dll' -# -# +# +# # os.environ["ISC_USER"] = 'SYSDBA' # os.environ["ISC_PASSWORD"] = 'masterkey' -# +# # TMP_DBA_PSWD = 'M@$terkeX' -# +# # # Ext. Poll size and lifetime: # ECP_SIZE = 10 -# +# # # === !!! do NOT set ECP_LIFE less than 4 !!! === # # SuperClassic can fail in that case (approx every 40...50 run): mismatch in last column (number of ECP idle connections): # # Example of diff: @@ -149,13 +230,13 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # ######################################################################################################## # # # ECP_LIFE = 5 -# +# # # How many seconds will be added to delay = when user 'RARE' works with database. # # For Classic it was needed to set this value about 4(!) seconds but this did not help and results remained non-stable # # For Super and SuperClassic it is enough to add 2 seconds: # # # ADD_DELAY_FOR_RARE = 2 -# +# # # How many connections will be done by users 'FREQ' and (after him) by 'RARE'. 
# # Each connection will run _single_ DML using ES/EDS and then immediately is closed # # Subsequent connection will run its DML after N seconds where: @@ -163,31 +244,31 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # # N = ECP_LIFE + ADD_DELAY_FOR_RARE -- for user 'RARE' # # # ITER_LOOP_CNT = 3 -# +# # svc = fdb.services.connect(host='localhost', user=user_name, password=user_password) # FB_HOME = svc.get_home_directory() # FB_BINS = os.path.join( FB_HOME, 'bin'+os.sep if platform.system() == 'Linux' else '' ) # svc.close() # SEC_FDB = context['isc4_path'] -# -# +# +# # #-------------------------------------------- -# +# # def flush_and_close(file_handle): # # https://docs.python.org/2/library/os.html#os.fsync -# # If you're starting with a Python file object f, -# # first do f.flush(), and +# # If you're starting with a Python file object f, +# # first do f.flush(), and # # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. # global os -# +# # file_handle.flush() # if file_handle.mode not in ('r', 'rb'): # # otherwise: "OSError: [Errno 9] Bad file descriptor"! # os.fsync(file_handle.fileno()) # file_handle.close() -# +# # #-------------------------------------------- -# +# # def cleanup( f_names_list ): # global os # for i in range(len( f_names_list )): @@ -197,17 +278,17 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # del_name = f_names_list[i] # else: # del_name = None -# +# # if os.path.isfile( del_name ): # os.remove( del_name ) -# +# # #-------------------------------------------- -# +# # def find_free_port(): # global socket # from contextlib import closing # # AF_INET - constant represent the address (and protocol) families, used for the first argument to socket() -# # A pair (host, port) is used for the AF_INET address family, where host is a string representing either a +# # A pair (host, port) is used for the AF_INET address family, where host is a string representing either a # # hostname in Internet domain notation like 'daring.cwi.nl' or an IPv4 address like '100.50.200.5', and port is an integer. # # SOCK_STREAM means that it is a TCP socket. # # SOCK_DGRAM means that it is a UDP socket. 
@@ -215,7 +296,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # s.bind(('', 0)) # s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # return s.getsockname()[1] -# +# # #-------------------------------------------- # def check_server(address, port): # global socket @@ -228,36 +309,36 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # return False # finally: # s.close() -# +# # #-------------------------------------------- -# +# # def do_shutdown_bring_online( FB_BINS, tcp_port, db_name, dba_pwd ): # global subprocess # subprocess.call( [ os.path.join( FB_BINS, 'gfix'), '-user', 'SYSDBA', '-pas', dba_pwd, '-shut', 'full', '-force', '0', 'localhost/%d:%s' % (tcp_port, db_name) ] ) # subprocess.call( [ os.path.join( FB_BINS, 'gfix'), '-user', 'SYSDBA', '-online', db_name ] ) -# +# # #-------------------------------------------- -# +# # def get_fb_arch(a_dsn): # try: # con1 = fdb.connect(dsn = a_dsn) # con2 = fdb.connect(dsn = a_dsn) -# +# # cur1 = con1.cursor() -# +# # sql=( # "select count(distinct a.mon$server_pid), min(a.mon$remote_protocol), max(iif(a.mon$remote_protocol is null,1,0))" # +" from mon$attachments a" # +" where a.mon$attachment_id in (%s, %s) or upper(a.mon$user) = upper('%s')" # % (con1.attachment_id, con2.attachment_id, 'cache writer') # ) -# +# # cur1.execute(sql) # for r in cur1.fetchall(): # server_cnt=r[0] # server_pro=r[1] # cache_wrtr=r[2] -# +# # if server_pro == None: # fba='Embedded' # elif cache_wrtr == 1: @@ -265,58 +346,58 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # elif server_cnt == 2: # fba='CS' # else: -# +# # f1=con1.db_info(fdb.isc_info_fetches) -# +# # cur2=con2.cursor() # cur2.execute('select 1 from rdb$database') # for r in cur2.fetchall(): # pass -# +# # f2=con1.db_info(fdb.isc_info_fetches) -# +# # fba = 'SC' if f1 ==f2 else 'SS' -# +# # #print(fba, con1.engine_version, con1.version) # return fba -# +# # finally: # con1.close() # con2.close() -# +# # #------------------------------------------------- -# +# # db_conn.close() -# +# # fdb_test = os.path.join(context['temp_directory'],'ext-conn-pool-01.fdb') # cleanup( (fdb_test,) ) -# +# # dts = datetime.datetime.now().strftime("%y%m%d_%H%M%S") -# +# # fbconf_cur = os.path.join(FB_HOME, 'firebird.conf') # fbconf_bak = os.path.join(context['temp_directory'], 'firebird_'+dts+'.bak') -# +# # dbconf_cur = os.path.join(FB_HOME, 'databases.conf') # dbconf_bak = os.path.join(context['temp_directory'], 'databases_'+dts+'.bak') -# +# # shutil.copy2( SEC_FDB, fdb_test ) # f_init_err = 0 -# +# # ################################# # TMP_FREE_PORT = find_free_port() # ################################# -# +# # #fb_arch = get_fb_arch( dsn ) -# +# # CHECKED_MODE_LIST=( 'Super,', 'SuperClassic', ) -# +# # # NO SENSE, ALWAYS DIFFERENT VALUES IN 'IDLE' COLUMN >>> CHECKED_MODE_LIST=('Classic',) -# +# # for srvidx,srvmode in enumerate( CHECKED_MODE_LIST ): -# +# # shutil.copy2( fbconf_cur, fbconf_bak ) # shutil.copy2( dbconf_cur, dbconf_bak ) -# +# # cfg_params_to_change= { # 'ServerMode' : srvmode # ,'RemoteServicePort' : str(TMP_FREE_PORT) @@ -330,12 +411,12 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # ,'AuthClient' : 'Srp' # ,'AuthServer' : 'Srp' # } -# +# # f_fbconf=open( fbconf_cur, 'w') # for k,v in sorted(cfg_params_to_change.items()): # f_fbconf.write( ''.join( (k, ' = ', v, '\\n') ) ) # flush_and_close( f_fbconf ) -# +# # alias_data= ''' # # Added temporarily for executing test ext-conn-pool-01.fbt # tmp_ecp_01 = %(fdb_test)s { @@ -343,56 +424,56 @@ db_1 = 
db_factory(sql_dialect=3, init=init_script_1) # RemoteAccess = true # } # ''' % locals() -# +# # f_dbconf=open( dbconf_cur, 'w') # f_dbconf.write(alias_data) # flush_and_close( f_dbconf ) -# +# # ######################################################################## # ### L A U N C H F B A S A P P L I C A T I O N ### # ######################################################################## # fb_process = subprocess.Popen( [ os.path.join( FB_BINS, 'firebird'), '-a'] ) # time.sleep(2) -# +# # if not check_server('localhost', TMP_FREE_PORT): # print('### ERROR ### FB instance not yet started. Increase delay and repeat!') -# +# # if srvidx == 0: # # initial creation of test DB # ############################# -# +# # sql_text= ''' # set bail on; # set wng off; # set echo on; -# +# # connect 'tmp_ecp_01'; -# +# # create or alter user sysdba password '%(TMP_DBA_PSWD)s' using plugin Srp; -# +# # -- !! otherwise next attempt to attach via TCP will fail with Windows # -- error 32 process can not access file it is used by another process !! # alter database set linger to 0; # commit; -# +# # connect 'localhost/%(TMP_FREE_PORT)s:tmp_ecp_01' user sysdba password '%(TMP_DBA_PSWD)s'; -# +# # set list on; # --select * from mon$database; # --commit; -# +# # create or alter user freq password '123' using plugin Srp; # create or alter user rare password '123' using plugin Srp; # commit; -# +# # create role cleaner_ext_pool # set system privileges to MODIFY_EXT_CONN_POOL; # commit; -# +# # grant default cleaner_ext_pool to user freq; # grant default cleaner_ext_pool to user rare; # commit; -# +# # create table audit( # id smallint generated by default as identity constraint pk_audit primary key # ,srvmode varchar(12) -- 'Super' / 'SuperClassic' / 'Classic' @@ -405,9 +486,9 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # ,pool_idle_count smallint # ,aux_info varchar(100) # ); -# +# # create view v_audit as -# select +# select # srvmode # ,who # ,att @@ -430,12 +511,12 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # --order by srvmode, who, att, id # order by srvmode, who, id # ; -# -# +# +# # grant select,insert on audit to public; # grant select on v_audit to public; # commit; -# +# # set term ^; # create or alter procedure sys_get_fb_arch ( # a_connect_as_user varchar(31) default 'SYSDBA' @@ -450,12 +531,12 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # declare v_fetches_beg bigint; # declare v_fetches_end bigint; # begin -# +# # select a.mon$server_pid, a.mon$remote_protocol # from mon$attachments a # where a.mon$attachment_id = current_connection # into cur_server_pid, att_protocol; -# +# # if ( att_protocol is null ) then # fb_arch = 'Embedded'; # else @@ -464,7 +545,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # 'select a.mon$server_pid' -- + 0*(select 1 from rdb$database)' # ||' from mon$attachments a ' # ||' where a.mon$attachment_id = current_connection'; -# +# # execute statement v_test_sttm # on external # 'localhost:' || rdb$get_context('SYSTEM', 'DB_NAME') @@ -473,7 +554,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # password a_connect_with_pwd # role left('R' || replace(uuid_to_char(gen_uuid()),'-',''),31) # into ext_server_pid; -# +# # if ( cur_server_pid is distinct from ext_server_pid ) then # fb_arch = 'Classic'; # else @@ -482,15 +563,15 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # from mon$io_stats i # where i.mon$stat_group = 0 -- db_level # into v_fetches_beg; -# +# # in autonomous transaction do # select 
i.mon$page_fetches # from mon$io_stats i # where i.mon$stat_group = 0 -- db_level # into v_fetches_end; -# -# fb_arch = iif( v_fetches_beg is not distinct from v_fetches_end, -# 'SuperClassic', +# +# fb_arch = iif( v_fetches_beg is not distinct from v_fetches_end, +# 'SuperClassic', # 'Super' # ); # end @@ -498,7 +579,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # suspend; # end # ^ -# +# # create or alter trigger trg_aud_bi for audit active before insert sql security definer as # declare v_srvmode varchar(30); # declare p int; @@ -510,22 +591,22 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # -- but is uses SYSDBA account and (because of this) table 'audit' will not be # -- changed in [dis]connect triggers: # new.srvmode = ( select fb_arch from sys_get_fb_arch('SYSDBA', '%(TMP_DBA_PSWD)s') ); -# +# # -- 11.01.2021 22:00: WEIRD! If this statement enabled then .py script HANGS on 2nd iter! # rdb$set_context('USER_SESSION', 'FB_ARCH', new.srvmode); # end -# +# # new.pool_active_count = rdb$get_context('SYSTEM','EXT_CONN_POOL_ACTIVE_COUNT'); # new.pool_idle_count = rdb$get_context('SYSTEM','EXT_CONN_POOL_IDLE_COUNT'); # end # ^ -# +# # create or alter trigger trg_connect inactive on connect sql security definer as # declare p smallint; # begin # if (current_user <> 'SYSDBA') then # begin -# +# # insert into audit( # evt # ) values ( @@ -534,11 +615,11 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # end # end # ^ -# +# # create or alter trigger trg_disconnect inactive on disconnect sql security definer as # begin # if (current_user <> 'SYSDBA') then -# begin +# begin # insert into audit( # evt # ) values ( @@ -554,61 +635,61 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # grant execute on procedure sys_get_fb_arch to public; # commit; # ''' % locals() -# -# +# +# # f_sql_cmd = open( os.path.join(context['temp_directory'],'ecp-resetting-DDL.sql'), 'w') # f_sql_cmd.write( sql_text ) # flush_and_close( f_sql_cmd ) -# +# # f_sql_log = open( os.path.splitext(f_sql_cmd.name)[0] + '-init.log', 'w') # f_sql_err = open( os.path.splitext(f_sql_cmd.name)[0] + '-init.err', 'w') # subprocess.call( [ os.path.join( FB_BINS, 'isql'), '-q', '-i', f_sql_cmd.name, '-user', 'SYSDBA'], stdout=f_sql_log, stderr=f_sql_err ) # flush_and_close( f_sql_log ) # flush_and_close( f_sql_err ) -# +# # f_init_err = os.path.getsize(f_sql_err.name) -# +# # with open( f_sql_err.name,'r') as f: # for line in f: # print("Unexpected STDERR, file " + f_sql_err.name + ": "+line) -# +# # cleanup( [ i.name for i in (f_sql_cmd, f_sql_log, f_sql_err) ] ) -# +# # #< srvidx == 0 (process 1st of srvmode list) -# +# # ############################################################################## # do_shutdown_bring_online( FB_BINS, TMP_FREE_PORT, 'tmp_ecp_01', TMP_DBA_PSWD ) # ############################################################################## -# +# # if f_init_err == 0: -# +# # sql_for_run=''' # execute block as # declare c int; # begin # execute statement ( q'{ insert into audit( evt ) values( 'RUN DML') }' ) # on external 'localhost/%(TMP_FREE_PORT)s:' || rdb$get_context('SYSTEM','DB_NAME') -# +# # with autonomous transaction -- <<< !!! THIS IS MANDATORY IF WE WANT TO USE EXT CONN POOL !!! 
<<< -# +# # as user current_user password '123' # ; # end # ''' % locals() -# -# +# +# # ########################################################################### -# +# # for usr_name in ('freq','rare'): # conn_list = [] # for i in range(0, ITER_LOOP_CNT): # conn_list.append( fdb.connect( dsn = 'localhost/%(TMP_FREE_PORT)s:tmp_ecp_01' % locals(), user = usr_name, password = '123' ) ) -# +# # for i,c in enumerate(conn_list): -# +# # # ::: NOTE ::: # # :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -# # On every iteration DIFFERENT connection is used for run ES/EDS, +# # On every iteration DIFFERENT connection is used for run ES/EDS, # # but all of them use the same user/password/role, so apropriate # # item in the ExtConnPool can be used to run this statement. # # But this will be so only for user = 'FREQ' because he does such @@ -619,23 +700,23 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # # expiration of ExtConnPoolLifeTime: # # :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: # c.execute_immediate( sql_for_run ) -# +# # if i < len(conn_list)-1: # time.sleep( 1 if usr_name == 'freq' else ECP_LIFE + ADD_DELAY_FOR_RARE ) # else: # c.execute_immediate( 'ALTER EXTERNAL CONNECTIONS POOL CLEAR ALL' ) -# +# # c.close() -# +# # ### for c in conn_list: # ### c.close() -# +# # ############################################################################## # do_shutdown_bring_online( FB_BINS, TMP_FREE_PORT, 'tmp_ecp_01', TMP_DBA_PSWD ) # ############################################################################## -# +# # if srvidx == len(CHECKED_MODE_LIST)-1: -# +# # sql_check=''' # -- set echo on; # connect 'localhost/%(TMP_FREE_PORT)s:tmp_ecp_01' user sysdba password '%(TMP_DBA_PSWD)s'; @@ -644,140 +725,49 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # --select * from v_audit where who = 'FREQ'; # --select * from v_audit where who = 'RARE'; # ''' % locals() -# +# # f_sql_cmd = open( os.path.join(context['temp_directory'],'ext_conn-pool-results.sql'), 'w') # f_sql_cmd.write( sql_check ) # flush_and_close( f_sql_cmd ) -# +# # f_sql_log = open( os.path.splitext(f_sql_cmd.name)[0] + '.log', 'w') -# +# # ################################## # ### f i n a l q u e r y ### # ################################## # subprocess.call( [ os.path.join( FB_BINS, 'isql'), '-q', '-pag', '99999', '-i', f_sql_cmd.name ], stdout=f_sql_log, stderr=subprocess.STDOUT ) -# +# # flush_and_close( f_sql_log ) -# +# # with open(f_sql_log.name) as f: # for line in f: # print(line) -# +# # #< indent 'f_init_err == 0' -# +# # ######################################################################## # ### S T O P T E M P O R A R Y F B I N S T A N C E ### # ######################################################################## -# +# # fb_process.terminate() # time.sleep(2) -# +# # shutil.move( fbconf_bak, fbconf_cur ) # shutil.move( dbconf_bak, dbconf_cur ) -# +# # if f_init_err > 0: # break -# +# # if check_server('localhost', TMP_FREE_PORT): # print('### ERROR ### FB instance was not yet terminated. 
Increase delay and repeat!') -# +# # # IDLE 1 4 - Super FREQ 4 7 TAKE FROM POOL: IDLE -> ACTIVE 2 4 - Super FREQ 1 8 BYE 0 6 - Super FREQ 4 9 RUN DML 1 5 - Super FREQ 4 10 MOVE INTO POOL: ACTIVE -> IDLE 1 5 - Super FREQ 4 11 TAKE FROM POOL: IDLE -> ACTIVE 2 5 - Super FREQ 2 12 BYE 0 7 - Super FREQ 4 13 RUN DML 1 6 - Super FREQ 4 14 MOVE INTO POOL: ACTIVE -> IDLE 1 6 - Super FREQ 4 15 TAKE FROM POOL: IDLE -> ACTIVE 2 6 - Super FREQ 4 16 BYE 0 0 - Super FREQ 3 17 BYE 0 0 - Super RARE 1 1 NEW 1 0 - Super RARE 2 2 NEW 1 1 - Super RARE 3 3 NEW 1 2 - Super RARE 4 4 NEW 1 3 - Super RARE 4 5 RUN DML 1 4 - Super RARE 4 6 MOVE INTO POOL: ACTIVE -> IDLE 1 4 - Super RARE 4 7 TAKE FROM POOL: IDLE -> ACTIVE 2 4 - Super RARE 4 8 BYE 0 0 - Super RARE 1 9 BYE 0 0 - Super RARE 5 10 NEW 1 0 - Super RARE 5 11 RUN DML 1 1 - Super RARE 5 12 MOVE INTO POOL: ACTIVE -> IDLE 1 1 - Super RARE 5 13 TAKE FROM POOL: IDLE -> ACTIVE 2 1 - Super RARE 5 14 BYE 0 0 - Super RARE 2 15 BYE 0 0 - Super RARE 6 16 NEW 1 0 - Super RARE 6 17 RUN DML 1 1 - Super RARE 6 18 MOVE INTO POOL: ACTIVE -> IDLE 1 1 - Super RARE 6 19 TAKE FROM POOL: IDLE -> ACTIVE 2 1 - Super RARE 6 20 BYE 0 0 - Super RARE 3 21 BYE 0 0 - SuperClassic FREQ 1 1 NEW 1 0 - SuperClassic FREQ 2 2 NEW 1 1 - SuperClassic FREQ 3 3 NEW 1 2 - SuperClassic FREQ 4 4 NEW 1 3 - SuperClassic FREQ 4 5 RUN DML 1 4 - SuperClassic FREQ 4 6 MOVE INTO POOL: ACTIVE -> IDLE 1 4 - SuperClassic FREQ 4 7 TAKE FROM POOL: IDLE -> ACTIVE 2 4 - SuperClassic FREQ 1 8 BYE 0 6 - SuperClassic FREQ 4 9 RUN DML 1 5 - SuperClassic FREQ 4 10 MOVE INTO POOL: ACTIVE -> IDLE 1 5 - SuperClassic FREQ 4 11 TAKE FROM POOL: IDLE -> ACTIVE 2 5 - SuperClassic FREQ 2 12 BYE 0 7 - SuperClassic FREQ 4 13 RUN DML 1 6 - SuperClassic FREQ 4 14 MOVE INTO POOL: ACTIVE -> IDLE 1 6 - SuperClassic FREQ 4 15 TAKE FROM POOL: IDLE -> ACTIVE 2 6 - SuperClassic FREQ 4 16 BYE 0 0 - SuperClassic FREQ 3 17 BYE 0 0 - SuperClassic RARE 1 1 NEW 1 0 - SuperClassic RARE 2 2 NEW 1 1 - SuperClassic RARE 3 3 NEW 1 2 - SuperClassic RARE 4 4 NEW 1 3 - SuperClassic RARE 4 5 RUN DML 1 4 - SuperClassic RARE 4 6 MOVE INTO POOL: ACTIVE -> IDLE 1 4 - SuperClassic RARE 4 7 TAKE FROM POOL: IDLE -> ACTIVE 2 4 - SuperClassic RARE 4 8 BYE 0 0 - SuperClassic RARE 1 9 BYE 0 0 - SuperClassic RARE 5 10 NEW 1 0 - SuperClassic RARE 5 11 RUN DML 1 1 - SuperClassic RARE 5 12 MOVE INTO POOL: ACTIVE -> IDLE 1 1 - SuperClassic RARE 5 13 TAKE FROM POOL: IDLE -> ACTIVE 2 1 - SuperClassic RARE 5 14 BYE 0 0 - SuperClassic RARE 2 15 BYE 0 0 - SuperClassic RARE 6 16 NEW 1 0 - SuperClassic RARE 6 17 RUN DML 1 1 - SuperClassic RARE 6 18 MOVE INTO POOL: ACTIVE -> IDLE 1 1 - SuperClassic RARE 6 19 TAKE FROM POOL: IDLE -> ACTIVE 2 1 - SuperClassic RARE 6 20 BYE 0 0 - SuperClassic RARE 3 21 BYE 0 0 - - Records affected: 76 -""" - -@pytest.mark.version('>=4.0') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - - diff --git a/tests/functional/shadow/test_create_01.py b/tests/functional/shadow/test_create_01.py index 6556fcc9..195dab2b 100644 --- a/tests/functional/shadow/test_create_01.py +++ b/tests/functional/shadow/test_create_01.py @@ -1,31 +1,18 @@ #coding:utf-8 -# -# id: functional.shadow.create_01 -# title: CREATE SHADOW -# decription: -# CREATE SHADOW -# -# Dependencies: -# CREATE DATABASE -# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 3.0 -# qmid: functional.shadow.create.create_shadow_01 + +""" +ID: shadow.create-01 +TITLE: CREATE SHADOW +DESCRIPTION: +FBTEST: functional.shadow.create_01 +""" import pytest -from 
firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ create shadow 1 '$(DATABASE_LOCATION)test_defaults.shd'; commit; -- SHOW DATABASE -- Removed from here because this test must verify only ability to create shadow. @@ -34,7 +21,7 @@ test_script_1 = """ select --right(trim(rdb$file_name), char_length('test_defaults.shd')) as file_name iif( replace(rdb$file_name,'\\','/') containing replace('$(DATABASE_LOCATION)','','/') - and + and upper(right( trim(rdb$file_name), char_length('test_defaults.shd') )) = upper('test_defaults.shd') ,'OK' ,'BAD: ' || rdb$file_name @@ -47,9 +34,9 @@ test_script_1 = """ from rdb$files; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ CHECK_SHD_FILE_NAME OK FILE_SEQUENCE 0 FILE_START 0 @@ -60,8 +47,7 @@ expected_stdout_1 = """ """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/shadow/test_create_02.py b/tests/functional/shadow/test_create_02.py index abb626f2..06828193 100644 --- a/tests/functional/shadow/test_create_02.py +++ b/tests/functional/shadow/test_create_02.py @@ -1,36 +1,23 @@ #coding:utf-8 -# -# id: functional.shadow.create_02 -# title: CREATE SHADOW -# decription: -# CREATE SHADOW -# -# Dependencies: -# CREATE DATABASE -# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: functional.shadow.create.create_shadow_02 + +""" +ID: shadow.create-02 +TITLE: CREATE SHADOW +DESCRIPTION: +FBTEST: functional.shadow.create_02 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ create shadow 1 manual conditional '$(DATABASE_LOCATION)TEST.SHD' file '$(DATABASE_LOCATION)TEST.S00' starting at page 1000; commit; set list on; set count on; - select + select right(trim(rdb$file_name), char_length('test.s??')) as file_name ,rdb$file_sequence as file_sequence ,rdb$file_start as file_start @@ -40,9 +27,9 @@ test_script_1 = """ from rdb$files; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ FILE_NAME TEST.SHD FILE_SEQUENCE 0 FILE_START 0 @@ -60,9 +47,8 @@ expected_stdout_1 = """ Records affected: 2 """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/syspriv/test_access_any_object.py b/tests/functional/syspriv/test_access_any_object.py index 0d3c3f5d..5c4efc80 100644 --- a/tests/functional/syspriv/test_access_any_object.py +++ 
b/tests/functional/syspriv/test_access_any_object.py @@ -1,39 +1,29 @@ #coding:utf-8 -# -# id: functional.syspriv.access_any_object -# title: Check ability to query, modify and deleting data plus add/drop constraints on any table. -# decription: -# We create two master-detail tables (under SYSDBA) and add some data to them. -# Then we connect as U01 who has system privilege to query and change (including deletion) data from ANY table. -# Under this user we first try to run DML statements (IUD) and after - to remove some old and create new -# constraint. -# -# Checked on WI-T4.0.0.267. -# -# tracker_id: -# min_versions: ['4.0.0'] -# versions: 4.0 -# qmid: None + +""" +ID: syspriv.access-any-object +TITLE: Check ability to query, modify and deleting data plus add/drop constraints on any table +DESCRIPTION: + We create two master-detail tables (under SYSDBA) and add some data to them. + Then we connect as U01 who has system privilege to query and change (including deletion) data from ANY table. + Under this user we first try to run DML statements (IUD) and after - to remove some old and create new + constraint. +FBTEST: functional.syspriv.access_any_object +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() +test_user = user_factory('db', name='u01', do_not_create=True) -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set wng off; set bail on; set list on; create or alter view v_check as - select + select current_user as who_ami ,r.rdb$role_name ,rdb$role_in_use(r.rdb$role_name) as RDB_ROLE_IN_USE @@ -43,11 +33,11 @@ test_script_1 = """ grant select on v_check to public; recreate table tdetl( - id int, - pid int, - x int, - y int, - constraint tdetl_pk primary key(id), + id int, + pid int, + x int, + y int, + constraint tdetl_pk primary key(id), constraint tdetl_x_unq unique(x), constraint tdetl_y_gz check(y>0) ); @@ -58,7 +48,7 @@ test_script_1 = """ insert into tdetl(id, pid, x, y) values(10, 1, 111, 7); insert into tdetl(id, pid, x, y) values(20, 1, 222, 6); insert into tdetl(id, pid, x, y) values(30, 1, 333, 5); - commit; + commit; create or alter user u01 password '123' revoke admin role; revoke all on all from u01; @@ -74,10 +64,10 @@ test_script_1 = """ commit; -- Add/change/delete non-system records in RDB$TYPES - create role role_for_ddl_dml_any_obj - set system privileges to - SELECT_ANY_OBJECT_IN_DATABASE, - MODIFY_ANY_OBJECT_IN_DATABASE, + create role role_for_ddl_dml_any_obj + set system privileges to + SELECT_ANY_OBJECT_IN_DATABASE, + MODIFY_ANY_OBJECT_IN_DATABASE, ACCESS_ANY_OBJECT_IN_DATABASE; commit; grant default role_for_ddl_dml_any_obj to user u01; @@ -94,7 +84,7 @@ test_script_1 = """ delete from tdetl order by id rows 1; commit; - alter table tdetl + alter table tdetl add constraint tdetl_fk foreign key(pid) references tmain using index tdetl_fk_pid ,drop constraint tdetl_x_unq ,drop constraint tdetl_y_gz @@ -108,14 +98,14 @@ test_script_1 = """ insert into tdetl(id, pid, x, y) values(40, 1, 222, -777); -- should NOT issue error commit; - connect '$(DSN)' user sysdba password 'masterkey'; - drop user u01; - commit; + --connect '$(DSN)' user sysdba password 'masterkey'; + --drop user u01; + --commit; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ WHO_AMI U01 
RDB$ROLE_NAME RDB$ADMIN RDB_ROLE_IN_USE @@ -143,7 +133,8 @@ expected_stdout_1 = """ Records affected: 1 Records affected: 1 """ -expected_stderr_1 = """ + +expected_stderr = """ Statement failed, SQLSTATE = 23000 violation of FOREIGN KEY constraint "TDETL_FK" on table "TDETL" -Foreign key reference target does not exist @@ -151,11 +142,9 @@ expected_stderr_1 = """ """ @pytest.mark.version('>=4.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action, test_user): + act.expected_stdout = expected_stdout + act.expected_stderr = expected_stderr + act.execute() + assert (act.clean_stderr == act.clean_expected_stderr and + act.clean_stdout == act.clean_expected_stdout) diff --git a/tests/functional/syspriv/test_access_shutdown_database.py b/tests/functional/syspriv/test_access_shutdown_database.py index eee4d126..487c1c4c 100644 --- a/tests/functional/syspriv/test_access_shutdown_database.py +++ b/tests/functional/syspriv/test_access_shutdown_database.py @@ -1,35 +1,33 @@ #coding:utf-8 -# -# id: functional.syspriv.access_shutdown_database -# title: Check ability to access to database in shutdown single mode as non-sysdba. -# decription: -# We create role with granting system privilege ACCESS_SHUTDOWN_DATABASE to it. -# Then we create user and make this role as DEFAULT to him. -# Then we check that user U01: -# 1. can NOT CHANGE database attribute, i.e. can NOT shutdown or bring online database; -# 2. CAN make attachment to DB in 'shutdown single maintenace' mode and select smth from there. -# Also, we check that while U01 is connected, NO other attachment is possible. -# This is done by trying to make ES EDS as SYSDBA - this should fail with "335544528 : database shutdown". -# -# Checked on 4.0.0.267. See also letter from Alex 23.06.2016 11:46. -# -# tracker_id: -# min_versions: ['4.0.0'] -# versions: 4.0 -# qmid: None + +""" +ID: syspriv.access-shutdown-database +TITLE: Check ability to access a database in shutdown single mode as non-sysdba +DESCRIPTION: + We create a role with the system privilege ACCESS_SHUTDOWN_DATABASE granted to it. + Then we create a user and make this role his DEFAULT role. + Then we check that user U01: + 1. can NOT CHANGE database attributes, i.e. can NOT shutdown the database or bring it online; + 2. CAN attach to the DB in 'shutdown single maintenance' mode and select something from there. + Also, we check that while U01 is connected, NO other attachment is possible. + This is done by trying to run ES/EDS as SYSDBA - this should fail with "335544528 : database shutdown". + + Checked on 4.0.0.267. See also letter from Alex 23.06.2016 11:46.
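+NOTES:
+    The pre-conversion implementation is preserved below as a commented-out block and the test
+    is currently skipped ("Not IMPLEMENTED"). A minimal sketch of how the shutdown-single step
+    could be driven again, reusing the fbsvcmgr switches from that old block (the binary name,
+    database path and credentials here are illustrative placeholders, not values computed by
+    the test):
+
+        import subprocess
+        # as non-sysdba U01 this must FAIL (no right to change the shutdown mode);
+        # the same call made as SYSDBA must PASS:
+        subprocess.run(['fbsvcmgr', 'localhost:service_mgr',
+                        'user', 'U01', 'password', '123',
+                        'action_properties', 'dbname', '/path/to/test.fdb',
+                        'prp_shutdown_mode', 'prp_sm_single',
+                        'prp_force_shutdown', '0'],
+                       capture_output=True, text=True)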
+FBTEST: functional.syspriv.access_shutdown_database +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +substitutions = [('.* access to database.*', 'access to database'), ('.* -Some database.*', ''), + ('335544528 : database.* shutdown', '335544528 : database shutdown'), + ('Data source : Firebird::localhost:.*', 'Data source : Firebird::localhost:'), + ('-At block line: [\\d]+, col: [\\d]+', '-At block line')] -substitutions_1 = [('.* access to database.*', 'access to database'), ('.* -Some database.*', ''), ('335544528 : database.* shutdown', '335544528 : database shutdown'), ('Data source : Firebird::localhost:.*', 'Data source : Firebird::localhost:'), ('-At block line: [\\d]+, col: [\\d]+', '-At block line')] - -init_script_1 = """ +init_script = """ set wng off; create or alter view v_check as - select + select current_user as who_ami ,r.rdb$role_name ,rdb$role_in_use(r.rdb$role_name) as RDB_ROLE_IN_USE @@ -82,200 +80,18 @@ init_script_1 = """ set term ;^ commit; - create role role_for_access_shutdown_db + create role role_for_access_shutdown_db set system privileges to ACCESS_SHUTDOWN_DATABASE; -- CHANGE_SHUTDOWN_MODE, USE_GFIX_UTILITY, IGNORE_DB_TRIGGERS; commit; grant default role_for_access_shutdown_db to user u01; commit; """ -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -# test_script_1 -#--- -# -# import os -# import subprocess -# from subprocess import Popen -# -# db_file=db_conn.database_name -# db_conn.close() -# -# #-------------------------------------------- -# -# def flush_and_close( file_handle ): -# # https://docs.python.org/2/library/os.html#os.fsync -# # If you're starting with a Python file object f, -# # first do f.flush(), and -# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. -# global os -# -# file_handle.flush() -# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: -# # otherwise: "OSError: [Errno 9] Bad file descriptor"! 
-# os.fsync(file_handle.fileno()) -# file_handle.close() -# -# #-------------------------------------------- -# -# def cleanup( f_names_list ): -# global os -# for f in f_names_list: -# if type(f) == file: -# del_name = f.name -# elif type(f) == str: -# del_name = f -# else: -# print('Unrecognized type of element:', f, ' - can not be treated as file.') -# del_name = None -# -# if del_name and os.path.isfile( del_name ): -# os.remove( del_name ) -# -# #-------------------------------------------- -# -# # Change DB state to 'shutdown single maintenance': -# ################################################## -# -# f_dbshut_u01_log=open( os.path.join(context['temp_directory'],'tmp_dbshut_u01.log'), 'w') -# f_dbshut_u01_err=open( os.path.join(context['temp_directory'],'tmp_dbshut_u01.err'), 'w') -# -# # Must FAIL when we do this as non-sysdba: -# -# subprocess.call([context['fbsvcmgr_path'], "localhost:service_mgr", -# "user", "U01", "password", "123", -# "action_properties", -# "dbname", db_file, -# "prp_shutdown_mode", "prp_sm_single", -# "prp_force_shutdown", "0" -# ], -# stdout=f_dbshut_u01_log, -# stderr=f_dbshut_u01_err -# ) -# -# flush_and_close( f_dbshut_u01_log ) -# flush_and_close( f_dbshut_u01_err ) -# -# #------------------------------------------------- -# # Must PASS because 2nd time se do this as SYSDBA: -# -# f_dbshut_sys_log=open( os.path.join(context['temp_directory'],'tmp_dbshut_sys.log'), 'w') -# f_dbshut_sys_err=open( os.path.join(context['temp_directory'],'tmp_dbshut_sys.err'), 'w') -# -# subprocess.call([context['fbsvcmgr_path'], "localhost:service_mgr", -# "user", user_name, "password", user_password, -# "action_properties", -# "dbname", db_file, -# "prp_shutdown_mode", "prp_sm_single", -# "prp_force_shutdown", "0" -# ], -# stdout=f_dbshut_sys_log, -# stderr=f_dbshut_sys_err -# ) -# flush_and_close( f_dbshut_sys_log ) -# flush_and_close( f_dbshut_sys_err ) -# -# #--------------------------------------------------------------------------------------------------- -# -# sql_chk=''' -# set list on; -# set count on; -# commit; -# select v.* from v_check v; -# select m.mon$shutdown_mode from mon$database m; -# select a.att_user, att_prot from att_log a; -# set term ^; -# execute block returns( who_else_here rdb$user ) as -# declare another_user varchar(31); -# begin -# execute statement 'select current_user from rdb$database' -# on external 'localhost:' || rdb$get_context('SYSTEM','DB_NAME') -# as user 'SYSDBA' password 'masterkey' -# into who_else_here; -# -# suspend; -# end -# ^ -# set term ;^ -# ''' -# -# # Check ability to connect as NON sysdba to database that is in 'shutdown single' mode (mon$shutdown_mode=2) -# ########################## -# runProgram('isql',[dsn, '-user','U01', '-pas', '123'], sql_chk) -# -# -# # Return database to online state: -# ################################## -# -# f_online_u01_log=open( os.path.join(context['temp_directory'],'tmp_online_u01.log'), 'w') -# f_online_u01_err=open( os.path.join(context['temp_directory'],'tmp_online_u01.err'), 'w') -# -# # Must FAIL when we do this as non-sysdba: -# -# subprocess.call([context['fbsvcmgr_path'], "localhost:service_mgr", -# "user", "U01", "password", "123", -# "action_properties", -# "dbname", db_file, -# "prp_online_mode", "prp_sm_normal" -# ], -# stdout=f_online_u01_log, -# stderr=f_online_u01_err -# ) -# flush_and_close( f_online_u01_log ) -# flush_and_close( f_online_u01_err ) -# -# #------------------------------------------------- -# # Must PASS because 2nd time se do this as SYSDBA: -# -# 
f_online_sys_log=open( os.path.join(context['temp_directory'],'tmp_online_sys.log'), 'w') -# f_online_sys_err=open( os.path.join(context['temp_directory'],'tmp_online_sys.err'), 'w') -# -# subprocess.call([context['fbsvcmgr_path'], "localhost:service_mgr", -# "user", user_name, "password", user_password, -# "action_properties", -# "dbname", db_file, -# "prp_online_mode", "prp_sm_normal" -# ], -# stdout=f_online_sys_log, -# stderr=f_online_sys_err -# ) -# -# flush_and_close( f_online_sys_log ) -# flush_and_close( f_online_sys_err ) -# -# # ------------------------------------------------------------------------ -# -# # Check: -# ######## -# -# f_list=( -# f_dbshut_u01_log, -# f_dbshut_u01_err, -# f_dbshut_sys_log, -# f_dbshut_sys_err, -# f_online_u01_log, -# f_online_u01_err, -# f_online_sys_log, -# f_online_sys_err -# ) -# -# # Only f_dbshut_u01_err and f_online_u01_err must contain single message. -# # All other files from f_list must be EMPTY: -# -# for i in range(len(f_list)): -# with open( f_list[i].name,'r') as f: -# for line in f: -# print( os.path.basename( f_list[i].name ) + ' : ' + line) -# os.remove(f_list[i].name) -# -# # cleanup: drop user 'U01' -# ########## -# runProgram('isql',[dsn, '-user', user_name, '-pas', user_password], 'drop user u01; commit;') -# -#--- -act_1 = python_act('db_1', substitutions=substitutions_1) +act = python_act('db', substitutions=substitutions) -expected_stderr_1 = """ +expected_stderr = """ Statement failed, SQLSTATE = 42000 Execute statement error at attach : 335544528 : database shutdown @@ -283,7 +99,7 @@ expected_stderr_1 = """ -At block line: 4, col: 9 """ -expected_stdout_1 = """ +expected_stdout = """ WHO_AMI U01 RDB$ROLE_NAME RDB$ADMIN RDB_ROLE_IN_USE @@ -308,9 +124,190 @@ expected_stdout_1 = """ tmp_online_u01.err : no permission for bring online access to database """ +@pytest.mark.skip('FIXME: Not IMPLEMENTED') @pytest.mark.version('>=4.0') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") +# test_script_1 +#--- +# +# import os +# import subprocess +# from subprocess import Popen +# +# db_file=db_conn.database_name +# db_conn.close() +# +# #-------------------------------------------- +# +# def flush_and_close( file_handle ): +# # https://docs.python.org/2/library/os.html#os.fsync +# # If you're starting with a Python file object f, +# # first do f.flush(), and +# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. +# global os +# +# file_handle.flush() +# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: +# # otherwise: "OSError: [Errno 9] Bad file descriptor"! 
+# os.fsync(file_handle.fileno()) +# file_handle.close() +# +# #-------------------------------------------- +# +# def cleanup( f_names_list ): +# global os +# for f in f_names_list: +# if type(f) == file: +# del_name = f.name +# elif type(f) == str: +# del_name = f +# else: +# print('Unrecognized type of element:', f, ' - can not be treated as file.') +# del_name = None +# +# if del_name and os.path.isfile( del_name ): +# os.remove( del_name ) +# +# #-------------------------------------------- +# +# # Change DB state to 'shutdown single maintenance': +# ################################################## +# +# f_dbshut_u01_log=open( os.path.join(context['temp_directory'],'tmp_dbshut_u01.log'), 'w') +# f_dbshut_u01_err=open( os.path.join(context['temp_directory'],'tmp_dbshut_u01.err'), 'w') +# +# # Must FAIL when we do this as non-sysdba: +# +# subprocess.call([context['fbsvcmgr_path'], "localhost:service_mgr", +# "user", "U01", "password", "123", +# "action_properties", +# "dbname", db_file, +# "prp_shutdown_mode", "prp_sm_single", +# "prp_force_shutdown", "0" +# ], +# stdout=f_dbshut_u01_log, +# stderr=f_dbshut_u01_err +# ) +# +# flush_and_close( f_dbshut_u01_log ) +# flush_and_close( f_dbshut_u01_err ) +# +# #------------------------------------------------- +# # Must PASS because 2nd time se do this as SYSDBA: +# +# f_dbshut_sys_log=open( os.path.join(context['temp_directory'],'tmp_dbshut_sys.log'), 'w') +# f_dbshut_sys_err=open( os.path.join(context['temp_directory'],'tmp_dbshut_sys.err'), 'w') +# +# subprocess.call([context['fbsvcmgr_path'], "localhost:service_mgr", +# "user", user_name, "password", user_password, +# "action_properties", +# "dbname", db_file, +# "prp_shutdown_mode", "prp_sm_single", +# "prp_force_shutdown", "0" +# ], +# stdout=f_dbshut_sys_log, +# stderr=f_dbshut_sys_err +# ) +# flush_and_close( f_dbshut_sys_log ) +# flush_and_close( f_dbshut_sys_err ) +# +# #--------------------------------------------------------------------------------------------------- +# +# sql_chk=''' +# set list on; +# set count on; +# commit; +# select v.* from v_check v; +# select m.mon$shutdown_mode from mon$database m; +# select a.att_user, att_prot from att_log a; +# set term ^; +# execute block returns( who_else_here rdb$user ) as +# declare another_user varchar(31); +# begin +# execute statement 'select current_user from rdb$database' +# on external 'localhost:' || rdb$get_context('SYSTEM','DB_NAME') +# as user 'SYSDBA' password 'masterkey' +# into who_else_here; +# +# suspend; +# end +# ^ +# set term ;^ +# ''' +# +# # Check ability to connect as NON sysdba to database that is in 'shutdown single' mode (mon$shutdown_mode=2) +# ########################## +# runProgram('isql',[dsn, '-user','U01', '-pas', '123'], sql_chk) +# +# +# # Return database to online state: +# ################################## +# +# f_online_u01_log=open( os.path.join(context['temp_directory'],'tmp_online_u01.log'), 'w') +# f_online_u01_err=open( os.path.join(context['temp_directory'],'tmp_online_u01.err'), 'w') +# +# # Must FAIL when we do this as non-sysdba: +# +# subprocess.call([context['fbsvcmgr_path'], "localhost:service_mgr", +# "user", "U01", "password", "123", +# "action_properties", +# "dbname", db_file, +# "prp_online_mode", "prp_sm_normal" +# ], +# stdout=f_online_u01_log, +# stderr=f_online_u01_err +# ) +# flush_and_close( f_online_u01_log ) +# flush_and_close( f_online_u01_err ) +# +# #------------------------------------------------- +# # Must PASS because 2nd time se do this as SYSDBA: +# +# 
f_online_sys_log=open( os.path.join(context['temp_directory'],'tmp_online_sys.log'), 'w') +# f_online_sys_err=open( os.path.join(context['temp_directory'],'tmp_online_sys.err'), 'w') +# +# subprocess.call([context['fbsvcmgr_path'], "localhost:service_mgr", +# "user", user_name, "password", user_password, +# "action_properties", +# "dbname", db_file, +# "prp_online_mode", "prp_sm_normal" +# ], +# stdout=f_online_sys_log, +# stderr=f_online_sys_err +# ) +# +# flush_and_close( f_online_sys_log ) +# flush_and_close( f_online_sys_err ) +# +# # ------------------------------------------------------------------------ +# +# # Check: +# ######## +# +# f_list=( +# f_dbshut_u01_log, +# f_dbshut_u01_err, +# f_dbshut_sys_log, +# f_dbshut_sys_err, +# f_online_u01_log, +# f_online_u01_err, +# f_online_sys_log, +# f_online_sys_err +# ) +# +# # Only f_dbshut_u01_err and f_online_u01_err must contain single message. +# # All other files from f_list must be EMPTY: +# +# for i in range(len(f_list)): +# with open( f_list[i].name,'r') as f: +# for line in f: +# print( os.path.basename( f_list[i].name ) + ' : ' + line) +# os.remove(f_list[i].name) +# +# # cleanup: drop user 'U01' +# ########## +# runProgram('isql',[dsn, '-user', user_name, '-pas', user_password], 'drop user u01; commit;') +# +#--- diff --git a/tests/functional/syspriv/test_change_header_settings.py b/tests/functional/syspriv/test_change_header_settings.py index e4c1f371..91735d81 100644 --- a/tests/functional/syspriv/test_change_header_settings.py +++ b/tests/functional/syspriv/test_change_header_settings.py @@ -1,32 +1,25 @@ #coding:utf-8 -# -# id: functional.syspriv.change_header_settings -# title: Check ability to change some database header attributes by non-sysdba user who is granted with necessary system privileges. -# decription: -# Checked on 4.0.0.262. -# NB: attributes should be changed one at a time, i.e. one fbsvcmgr call should change only ONE atribute. -# -# tracker_id: -# min_versions: ['4.0.0'] -# versions: 4.0 -# qmid: None + +""" +ID: syspriv.change-header-settings +TITLE: Check ability to change some database header attributes by non-sysdba user who + is granted with necessary system privileges +DESCRIPTION: + NB: attributes should be changed one at a time, i.e. one fbsvcmgr call should change only ONE atribute. +FBTEST: functional.syspriv.change_header_settings +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """ +init_script = """ set wng off; set bail on; set list on; set count on; create or alter view v_check as - select + select current_user as who_ami ,r.rdb$role_name ,rdb$role_in_use(r.rdb$role_name) as RDB_ROLE_IN_USE @@ -84,123 +77,18 @@ init_script_1 = """ -- NB: Privilege 'IGNORE_DB_TRIGGERS' is needed when we return database to ONLINE -- and this DB has DB-level trigger. 
- create role role_for_change_header_settings + create role role_for_change_header_settings set system privileges to CHANGE_HEADER_SETTINGS, USE_GFIX_UTILITY, IGNORE_DB_TRIGGERS; commit; grant default role_for_change_header_settings to user u01; commit; """ -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -# test_script_1 -#--- -# -# import os -# import subprocess -# -# db_file = db_conn.database_name -# db_conn.close() -# -# #-------------------------------------------- -# -# def flush_and_close( file_handle ): -# # https://docs.python.org/2/library/os.html#os.fsync -# # If you're starting with a Python file object f, -# # first do f.flush(), and -# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. -# global os -# -# file_handle.flush() -# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: -# # otherwise: "OSError: [Errno 9] Bad file descriptor"! -# os.fsync(file_handle.fileno()) -# file_handle.close() -# -# #-------------------------------------------- -# -# def cleanup( f_names_list ): -# global os -# for f in f_names_list: -# if type(f) == file: -# del_name = f.name -# elif type(f) == str: -# del_name = f -# else: -# print('Unrecognized type of element:', f, ' - can not be treated as file.') -# del_name = None -# -# if del_name and os.path.isfile( del_name ): -# os.remove( del_name ) -# -# #-------------------------------------------- -# -# -# # Check that current non-sysdba user: -# # 1) can SKIP db-level trigger firing: -# # 2) IS granted with role 'role_for_change_header_settings': -# -# runProgram('isql',[dsn,'-nod','-user','U01', '-pas', '123'], 'set list on; set count on; select * from att_log; select * from v_check;') -# -# f_hdr_props_log = open( os.path.join(context['temp_directory'],'tmp_syspriv_hdr_props.log'), 'w') -# subprocess.call( [context['fbsvcmgr_path'],"localhost:service_mgr", -# "user","U01", "password", "123", -# "action_properties", -# "dbname", db_file, -# "prp_sweep_interval", "54321", -# ], -# stdout=f_hdr_props_log, -# stderr=subprocess.STDOUT -# ) -# -# subprocess.call( [context['fbsvcmgr_path'],"localhost:service_mgr", -# "user","U01", "password", "123", -# "action_properties", -# "dbname", db_file, -# "prp_set_sql_dialect", "1", -# ], -# stdout=f_hdr_props_log, -# stderr=subprocess.STDOUT -# ) -# -# subprocess.call( [context['fbsvcmgr_path'],"localhost:service_mgr", -# "user","U01", "password", "123", -# "action_properties", -# "dbname", db_file, -# "prp_write_mode", "prp_wm_async" -# ], -# stdout=f_hdr_props_log, -# stderr=subprocess.STDOUT -# ) -# -# flush_and_close( f_hdr_props_log ) -# -# # Checks -# ######## -# -# sql_chk=''' -# set list on; -# set count on; -# select m.mon$sweep_interval, m.mon$sql_dialect, m.mon$forced_writes from mon$database m; -# ''' -# runProgram('isql',[dsn,'-nod','-user','U01', '-pas', '123'], sql_chk) -# -# -# # Must be EMPTY: -# ################ -# with open( f_hdr_props_log.name,'r') as f: -# for line in f: -# print('DB SHUTDOWN LOG: '+line.upper()) -# -# -# # Cleanup: -# ########## -# cleanup( (f_hdr_props_log,) ) -# -#--- -act_1 = python_act('db_1', substitutions=substitutions_1) +act = python_act('db') -expected_stdout_1 = """ +expected_stdout = """ Records affected: 0 WHO_AMI U01 RDB$ROLE_NAME RDB$ADMIN @@ -217,9 +105,113 @@ expected_stdout_1 = """ Records affected: 1 """ +@pytest.mark.skip('FIXME: Not IMPLEMENTED') @pytest.mark.version('>=4.0') -@pytest.mark.xfail -def test_1(act_1: Action): - 
pytest.fail("Test not IMPLEMENTED") - +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") +# test_script_1 +#--- +# +# import os +# import subprocess +# +# db_file = db_conn.database_name +# db_conn.close() +# +# #-------------------------------------------- +# +# def flush_and_close( file_handle ): +# # https://docs.python.org/2/library/os.html#os.fsync +# # If you're starting with a Python file object f, +# # first do f.flush(), and +# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. +# global os +# +# file_handle.flush() +# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: +# # otherwise: "OSError: [Errno 9] Bad file descriptor"! +# os.fsync(file_handle.fileno()) +# file_handle.close() +# +# #-------------------------------------------- +# +# def cleanup( f_names_list ): +# global os +# for f in f_names_list: +# if type(f) == file: +# del_name = f.name +# elif type(f) == str: +# del_name = f +# else: +# print('Unrecognized type of element:', f, ' - can not be treated as file.') +# del_name = None +# +# if del_name and os.path.isfile( del_name ): +# os.remove( del_name ) +# +# #-------------------------------------------- +# +# +# # Check that current non-sysdba user: +# # 1) can SKIP db-level trigger firing: +# # 2) IS granted with role 'role_for_change_header_settings': +# +# runProgram('isql',[dsn,'-nod','-user','U01', '-pas', '123'], 'set list on; set count on; select * from att_log; select * from v_check;') +# +# f_hdr_props_log = open( os.path.join(context['temp_directory'],'tmp_syspriv_hdr_props.log'), 'w') +# subprocess.call( [context['fbsvcmgr_path'],"localhost:service_mgr", +# "user","U01", "password", "123", +# "action_properties", +# "dbname", db_file, +# "prp_sweep_interval", "54321", +# ], +# stdout=f_hdr_props_log, +# stderr=subprocess.STDOUT +# ) +# +# subprocess.call( [context['fbsvcmgr_path'],"localhost:service_mgr", +# "user","U01", "password", "123", +# "action_properties", +# "dbname", db_file, +# "prp_set_sql_dialect", "1", +# ], +# stdout=f_hdr_props_log, +# stderr=subprocess.STDOUT +# ) +# +# subprocess.call( [context['fbsvcmgr_path'],"localhost:service_mgr", +# "user","U01", "password", "123", +# "action_properties", +# "dbname", db_file, +# "prp_write_mode", "prp_wm_async" +# ], +# stdout=f_hdr_props_log, +# stderr=subprocess.STDOUT +# ) +# +# flush_and_close( f_hdr_props_log ) +# +# # Checks +# ######## +# +# sql_chk=''' +# set list on; +# set count on; +# select m.mon$sweep_interval, m.mon$sql_dialect, m.mon$forced_writes from mon$database m; +# ''' +# runProgram('isql',[dsn,'-nod','-user','U01', '-pas', '123'], sql_chk) +# +# +# # Must be EMPTY: +# ################ +# with open( f_hdr_props_log.name,'r') as f: +# for line in f: +# print('DB SHUTDOWN LOG: '+line.upper()) +# +# +# # Cleanup: +# ########## +# cleanup( (f_hdr_props_log,) ) +# +#--- diff --git a/tests/functional/syspriv/test_change_mapping_rules.py b/tests/functional/syspriv/test_change_mapping_rules.py index f30bfc74..885a2e81 100644 --- a/tests/functional/syspriv/test_change_mapping_rules.py +++ b/tests/functional/syspriv/test_change_mapping_rules.py @@ -1,29 +1,22 @@ #coding:utf-8 -# -# id: functional.syspriv.change_mapping_rules -# title: Check ability to manage auth mappings -# decription: -# Verify ability to issue CREATE / ALTER / DROP MAPPING by non-sysdba user. 
-# Checked on 5.0.0.133 SS/CS, 4.0.1.2563 SS/CS -# -# tracker_id: -# min_versions: ['4.0.0'] -# versions: 4.0 -# qmid: None + +""" +ID: syspriv.change-mapping-rules +TITLE: Check ability to manage auth mappings +DESCRIPTION: + Verify ability to issue CREATE / ALTER / DROP MAPPING by non-sysdba user. +FBTEST: functional.syspriv.change_mapping_rules +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() -substitutions_1 = [('.*Global mapping.*', '')] +test_user = user_factory('db', name='john_smith_mapping_manager', do_not_create=True) +test_role = role_factory('db', name='tmp_role_for_change_mapping', do_not_create=True) -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set wng off; -- set bail on; set list on; @@ -58,28 +51,27 @@ test_script_1 = """ commit; show mapping; - + drop global mapping tmp_syspriv_global_map; drop mapping tmp_syspriv_local_map; commit; - connect '$(DSN)' user sysdba password 'masterkey'; - drop user john_smith_mapping_manager; - drop role tmp_role_for_change_mapping; - commit; + --connect '$(DSN)' user sysdba password 'masterkey'; + --drop user john_smith_mapping_manager; + --drop role tmp_role_for_change_mapping; + --commit; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('.*Global mapping.*', '')]) -expected_stdout_1 = """ +expected_stdout = """ TMP_SYSPRIV_LOCAL_MAP USING PLUGIN SRP FROM ANY USER TO USER *** Global mapping *** TMP_SYSPRIV_GLOBAL_MAP USING PLUGIN SRP FROM ANY USER TO USER """ @pytest.mark.version('>=4.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action, test_user, test_role): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/syspriv/test_change_shutdown_mode.py b/tests/functional/syspriv/test_change_shutdown_mode.py index 60462c9f..06391107 100644 --- a/tests/functional/syspriv/test_change_shutdown_mode.py +++ b/tests/functional/syspriv/test_change_shutdown_mode.py @@ -1,31 +1,24 @@ #coding:utf-8 -# -# id: functional.syspriv.change_shutdown_mode -# title: Check ability to change database shutdown mode by non-sysdba user who is granted with necessary system privileges. -# decription: -# Checked on 4.0.0.262. -# -# tracker_id: -# min_versions: ['4.0.0'] -# versions: 4.0 -# qmid: None + +""" +ID: syspriv.change-shutdown-mode +TITLE: Check ability to change database shutdown mode by non-sysdba user who is + granted with necessary system privileges +DESCRIPTION: +FBTEST: functional.syspriv.change_shutdown_mode +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """ +init_script = """ set wng off; set bail on; set list on; set count on; create or alter view v_check as - select + select current_user as who_ami ,r.rdb$role_name ,rdb$role_in_use(r.rdb$role_name) as RDB_ROLE_IN_USE @@ -85,124 +78,18 @@ init_script_1 = """ -- Add/change/delete non-system records in RDB$TYPES. -- NB: Privilege 'IGNORE_DB_TRIGGERS' is needed when we return database to ONLINE -- and this DB has DB-level trigger. 
- create role role_for_change_shutdown_mode + create role role_for_change_shutdown_mode set system privileges to CHANGE_SHUTDOWN_MODE, USE_GFIX_UTILITY, IGNORE_DB_TRIGGERS; commit; grant default role_for_change_shutdown_mode to user u01; commit; """ -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -# test_script_1 -#--- -# -# import os -# import subprocess -# -# db_file = db_conn.database_name -# db_conn.close() -# -# #-------------------------------------------- -# -# def flush_and_close( file_handle ): -# # https://docs.python.org/2/library/os.html#os.fsync -# # If you're starting with a Python file object f, -# # first do f.flush(), and -# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. -# global os -# -# file_handle.flush() -# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: -# # otherwise: "OSError: [Errno 9] Bad file descriptor"! -# os.fsync(file_handle.fileno()) -# file_handle.close() -# -# #-------------------------------------------- -# -# def cleanup( f_names_list ): -# global os -# for f in f_names_list: -# if type(f) == file: -# del_name = f.name -# elif type(f) == str: -# del_name = f -# else: -# print('Unrecognized type of element:', f, ' - can not be treated as file.') -# del_name = None -# -# if del_name and os.path.isfile( del_name ): -# os.remove( del_name ) -# -# #-------------------------------------------- -# -# -# # Check that current non-sysdba user: -# # 1) can SKIP db-level trigger firing: -# # 2) IS granted with role 'role_for_change_shutdown_mode': -# -# runProgram('isql',[dsn,'-nod','-user','U01', '-pas', '123'], 'set list on; set count on; select * from att_log; select * from v_check;') -# -# f_shutdown_log = open( os.path.join(context['temp_directory'],'tmp_syspriv_dbshut.log'), 'w') -# subprocess.call( [context['fbsvcmgr_path'],"localhost:service_mgr", -# "user","U01", "password", "123", -# "action_properties", -# "dbname", db_file, -# "prp_shutdown_mode", "prp_sm_full", "prp_force_shutdown", "0" -# ], -# stdout=f_shutdown_log, -# stderr=subprocess.STDOUT -# ) -# flush_and_close( f_shutdown_log ) -# -# f_dbheader_log = open( os.path.join(context['temp_directory'],'tmp_syspriv_dbhead.log'), 'w') -# subprocess.call([context['fbsvcmgr_path'], "localhost:service_mgr", -# "user", "U01", "password" , "123", -# "action_db_stats", "sts_hdr_pages", -# "dbname", db_file -# ], -# stdout=f_dbheader_log, -# stderr=subprocess.STDOUT -# ) -# flush_and_close( f_dbheader_log ) -# -# f_ret2online_log = open( os.path.join(context['temp_directory'],'tmp_syspriv_dbonline.log'), 'w') -# subprocess.call( [context['fbsvcmgr_path'], "localhost:service_mgr", -# "user","U01", "password", "123", -# "action_properties", "prp_db_online", -# "dbname", db_file, -# ], -# stdout = f_ret2online_log, -# stderr = subprocess.STDOUT -# ) -# flush_and_close( f_ret2online_log ) -# -# # Must be EMPTY: -# with open( f_shutdown_log.name,'r') as f: -# for line in f: -# print('DB SHUTDOWN LOG: '+line.upper()) -# -# -# # Must contain: "Attributes force write, full shutdown" -# with open( f_dbheader_log.name,'r') as f: -# for line in f: -# if 'Attributes' in line: -# print('DB HEADER: ' + ' '.join(line.split()).upper() ) -# -# -# # Must be EMPTY: -# with open( f_ret2online_log.name,'r') as f: -# for line in f: -# print('DB ONLINE LOG: '+line.upper()) -# -# -# # Cleanup: -# ########## -# cleanup( (f_shutdown_log, f_dbheader_log, f_ret2online_log) ) -#--- -act_1 = python_act('db_1', 
substitutions=substitutions_1) +act = python_act('db') -expected_stdout_1 = """ +expected_stdout = """ Records affected: 0 WHO_AMI U01 RDB$ROLE_NAME RDB$ADMIN @@ -216,9 +103,114 @@ expected_stdout_1 = """ DB HEADER: ATTRIBUTES FORCE WRITE, FULL SHUTDOWN """ +@pytest.mark.skip('FIXME: Not IMPLEMENTED') @pytest.mark.version('>=4.0') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") +# test_script_1 +#--- +# +# import os +# import subprocess +# +# db_file = db_conn.database_name +# db_conn.close() +# +# #-------------------------------------------- +# +# def flush_and_close( file_handle ): +# # https://docs.python.org/2/library/os.html#os.fsync +# # If you're starting with a Python file object f, +# # first do f.flush(), and +# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. +# global os +# +# file_handle.flush() +# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: +# # otherwise: "OSError: [Errno 9] Bad file descriptor"! +# os.fsync(file_handle.fileno()) +# file_handle.close() +# +# #-------------------------------------------- +# +# def cleanup( f_names_list ): +# global os +# for f in f_names_list: +# if type(f) == file: +# del_name = f.name +# elif type(f) == str: +# del_name = f +# else: +# print('Unrecognized type of element:', f, ' - can not be treated as file.') +# del_name = None +# +# if del_name and os.path.isfile( del_name ): +# os.remove( del_name ) +# +# #-------------------------------------------- +# +# +# # Check that current non-sysdba user: +# # 1) can SKIP db-level trigger firing: +# # 2) IS granted with role 'role_for_change_shutdown_mode': +# +# runProgram('isql',[dsn,'-nod','-user','U01', '-pas', '123'], 'set list on; set count on; select * from att_log; select * from v_check;') +# +# f_shutdown_log = open( os.path.join(context['temp_directory'],'tmp_syspriv_dbshut.log'), 'w') +# subprocess.call( [context['fbsvcmgr_path'],"localhost:service_mgr", +# "user","U01", "password", "123", +# "action_properties", +# "dbname", db_file, +# "prp_shutdown_mode", "prp_sm_full", "prp_force_shutdown", "0" +# ], +# stdout=f_shutdown_log, +# stderr=subprocess.STDOUT +# ) +# flush_and_close( f_shutdown_log ) +# +# f_dbheader_log = open( os.path.join(context['temp_directory'],'tmp_syspriv_dbhead.log'), 'w') +# subprocess.call([context['fbsvcmgr_path'], "localhost:service_mgr", +# "user", "U01", "password" , "123", +# "action_db_stats", "sts_hdr_pages", +# "dbname", db_file +# ], +# stdout=f_dbheader_log, +# stderr=subprocess.STDOUT +# ) +# flush_and_close( f_dbheader_log ) +# +# f_ret2online_log = open( os.path.join(context['temp_directory'],'tmp_syspriv_dbonline.log'), 'w') +# subprocess.call( [context['fbsvcmgr_path'], "localhost:service_mgr", +# "user","U01", "password", "123", +# "action_properties", "prp_db_online", +# "dbname", db_file, +# ], +# stdout = f_ret2online_log, +# stderr = subprocess.STDOUT +# ) +# flush_and_close( f_ret2online_log ) +# +# # Must be EMPTY: +# with open( f_shutdown_log.name,'r') as f: +# for line in f: +# print('DB SHUTDOWN LOG: '+line.upper()) +# +# +# # Must contain: "Attributes force write, full shutdown" +# with open( f_dbheader_log.name,'r') as f: +# for line in f: +# if 'Attributes' in line: +# print('DB HEADER: ' + ' '.join(line.split()).upper() ) +# +# +# # Must be EMPTY: +# with open( f_ret2online_log.name,'r') as f: +# for line in f: +# print('DB ONLINE LOG: 
'+line.upper()) +# +# +# # Cleanup: +# ########## +# cleanup( (f_shutdown_log, f_dbheader_log, f_ret2online_log) ) +#--- diff --git a/tests/functional/syspriv/test_create_database.py b/tests/functional/syspriv/test_create_database.py index 04de694f..ec91a1c8 100644 --- a/tests/functional/syspriv/test_create_database.py +++ b/tests/functional/syspriv/test_create_database.py @@ -1,24 +1,18 @@ #coding:utf-8 -# -# id: functional.syspriv.create_database -# title: Check ability to CREATE database by non-sysdba user who is granted with necessary system privilege. -# decription: -# Checked on 5.0.0.133 SS/CS; 4.0.1.2563 SS/CS. -# -# tracker_id: -# min_versions: ['4.0.0'] -# versions: 4.0 -# qmid: None + +""" +ID: syspriv.create-database +TITLE: Check ability to CREATE database by non-sysdba user who is granted with necessary system privilege +DESCRIPTION: +FBTEST: functional.syspriv.create_database +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +substitutions = [('DB_NAME.*FUNCTIONAL.SYSPRIV.CREATE_DATABASE.TMP', 'DB_NAME FUNCTIONAL.SYSPRIV.DROP_DATABASE.TMP')] -substitutions_1 = [('DB_NAME.*FUNCTIONAL.SYSPRIV.CREATE_DATABASE.TMP', 'DB_NAME FUNCTIONAL.SYSPRIV.DROP_DATABASE.TMP')] - -init_script_1 = """ +init_script = """ set wng off; set bail on; set list on; @@ -47,35 +41,50 @@ init_script_1 = """ commit; """ -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) + +act = python_act('db', substitutions=substitutions) + +expected_stdout = """ + DB_NAME C:\\FBTESTING\\QA\\FBT-REPO\\TMP\\FUNCTIONAL.SYSPRIV.CREATE_DATABASE.TMP + WHO_AMI JOHN_SMITH_DB_CREATOR + RDB$ROLE_NAME RDB$ADMIN + RDB_ROLE_IN_USE + RDB$SYSTEM_PRIVILEGES FFFFFFFFFFFFFFFF +""" + +@pytest.mark.skip('FIXME: Not IMPLEMENTED') +@pytest.mark.version('>=4.0') +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") # test_script_1 #--- -# +# # import os # import subprocess # import time -# +# # db_pref = os.path.splitext(db_conn.database_name)[0] # db_conn.close() -# +# # #-------------------------------------------- -# +# # def flush_and_close( file_handle ): # # https://docs.python.org/2/library/os.html#os.fsync -# # If you're starting with a Python file object f, -# # first do f.flush(), and +# # If you're starting with a Python file object f, +# # first do f.flush(), and # # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. # global os -# +# # file_handle.flush() # if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: # # otherwise: "OSError: [Errno 9] Bad file descriptor"! 
# os.fsync(file_handle.fileno()) # file_handle.close() -# +# # #-------------------------------------------- -# +# # def cleanup( f_names_list ): # global os # for f in f_names_list: @@ -86,63 +95,47 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # else: # print('Unrecognized type of element:', f, ' - can not be treated as file.') # del_name = None -# +# # if del_name and os.path.isfile( del_name ): # os.remove( del_name ) -# +# # #-------------------------------------------- -# +# # fdb_test = db_pref+'.tmp' -# +# # cleanup( fdb_test, ) -# +# # # Check that non-sysdba user can connect and DROP database # ####### # sql_chk=''' -# set list on; -# +# set list on; +# # create database 'localhost:%(fdb_test)s' user john_smith_db_creator password '123'; # commit; -# -# select +# +# select # upper(mon$database_name) as db_name # ,current_user as who_ami # ,r.rdb$role_name # ,rdb$role_in_use(r.rdb$role_name) as RDB_ROLE_IN_USE # ,r.rdb$system_privileges # from mon$database m cross join rdb$roles r; -# +# # commit; -# +# # connect 'localhost:%(fdb_test)s' user sysdba password 'masterkey'; # drop user john_smith_db_creator using plugin Srp; # commit; # ''' % locals() -# +# # runProgram('isql',['-q'], sql_chk) -# +# # if not os.path.isfile(fdb_test): # print('ERROR WHILE CREATE DATABASE: FILE NOT FOUND.') -# +# # # Cleanup: # ########## # time.sleep(1) # cleanup( (fdb_test,) ) -# +# #--- -act_1 = python_act('db_1', substitutions=substitutions_1) - -expected_stdout_1 = """ - DB_NAME C:\\FBTESTING\\QA\\FBT-REPO\\TMP\\FUNCTIONAL.SYSPRIV.CREATE_DATABASE.TMP - WHO_AMI JOHN_SMITH_DB_CREATOR - RDB$ROLE_NAME RDB$ADMIN - RDB_ROLE_IN_USE - RDB$SYSTEM_PRIVILEGES FFFFFFFFFFFFFFFF -""" - -@pytest.mark.version('>=4.0') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - - diff --git a/tests/functional/syspriv/test_create_privileged_roles.py b/tests/functional/syspriv/test_create_privileged_roles.py index 3b08e5e3..48475e75 100644 --- a/tests/functional/syspriv/test_create_privileged_roles.py +++ b/tests/functional/syspriv/test_create_privileged_roles.py @@ -1,38 +1,30 @@ #coding:utf-8 -# -# id: functional.syspriv.create_privileged_roles -# title: Check ability of non-sysdba user to CREATE privileged role (but NOT use it) -# decription: -# Checked on WI-T4.0.0.267. 
-# -# tracker_id: -# min_versions: ['4.0.0'] -# versions: 4.0 -# qmid: None + +""" +ID: syspriv.create-privileged-roles +TITLE: Check ability of non-sysdba user to CREATE privileged role (but NOT use it) +DESCRIPTION: +FBTEST: functional.syspriv.create_privileged_roles +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() +test_user = user_factory('db', name='u01', do_not_create=True) +role_create = role_factory('db', name='role_for_CREATE_PRIVILEGED_ROLES', do_not_create=True) +role_granted = role_factory('db', name='role_for_USE_GRANTED_BY_CLAUSE', do_not_create=True) -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set wng off; set bail on; set list on; - create or alter user u01 password '123' revoke admin role; revoke all on all from u01; grant create role to u01; commit; - +/* set term ^; execute block as begin @@ -47,7 +39,7 @@ test_script_1 = """ end^ set term ;^ commit; - +*/ create role role_for_CREATE_PRIVILEGED_ROLES set system privileges to CREATE_PRIVILEGED_ROLES; commit; grant default role_for_CREATE_PRIVILEGED_ROLES to user u01; @@ -62,17 +54,17 @@ test_script_1 = """ select current_user as who_am_i,r.rdb$role_name,rdb$role_in_use(r.rdb$role_name),r.rdb$system_privileges from mon$database m cross join rdb$roles r; - + commit; - connect '$(DSN)' user sysdba password 'masterkey'; - drop user u01; - commit; + --connect '$(DSN)' user sysdba password 'masterkey'; + --drop user u01; + --commit; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ WHO_AM_I U01 RDB$ROLE_NAME RDB$ADMIN RDB$ROLE_IN_USE @@ -90,8 +82,7 @@ expected_stdout_1 = """ """ @pytest.mark.version('>=4.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action, test_user, role_create, role_granted): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/syspriv/test_create_user_types.py b/tests/functional/syspriv/test_create_user_types.py index b04df709..c380c3b5 100644 --- a/tests/functional/syspriv/test_create_user_types.py +++ b/tests/functional/syspriv/test_create_user_types.py @@ -1,32 +1,26 @@ #coding:utf-8 -# -# id: functional.syspriv.create_user_types -# title: Check ability to update content of RDB$TYPES. 
-# decription: -# -# tracker_id: -# min_versions: ['4.0.0'] -# versions: 4.0 -# qmid: None + +""" +ID: syspriv.create-user-types +TITLE: Check ability to update content of RDB$TYPES +DESCRIPTION: +FBTEST: functional.syspriv.create_user_types +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() -substitutions_1 = [('RDB\\$DESCRIPTION.*', 'RDB$DESCRIPTION')] +test_user = user_factory('db', name='dba_helper_create_usr_types', do_not_create=True) +test_role = role_factory('db', name='role_for_create_user_types', do_not_create=True) -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set wng off; set list on; create or alter view v_check as - select + select current_user as who_ami ,r.rdb$role_name ,rdb$role_in_use(r.rdb$role_name) as RDB_ROLE_IN_USE @@ -39,7 +33,7 @@ test_script_1 = """ create or alter user dba_helper_create_usr_types password '123' revoke admin role; revoke all on all from dba_helper_create_usr_types; commit; - +/* set term ^; execute block as begin @@ -48,7 +42,7 @@ test_script_1 = """ end^ set term ;^ commit; - +*/ -- Add/change/delete non-system records in RDB$TYPES create role role_for_create_user_types set system privileges to CREATE_USER_TYPES; commit; @@ -62,65 +56,65 @@ test_script_1 = """ --set echo on; insert into rdb$types(rdb$field_name, rdb$type, rdb$type_name, rdb$description, rdb$system_flag) - values( 'amount_avaliable', - -32767, - 'stock_amount', - 'Total number of units that can be sold immediately to any customer', + values( 'amount_avaliable', + -32767, + 'stock_amount', + 'Total number of units that can be sold immediately to any customer', 0 -- rdb$system_flag ) returning rdb$field_name, rdb$type, rdb$type_name, rdb$description, rdb$system_flag ; insert into rdb$types(rdb$field_name, rdb$type, rdb$type_name, rdb$description, rdb$system_flag) - values( 'amount_ion_reserve', - -2, - 'stock_amount', - 'Total number of units that is to be sold for customers who previously did order them', + values( 'amount_ion_reserve', + -2, + 'stock_amount', + 'Total number of units that is to be sold for customers who previously did order them', 1 -- rdb$system_flag ); update rdb$types set rdb$type = -32768, rdb$field_name = null - where rdb$type < 0 - order by rdb$type + where rdb$type < 0 + order by rdb$type rows 1 returning rdb$field_name, rdb$type, rdb$type_name, rdb$description, rdb$system_flag; delete from rdb$types where rdb$type < 0 - returning rdb$field_name, rdb$type, rdb$type_name, + returning rdb$field_name, rdb$type, rdb$type_name, -- rdb$description, -- TODO: uncomment this after core-5287 will be fixed rdb$system_flag ; commit; - connect '$(DSN)' user sysdba password 'masterkey'; - drop user dba_helper_create_usr_types; - drop role role_for_create_user_types; - commit; + -- connect '$(DSN)' user sysdba password 'masterkey'; + -- drop user dba_helper_create_usr_types; + -- drop role role_for_create_user_types; + -- commit; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('RDB\\$DESCRIPTION.*', 'RDB$DESCRIPTION')]) -expected_stdout_1 = """ +expected_stdout = """ WHO_AMI DBA_HELPER_CREATE_USR_TYPES - RDB$ROLE_NAME RDB$ADMIN + RDB$ROLE_NAME RDB$ADMIN RDB_ROLE_IN_USE RDB$SYSTEM_PRIVILEGES FFFFFFFFFFFFFFFF WHO_AMI DBA_HELPER_CREATE_USR_TYPES - RDB$ROLE_NAME ROLE_FOR_CREATE_USER_TYPES + RDB$ROLE_NAME 
ROLE_FOR_CREATE_USER_TYPES RDB_ROLE_IN_USE RDB$SYSTEM_PRIVILEGES 0800000000000000 - RDB$FIELD_NAME amount_avaliable + RDB$FIELD_NAME amount_avaliable RDB$TYPE -32767 - RDB$TYPE_NAME stock_amount + RDB$TYPE_NAME stock_amount RDB$DESCRIPTION b:782 Total number of units that can be sold immediately to any customer RDB$SYSTEM_FLAG 0 RDB$FIELD_NAME RDB$TYPE -32768 - RDB$TYPE_NAME stock_amount + RDB$TYPE_NAME stock_amount RDB$DESCRIPTION b:782 Total number of units that can be sold immediately to any customer RDB$SYSTEM_FLAG 0 @@ -128,20 +122,19 @@ expected_stdout_1 = """ RDB$FIELD_NAME RDB$TYPE -32768 - RDB$TYPE_NAME stock_amount + RDB$TYPE_NAME stock_amount RDB$SYSTEM_FLAG 0 """ -expected_stderr_1 = """ + +expected_stderr = """ Statement failed, SQLSTATE = 42000 INSERT operation is not allowed for system table RDB$TYPES """ @pytest.mark.version('>=4.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action, test_user, test_role): + act.expected_stdout = expected_stdout + act.expected_stderr = expected_stderr + act.execute() + assert (act.clean_stderr == act.clean_expected_stderr and + act.clean_stdout == act.clean_expected_stdout) diff --git a/tests/functional/syspriv/test_drop_database.py b/tests/functional/syspriv/test_drop_database.py index 881a86e3..b0679ed9 100644 --- a/tests/functional/syspriv/test_drop_database.py +++ b/tests/functional/syspriv/test_drop_database.py @@ -1,35 +1,28 @@ #coding:utf-8 -# -# id: functional.syspriv.drop_database -# title: Check ability to DROP database by non-sysdba user who is granted with necessary system privileges. -# decription: -# We make backup and restore of current DB to other name ('functional.syspriv.drop_database.tmp'). -# Than we attach to DB 'functional.syspriv.drop_database.tmp' as user U01 and try to DROP it. -# This should NOT raise any error, database file should be deleted from disk. -# -# Checked on 4.0.0.267. -# -# tracker_id: -# min_versions: ['4.0.0'] -# versions: 4.0 -# qmid: None + +""" +ID: syspriv.drop-database +TITLE: Check ability to DROP a database by a non-sysdba user who is granted the necessary system privileges +DESCRIPTION: + We back up the current DB and restore it under another name ('functional.syspriv.drop_database.tmp'). + Then we attach to DB 'functional.syspriv.drop_database.tmp' as user U01 and try to DROP it. + This should NOT raise any error, and the database file should be deleted from disk.
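+NOTES:
+    The test is currently skipped ("Not IMPLEMENTED"); the pre-conversion implementation is kept
+    below as a commented-out block. The essential step is simply attaching to the restored copy
+    as U01 and dropping it. A sketch of that step with the firebird-driver API is shown here
+    (the file name, user and password are illustrative placeholders taken from the old block,
+    not values computed by the test):
+
+        from firebird.driver import connect
+        con = connect('localhost:/path/to/functional.syspriv.drop_database.tmp',
+                      user='U01', password='123')
+        con.drop_database()  # must not raise; the file must disappear from disk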
+FBTEST: functional.syspriv.drop_database +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +substitutions = [('DB_NAME.*FUNCTIONAL.SYSPRIV.DROP_DATABASE.TMP', 'DB_NAME FUNCTIONAL.SYSPRIV.DROP_DATABASE.TMP')] -substitutions_1 = [('DB_NAME.*FUNCTIONAL.SYSPRIV.DROP_DATABASE.TMP', 'DB_NAME FUNCTIONAL.SYSPRIV.DROP_DATABASE.TMP')] - -init_script_1 = """ +init_script = """ set wng off; set bail on; set list on; set count on; create or alter view v_check as - select + select upper(mon$database_name) as db_name ,current_user as who_ami ,r.rdb$role_name @@ -60,102 +53,11 @@ init_script_1 = """ commit; """ -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -# test_script_1 -#--- -# -# import os -# import subprocess -# import time -# -# db_pref = os.path.splitext(db_conn.database_name)[0] -# db_conn.close() -# -# #-------------------------------------------- -# -# def flush_and_close( file_handle ): -# # https://docs.python.org/2/library/os.html#os.fsync -# # If you're starting with a Python file object f, -# # first do f.flush(), and -# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. -# global os -# -# file_handle.flush() -# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: -# # otherwise: "OSError: [Errno 9] Bad file descriptor"! -# os.fsync(file_handle.fileno()) -# file_handle.close() -# -# #-------------------------------------------- -# -# def cleanup( f_names_list ): -# global os -# for f in f_names_list: -# if type(f) == file: -# del_name = f.name -# elif type(f) == str: -# del_name = f -# else: -# print('Unrecognized type of element:', f, ' - can not be treated as file.') -# del_name = None -# -# if del_name and os.path.isfile( del_name ): -# os.remove( del_name ) -# -# #-------------------------------------------- -# -# fdb_this = db_pref+'.fdb' -# fbk_name = db_pref+'.fbk' -# fdb_test = db_pref+'.tmp' -# -# f_backup_restore=open( os.path.join(context['temp_directory'],'tmp_drop_db_backup_restore.log'), 'w') -# subprocess.call([context['fbsvcmgr_path'],"localhost:service_mgr", -# "user","SYSDBA","password","masterkey", -# "action_backup", -# "dbname", fdb_this, -# "bkp_file", fbk_name, -# "verbose"], -# stdout=f_backup_restore, -# stderr=subprocess.STDOUT -# ) -# -# subprocess.call([context['fbsvcmgr_path'],"localhost:service_mgr", -# "user","SYSDBA","password","masterkey", -# "action_restore", "res_replace", -# "verbose", -# "bkp_file", fbk_name, -# "dbname", fdb_test], -# stdout=f_backup_restore, -# stderr=subprocess.STDOUT -# ) -# flush_and_close( f_backup_restore ) -# -# -# # Check that non-sysdba user can connect and DROP database -# ####### -# sql_chk=''' -# set list on; -# set count on; -# select * from v_check; -# commit; -# drop database; -# ''' -# -# runProgram('isql',['localhost:'+fdb_test,'-user','U01', '-pas', '123'], sql_chk) -# -# if os.path.isfile(fdb_test): -# print('ERROR WHILE DROP DATABASE: FILE REMAINS ON DISK!') -# -# # Cleanup: -# ########## -# time.sleep(1) -# cleanup( (fbk_name, fdb_test, f_backup_restore) ) -# -#--- -act_1 = python_act('db_1', substitutions=substitutions_1) +act = python_act('db', substitutions=substitutions) -expected_stdout_1 = """ +expected_stdout = """ DB_NAME FUNCTIONAL.SYSPRIV.DROP_DATABASE.TMP WHO_AMI U01 RDB$ROLE_NAME RDB$ADMIN @@ -170,9 +72,99 @@ expected_stdout_1 = """ Records affected: 2 """ +@pytest.mark.skip('FIXME: Not 
IMPLEMENTED') @pytest.mark.version('>=4.0') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") +# test_script_1 +#--- +# +# import os +# import subprocess +# import time +# +# db_pref = os.path.splitext(db_conn.database_name)[0] +# db_conn.close() +# +# #-------------------------------------------- +# +# def flush_and_close( file_handle ): +# # https://docs.python.org/2/library/os.html#os.fsync +# # If you're starting with a Python file object f, +# # first do f.flush(), and +# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. +# global os +# +# file_handle.flush() +# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: +# # otherwise: "OSError: [Errno 9] Bad file descriptor"! +# os.fsync(file_handle.fileno()) +# file_handle.close() +# +# #-------------------------------------------- +# +# def cleanup( f_names_list ): +# global os +# for f in f_names_list: +# if type(f) == file: +# del_name = f.name +# elif type(f) == str: +# del_name = f +# else: +# print('Unrecognized type of element:', f, ' - can not be treated as file.') +# del_name = None +# +# if del_name and os.path.isfile( del_name ): +# os.remove( del_name ) +# +# #-------------------------------------------- +# +# fdb_this = db_pref+'.fdb' +# fbk_name = db_pref+'.fbk' +# fdb_test = db_pref+'.tmp' +# +# f_backup_restore=open( os.path.join(context['temp_directory'],'tmp_drop_db_backup_restore.log'), 'w') +# subprocess.call([context['fbsvcmgr_path'],"localhost:service_mgr", +# "user","SYSDBA","password","masterkey", +# "action_backup", +# "dbname", fdb_this, +# "bkp_file", fbk_name, +# "verbose"], +# stdout=f_backup_restore, +# stderr=subprocess.STDOUT +# ) +# +# subprocess.call([context['fbsvcmgr_path'],"localhost:service_mgr", +# "user","SYSDBA","password","masterkey", +# "action_restore", "res_replace", +# "verbose", +# "bkp_file", fbk_name, +# "dbname", fdb_test], +# stdout=f_backup_restore, +# stderr=subprocess.STDOUT +# ) +# flush_and_close( f_backup_restore ) +# +# +# # Check that non-sysdba user can connect and DROP database +# ####### +# sql_chk=''' +# set list on; +# set count on; +# select * from v_check; +# commit; +# drop database; +# ''' +# +# runProgram('isql',['localhost:'+fdb_test,'-user','U01', '-pas', '123'], sql_chk) +# +# if os.path.isfile(fdb_test): +# print('ERROR WHILE DROP DATABASE: FILE REMAINS ON DISK!') +# +# # Cleanup: +# ########## +# time.sleep(1) +# cleanup( (fbk_name, fdb_test, f_backup_restore) ) +# +#--- diff --git a/tests/functional/syspriv/test_grant_revoke_any_ddl_right.py b/tests/functional/syspriv/test_grant_revoke_any_ddl_right.py index 55bcef07..4f6240eb 100644 --- a/tests/functional/syspriv/test_grant_revoke_any_ddl_right.py +++ b/tests/functional/syspriv/test_grant_revoke_any_ddl_right.py @@ -1,43 +1,34 @@ #coding:utf-8 -# -# id: functional.syspriv.grant_revoke_any_ddl_right -# title: Check ability to grant right for issuing CREATE/ALTER/DROP statements. -# decription: -# Test creates user with name 'john_smith_ddl_grantor' and grants to him system privilege -# to allow another user to run any DDL statement, and also to revoke all privileges from -# this user. Name of another user (who will perform DDL): 'mike_adams_ddl_grantee'. -# -# After this, we connect as 'john_smith_ddl_grantor' and give all kinds of DDL rights -# for CREATE, ALTER and DROP objects to user 'mike_adams_ddl_grantee'. 
-# -# We then connect to database as 'mike_adams_ddl_grantee' and try to create all kind of -# database objects, then alter and drop them. No errors must occur here. -# -# Finally, we make connect as 'john_smith_ddl_grantor' and revoke from 'mike_adams_ddl_grantee' -# all grants. User'mike_adams_ddl_grantee' then makes connect and tries to CREATE any kind -# of DB objects. All of them must NOT be created and exception SQLSTATE = 42000 must raise. -# -# -# Checked on 5.0.0.139; 4.0.1.2568 -# -# tracker_id: -# min_versions: ['4.0.0'] -# versions: 4.0 -# qmid: None + +""" +ID: syspriv.grant-revoke-any-ddl-right +TITLE: Check ability to grant right for issuing CREATE/ALTER/DROP statements +DESCRIPTION: + Test creates user with name 'john_smith_ddl_grantor' and grants to him system privilege + to allow another user to run any DDL statement, and also to revoke all privileges from + this user. Name of another user (who will perform DDL): 'mike_adams_ddl_grantee'. + + After this, we connect as 'john_smith_ddl_grantor' and give all kinds of DDL rights + for CREATE, ALTER and DROP objects to user 'mike_adams_ddl_grantee'. + + We then connect to database as 'mike_adams_ddl_grantee' and try to create all kind of + database objects, then alter and drop them. No errors must occur here. + + Finally, we make connect as 'john_smith_ddl_grantor' and revoke from 'mike_adams_ddl_grantee' + all grants. User'mike_adams_ddl_grantee' then makes connect and tries to CREATE any kind + of DB objects. All of them must NOT be created and exception SQLSTATE = 42000 must raise. +FBTEST: functional.syspriv.grant_revoke_any_ddl_right +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() +user_grantor = user_factory('db', name='john_smith_ddl_grantor', do_not_create=True) +user_grantee = user_factory('db', name='mike_adams_ddl_grantee', do_not_create=True) +role_revoke = role_factory('db', name='r_for_grant_revoke_any_ddl_right', do_not_create=True) -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set wng off; set bail on; set list on; @@ -46,7 +37,7 @@ test_script_1 = """ create or alter user john_smith_ddl_grantor password '123' revoke admin role; create or alter user mike_adams_ddl_grantee password '456' revoke admin role; commit; - +/* set term ^; execute block as begin @@ -55,7 +46,7 @@ test_script_1 = """ end^ set term ;^ commit; - +*/ -- Add/change/delete non-system records in RDB$TYPES create role r_for_grant_revoke_any_ddl_right set system privileges to GRANT_REVOKE_ANY_DDL_RIGHT; commit; @@ -74,7 +65,7 @@ test_script_1 = """ grant create collation to mike_adams_ddl_grantee; grant alter any collation to mike_adams_ddl_grantee; grant drop any collation to mike_adams_ddl_grantee; - + grant create exception to mike_adams_ddl_grantee; grant alter any exception to mike_adams_ddl_grantee; grant drop any exception to mike_adams_ddl_grantee; @@ -108,7 +99,7 @@ test_script_1 = """ grant create function to mike_adams_ddl_grantee; grant alter any function to mike_adams_ddl_grantee; grant drop any function to mike_adams_ddl_grantee; - + grant create package to mike_adams_ddl_grantee; grant alter any package to mike_adams_ddl_grantee; grant drop any package to mike_adams_ddl_grantee; @@ -171,7 +162,7 @@ test_script_1 = """ alter exception exc_test 'You have to change value from @1 to @2'; alter sequence gen_test restart with 
-9223372036854775808 increment by 2147483647; alter domain dm_test type bigint set default 2147483647 set not null add check(value > 0); - + alter table table_test drop constraint m_test_fk; create descending index table_test_x_desc on table_test(x); comment on table table_test is 'New comment for this table.'; @@ -244,9 +235,9 @@ test_script_1 = """ commit; set bail off; - + connect '$(DSN)' user mike_adams_ddl_grantee password '456'; - + --########################################################################### --### v e r i f y t h a t N O r i g h t s r e m a i n s ### --########################################################################### @@ -259,7 +250,7 @@ test_script_1 = """ create role r_test2; create table table_test2(id int, pid int, x int, constraint mtest_pk primary key(id), constraint m_test_fk foreign key(pid) references table_test(id)); create view v_table_test2 as select 1 from rdb$database; - + set term ^; create procedure sp_test2 as begin end ^ @@ -278,15 +269,15 @@ test_script_1 = """ set bail on; - connect '$(DSN)' user sysdba password 'masterkey'; - drop user john_smith_ddl_grantor; - drop user mike_adams_ddl_grantee; - commit; + -- connect '$(DSN)' user sysdba password 'masterkey'; + -- drop user john_smith_ddl_grantor; + -- drop user mike_adams_ddl_grantee; + -- commit; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ WHO_AM_I JOHN_SMITH_DDL_GRANTOR RDB$ROLE_NAME RDB$ADMIN RDB$ROLE_IN_USE @@ -296,7 +287,8 @@ expected_stdout_1 = """ RDB$ROLE_IN_USE RDB$SYSTEM_PRIVILEGES 0000400000000000 """ -expected_stderr_1 = """ + +expected_stderr = """ Statement failed, SQLSTATE = 42000 unsuccessful metadata update -CREATE COLLATION COLL_TEST2 failed @@ -354,11 +346,9 @@ expected_stderr_1 = """ """ @pytest.mark.version('>=4.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action, user_grantor, user_grantee, role_revoke): + act.expected_stdout = expected_stdout + act.expected_stderr = expected_stderr + act.execute() + assert (act.clean_stderr == act.clean_expected_stderr and + act.clean_stdout == act.clean_expected_stdout) diff --git a/tests/functional/syspriv/test_grant_revoke_any_object.py b/tests/functional/syspriv/test_grant_revoke_any_object.py index 8cb647d1..f70097a7 100644 --- a/tests/functional/syspriv/test_grant_revoke_any_object.py +++ b/tests/functional/syspriv/test_grant_revoke_any_object.py @@ -1,38 +1,30 @@ #coding:utf-8 -# -# id: functional.syspriv.grant_revoke_any_object -# title: Check ability to query, modify and deleting data plus add/drop constraints on any table. -# decription: -# Two users are created, U01 and U02. -# User U01 is granted with system privilege grant_revoke_any_object. -# User U02 has NO any privilege. -# User U01 then creates table and issue GRANT SELECT statement for U02 (WITHOUT using 'granted by clause). -# Then we -# 1) check result (contrent of RDB$ tables) -# 2) connect as U02 and query this table - this should work OK -# 3) connect as U01 and revoke grant on just queried table from U02 -# 4) connect again as U02 and repeat select - this shoiuld fail. -# -# Checked on WI-T4.0.0.267. 
-# -# tracker_id: -# min_versions: ['4.0.0'] -# versions: 4.0 -# qmid: None + +""" +ID: syspriv.grant-revoke-any-object +TITLE: Check ability to query, modify and deleting data plus add/drop constraints on any table +DESCRIPTION: + Two users are created, U01 and U02. + User U01 is granted with system privilege grant_revoke_any_object. + User U02 has NO any privilege. + User U01 then creates table and issue GRANT SELECT statement for U02 (WITHOUT using 'granted by clause). + Then we + 1) check result (contrent of RDB$ tables) + 2) connect as U02 and query this table - this should work OK + 3) connect as U01 and revoke grant on just queried table from U02 + 4) connect again as U02 and repeat select - this shoiuld fail. +FBTEST: functional.syspriv.grant_revoke_any_object +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() +user_01 = user_factory('db', name='u01', do_not_create=True) +user_02 = user_factory('db', name='u02', do_not_create=True) +role_revoke = role_factory('db', name='role_for_grant_revoke_any_object', do_not_create=True) -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set wng off; set bail on; set list on; @@ -44,7 +36,7 @@ test_script_1 = """ revoke all on all from u02; grant create table to u01; commit; - +/* set term ^; execute block as begin @@ -53,7 +45,7 @@ test_script_1 = """ end^ set term ;^ commit; - +*/ -- Add/change/delete non-system records in RDB$TYPES create role role_for_grant_revoke_any_object set system privileges to GRANT_REVOKE_ON_ANY_OBJECT; commit; @@ -91,15 +83,15 @@ test_script_1 = """ commit; set bail on; - connect '$(DSN)' user sysdba password 'masterkey'; - drop user u01; - drop user u02; - commit; + -- connect '$(DSN)' user sysdba password 'masterkey'; + -- drop user u01; + -- drop user u02; + -- commit; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ WHO_AM_I U01 RDB$ROLE_NAME RDB$ADMIN RDB$ROLE_IN_USE @@ -118,23 +110,22 @@ expected_stdout_1 = """ RDB$FIELD_NAME RDB$USER_TYPE 8 RDB$OBJECT_TYPE 0 - + WHO_AM_I U02 ID 1 WHO_IS_AUTHOR U01 """ -expected_stderr_1 = """ + +expected_stderr = """ Statement failed, SQLSTATE = 28000 no permission for SELECT access to TABLE TEST_U01 -Effective user is U02 """ @pytest.mark.version('>=4.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action, user_01, user_02, role_revoke): + act.expected_stdout = expected_stdout + act.expected_stderr = expected_stderr + act.execute() + assert (act.clean_stderr == act.clean_expected_stderr and + act.clean_stdout == act.clean_expected_stdout) diff --git a/tests/functional/syspriv/test_modify_ext_conn_pool.py b/tests/functional/syspriv/test_modify_ext_conn_pool.py index d2006d57..1ac6072c 100644 --- a/tests/functional/syspriv/test_modify_ext_conn_pool.py +++ b/tests/functional/syspriv/test_modify_ext_conn_pool.py @@ -1,29 +1,21 @@ #coding:utf-8 -# -# id: functional.syspriv.modify_ext_conn_pool -# title: Check ability to manage extyernal connections pool -# decription: -# Verify ability to issue ALTER EXTERNAL CONNECTIONS POOL <...> by non-sysdba user. 
-# Checked on 5.0.0.133 SS/CS, 4.0.1.2563 SS/CS -# -# tracker_id: -# min_versions: ['4.0.0'] -# versions: 4.0 -# qmid: None + +""" +ID: syspriv.modify-ext-conn-pool +TITLE: Check ability to manage external connections pool +DESCRIPTION: + Verify ability to issue ALTER EXTERNAL CONNECTIONS POOL <...> by non-sysdba user. +FBTEST: functional.syspriv.modify_ext_conn_pool +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() +test_user = user_factory('db', name='john_smith_extpool_manager', do_not_create=True) +test_role = role_factory('db', name='tmp_role_for_change_extpool', do_not_create=True) -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set wng off; set list on; @@ -31,7 +23,7 @@ test_script_1 = """ user john_smith_extpool_manager password '123' ; - +/* set term ^; execute block as begin @@ -39,7 +31,7 @@ test_script_1 = """ when any do begin end end^ set term ;^ - +*/ create role tmp_role_for_change_extpool set system privileges to MODIFY_EXT_CONN_POOL; commit; @@ -52,28 +44,27 @@ test_script_1 = """ alter external connections pool set lifetime 789 second; commit; - select + select cast(rdb$get_context('SYSTEM', 'EXT_CONN_POOL_SIZE') as int) as pool_size, cast(rdb$get_context('SYSTEM', 'EXT_CONN_POOL_LIFETIME') as int) as pool_lifetime from rdb$database; rollback; - connect '$(DSN)' user sysdba password 'masterkey'; - drop user john_smith_extpool_manager; - drop role tmp_role_for_change_extpool; - commit; + -- connect '$(DSN)' user sysdba password 'masterkey'; + -- drop user john_smith_extpool_manager; + -- drop role tmp_role_for_change_extpool; + -- commit; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ POOL_SIZE 345 POOL_LIFETIME 789 """ @pytest.mark.version('>=4.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action, test_user, test_role): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/syspriv/test_monitor_any_attachment.py b/tests/functional/syspriv/test_monitor_any_attachment.py index e69ff44a..0f26491c 100644 --- a/tests/functional/syspriv/test_monitor_any_attachment.py +++ b/tests/functional/syspriv/test_monitor_any_attachment.py @@ -1,37 +1,27 @@ #coding:utf-8 -# -# id: functional.syspriv.monitor_any_attachment -# title: Check ability to monitor any attachment. -# decription: -# Checked: -# 4.0.0.1635 SS: 1.497s. -# 4.0.0.1633 CS: 2.024s. 
-# -# tracker_id: -# min_versions: ['4.0.0'] -# versions: 4.0 -# qmid: None + +""" +ID: syspriv.monitor-any-attachment +TITLE: Check ability to monitor any attachment +DESCRIPTION: +FBTEST: functional.syspriv.monitor_any_attachment +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() +test_user = user_factory('db', name='u01', do_not_create=True) +test_role = role_factory('db', name='role_for_monitor_any_attach', do_not_create=True) -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set wng off; set bail on; set list on; set count on; create or alter view v_check as - select + select current_user as who_ami ,r.rdb$role_name ,rdb$role_in_use(r.rdb$role_name) as RDB_ROLE_IN_USE @@ -45,7 +35,7 @@ test_script_1 = """ grant select on v_check to public; commit; - +/* set term ^; execute block as begin @@ -55,9 +45,9 @@ test_script_1 = """ ^ set term ;^ commit; - +*/ -- Monitor (via MON$ tables) attachments from other users: - create role role_for_monitor_any_attach + create role role_for_monitor_any_attach set system privileges to MONITOR_ANY_ATTACHMENT; commit; grant default role_for_monitor_any_attach to user u01; @@ -84,25 +74,25 @@ test_script_1 = """ as user 'SYSDBA' password 'masterkey' into another_user; - for - select + for + select current_user, a.mon$user, s.mon$sql_text - from mon$attachments a + from mon$attachments a join mon$statements s using(mon$attachment_id) - where - a.mon$user<>current_user - and a.mon$system_flag is distinct from 1 + where + a.mon$user<>current_user + and a.mon$system_flag is distinct from 1 -- NB: for Classic 4.0 we should prevent output from: - -- SELECT - -- RDB$MAP_USING, RDB$MAP_PLUGIN, RDB$MAP_DB, - -- RDB$MAP_FROM_TYPE, RDB$MAP_FROM, RDB$MAP_TO_TYPE, RDB$MAP_TO + -- SELECT + -- RDB$MAP_USING, RDB$MAP_PLUGIN, RDB$MAP_DB, + -- RDB$MAP_FROM_TYPE, RDB$MAP_FROM, RDB$MAP_TO_TYPE, RDB$MAP_TO -- FROM RDB$AUTH_MAPPING -- -- so we add filter on s.mon$sql_text: and s.mon$sql_text containing :v_other_sttm into who_am_i, who_else_here, what_he_is_doing - do + do suspend; end ^ @@ -125,15 +115,15 @@ test_script_1 = """ -- SQLCODE: -901 / lock time-out on wait transaction / object is in use -- ############################################################################################# delete from mon$attachments where mon$attachment_id != current_connection; - commit; - - drop user u01; commit; + + -- drop user u01; + -- commit; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ WHO_AMI U01 RDB$ROLE_NAME RDB$ADMIN RDB_ROLE_IN_USE @@ -150,8 +140,7 @@ expected_stdout_1 = """ """ @pytest.mark.version('>=4.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action, test_user, test_role): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/syspriv/test_read_raw_pages.py b/tests/functional/syspriv/test_read_raw_pages.py index 6c27999e..02cdd304 100644 --- a/tests/functional/syspriv/test_read_raw_pages.py +++ b/tests/functional/syspriv/test_read_raw_pages.py @@ -1,29 +1,21 @@ #coding:utf-8 -# -# id: functional.syspriv.read_raw_pages -# title: Check ability to get 
binary content of DB page by non-sysdba user who is granted with necessary system privilege. -# decription: -# Test uses ability to read binary content of DB page that is provided by FDB driver (see con.get_page_contents() call). -# We obtain content of page with ID=1 (this is PIP) and get its type (it must be 2). -# This action can be done by NON-dba user only if he has apropriate system privilege, otherwise FDB raises Python-related -# error. We catch this error in order to prevent failing of test with 'E' outcome and print text of exception. -# -# Checked on 5.0.0.139 SS/CS; 4.0.1.2568 SS/CS. -# -# tracker_id: -# min_versions: ['4.0.0'] -# versions: 4.0 -# qmid: None + +""" +ID: syspriv.read-raw-pages +TITLE: Check ability to get binary content of DB page by non-sysdba user who is + granted with necessary system privilege +DESCRIPTION: + Test uses ability to read binary content of DB page that is provided by FDB driver (see con.get_page_contents() call). + We obtain content of page with ID=1 (this is PIP) and get its type (it must be 2). + This action can be done by NON-dba user only if he has apropriate system privilege, otherwise FDB raises Python-related + error. We catch this error in order to prevent failing of test with 'E' outcome and print text of exception. +FBTEST: functional.syspriv.read_raw_pages +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """ +init_script = """ set wng off; set bail on; set list on; @@ -56,17 +48,33 @@ init_script_1 = """ commit; """ -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) + +act = python_act('db') + +expected_stdout = """ + User: mike_adams_bad_hacker + Exception occured: + Result code does not match request code. + + User: john_smith_raw_reader + Successfully get content of page, its type: 2 +""" + +@pytest.mark.skip('FIXME: Not IMPLEMENTED') +@pytest.mark.version('>=4.0') +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") # test_script_1 #--- -# +# # import os # from struct import unpack_from -# +# # db_file = db_conn.database_name # db_conn.close() -# +# # for i in ( 'mike_adams_bad_hacker', 'john_smith_raw_reader'): # print('User: %s' % i ) # con = fdb.connect(dsn = dsn, user = i, password = '123', role = 'role_for_read_raw_pages') @@ -80,30 +88,13 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # print(x) # finally: # con.close() -# +# # #------------------------ -# +# # con = fdb.connect(dsn = dsn, user = user_name, password = user_password) # con.execute_immediate('drop user mike_adams_bad_hacker using plugin Srp') # con.execute_immediate('drop user john_smith_raw_reader using plugin Srp') # con.commit() # con.close() -# +# #--- -act_1 = python_act('db_1', substitutions=substitutions_1) - -expected_stdout_1 = """ - User: mike_adams_bad_hacker - Exception occured: - Result code does not match request code. 
- - User: john_smith_raw_reader - Successfully get content of page, its type: 2 -""" - -@pytest.mark.version('>=4.0') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - - diff --git a/tests/functional/syspriv/test_trace_any_attachment.py b/tests/functional/syspriv/test_trace_any_attachment.py index 625a8e0e..4a499fe6 100644 --- a/tests/functional/syspriv/test_trace_any_attachment.py +++ b/tests/functional/syspriv/test_trace_any_attachment.py @@ -1,35 +1,23 @@ #coding:utf-8 -# -# id: functional.syspriv.trace_any_attachment -# title: Check ability to trace any attachment by non-sysdba user who is granted with necessary system privileges. -# decription: -# Checked on 4.0.0.262. -# 03-mar-2021. Checked on: -# * Windows: 4.0.0.2377, 3.0.8.33420 -# * Linux: 4.0.0.2377, 3.0.8.33415 -# -# -# tracker_id: -# min_versions: ['4.0.0'] -# versions: 4.0 -# qmid: None + +""" +ID: syspriv.trace-any-attachment +TITLE: Check ability to trace any attachment by non-sysdba user who is granted with necessary system privileges +DESCRIPTION: +FBTEST: functional.syspriv.trace_any_attachment +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """ +init_script = """ set wng off; set bail on; create or alter user sys_tracer_for_anyone password '123' revoke admin role; revoke all on all from sys_tracer_for_anyone; commit; -- Trace other users' attachments - create role role_for_trace_any_attachment + create role role_for_trace_any_attachment set system privileges to TRACE_ANY_ATTACHMENT; commit; grant default role_for_trace_any_attachment to user sys_tracer_for_anyone; @@ -39,39 +27,51 @@ init_script_1 = """ commit; """ -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) + +act = python_act('db') + +expected_stdout = """ + FOUND SYSDBA ATTACHMENT. + FOUND SYSDBA STATEMENT. +""" + +@pytest.mark.skip('FIXME: Not IMPLEMENTED') +@pytest.mark.version('>=4.0') +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") # test_script_1 #--- -# +# # import os # import subprocess # from subprocess import Popen # import time -# +# # os.environ["ISC_USER"] = user_name # os.environ["ISC_PASSWORD"] = user_password -# +# # db_file=os.path.basename(db_conn.database_name) # db_conn.close() -# +# # #-------------------------------------------- -# +# # def flush_and_close( file_handle ): # # https://docs.python.org/2/library/os.html#os.fsync -# # If you're starting with a Python file object f, -# # first do f.flush(), and +# # If you're starting with a Python file object f, +# # first do f.flush(), and # # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. # global os -# +# # file_handle.flush() # if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: # # otherwise: "OSError: [Errno 9] Bad file descriptor"! # os.fsync(file_handle.fileno()) # file_handle.close() -# +# # #-------------------------------------------- -# +# # def cleanup( f_names_list ): # global os # for f in f_names_list: @@ -82,12 +82,12 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # else: # print('Unrecognized type of element:', f, ' - can not be treated as file.') # del_name = None -# +# # if del_name and os.path.isfile( del_name ): # os.remove( del_name ) -# +# # #-------------------------------------------- -# +# # trace_options = '''# Trace config, format for 3.0 and above. 
Generated auto, do not edit! # database=%%[\\\\\\\\/]%(db_file)s # { @@ -101,42 +101,42 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # max_sql_length = 16384 # } # ''' % locals() -# +# # f_trccfg=open( os.path.join(context['temp_directory'],'tmp_syspriv_trace_any_attacmhent.cfg'), 'w') # f_trccfg.write(trace_options) # flush_and_close( f_trccfg ) -# +# # # Starting trace session in new child process (async.): # ####################################################### -# +# # f_trclog=open( os.path.join(context['temp_directory'],'tmp_syspriv_trace_any_attacmhent.log'), 'w') # f_trcerr=open( os.path.join(context['temp_directory'],'tmp_syspriv_trace_any_attacmhent.err'), 'w') -# +# # # Execute a child program in a new process, redirecting STDERR to the same target as of STDOUT: # p_trace=Popen([context['fbsvcmgr_path'], "localhost:service_mgr", # "user", "sys_tracer_for_anyone", "password", "123", # "action_trace_start", # "trc_cfg", f_trccfg.name], -# stdout=f_trclog, +# stdout=f_trclog, # stderr=f_trcerr # ) -# -# +# +# # time.sleep(1) -# +# # ##################################################### # # Getting ID of launched trace session and STOP it: -# +# # # Save active trace session info into file for further parsing it and obtain session_id back (for stop): # f_trclst=open( os.path.join(context['temp_directory'],'tmp_trace_5273.lst'), 'w') # subprocess.call([context['fbsvcmgr_path'], "localhost:service_mgr", # "user", "sys_tracer_for_anyone", "password", "123", # "action_trace_list"], -# stdout=f_trclst, +# stdout=f_trclst, # stderr=subprocess.STDOUT # ) # flush_and_close( f_trclst ) -# +# # trcssn=0 # with open( f_trclst.name,'r') as f: # for line in f: @@ -147,12 +147,12 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # trcssn=word # i=i+1 # break -# +# # # Result: `trcssn` is ID of active trace session. Now we have to terminate it: -# +# # runProgram('isql',[dsn, '-q', '-n'], 'insert into test_trace_any_attachment(id) values(123456789);') # time.sleep(1) -# +# # # REQUEST TRACE TO STOP: # ######################## # f_trclst=open(f_trclst.name,'a') @@ -164,22 +164,22 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # stdout=f_trclst, stderr=subprocess.STDOUT # ) # flush_and_close( f_trclst ) -# +# # time.sleep(2) -# +# # # Terminate child process of launched trace session (though it should already be killed): # p_trace.terminate() # flush_and_close( f_trclog ) # flush_and_close( f_trcerr ) -# -# +# +# # # Must be EMPTY: # with open( f_trcerr.name,'r') as f: # for line in f: # print(line) -# +# # # Must contain info about SYSDBA activity (this was traced by non-sysdba user): -# +# # found_sysdba_attachment, found_sysdba_statement = False, False # with open( f_trclog.name,'r') as f: # for line in f: @@ -191,24 +191,11 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1) # if not found_sysdba_statement: # print('FOUND SYSDBA STATEMENT.') # found_sysdba_statement = True -# +# # runProgram('isql',[dsn], 'drop user sys_tracer_for_anyone;') -# +# # # Cleanup: # ########## # time.sleep(1) # cleanup( (f_trclst,f_trcerr,f_trclog, f_trccfg) ) #--- -act_1 = python_act('db_1', substitutions=substitutions_1) - -expected_stdout_1 = """ - FOUND SYSDBA ATTACHMENT. - FOUND SYSDBA STATEMENT. 
-""" - -@pytest.mark.version('>=4.0') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - - diff --git a/tests/functional/syspriv/test_use_gbak_utility.py b/tests/functional/syspriv/test_use_gbak_utility.py index bb2638da..cb2050f1 100644 --- a/tests/functional/syspriv/test_use_gbak_utility.py +++ b/tests/functional/syspriv/test_use_gbak_utility.py @@ -1,38 +1,35 @@ #coding:utf-8 -# -# id: functional.syspriv.use_gbak_utility -# title: Check ability to to make database backup. -# decription: -# We create user and grant system privileges USE_GBAK_UTILITY, SELECT_ANY_OBJECT_IN_DATABASE to him -# (but revoke all other rights), and then we try to make BACKUP with attaching to database as this user (U01). -# Then we check that this user: -# 1) can NOT restore .fbk to another file name (backup <> restore!) -# 2) CAN query to the table which is not granted to him by regular GRANT statement -# (restoring is done by SYSDBA). -# -# Checked on 4.0.0.267. -# -# tracker_id: -# min_versions: ['4.0.0'] -# versions: 4.0 -# qmid: None + +""" +ID: syspriv.use-gbak-utility +TITLE: Check ability to to make database backup +DESCRIPTION: + We create user and grant system privileges USE_GBAK_UTILITY, SELECT_ANY_OBJECT_IN_DATABASE to him + (but revoke all other rights), and then we try to make BACKUP with attaching to database as this user (U01). + Then we check that this user: + 1) can NOT restore .fbk to another file name (backup <> restore!) + 2) CAN query to the table which is not granted to him by regular GRANT statement + (restoring is done by SYSDBA). +FBTEST: functional.syspriv.use_gbak_utility +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +substitutions = [('.*NO PERMISSION FOR CREATE ACCESS TO DATABASE.*', 'NO PERMISSION FOR CREATE ACCESS TO DATABASE'), + ('.*-FAILED TO CREATE DATABASE.*', '-FAILED TO CREATE DATABASE'), + ('CLOSING FILE, COMMITTING, AND FINISHING.*', 'CLOSING FILE, COMMITTING, AND FINISHING'), + ('DB_NAME.*FUNCTIONAL.SYSPRIV.USE_GBAK_UTILITY.TMP', 'DB_NAME FUNCTIONAL.SYSPRIV.USE_GBAK_UTILITY.TMP'), + ('BLOB_ID.*', '')] -substitutions_1 = [('.*NO PERMISSION FOR CREATE ACCESS TO DATABASE.*', 'NO PERMISSION FOR CREATE ACCESS TO DATABASE'), ('.*-FAILED TO CREATE DATABASE.*', '-FAILED TO CREATE DATABASE'), ('CLOSING FILE, COMMITTING, AND FINISHING.*', 'CLOSING FILE, COMMITTING, AND FINISHING'), ('DB_NAME.*FUNCTIONAL.SYSPRIV.USE_GBAK_UTILITY.TMP', 'DB_NAME FUNCTIONAL.SYSPRIV.USE_GBAK_UTILITY.TMP'), ('BLOB_ID.*', '')] - -init_script_1 = """ +init_script = """ set wng off; set bail on; set list on; set count on; create or alter view v_check as - select + select upper(mon$database_name) as db_name ,current_user as who_ami ,r.rdb$role_name @@ -52,7 +49,7 @@ init_script_1 = """ commit; grant select on v_check to public; - --------------------------------- [ !! ] -- do NOT: grant select on test to u01; -- [ !! ] + --------------------------------- [ !! ] -- do NOT: grant select on test to u01; -- [ !! ] commit; set term ^; @@ -67,7 +64,7 @@ init_script_1 = """ -- Ability to make database backup. -- NB: SELECT_ANY_OBJECT_IN_DATABASE - mandatory for reading data from tables et al. 
- create role role_for_use_gbak_utility + create role role_for_use_gbak_utility set system privileges to USE_GBAK_UTILITY, SELECT_ANY_OBJECT_IN_DATABASE; commit; grant default role_for_use_gbak_utility to user u01; @@ -75,172 +72,11 @@ init_script_1 = """ """ -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -# test_script_1 -#--- -# -# import os -# import subprocess -# -# db_pref = os.path.splitext(db_conn.database_name)[0] -# db_conn.close() -# -# #-------------------------------------------- -# -# def flush_and_close( file_handle ): -# # https://docs.python.org/2/library/os.html#os.fsync -# # If you're starting with a Python file object f, -# # first do f.flush(), and -# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. -# global os -# -# file_handle.flush() -# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: -# # otherwise: "OSError: [Errno 9] Bad file descriptor"! -# os.fsync(file_handle.fileno()) -# file_handle.close() -# -# #-------------------------------------------- -# -# def cleanup( f_names_list ): -# global os -# for f in f_names_list: -# if type(f) == file: -# del_name = f.name -# elif type(f) == str: -# del_name = f -# else: -# print('Unrecognized type of element:', f, ' - can not be treated as file.') -# del_name = None -# -# if del_name and os.path.isfile( del_name ): -# os.remove( del_name ) -# -# #-------------------------------------------- -# -# fdb_this = db_pref+'.fdb' -# fbk_name = db_pref+'.fbk' -# fdb_test = db_pref+'.tmp' -# -# # !!! NB !!! See CORE-5291. We have to remove file that will be used as target for restoring, -# # otherwise error msg will contain strange phrase "gbak: ERROR:could not drop ... (database might be in use)" -# cleanup( (fdb_test,) ) -# -# f_backup_u01_log=open( os.path.join(context['temp_directory'],'tmp_backup_u01.log'), 'w') -# f_backup_u01_err=open( os.path.join(context['temp_directory'],'tmp_backup_u01.err'), 'w') -# subprocess.call([context['fbsvcmgr_path'],"localhost:service_mgr", -# "user","U01","password","123", -# "action_backup", -# "dbname", fdb_this, -# "bkp_file", fbk_name, -# "verbose"], -# stdout=f_backup_u01_log, -# stderr=f_backup_u01_err -# ) -# -# flush_and_close( f_backup_u01_log ) -# flush_and_close( f_backup_u01_err ) -# -# # NB: user U01 has right only to make BACKUP, but has NO right for RESTORING database -# # (to restore he has to be granted with system privilege CREATE_DATABASE). 
-# # Thus following attempt should be finished with ERROR: -# # === -# # gbak: ERROR:no permission for CREATE access to DATABASE C:\\MIX\\FIREBIRD\\QA\\FBT-REPO\\TMP\\TMP.TMP -# # gbak: ERROR:failed to create database localhost/3400:C:\\MIX -# irebird\\QA -# bt-repo mp mp.tmp -# # gbak:Exiting before completion due to errors -# # === -# -# f_restore_u01_log=open( os.path.join(context['temp_directory'],'tmp_restore_u01.log'), 'w') -# f_restore_u01_err=open( os.path.join(context['temp_directory'],'tmp_restore_u01.err'), 'w') -# subprocess.call([context['fbsvcmgr_path'],"localhost:service_mgr", -# "user","U01","password","123", -# "action_restore", "res_replace", -# "verbose", -# "bkp_file", fbk_name, -# "dbname", fdb_test], -# stdout=f_restore_u01_log, -# stderr=f_restore_u01_err -# ) -# flush_and_close( f_restore_u01_log ) -# flush_and_close( f_restore_u01_err ) -# -# -# # Now try to restore as SYSDBA and then check that U01 will be able -# # to connect to this DB and run query on table TEST: -# -# f_restore_sys_log=open( os.path.join(context['temp_directory'],'tmp_restore_sys.log'), 'w') -# f_restore_sys_err=open( os.path.join(context['temp_directory'],'tmp_restore_sys.err'), 'w') -# subprocess.call([context['fbsvcmgr_path'],"localhost:service_mgr", -# "user", user_name, "password", user_password, -# "action_restore", "res_replace", -# "verbose", -# "bkp_file", fbk_name, -# "dbname", fdb_test], -# stdout=f_restore_sys_log, -# stderr=f_restore_sys_err -# ) -# flush_and_close( f_restore_sys_log ) -# flush_and_close( f_restore_sys_err ) -# -# -# # Check content of logs. -# -# # Must be EMPTY: -# with open( f_backup_u01_err.name,'r') as f: -# for line in f: -# print('U01 BACKUP STDERR: '+line.upper()) -# -# # Must contain: "closing file, committing, and finishing" -# with open( f_backup_u01_log.name,'r') as f: -# for line in f: -# if 'closing file' in line: -# print('U01 BACKUP STDLOG: ' + ' '.join(line.split()).upper() ) -# -# -# # Must contain errors: -# # no permission for CREATE access to DATABASE C:/MIX/firebird/QA/fbt-repo/tmp/functional.syspriv.use_gbak_utility.tmp -# # -failed to create database C:/MIX/firebird/QA/fbt-repo/tmp/functional.syspriv.use_gbak_utility.tmp -# # -Exiting before completion due to errors -# with open( f_restore_u01_err.name,'r') as f: -# for line in f: -# print('U01 RESTORE STDERR: ' + ' '.join(line.split()).upper() ) -# -# # Must contain: "finishing, closing, and going home " -# with open( f_restore_sys_log.name,'r') as f: -# for line in f: -# if 'going home' in line: -# print('SYSDBA RESTORE STDLOG: ' + ' '.join(line.split()).upper() ) -# -# # Must be EMPTY: -# with open( f_restore_sys_err.name,'r') as f: -# for line in f: -# print('SYSDBA RESTORE STDERR: '+line.upper()) -# -# # Check that non-sysdba user can connect and query table 'test': -# ####### -# sql_chk=''' -# set list on; -# set count on; -# set blob all; -# select * from v_check; -# select x,b as blob_id from test; -# commit; -# ''' -# -# runProgram('isql',['localhost:'+fdb_test,'-user','U01', '-pas', '123'], sql_chk) -# -# # Cleanup: -# ########## -# runProgram('isql',[dsn,'-user',user_name, '-pas', user_password], 'drop user u01; commit;') -# cleanup( (fbk_name, fdb_test, f_backup_u01_log,f_backup_u01_err,f_restore_u01_log,f_restore_u01_err,f_restore_sys_log,f_restore_sys_err) ) -# -#--- -act_1 = python_act('db_1', substitutions=substitutions_1) +act = python_act('db', substitutions=substitutions) -expected_stdout_1 = """ +expected_stdout = """ U01 BACKUP STDLOG: GBAK:CLOSING FILE, COMMITTING, AND 
FINISHING U01 RESTORE STDERR: NO PERMISSION FOR CREATE ACCESS TO DATABASE U01 RESTORE STDERR: -FAILED TO CREATE DATABASE @@ -263,9 +99,169 @@ expected_stdout_1 = """ Records affected: 1 """ +@pytest.mark.skip('FIXME: Not IMPLEMENTED') @pytest.mark.version('>=4.0') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") +# test_script_1 +#--- +# +# import os +# import subprocess +# +# db_pref = os.path.splitext(db_conn.database_name)[0] +# db_conn.close() +# +# #-------------------------------------------- +# +# def flush_and_close( file_handle ): +# # https://docs.python.org/2/library/os.html#os.fsync +# # If you're starting with a Python file object f, +# # first do f.flush(), and +# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. +# global os +# +# file_handle.flush() +# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: +# # otherwise: "OSError: [Errno 9] Bad file descriptor"! +# os.fsync(file_handle.fileno()) +# file_handle.close() +# +# #-------------------------------------------- +# +# def cleanup( f_names_list ): +# global os +# for f in f_names_list: +# if type(f) == file: +# del_name = f.name +# elif type(f) == str: +# del_name = f +# else: +# print('Unrecognized type of element:', f, ' - can not be treated as file.') +# del_name = None +# +# if del_name and os.path.isfile( del_name ): +# os.remove( del_name ) +# +# #-------------------------------------------- +# +# fdb_this = db_pref+'.fdb' +# fbk_name = db_pref+'.fbk' +# fdb_test = db_pref+'.tmp' +# +# # !!! NB !!! See CORE-5291. We have to remove file that will be used as target for restoring, +# # otherwise error msg will contain strange phrase "gbak: ERROR:could not drop ... (database might be in use)" +# cleanup( (fdb_test,) ) +# +# f_backup_u01_log=open( os.path.join(context['temp_directory'],'tmp_backup_u01.log'), 'w') +# f_backup_u01_err=open( os.path.join(context['temp_directory'],'tmp_backup_u01.err'), 'w') +# subprocess.call([context['fbsvcmgr_path'],"localhost:service_mgr", +# "user","U01","password","123", +# "action_backup", +# "dbname", fdb_this, +# "bkp_file", fbk_name, +# "verbose"], +# stdout=f_backup_u01_log, +# stderr=f_backup_u01_err +# ) +# +# flush_and_close( f_backup_u01_log ) +# flush_and_close( f_backup_u01_err ) +# +# # NB: user U01 has right only to make BACKUP, but has NO right for RESTORING database +# # (to restore he has to be granted with system privilege CREATE_DATABASE). 
+# # Thus following attempt should be finished with ERROR: +# # === +# # gbak: ERROR:no permission for CREATE access to DATABASE C:\\MIX\\FIREBIRD\\QA\\FBT-REPO\\TMP\\TMP.TMP +# # gbak: ERROR:failed to create database localhost/3400:C:\\MIX +# irebird\\QA +# bt-repo mp mp.tmp +# # gbak:Exiting before completion due to errors +# # === +# +# f_restore_u01_log=open( os.path.join(context['temp_directory'],'tmp_restore_u01.log'), 'w') +# f_restore_u01_err=open( os.path.join(context['temp_directory'],'tmp_restore_u01.err'), 'w') +# subprocess.call([context['fbsvcmgr_path'],"localhost:service_mgr", +# "user","U01","password","123", +# "action_restore", "res_replace", +# "verbose", +# "bkp_file", fbk_name, +# "dbname", fdb_test], +# stdout=f_restore_u01_log, +# stderr=f_restore_u01_err +# ) +# flush_and_close( f_restore_u01_log ) +# flush_and_close( f_restore_u01_err ) +# +# +# # Now try to restore as SYSDBA and then check that U01 will be able +# # to connect to this DB and run query on table TEST: +# +# f_restore_sys_log=open( os.path.join(context['temp_directory'],'tmp_restore_sys.log'), 'w') +# f_restore_sys_err=open( os.path.join(context['temp_directory'],'tmp_restore_sys.err'), 'w') +# subprocess.call([context['fbsvcmgr_path'],"localhost:service_mgr", +# "user", user_name, "password", user_password, +# "action_restore", "res_replace", +# "verbose", +# "bkp_file", fbk_name, +# "dbname", fdb_test], +# stdout=f_restore_sys_log, +# stderr=f_restore_sys_err +# ) +# flush_and_close( f_restore_sys_log ) +# flush_and_close( f_restore_sys_err ) +# +# +# # Check content of logs. +# +# # Must be EMPTY: +# with open( f_backup_u01_err.name,'r') as f: +# for line in f: +# print('U01 BACKUP STDERR: '+line.upper()) +# +# # Must contain: "closing file, committing, and finishing" +# with open( f_backup_u01_log.name,'r') as f: +# for line in f: +# if 'closing file' in line: +# print('U01 BACKUP STDLOG: ' + ' '.join(line.split()).upper() ) +# +# +# # Must contain errors: +# # no permission for CREATE access to DATABASE C:/MIX/firebird/QA/fbt-repo/tmp/functional.syspriv.use_gbak_utility.tmp +# # -failed to create database C:/MIX/firebird/QA/fbt-repo/tmp/functional.syspriv.use_gbak_utility.tmp +# # -Exiting before completion due to errors +# with open( f_restore_u01_err.name,'r') as f: +# for line in f: +# print('U01 RESTORE STDERR: ' + ' '.join(line.split()).upper() ) +# +# # Must contain: "finishing, closing, and going home " +# with open( f_restore_sys_log.name,'r') as f: +# for line in f: +# if 'going home' in line: +# print('SYSDBA RESTORE STDLOG: ' + ' '.join(line.split()).upper() ) +# +# # Must be EMPTY: +# with open( f_restore_sys_err.name,'r') as f: +# for line in f: +# print('SYSDBA RESTORE STDERR: '+line.upper()) +# +# # Check that non-sysdba user can connect and query table 'test': +# ####### +# sql_chk=''' +# set list on; +# set count on; +# set blob all; +# select * from v_check; +# select x,b as blob_id from test; +# commit; +# ''' +# +# runProgram('isql',['localhost:'+fdb_test,'-user','U01', '-pas', '123'], sql_chk) +# +# # Cleanup: +# ########## +# runProgram('isql',[dsn,'-user',user_name, '-pas', user_password], 'drop user u01; commit;') +# cleanup( (fbk_name, fdb_test, f_backup_u01_log,f_backup_u01_err,f_restore_u01_log,f_restore_u01_err,f_restore_sys_log,f_restore_sys_err) ) +# +#--- diff --git a/tests/functional/syspriv/test_use_granted_by_clause.py b/tests/functional/syspriv/test_use_granted_by_clause.py index b5ec5776..a2483e0c 100644 --- 
a/tests/functional/syspriv/test_use_granted_by_clause.py +++ b/tests/functional/syspriv/test_use_granted_by_clause.py @@ -1,38 +1,31 @@ #coding:utf-8 -# -# id: functional.syspriv.use_granted_by_clause -# title: Check ability to query, modify and deleting data plus add/drop constraints on any table. -# decription: -# Two users are created, U01 and U02. -# User U01 is granted with system privilege USE_GRANTED_BY_CLAUSE. -# User U02 has NO any privilege. -# User U01 then creates table and issue GRANT SELECT statement for U02 as it was granted by SYSDBA. -# Then we -# 1) check result (contrent of RDB$ tables) -# 2) connect as U02 and query this table - this should work OK -# 3) connect as U01 and revoke grant on just queried table from U02 -# 4) connect again as U02 and repeat select - this shoiuld fail. -# -# Checked on WI-T4.0.0.267. -# -# tracker_id: -# min_versions: ['4.0.0'] -# versions: 4.0 -# qmid: None + +""" +ID: syspriv.use-granted-by-clause +TITLE: Check ability to query, modify and deleting data plus add/drop constraints on any table +DESCRIPTION: + Two users are created, U01 and U02. + User U01 is granted with system privilege USE_GRANTED_BY_CLAUSE. + User U02 has NO any privilege. + User U01 then creates table and issue GRANT SELECT statement for U02 as it was granted by SYSDBA. + Then we + 1) check result (contrent of RDB$ tables) + 2) connect as U02 and query this table - this should work OK + 3) connect as U01 and revoke grant on just queried table from U02 + 4) connect again as U02 and repeat select - this shoiuld fail. +FBTEST: functional.syspriv.use_granted_by_clause +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() -substitutions_1 = [] +user_01 = user_factory('db', name='u01', do_not_create=True) +user_02 = user_factory('db', name='u02', do_not_create=True) +test_role = role_factory('db', name='role_for_use_granted_by_clause', do_not_create=True) -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set wng off; set bail on; set list on; @@ -44,7 +37,7 @@ test_script_1 = """ revoke all on all from u02; grant create table to u01; commit; - +/* set term ^; execute block as begin @@ -53,7 +46,7 @@ test_script_1 = """ end^ set term ;^ commit; - +*/ -- Add/change/delete non-system records in RDB$TYPES create role role_for_use_granted_by_clause set system privileges to USE_GRANTED_BY_CLAUSE; commit; @@ -91,15 +84,15 @@ test_script_1 = """ commit; set bail on; - connect '$(DSN)' user sysdba password 'masterkey'; - drop user u01; - drop user u02; - commit; + -- connect '$(DSN)' user sysdba password 'masterkey'; + -- drop user u01; + -- drop user u02; + -- commit; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ WHO_AM_I U01 RDB$ROLE_NAME RDB$ADMIN RDB$ROLE_IN_USE @@ -118,23 +111,22 @@ expected_stdout_1 = """ RDB$FIELD_NAME RDB$USER_TYPE 8 RDB$OBJECT_TYPE 0 - + WHO_AM_I U02 ID 1 WHO_IS_AUTHOR U01 """ -expected_stderr_1 = """ + +expected_stderr = """ Statement failed, SQLSTATE = 28000 no permission for SELECT access to TABLE TEST_U01 -Effective user is U02 """ @pytest.mark.version('>=4.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - - assert 
act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.expected_stderr = expected_stderr + act.execute() + assert (act.clean_stderr == act.clean_expected_stderr and + act.clean_stdout == act.clean_expected_stdout) diff --git a/tests/functional/syspriv/test_use_gstat_utility.py b/tests/functional/syspriv/test_use_gstat_utility.py index 991a932e..df21dea2 100644 --- a/tests/functional/syspriv/test_use_gstat_utility.py +++ b/tests/functional/syspriv/test_use_gstat_utility.py @@ -1,43 +1,42 @@ #coding:utf-8 -# -# id: functional.syspriv.use_gstat_utility -# title: Check ability to obtain database statistics. -# decription: -# We create user and grant system privileges USE_GSTAT_UTILITY and IGNORE_DB_TRIGGERS to him. -# Then we check that this user can extract DB statistics in TWO ways: -# 1) common data except encryption info (it is called here 'base "sts_" output') -# 2) only encryption info (I don't know why "sts_encryption" can not be used together with other switches...) -# Both these actions should not produce any error. -# Also, logs of them should contain all needed 'check words' and patterns - and we check this. -# Finally, we ensure that when user U01 gathered DB statistics then db-level trigger did NOT fire. -# -# Checked on 4.0.0.267. -# 31.10.2019: added check for generator pages in encryption block. -# Checked on: -# 4.0.0.1635 SS: 2.660s. -# 4.0.0.1633 CS: 3.164s. -# -# tracker_id: -# min_versions: ['4.0.0'] -# versions: 4.0 -# qmid: None + +""" +ID: syspriv.use-gstat-utility +TITLE: Check ability to obtain database statistics +DESCRIPTION: + We create user and grant system privileges USE_GSTAT_UTILITY and IGNORE_DB_TRIGGERS to him. + Then we check that this user can extract DB statistics in TWO ways: + 1) common data except encryption info (it is called here 'base "sts_" output') + 2) only encryption info (I don't know why "sts_encryption" can not be used together with other switches...) + Both these actions should not produce any error. + Also, logs of them should contain all needed 'check words' and patterns - and we check this. + Finally, we ensure that when user U01 gathered DB statistics then db-level trigger did NOT fire. +NOTES: +[31.10.2019] added check for generator pages in encryption block. 
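As a pointer for un-skipping this test later: the two statistics calls can be reproduced with the same service parameters as the legacy fbsvcmgr invocations kept further down in this file. The helper below is only a sketch; fbsvcmgr being on PATH and the caller-supplied db_file are assumptions, and the real test would additionally have to verify that ATT_LOG stayed empty.

    import subprocess

    def db_stats_as_u01(db_file, fbsvcmgr='fbsvcmgr'):
        # Base statistics (everything except encryption), requested as non-sysdba user U01.
        base = subprocess.run([fbsvcmgr, 'localhost:service_mgr',
                               'user', 'U01', 'password', '123',
                               'action_db_stats', 'dbname', str(db_file),
                               'sts_record_versions', 'sts_data_pages',
                               'sts_idx_pages', 'sts_sys_relations'],
                              capture_output=True, text=True, check=True)
        # Encryption statistics have to be requested in a separate call.
        enc = subprocess.run([fbsvcmgr, 'localhost:service_mgr',
                              'user', 'U01', 'password', '123',
                              'action_db_stats', 'dbname', str(db_file),
                              'sts_encryption'],
                             capture_output=True, text=True, check=True)
        assert 'fill distribution' in base.stdout.lower()
        assert 'encrypted' in enc.stdout.lower()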
+FBTEST: functional.syspriv.use_gstat_utility +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +substitutions = [('[ \t]+', ' '), + ('.* data pages: total [\\d]+[,]{0,1} encrypted [\\d]+[,]{0,1} non-crypted [\\d]+', + 'data pages total encrypted non-crypted'), + ('.* index pages: total [\\d]+[,]{0,1} encrypted [\\d]+[,]{0,1} non-crypted [\\d]+', + 'index pages total encrypted non-crypted'), + ('.* blob pages: total [\\d]+[,]{0,1} encrypted [\\d]+[,]{0,1} non-crypted [\\d]+', + 'blob pages total encrypted non-crypted'), + ('.* generator pages: total [\\d]+[,]{0,1} encrypted [\\d]+[,]{0,1} non-crypted [\\d]+', + 'generator pages total encrypted non-crypted')] -substitutions_1 = [('[ \t]+', ' '), ('.* data pages: total [\\d]+[,]{0,1} encrypted [\\d]+[,]{0,1} non-crypted [\\d]+', 'data pages total encrypted non-crypted'), ('.* index pages: total [\\d]+[,]{0,1} encrypted [\\d]+[,]{0,1} non-crypted [\\d]+', 'index pages total encrypted non-crypted'), ('.* blob pages: total [\\d]+[,]{0,1} encrypted [\\d]+[,]{0,1} non-crypted [\\d]+', 'blob pages total encrypted non-crypted'), ('.* generator pages: total [\\d]+[,]{0,1} encrypted [\\d]+[,]{0,1} non-crypted [\\d]+', 'generator pages total encrypted non-crypted')] - -init_script_1 = """ +init_script = """ set wng off; set bail on; set list on; set count on; create or alter view v_check as - select + select mon$database_name ,current_user as who_ami ,r.rdb$role_name @@ -71,7 +70,7 @@ init_script_1 = """ grant select on v_check to public; grant select on att_log to public; - --------------------------------- [ !! ] -- do NOT: grant select on test to u01; -- [ !! ] + --------------------------------- [ !! ] -- do NOT: grant select on test to u01; -- [ !! ] commit; set term ^; @@ -100,184 +99,18 @@ init_script_1 = """ -- Ability to get database statistics. -- NB: 'IGNORE_DB_TRIGGERS' - required for get full db statistics, otherwise: -- Unable to perform operation: system privilege IGNORE_DB_TRIGGERS is missing - create role role_for_use_gstat_utility + create role role_for_use_gstat_utility set system privileges to USE_GSTAT_UTILITY, IGNORE_DB_TRIGGERS; commit; grant default role_for_use_gstat_utility to user u01; commit; """ -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -# test_script_1 -#--- -# -# import os -# import subprocess -# import re -# -# db_file=db_conn.database_name -# db_conn.close() -# -# #-------------------------------------------- -# -# def flush_and_close( file_handle ): -# # https://docs.python.org/2/library/os.html#os.fsync -# # If you're starting with a Python file object f, -# # first do f.flush(), and -# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. -# global os -# -# file_handle.flush() -# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: -# # otherwise: "OSError: [Errno 9] Bad file descriptor"! 
-# os.fsync(file_handle.fileno()) -# file_handle.close() -# -# #-------------------------------------------- -# -# def cleanup( f_names_list ): -# global os -# for f in f_names_list: -# if type(f) == file: -# del_name = f.name -# elif type(f) == str: -# del_name = f -# else: -# print('Unrecognized type of element:', f, ' - can not be treated as file.') -# del_name = None -# -# if del_name and os.path.isfile( del_name ): -# os.remove( del_name ) -# -# #-------------------------------------------- -# -# f_db_stat_log=open( os.path.join(context['temp_directory'],'tmp_dbstat.log'), 'w') -# f_db_stat_err=open( os.path.join(context['temp_directory'],'tmp_dbstat.err'), 'w') -# subprocess.call([context['fbsvcmgr_path'],"localhost:service_mgr", -# "user","U01","password","123", -# "action_db_stats", -# "dbname", db_file, -# "sts_record_versions", -# "sts_data_pages", -# "sts_idx_pages", -# "sts_sys_relations" -# ], -# stdout=f_db_stat_log, -# stderr=f_db_stat_err -# ) -# -# flush_and_close( f_db_stat_log ) -# flush_and_close( f_db_stat_err ) -# -# # Separate call for get encryption statistics: -# -# f_db_encr_log=open( os.path.join(context['temp_directory'],'tmp_dbencr.log'), 'w') -# f_db_encr_err=open( os.path.join(context['temp_directory'],'tmp_dbencr.err'), 'w') -# -# subprocess.call([context['fbsvcmgr_path'],"localhost:service_mgr", -# "user","U01","password","123", -# "action_db_stats", -# "dbname", db_file, -# "sts_encryption" -# ], -# stdout=f_db_encr_log, -# stderr=f_db_encr_err -# ) -# -# flush_and_close( f_db_encr_log ) -# flush_and_close( f_db_encr_err ) -# -# #----------------------- -# -# -# # Check content of logs: -# ####### -# -# # Must be EMPTY: -# with open( f_db_stat_err.name,'r') as f: -# for line in f: -# print('UNEXPECTED GSTAT STDERR: '+line.upper()) -# -# # Pointer pages: 1, data page slots: 2 -# # Data pages: 2, average fill: 8% -# # Primary pages: 1, secondary pages: 1, swept pages: 0 -# # Empty pages: 0, full pages: 0 -# # Blobs: 9, total length: 160, blob pages: 0 -# -# # Must contain: -# check_words=[ -# "rdb$database" -# ,"rdb$index" -# ,"primary pointer page" -# ,"index root page" -# ,"total formats" -# ,"total records" -# ,"total versions" -# ,"total fragments" -# ,"compression ratio" -# ,"pointer pages" -# ,"data pages" -# ,"primary pages" -# ,"empty pages" -# ,"blobs" -# ,"swept pages" -# ,"full pages" -# ,"fill distribution" -# ,"0 - 19%" -# ,"80 - 99%" -# ] -# -# f = open( f_db_stat_log.name, 'r') -# lines = f.read().lower() -# for i in range(len(check_words)): -# if check_words[i].lower() in lines: -# print( 'Found in base "sts_" output: ' + check_words[i].lower() ) -# else: -# print( 'UNEXPECTEDLY NOT found in base "sts_" output: ' + check_words[i].lower() ) -# flush_and_close( f ) -# -# -# # Must be EMPTY: -# with open( f_db_encr_err.name,'r') as f: -# for line in f: -# print('UNEXPECTED STS_ENCRYPTION STDERR: '+line.upper()) -# -# -# # Encryption statistics should be like this: -# # --------------------- -# # Data pages: total NNN, encrypted 0, non-crypted NNN -# # Index pages: total MMM, encrypted 0, non-crypted MMM -# # Blob pages: total 0, encrypted 0, non-crypted 0 -# # Generator pages: total PPP, encrypted 0, non-crypted PPP ------------- 31.10.2019 NB: THIS WAS ADDED RECENLTLY -# -# enc_pattern=re.compile(".*total[\\s]+[\\d]+,[\\s]+encrypted[\\s]+[\\d]+,[\\s]+non-crypted[\\s]+[\\d]+") -# with open( f_db_encr_log.name,'r') as f: -# for line in f: -# # if enc_pattern.match(line): -# if 'encrypted' in line: -# print('Found in "sts_encryption" output: ' + 
line.lower()) -# -# -# # Cleanup: -# ########## -# -# sql_final=''' -# set list on; -# set count on; -# select * from att_log; -- this should output: "Records affected: 0" because U01 must ignore DB-level trigger -# commit; -# drop user u01; -# commit; -# ''' -# runProgram('isql',[dsn,'-user',user_name, '-pas', user_password], sql_final) -# -# cleanup( (f_db_stat_log, f_db_stat_err, f_db_encr_log, f_db_encr_err) ) -# -#--- -act_1 = python_act('db_1', substitutions=substitutions_1) +act = python_act('db') -expected_stdout_1 = """ +expected_stdout = """ Found in base "sts_" output: rdb$database Found in base "sts_" output: rdb$index Found in base "sts_" output: primary pointer page @@ -304,9 +137,174 @@ expected_stdout_1 = """ Records affected: 0 """ +@pytest.mark.skip('FIXME: Not IMPLEMENTED') @pytest.mark.version('>=4.0') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") +# test_script_1 +#--- +# +# import os +# import subprocess +# import re +# +# db_file=db_conn.database_name +# db_conn.close() +# +# #-------------------------------------------- +# +# def flush_and_close( file_handle ): +# # https://docs.python.org/2/library/os.html#os.fsync +# # If you're starting with a Python file object f, +# # first do f.flush(), and +# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. +# global os +# +# file_handle.flush() +# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: +# # otherwise: "OSError: [Errno 9] Bad file descriptor"! +# os.fsync(file_handle.fileno()) +# file_handle.close() +# +# #-------------------------------------------- +# +# def cleanup( f_names_list ): +# global os +# for f in f_names_list: +# if type(f) == file: +# del_name = f.name +# elif type(f) == str: +# del_name = f +# else: +# print('Unrecognized type of element:', f, ' - can not be treated as file.') +# del_name = None +# +# if del_name and os.path.isfile( del_name ): +# os.remove( del_name ) +# +# #-------------------------------------------- +# +# f_db_stat_log=open( os.path.join(context['temp_directory'],'tmp_dbstat.log'), 'w') +# f_db_stat_err=open( os.path.join(context['temp_directory'],'tmp_dbstat.err'), 'w') +# subprocess.call([context['fbsvcmgr_path'],"localhost:service_mgr", +# "user","U01","password","123", +# "action_db_stats", +# "dbname", db_file, +# "sts_record_versions", +# "sts_data_pages", +# "sts_idx_pages", +# "sts_sys_relations" +# ], +# stdout=f_db_stat_log, +# stderr=f_db_stat_err +# ) +# +# flush_and_close( f_db_stat_log ) +# flush_and_close( f_db_stat_err ) +# +# # Separate call for get encryption statistics: +# +# f_db_encr_log=open( os.path.join(context['temp_directory'],'tmp_dbencr.log'), 'w') +# f_db_encr_err=open( os.path.join(context['temp_directory'],'tmp_dbencr.err'), 'w') +# +# subprocess.call([context['fbsvcmgr_path'],"localhost:service_mgr", +# "user","U01","password","123", +# "action_db_stats", +# "dbname", db_file, +# "sts_encryption" +# ], +# stdout=f_db_encr_log, +# stderr=f_db_encr_err +# ) +# +# flush_and_close( f_db_encr_log ) +# flush_and_close( f_db_encr_err ) +# +# #----------------------- +# +# +# # Check content of logs: +# ####### +# +# # Must be EMPTY: +# with open( f_db_stat_err.name,'r') as f: +# for line in f: +# print('UNEXPECTED GSTAT STDERR: '+line.upper()) +# +# # Pointer pages: 1, data page slots: 2 +# # Data pages: 2, average fill: 8% +# # Primary pages: 1, secondary pages: 1, swept 
pages: 0 +# # Empty pages: 0, full pages: 0 +# # Blobs: 9, total length: 160, blob pages: 0 +# +# # Must contain: +# check_words=[ +# "rdb$database" +# ,"rdb$index" +# ,"primary pointer page" +# ,"index root page" +# ,"total formats" +# ,"total records" +# ,"total versions" +# ,"total fragments" +# ,"compression ratio" +# ,"pointer pages" +# ,"data pages" +# ,"primary pages" +# ,"empty pages" +# ,"blobs" +# ,"swept pages" +# ,"full pages" +# ,"fill distribution" +# ,"0 - 19%" +# ,"80 - 99%" +# ] +# +# f = open( f_db_stat_log.name, 'r') +# lines = f.read().lower() +# for i in range(len(check_words)): +# if check_words[i].lower() in lines: +# print( 'Found in base "sts_" output: ' + check_words[i].lower() ) +# else: +# print( 'UNEXPECTEDLY NOT found in base "sts_" output: ' + check_words[i].lower() ) +# flush_and_close( f ) +# +# +# # Must be EMPTY: +# with open( f_db_encr_err.name,'r') as f: +# for line in f: +# print('UNEXPECTED STS_ENCRYPTION STDERR: '+line.upper()) +# +# +# # Encryption statistics should be like this: +# # --------------------- +# # Data pages: total NNN, encrypted 0, non-crypted NNN +# # Index pages: total MMM, encrypted 0, non-crypted MMM +# # Blob pages: total 0, encrypted 0, non-crypted 0 +# # Generator pages: total PPP, encrypted 0, non-crypted PPP ------------- 31.10.2019 NB: THIS WAS ADDED RECENLTLY +# +# enc_pattern=re.compile(".*total[\\s]+[\\d]+,[\\s]+encrypted[\\s]+[\\d]+,[\\s]+non-crypted[\\s]+[\\d]+") +# with open( f_db_encr_log.name,'r') as f: +# for line in f: +# # if enc_pattern.match(line): +# if 'encrypted' in line: +# print('Found in "sts_encryption" output: ' + line.lower()) +# +# +# # Cleanup: +# ########## +# +# sql_final=''' +# set list on; +# set count on; +# select * from att_log; -- this should output: "Records affected: 0" because U01 must ignore DB-level trigger +# commit; +# drop user u01; +# commit; +# ''' +# runProgram('isql',[dsn,'-user',user_name, '-pas', user_password], sql_final) +# +# cleanup( (f_db_stat_log, f_db_stat_err, f_db_encr_log, f_db_encr_err) ) +# +#--- diff --git a/tests/functional/syspriv/test_use_nbackup_utility.py b/tests/functional/syspriv/test_use_nbackup_utility.py index 2d5b0f7f..bfdcc0a7 100644 --- a/tests/functional/syspriv/test_use_nbackup_utility.py +++ b/tests/functional/syspriv/test_use_nbackup_utility.py @@ -1,36 +1,28 @@ #coding:utf-8 -# -# id: functional.syspriv.use_nbackup_utility -# title: Check ability to use nbackup. -# decription: -# Verify ability to issue ALTER DATABASE BEGIN/END BACKUP command by non-sysdba user. -# Checked on 4.0.0.262. -# -# tracker_id: -# min_versions: ['4.0.0'] -# versions: 4.0 -# qmid: None + +""" +ID: syspriv.use-nbackup-utility +TITLE: Check ability to use nbackup +DESCRIPTION: + Verify ability to issue ALTER DATABASE BEGIN/END BACKUP command by non-sysdba user. 
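For orientation before the converted nbackup test that continues below: the whole check reduces to granting the USE_NBACKUP_UTILITY system privilege through a default role and then issuing ALTER DATABASE BEGIN/END BACKUP as the non-SYSDBA user, which would otherwise fail with "no permission for ALTER access to DATABASE". A condensed, self-contained sketch in the same style as the converted tests follows; the user/role names, the hard-coded SYSDBA password and the inline cleanup are illustrative only and not part of the patch:

import pytest
from firebird.qa import *

db = db_factory()

privilege_script = """
    create or alter user tmp_nbk_user password '123' revoke admin role;
    create role role_for_use_nbackup_utility
        set system privileges to USE_NBACKUP_UTILITY;
    grant default role_for_use_nbackup_utility to user tmp_nbk_user;
    commit;

    connect '$(DSN)' user tmp_nbk_user password '123';
    alter database begin backup;  -- allowed only because of USE_NBACKUP_UTILITY
    alter database end backup;
    commit;

    connect '$(DSN)' user sysdba password 'masterkey';
    drop user tmp_nbk_user;
    drop role role_for_use_nbackup_utility;
    commit;
"""

act_sketch = isql_act('db', privilege_script)

@pytest.mark.version('>=4.0')
def test_nbackup_privilege_sketch(act_sketch: Action):
    act_sketch.execute()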
+FBTEST: functional.syspriv.use_nbackup_utility +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() +test_user = user_factory('db', name='u01', do_not_create=True) +test_role = role_factory('db', name='role_for_use_nbackup_utility', do_not_create=True) -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set wng off; set bail on; set list on; set count on; create or alter view v_check as - select + select current_user as who_ami ,r.rdb$role_name ,rdb$role_in_use(r.rdb$role_name) as RDB_ROLE_IN_USE @@ -44,7 +36,7 @@ test_script_1 = """ create or alter user u01 password '123' revoke admin role; revoke all on all from u01; commit; - +/* set term ^; execute block as begin @@ -53,7 +45,7 @@ test_script_1 = """ end^ set term ;^ commit; - +*/ -- Use nbackup to create database's copies create role role_for_use_nbackup_utility set system privileges to USE_NBACKUP_UTILITY; commit; @@ -63,7 +55,7 @@ test_script_1 = """ -- Statement failed, SQLSTATE = 28000 -- unsuccessful metadata update -- -ALTER DATABASE failed - -- -no permission for ALTER access to DATABASE + -- -no permission for ALTER access to DATABASE grant default role_for_use_nbackup_utility to user u01; commit; @@ -84,22 +76,22 @@ test_script_1 = """ select mon$backup_state from mon$database; commit; - connect '$(DSN)' user sysdba password 'masterkey'; - drop user u01; - drop role role_for_use_nbackup_utility; - commit; + -- connect '$(DSN)' user sysdba password 'masterkey'; + -- drop user u01; + -- drop role role_for_use_nbackup_utility; + -- commit; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ WHO_AMI U01 - RDB$ROLE_NAME RDB$ADMIN + RDB$ROLE_NAME RDB$ADMIN RDB_ROLE_IN_USE RDB$SYSTEM_PRIVILEGES FFFFFFFFFFFFFFFF WHO_AMI U01 - RDB$ROLE_NAME ROLE_FOR_USE_NBACKUP_UTILITY + RDB$ROLE_NAME ROLE_FOR_USE_NBACKUP_UTILITY RDB_ROLE_IN_USE RDB$SYSTEM_PRIVILEGES 1000000000000000 @@ -123,8 +115,7 @@ expected_stdout_1 = """ """ @pytest.mark.version('>=4.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action, test_user, test_role): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/table/alter/test_01.py b/tests/functional/table/alter/test_01.py index 18fb6749..282c9872 100644 --- a/tests/functional/table/alter/test_01.py +++ b/tests/functional/table/alter/test_01.py @@ -1,41 +1,30 @@ #coding:utf-8 -# -# id: functional.table.alter.01 -# title: ALTER TABLE - ADD column -# decription: ALTER TABLE - ADD column -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.table.alter.alter_table_01 + +""" +ID: table.alter-01 +TITLE: ALTER TABLE - ADD column +DESCRIPTION: +FBTEST: functional.table.alter.01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE test( id INTEGER); +init_script = """CREATE TABLE test( id INTEGER); commit;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = 
"""ALTER TABLE test ADD text varchar(32); +test_script = """ALTER TABLE test ADD text varchar(32); SHOW TABLE test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ID INTEGER Nullable +expected_stdout = """ID INTEGER Nullable TEXT VARCHAR(32) Nullable""" -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/table/alter/test_02.py b/tests/functional/table/alter/test_02.py index eaff2e46..db63de82 100644 --- a/tests/functional/table/alter/test_02.py +++ b/tests/functional/table/alter/test_02.py @@ -1,43 +1,32 @@ #coding:utf-8 -# -# id: functional.table.alter.02 -# title: ALTER TABLE - ADD column (test2) -# decription: ALTER TABLE - ADD column (test2) -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.table.alter.alter_table_02 + +""" +ID: table.alter-02 +TITLE: ALTER TABLE - ADD column (test2) +DESCRIPTION: +FBTEST: functional.table.alter.02 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE test( id INTEGER); +init_script = """CREATE TABLE test( id INTEGER); commit;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """ALTER TABLE test ADD text varchar(32) DEFAULT CURRENT_ROLE NOT NULL CONSTRAINT pk PRIMARY KEY; +test_script = """ALTER TABLE test ADD text varchar(32) DEFAULT CURRENT_ROLE NOT NULL CONSTRAINT pk PRIMARY KEY; SHOW TABLE test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ID INTEGER Nullable +expected_stdout = """ID INTEGER Nullable TEXT VARCHAR(32) Not Null DEFAULT CURRENT_ROLE CONSTRAINT PK: Primary key (TEXT)""" -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/table/alter/test_03.py b/tests/functional/table/alter/test_03.py index 2eba6d99..c72e6921 100644 --- a/tests/functional/table/alter/test_03.py +++ b/tests/functional/table/alter/test_03.py @@ -1,42 +1,31 @@ #coding:utf-8 -# -# id: functional.table.alter.03 -# title: ALTER TABLE - ADD CONSTRAINT - PRIMARY KEY -# decription: ALTER TABLE - ADD CONSTRAINT - PRIMARY KEY -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.table.alter.alter_table_03 + +""" +ID: table.alter-03 +TITLE: ALTER TABLE - ADD CONSTRAINT - PRIMARY KEY +DESCRIPTION: +FBTEST: functional.table.alter.03 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE test( id INTEGER NOT NULL); +init_script = """CREATE TABLE test( id INTEGER NOT NULL); commit;""" -db_1 = 
db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """ALTER TABLE test ADD CONSTRAINT pk PRIMARY KEY(id); +test_script = """ALTER TABLE test ADD CONSTRAINT pk PRIMARY KEY(id); SHOW TABLE test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ID INTEGER Not Null +expected_stdout = """ID INTEGER Not Null CONSTRAINT PK: Primary key (ID)""" -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/table/alter/test_04.py b/tests/functional/table/alter/test_04.py index 4e8ea386..1a4a9d2b 100644 --- a/tests/functional/table/alter/test_04.py +++ b/tests/functional/table/alter/test_04.py @@ -1,42 +1,31 @@ #coding:utf-8 -# -# id: functional.table.alter.04 -# title: ALTER TABLE - ADD CONSTRAINT - UNIQUE -# decription: ALTER TABLE - ADD CONSTRAINT - UNIQUE -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.table.alter.alter_table_04 + +""" +ID: table.alter-04 +TITLE: ALTER TABLE - ADD CONSTRAINT - UNIQUE +DESCRIPTION: +FBTEST: functional.table.alter.04 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE test( id INTEGER NOT NULL); +init_script = """CREATE TABLE test( id INTEGER NOT NULL); commit;""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """ALTER TABLE test ADD CONSTRAINT unq UNIQUE(id); +test_script = """ALTER TABLE test ADD CONSTRAINT unq UNIQUE(id); SHOW TABLE test;""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ID INTEGER Not Null +expected_stdout = """ID INTEGER Not Null CONSTRAINT UNQ: Unique key (ID)""" -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/table/alter/test_05.py b/tests/functional/table/alter/test_05.py index a90cd399..87a2bbfa 100644 --- a/tests/functional/table/alter/test_05.py +++ b/tests/functional/table/alter/test_05.py @@ -1,40 +1,32 @@ #coding:utf-8 -# -# id: functional.table.alter.05 -# title: ALTER TABLE - ALTER - TO -# decription: ALTER TABLE - ALTER - TO -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.table.alter.alter_table_05 + +""" +ID: table.alter-05 +TITLE: ALTER TABLE - ALTER - TO +DESCRIPTION: +FBTEST: functional.table.alter.05 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +init_script = """CREATE TABLE test( id INTEGER NOT NULL); +commit; +""" -substitutions_1 = [] +db = db_factory(init=init_script) -init_script_1 = """CREATE TABLE test( id INTEGER NOT NULL); 
-commit;""" +test_script = """ALTER TABLE test ALTER id TO new_col_name; +SHOW TABLE test; +""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +act = isql_act('db', test_script) -test_script_1 = """ALTER TABLE test ALTER id TO new_col_name; -SHOW TABLE test;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """NEW_COL_NAME INTEGER Not Null""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """NEW_COL_NAME INTEGER Not Null +""" +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/table/alter/test_06.py b/tests/functional/table/alter/test_06.py index 2c22119d..8cae2429 100644 --- a/tests/functional/table/alter/test_06.py +++ b/tests/functional/table/alter/test_06.py @@ -1,40 +1,32 @@ #coding:utf-8 -# -# id: functional.table.alter.06 -# title: ALTER TABLE - ALTER - TYPE -# decription: ALTER TABLE - ALTER - TYPE -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.table.alter.alter_table_06 + +""" +ID: table.alter-06 +TITLE: ALTER TABLE - ALTER - TYPE +DESCRIPTION: +FBTEST: functional.table.alter.06 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +init_script = """CREATE TABLE test( id INTEGER NOT NULL); +commit; +""" -substitutions_1 = [] +db = db_factory(init=init_script) -init_script_1 = """CREATE TABLE test( id INTEGER NOT NULL); -commit;""" +test_script = """ALTER TABLE test ALTER id TYPE VARCHAR(32); +SHOW TABLE test; +""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +act = isql_act('db', test_script) -test_script_1 = """ALTER TABLE test ALTER id TYPE VARCHAR(32); -SHOW TABLE test;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ID VARCHAR(32) Not Null""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """ID VARCHAR(32) Not Null +""" +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/table/alter/test_07.py b/tests/functional/table/alter/test_07.py index 9dd9ad60..5d7c7a02 100644 --- a/tests/functional/table/alter/test_07.py +++ b/tests/functional/table/alter/test_07.py @@ -1,42 +1,34 @@ #coding:utf-8 -# -# id: functional.table.alter.07 -# title: ALTER TABLE - ALTER - POSITION -# decription: ALTER TABLE - ALTER - POSITION -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.table.alter.alter_table_07 + +""" +ID: table.alter-07 +TITLE: ALTER TABLE - ALTER - POSITION +DESCRIPTION: +FBTEST: functional.table.alter.07 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE test( id INTEGER NOT NULL, +init_script = """CREATE TABLE test( id INTEGER NOT NULL, text VARCHAR(32)); -commit;""" +commit; +""" -db_1 = 
db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """ALTER TABLE test ALTER text POSITION 1; -SHOW TABLE test;""" +test_script = """ALTER TABLE test ALTER text POSITION 1; +SHOW TABLE test; +""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """TEXT VARCHAR(32) Nullable -ID INTEGER Not Null""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """TEXT VARCHAR(32) Nullable +ID INTEGER Not Null +""" +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/table/alter/test_08.py b/tests/functional/table/alter/test_08.py index 7bec6340..4231d127 100644 --- a/tests/functional/table/alter/test_08.py +++ b/tests/functional/table/alter/test_08.py @@ -1,42 +1,33 @@ #coding:utf-8 -# -# id: functional.table.alter.08 -# title: ALTER TABLE - DROP -# decription: ALTER TABLE - DROP -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# SHOW TABLE -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.table.alter.alter_table_08 + +""" +ID: table.alter-08 +TITLE: ALTER TABLE - DROP +DESCRIPTION: +FBTEST: functional.table.alter.08 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE test( id INTEGER NOT NULL, +init_script = """CREATE TABLE test( id INTEGER NOT NULL, text VARCHAR(32)); -commit;""" +commit; +""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """ALTER TABLE test DROP text; -SHOW TABLE test;""" +test_script = """ALTER TABLE test DROP text; +SHOW TABLE test; +""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ID INTEGER Not Null""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """ID INTEGER Not Null +""" +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/table/alter/test_09.py b/tests/functional/table/alter/test_09.py index 148c2ce1..bb5cda75 100644 --- a/tests/functional/table/alter/test_09.py +++ b/tests/functional/table/alter/test_09.py @@ -1,48 +1,38 @@ #coding:utf-8 -# -# id: functional.table.alter.09 -# title: ALTER TABLE - DROP (with data) -# decription: ALTER TABLE - DROP (with data) -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# INSERT -# BASIC SELECT -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.table.alter.alter_table_09 + +""" +ID: table.alter-09 +TITLE: ALTER TABLE - DROP (with data) +DESCRIPTION: +FBTEST: functional.table.alter.09 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE test( id INTEGER NOT NULL, +init_script = """CREATE TABLE test( id INTEGER NOT NULL, text VARCHAR(32)); commit; INSERT 
INTO test(id,text) VALUES(0,'text 1'); -COMMIT;""" +COMMIT; +""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """ALTER TABLE test DROP text; -SELECT * FROM test;""" +test_script = """ALTER TABLE test DROP text; +SELECT * FROM test; +""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ ID +expected_stdout = """ ID ============ -0""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +0 +""" +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/table/alter/test_10.py b/tests/functional/table/alter/test_10.py index ab30d552..556d587c 100644 --- a/tests/functional/table/alter/test_10.py +++ b/tests/functional/table/alter/test_10.py @@ -1,43 +1,34 @@ #coding:utf-8 -# -# id: functional.table.alter.10 -# title: ALTER TABLE - DROP CONSTRAINT - PRIMARY KEY -# decription: ALTER TABLE - DROP CONSTRAINT - PRIMARY KEY -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# SHOW TABLE -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.table.alter.alter_table_10 + +""" +ID: table.alter-10 +TITLE: ALTER TABLE - DROP CONSTRAINT - PRIMARY KEY +DESCRIPTION: +FBTEST: functional.table.alter.10 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE test( id INTEGER NOT NULL CONSTRAINT pk PRIMARY KEY, +init_script = """CREATE TABLE test( id INTEGER NOT NULL CONSTRAINT pk PRIMARY KEY, text VARCHAR(32)); -commit;""" +commit; +""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """ALTER TABLE test DROP CONSTRAINT pk; -SHOW TABLE test;""" +test_script = """ALTER TABLE test DROP CONSTRAINT pk; +SHOW TABLE test; +""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ID INTEGER Not Null -TEXT VARCHAR(32) Nullable""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """ID INTEGER Not Null +TEXT VARCHAR(32) Nullable +""" +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/table/alter/test_11.py b/tests/functional/table/alter/test_11.py index 98bf6a03..ab85133a 100644 --- a/tests/functional/table/alter/test_11.py +++ b/tests/functional/table/alter/test_11.py @@ -1,43 +1,34 @@ #coding:utf-8 -# -# id: functional.table.alter.11 -# title: ALTER TABLE - DROP CONSTRAINT - UNIQUE -# decription: ALTER TABLE - DROP CONSTRAINT - UNIQUE -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# SHOW TABLE -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.table.alter.alter_table_11 + +""" +ID: table.alter-11 +TITLE: ALTER TABLE - DROP CONSTRAINT - UNIQUE +DESCRIPTION: +FBTEST: functional.table.alter.11 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 
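The alter/test_01 … test_11 conversions above and just below each exercise a single ALTER TABLE form in isolation (ADD column, ADD CONSTRAINT, ALTER … TO, ALTER … TYPE, ALTER … POSITION, DROP column, DROP CONSTRAINT). For quick reference, the same forms combined in one script; this is a sketch only, the table and constraint names are made up and it is not part of the patch:

alter_forms = """
    create table t1(id integer not null, txt varchar(32));
    commit;
    alter table t1 add note varchar(10);                  -- ADD column
    alter table t1 add constraint t1_pk primary key(id);  -- ADD CONSTRAINT
    alter table t1 alter txt to txt_renamed;              -- ALTER ... TO (rename column)
    alter table t1 alter note type varchar(40);           -- ALTER ... TYPE (widen datatype)
    alter table t1 alter txt_renamed position 1;          -- ALTER ... POSITION
    alter table t1 drop note;                             -- DROP column
    alter table t1 drop constraint t1_pk;                 -- DROP CONSTRAINT
    commit;
    show table t1;
"""
# Such a script would be passed to isql_act('db', alter_forms) in the style used above.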
-# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE test( id INTEGER NOT NULL CONSTRAINT unq UNIQUE, +init_script = """CREATE TABLE test( id INTEGER NOT NULL CONSTRAINT unq UNIQUE, text VARCHAR(32)); -commit;""" +commit; +""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """ALTER TABLE test DROP CONSTRAINT unq; -SHOW TABLE test;""" +test_script = """ALTER TABLE test DROP CONSTRAINT unq; +SHOW TABLE test; +""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ID INTEGER Not Null -TEXT VARCHAR(32) Nullable""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +expected_stdout = """ID INTEGER Not Null +TEXT VARCHAR(32) Nullable +""" +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/table/alter/test_12.py b/tests/functional/table/alter/test_12.py index 24b0fd55..57cb6b06 100644 --- a/tests/functional/table/alter/test_12.py +++ b/tests/functional/table/alter/test_12.py @@ -1,26 +1,18 @@ #coding:utf-8 -# -# id: functional.table.alter.12 -# title: Verify ability to create exactly 254 changes of format (increasing it by 1) after initial creating table -# decription: -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: + +""" +ID: table.alter-12 +TITLE: Verify ability to create exactly 254 changes of format (increasing it by 1) after initial creating table +DESCRIPTION: +FBTEST: functional.table.alter.12 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ recreate table test1(f0 int); -- this also create "format #1" -- following shoudl run OK because of 254 changes: alter table test1 add f1 int; @@ -541,9 +533,9 @@ test_script_1 = """ commit; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ F0 INTEGER Nullable F1 INTEGER Nullable F2 INTEGER Nullable @@ -800,19 +792,18 @@ expected_stdout_1 = """ F253 INTEGER Nullable F254 INTEGER Nullable """ -expected_stderr_1 = """ + +expected_stderr = """ Statement failed, SQLSTATE = 54000 unsuccessful metadata update -TABLE TEST2 -too many versions """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.expected_stderr = expected_stderr + act.execute() + assert (act.clean_stdout == act.clean_expected_stdout and + act.clean_stderr == act.clean_expected_stderr) diff --git a/tests/functional/table/create/test_01.py b/tests/functional/table/create/test_01.py index 06f89bc9..edcebd3f 100644 --- a/tests/functional/table/create/test_01.py +++ b/tests/functional/table/create/test_01.py @@ -1,29 +1,18 @@ #coding:utf-8 -# -# id: 
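The long script in alter/test_12 above spells out 254 ALTER TABLE statements by hand to stay just inside Firebird's per-table limit on record formats; the TEST2 part of the same script presumably goes one change further, which is what produces the "too many versions" error seen in expected_stderr. A sketch (not part of the patch) of how the passing half could be generated instead of written out:

# Builds "alter table test1 add f1 int;" ... "alter table test1 add f254 int;"
ok_alters = '\n'.join(f'alter table test1 add f{i} int;' for i in range(1, 255))

generated_script = f"""
    recreate table test1(f0 int);  -- this already creates record format #1
    {ok_alters}
    commit;
    show table test1;              -- columns F0..F254, still within the format limit
"""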
functional.table.create.01 -# title: CREATE TABLE - types -# decription: CREATE TABLE - types -# -# Dependencies: -# CREATE DATABASE -# tracker_id: -# min_versions: [] -# versions: 2.0 -# qmid: functional.table.create.create_table_01 + +""" +ID: table.create-01 +TITLE: CREATE TABLE - types +DESCRIPTION: +FBTEST: functional.table.create.01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """CREATE TABLE test( +test_script = """CREATE TABLE test( c1 SMALLINT, c2 INTEGER, c3 FLOAT, @@ -48,11 +37,12 @@ test_script_1 = """CREATE TABLE test( c22 BLOB SEGMENT SIZE 512, c23 BLOB (1024,1) ); -SHOW TABLE test;""" +SHOW TABLE test; +""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """C1 SMALLINT Nullable +expected_stdout = """C1 SMALLINT Nullable C2 INTEGER Nullable C3 FLOAT Nullable C4 DOUBLE PRECISION Nullable @@ -74,11 +64,11 @@ C19 VARCHAR(16000) CHARACTER SET ISO8859_1 Nullable C20 BLOB segment 80, subtype BINARY Nullable C21 BLOB segment 80, subtype TEXT Nullable C22 BLOB segment 512, subtype BINARY Nullable -C23 BLOB segment 1024, subtype TEXT Nullable""" - -@pytest.mark.version('>=2.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +C23 BLOB segment 1024, subtype TEXT Nullable +""" +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/table/create/test_02.py b/tests/functional/table/create/test_02.py index f3ebb743..768b5a8e 100644 --- a/tests/functional/table/create/test_02.py +++ b/tests/functional/table/create/test_02.py @@ -1,31 +1,22 @@ #coding:utf-8 -# -# id: functional.table.create.02 -# title: CREATE TABLE - column properties -# decription: CREATE TABLE - column properties -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.table.create.create_table_02 + +""" +ID: table.create-02 +TITLE: CREATE TABLE - column properties +DESCRIPTION: +FBTEST: functional.table.create.02 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +init_script = """CREATE TABLE fk(id INT NOT NULL PRIMARY KEY); +commit; +""" -substitutions_1 = [] +db = db_factory(init=init_script) -init_script_1 = """CREATE TABLE fk(id INT NOT NULL PRIMARY KEY); -commit;""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """CREATE TABLE test( +test_script = """CREATE TABLE test( c1 SMALLINT NOT NULL, c2 INTEGER DEFAULT 0, c3 FLOAT NOT NULL UNIQUE, @@ -36,11 +27,12 @@ test_script_1 = """CREATE TABLE test( c8 CHAR(31) DEFAULT USER, c9 VARCHAR(40) DEFAULT 'data' ); -SHOW TABLE test;""" +SHOW TABLE test; +""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """C1 SMALLINT Not Null +expected_stdout = """C1 SMALLINT Not Null C2 INTEGER Nullable DEFAULT 0 C3 FLOAT Not Null C4 DOUBLE PRECISION Not Null @@ -56,11 +48,11 @@ CONSTRAINT INTEG_7: CONSTRAINT INTEG_5: Unique key (C3) CONSTRAINT INTEG_9: -CHECK (c6>c5)""" - 
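The create/test_01 and create/test_02 conversions above (test_02 finishing just below) still compare against SHOW TABLE output, which is sensitive to isql formatting changes between server versions. An alternative worth noting, shown here only as a sketch and not as part of the patch, is to read the declared constraints straight from the system tables, which keeps expected_stdout stable:

constraint_check = """
    set list on;
    set count on;
    select rc.rdb$constraint_name as constraint_name,
           rc.rdb$constraint_type as constraint_type
    from rdb$relation_constraints rc
    where rc.rdb$relation_name = upper('test')
    order by rc.rdb$constraint_name;
"""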
-@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +CHECK (c6>c5) +""" +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/table/create/test_03.py b/tests/functional/table/create/test_03.py index 0c05c10b..83485ad7 100644 --- a/tests/functional/table/create/test_03.py +++ b/tests/functional/table/create/test_03.py @@ -1,50 +1,42 @@ #coding:utf-8 -# -# id: functional.table.create.03 -# title: CREATE TABLE - charset + colations + domain -# decription: CREATE TABLE - charset + colations + domain -# -# Dependencies: -# CREATE DATABASE -# CREATE DOMAIN -# tracker_id: -# min_versions: [] -# versions: 2.0 -# qmid: functional.table.create.create_table_03 + +""" +ID: table.create-03 +TITLE: CREATE TABLE - charset + colations + domain +DESCRIPTION: +FBTEST: functional.table.create.03 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.0 -# resources: None +init_script = """CREATE DOMAIN test VARCHAR(32765)[40000]; +commit; +""" -substitutions_1 = [] +db = db_factory(init=init_script) -init_script_1 = """CREATE DOMAIN test VARCHAR(32765)[40000]; -commit;""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """CREATE TABLE test( +test_script = """CREATE TABLE test( c1 VARCHAR(40) CHARACTER SET CYRL COLLATE CYRL, c2 VARCHAR(40) CHARACTER SET DOS437 COLLATE DB_DEU437, c3 BLOB SUB_TYPE TEXT CHARACTER SET DOS437, c4 test ); -SHOW TABLE test;""" +SHOW TABLE test; +""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """C1 VARCHAR(40) CHARACTER SET CYRL Nullable +expected_stdout = """C1 VARCHAR(40) CHARACTER SET CYRL Nullable C2 VARCHAR(40) CHARACTER SET DOS437 Nullable COLLATE DB_DEU437 C3 BLOB segment 80, subtype TEXT CHARACTER SET DOS437 Nullable C4 (TEST) ARRAY OF [40000] -VARCHAR(32765) Nullable""" - -@pytest.mark.version('>=2.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +VARCHAR(32765) Nullable +""" +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/table/create/test_04.py b/tests/functional/table/create/test_04.py index 599e9b08..89a18f8b 100644 --- a/tests/functional/table/create/test_04.py +++ b/tests/functional/table/create/test_04.py @@ -1,31 +1,22 @@ #coding:utf-8 -# -# id: functional.table.create.04 -# title: CREATE TABLE - constraints -# decription: CREATE TABLE - constraints -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.table.create.create_table_04 + +""" +ID: table.create-04 +TITLE: CREATE TABLE - constraints +DESCRIPTION: +FBTEST: functional.table.create.04 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None +init_script = """CREATE TABLE fk(id INT NOT NULL PRIMARY KEY); +commit; +""" -substitutions_1 = [] +db = db_factory(init=init_script) -init_script_1 = """CREATE TABLE fk(id INT NOT NULL PRIMARY KEY); -commit;""" - -db_1 = 
db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """CREATE TABLE test( +test_script = """CREATE TABLE test( c1 SMALLINT NOT NULL, c2 SMALLINT NOT NULL, c3 SMALLINT NOT NULL, @@ -37,11 +28,12 @@ test_script_1 = """CREATE TABLE test( CONSTRAINT test2 FOREIGN KEY (c3) REFERENCES fk(id) ON DELETE SET NULL, CONSTRAINT test3 CHECK (NOT c3>c1) ); -SHOW TABLE test;""" +SHOW TABLE test; +""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """C1 SMALLINT Not Null +expected_stdout = """C1 SMALLINT Not Null C2 SMALLINT Not Null C3 SMALLINT Not Null CONSTRAINT INTEG_8: @@ -57,11 +49,11 @@ CONSTRAINT TEST: CONSTRAINT INTEG_9: CHECK (c2>c1) CONSTRAINT TEST3: -CHECK (NOT c3>c1)""" - -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +CHECK (NOT c3>c1) +""" +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/table/create/test_05.py b/tests/functional/table/create/test_05.py index d99694c1..7d90309c 100644 --- a/tests/functional/table/create/test_05.py +++ b/tests/functional/table/create/test_05.py @@ -1,48 +1,39 @@ #coding:utf-8 -# -# id: functional.table.create.05 -# title: CREATE TABLE - create table with same name -# decription: CREATE TABLE - create table with same name -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# tracker_id: -# min_versions: [] -# versions: 3.0 -# qmid: functional.table.create.create_table_05 + +""" +ID: table.create-05 +TITLE: CREATE TABLE - create table with same name +DESCRIPTION: +FBTEST: functional.table.create.05 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE test( +init_script = """CREATE TABLE test( c1 SMALLINT ); -commit;""" +commit; +""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """CREATE TABLE test( +test_script = """CREATE TABLE test( c1 SMALLINT, c2 INTEGER -);""" +); +""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stderr_1 = """Statement failed, SQLSTATE = 42S01 +expected_stderr = """Statement failed, SQLSTATE = 42S01 unsuccessful metadata update -CREATE TABLE TEST failed -Table TEST already exists """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - +def test_1(act: Action): + act.expected_stderr = expected_stderr + act.execute() + assert act.clean_stderr == act.clean_expected_stderr diff --git a/tests/functional/table/create/test_06.py b/tests/functional/table/create/test_06.py index e805707b..7249643e 100644 --- a/tests/functional/table/create/test_06.py +++ b/tests/functional/table/create/test_06.py @@ -1,45 +1,34 @@ #coding:utf-8 -# -# id: functional.table.create.06 -# title: CREATE TABLE - two column with same name -# decription: CREATE TABLE - two column with same name -# -# Dependencies: -# CREATE DATABASE -# tracker_id: -# min_versions: [] -# versions: 3.0 -# qmid: functional.table.create.create_table_06 + +""" +ID: table.create-06 +TITLE: CREATE TABLE - two column with same 
name +DESCRIPTION: +FBTEST: functional.table.create.06 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """CREATE TABLE test( +test_script = """CREATE TABLE test( c1 SMALLINT, c1 INTEGER ); """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stderr_1 = """Statement failed, SQLSTATE = 23000 +expected_stderr = """Statement failed, SQLSTATE = 23000 unsuccessful metadata update -CREATE TABLE TEST failed -violation of PRIMARY or UNIQUE KEY constraint "RDB$INDEX_15" on table "RDB$RELATION_FIELDS" --Problematic key value is ("RDB$FIELD_NAME" = 'C1', "RDB$RELATION_NAME" = 'TEST')""" +-Problematic key value is ("RDB$FIELD_NAME" = 'C1', "RDB$RELATION_NAME" = 'TEST') +""" @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - +def test_1(act: Action): + act.expected_stderr = expected_stderr + act.execute() + assert act.clean_stderr == act.clean_expected_stderr diff --git a/tests/functional/table/create/test_07.py b/tests/functional/table/create/test_07.py index 89bd33ae..49af3f41 100644 --- a/tests/functional/table/create/test_07.py +++ b/tests/functional/table/create/test_07.py @@ -1,35 +1,25 @@ #coding:utf-8 -# -# id: functional.table.create.07 -# title: CREATE TABLE - unknown datatype (domain) -# decription: CREATE TABLE - unknown datatype (domain) -# -# Dependencies: -# CREATE DATABASE -# tracker_id: -# min_versions: [] -# versions: 3.0 -# qmid: functional.table.create.create_table_07 + +""" +ID: table.create-07 +TITLE: CREATE TABLE - unknown datatype (domain) +DESCRIPTION: +FBTEST: functional.table.create.07 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """CREATE TABLE test( +test_script = """CREATE TABLE test( c1 unk_domain -);""" +); +""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stderr_1 = """Statement failed, SQLSTATE = 42000 +expected_stderr = """Statement failed, SQLSTATE = 42000 unsuccessful metadata update -CREATE TABLE TEST failed -SQL error code = -607 @@ -38,8 +28,7 @@ unsuccessful metadata update """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - +def test_1(act: Action): + act.expected_stderr = expected_stderr + act.execute() + assert act.clean_stderr == act.clean_expected_stderr diff --git a/tests/functional/tabloid/core_3611_aux_test.py b/tests/functional/tabloid/core_3611_aux_test.py index d3df0f89..3cd7be03 100644 --- a/tests/functional/tabloid/core_3611_aux_test.py +++ b/tests/functional/tabloid/core_3611_aux_test.py @@ -1,26 +1,19 @@ #coding:utf-8 -# -# id: functional.tabloid.core_3611_aux -# title: Wrong data while retrieving from CTEs (or derived tables) with same column names -# decription: See another sample in this ticket (by dimitr, 30/Oct/12 07:13 PM) -# tracker_id: CORE-3611 -# min_versions: ['2.5.2'] -# versions: 2.5.2 -# 
qmid: None + +""" +ID: tabloid.core-3611-aux +TITLE: Wrong data while retrieving from CTEs (or derived tables) with same column names +DESCRIPTION: + See another sample in this ticket (by dimitr, 30/Oct/12 07:13 PM) +FBTEST: functional.tabloid.core_3611_aux +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5.2 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set planonly; with tab as ( @@ -33,18 +26,17 @@ test_script_1 = """ ; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stderr_1 = """ +expected_stderr = """ Statement failed, SQLSTATE = 42000 Dynamic SQL Error -SQL error code = -104 -Invalid expression in the select list (not contained in either an aggregate function or the GROUP BY clause) """ -@pytest.mark.version('>=2.5.2') -def test_1(act_1: Action): - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stderr = expected_stderr + act.execute() + assert act.clean_stderr == act.clean_expected_stderr diff --git a/tests/functional/tabloid/test_arithmetic_cast_float_to_int_as_round.py b/tests/functional/tabloid/test_arithmetic_cast_float_to_int_as_round.py index 90c8220b..b8f65a1a 100644 --- a/tests/functional/tabloid/test_arithmetic_cast_float_to_int_as_round.py +++ b/tests/functional/tabloid/test_arithmetic_cast_float_to_int_as_round.py @@ -1,38 +1,30 @@ #coding:utf-8 -# -# id: functional.tabloid.arithmetic_cast_float_to_int_as_round -# title: Result of CAST for numbers is implementation defined -# decription: See also: sql.ru/forum/actualutils.aspx?action=gotomsg&tid=1062610&msg=15214333 -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5.0 -# qmid: None + +""" +ID: tabloid.arithmetic-cast-float-to-int-as-round +TITLE: Result of CAST for numbers is implementation defined +DESCRIPTION: + See also: sql.ru/forum/actualutils.aspx?action=gotomsg&tid=1062610&msg=15214333 +FBTEST: functional.tabloid.arithmetic_cast_float_to_int_as_round +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set list on; select cast( sqrt(24) as smallint) casted_sqrt from rdb$database; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ CASTED_SQRT 5 """ -@pytest.mark.version('>=2.5.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/tabloid/test_arithmetic_numexpr_eval_dialect_1.py b/tests/functional/tabloid/test_arithmetic_numexpr_eval_dialect_1.py index ea6758f3..d5946253 100644 --- a/tests/functional/tabloid/test_arithmetic_numexpr_eval_dialect_1.py +++ b/tests/functional/tabloid/test_arithmetic_numexpr_eval_dialect_1.py @@ -1,38 +1,30 @@ 
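The arithmetic_cast_float_to_int_as_round test above documents that casting an approximate value to an integer type rounds in Firebird rather than truncating: sqrt(24) is about 4.899 and the expected result is 5. A small companion query (a sketch only, not part of the patch) that makes the contrast with explicit truncation visible:

cast_demo = """
    set list on;
    select cast(sqrt(24) as smallint)        as rounded_cast,   -- 5
           cast(floor(sqrt(24)) as smallint) as truncated_cast  -- 4
    from rdb$database;
"""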
#coding:utf-8 -# -# id: functional.tabloid.arithmetic_numexpr_eval_dialect_1 -# title: Check result of integer division on dialect 1. -# decription: Was fixed in 2.1, see: sql.ru/forum/actualutils.aspx?action=gotomsg&tid=708324&msg=7865013 -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: tabloid.arithmetic-numexpr-eval-dialect-1 +TITLE: Check result of integer division on dialect 1. +DESCRIPTION: + Was fixed in 2.1, see: sql.ru/forum/actualutils.aspx?action=gotomsg&tid=708324&msg=7865013 +FBTEST: functional.tabloid.arithmetic_numexpr_eval_dialect_1 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory(page_size=4096, sql_dialect=1) -substitutions_1 = [('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(page_size=4096, sql_dialect=1, init=init_script_1) - -test_script_1 = """ +test_script = """ set list on; select 36/-4/3 d from rdb$database; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ D -3.000000000000000 """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/tabloid/test_arithmetic_numexpr_eval_dialect_3.py b/tests/functional/tabloid/test_arithmetic_numexpr_eval_dialect_3.py index f0306140..461625ec 100644 --- a/tests/functional/tabloid/test_arithmetic_numexpr_eval_dialect_3.py +++ b/tests/functional/tabloid/test_arithmetic_numexpr_eval_dialect_3.py @@ -1,38 +1,30 @@ #coding:utf-8 -# -# id: functional.tabloid.arithmetic_numexpr_eval_dialect_3 -# title: Check result of integer division on dialect 3. -# decription: Was fixed in 2.1, see: sql.ru/forum/actualutils.aspx?action=gotomsg&tid=708324&msg=7865013 -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: tabloid.arithmetic-numexpr-eval-dialect-3 +TITLE: Check result of integer division on dialect 3. 
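The dialect-1 test above expects -3.000000000000000 because dialect 1 evaluates the division of integers in floating point, while the dialect-3 counterpart that follows expects a plain -3 from pure integer arithmetic (36 / -4 = -9, then -9 / 3 = -3). When a fractional result is wanted under dialect 3, the usual approach is to make one operand a scaled numeric, as in this sketch (not part of the patch):

division_demo = """
    set list on;
    select 7 / 2   as int_div,  -- 3   : both operands are integers
           7.0 / 2 as num_div   -- 3.5 : NUMERIC operand forces scaled division
    from rdb$database;
"""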
+DESCRIPTION: + Was fixed in 2.1, see: sql.ru/forum/actualutils.aspx?action=gotomsg&tid=708324&msg=7865013 +FBTEST: functional.tabloid.arithmetic_numexpr_eval_dialect_3 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory(page_size=4096) -substitutions_1 = [('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set list on; select 36/-4/3 d from rdb$database; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ D -3 """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/tabloid/test_bus_3103_windowed_funcs.py b/tests/functional/tabloid/test_bus_3103_windowed_funcs.py index 725e2cf4..487c6709 100644 --- a/tests/functional/tabloid/test_bus_3103_windowed_funcs.py +++ b/tests/functional/tabloid/test_bus_3103_windowed_funcs.py @@ -1,26 +1,18 @@ #coding:utf-8 -# -# id: functional.tabloid.bus_3103_windowed_funcs -# title: Query for test MAX()OVER(). -# decription: -# tracker_id: -# min_versions: ['3.0'] -# versions: 3.0 -# qmid: None + +""" +ID: tabloid.bus-3103-windowed-funcs +TITLE: Query for test MAX()OVER(). +DESCRIPTION: +FBTEST: functional.tabloid.bus_3103_windowed_funcs +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory(from_backup='tabloid-bus-3103.fbk') -substitutions_1 = [('=.*', '')] - -init_script_1 = """""" - -db_1 = db_factory(from_backup='tabloid-bus-3103.fbk', init=init_script_1) - -test_script_1 = """ +test_script = """ set list on; with dx as ( @@ -47,16 +39,15 @@ test_script_1 = """ having count(pid) > 0; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('=.*', '')]) -expected_stdout_1 = """ +expected_stdout = """ CNM ba CNT 13 """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/tabloid/test_comment_in_object_names.py b/tests/functional/tabloid/test_comment_in_object_names.py index a3f6d1be..828650bd 100644 --- a/tests/functional/tabloid/test_comment_in_object_names.py +++ b/tests/functional/tabloid/test_comment_in_object_names.py @@ -1,28 +1,19 @@ #coding:utf-8 -# -# id: functional.tabloid.comment_in_object_names -# title: All DB objects types must allow name COMMENT. Also, COMMENT ON ... must allow occurence of "comment" in it. -# decription: -# Original issue: https://granicus.if.org/pgbugs/15555 -# -# tracker_id: -# min_versions: ['3.0.0'] -# versions: 3.0 -# qmid: None + +""" +ID: tabloid.comment-in-object-names +TITLE: All DB objects types must allow name COMMENT. Also, COMMENT ON ... must allow occurence of "comment" in it. 
+DESCRIPTION: + Original issue: https://granicus.if.org/pgbugs/15555 +FBTEST: functional.tabloid.comment_in_object_names +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ create or alter user comment password 'comment'; create collation comment for utf8 from unicode case insensitive 'NUMERIC-SORT=1'; @@ -112,10 +103,8 @@ test_script_1 = """ """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - +act = isql_act('db', test_script) @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.execute() - +def test_1(act: Action): + act.execute() diff --git a/tests/functional/tabloid/test_dbp_0951_multiple_nested_cte.py b/tests/functional/tabloid/test_dbp_0951_multiple_nested_cte.py index 1464d9f1..2124b040 100644 --- a/tests/functional/tabloid/test_dbp_0951_multiple_nested_cte.py +++ b/tests/functional/tabloid/test_dbp_0951_multiple_nested_cte.py @@ -1,26 +1,18 @@ #coding:utf-8 -# -# id: functional.tabloid.dbp_0951_multiple_nested_cte -# title: Query for test multiple CTEs -# decription: -# tracker_id: -# min_versions: ['3.0'] -# versions: 3.0 -# qmid: None + +""" +ID: tabloid.dbp-0951-multiple-nested-cte +TITLE: Query for test multiple CTEs +DESCRIPTION: +FBTEST: functional.tabloid.dbp_0951_multiple_nested_cte +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory(from_backup='tabloid-dbp-0951.fbk') -substitutions_1 = [('=.*', '')] - -init_script_1 = """""" - -db_1 = db_factory(from_backup='tabloid-dbp-0951.fbk', init=init_script_1) - -test_script_1 = """ +test_script = """ set list on; with dup as(select 1 i from rdb$database union all select 2 from rdb$database) ,mx as @@ -267,9 +259,9 @@ test_script_1 = """ ; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('=.*', '')]) -expected_stdout_1 = """ +expected_stdout = """ C 2 D 1 S 4,37 @@ -297,8 +289,7 @@ expected_stdout_1 = """ """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/tabloid/test_dbp_1697_windowed_funcs.py b/tests/functional/tabloid/test_dbp_1697_windowed_funcs.py index 83658270..d6722cf9 100644 --- a/tests/functional/tabloid/test_dbp_1697_windowed_funcs.py +++ b/tests/functional/tabloid/test_dbp_1697_windowed_funcs.py @@ -1,26 +1,18 @@ #coding:utf-8 -# -# id: functional.tabloid.dbp_1697_windowed_funcs -# title: Query for test SUM()OVER() and COUNT()OVER(). -# decription: -# tracker_id: -# min_versions: ['3.0'] -# versions: 3.0 -# qmid: None + +""" +ID: tabloid.dbp-1697-windowed-funcs +TITLE: Query for test SUM()OVER() and COUNT()OVER(). 
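The comment_in_object_names test above creates a whole set of objects literally named COMMENT, which works because COMMENT is a keyword but not a reserved word. The smallest illustration of the same point, as a sketch that is not part of the patch:

comment_demo = """
    create table comment(comment integer);
    comment on table comment is 'a table named comment, described by a comment';
    comment on column comment.comment is 'comment';
    commit;
"""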
+DESCRIPTION: +FBTEST: functional.tabloid.dbp_1697_windowed_funcs +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory(from_backup='tabloid-dbp-1697.fbk') -substitutions_1 = [('=.*', '')] - -init_script_1 = """""" - -db_1 = db_factory(from_backup='tabloid-dbp-1697.fbk', init=init_script_1) - -test_script_1 = """ +test_script = """ delete from tresult; -------------------- insert into tresult @@ -99,10 +91,8 @@ test_script_1 = """ select id,count(*) from tresult group by id having count(*)<>2; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - +act = isql_act('db', test_script, substitutions=[('=.*', '')]) @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.execute() - +def test_1(act: Action): + act.execute() diff --git a/tests/functional/tabloid/test_dbp_1940_20040130_1740.py b/tests/functional/tabloid/test_dbp_1940_20040130_1740.py index 95cfb32a..7c7d0209 100644 --- a/tests/functional/tabloid/test_dbp_1940_20040130_1740.py +++ b/tests/functional/tabloid/test_dbp_1940_20040130_1740.py @@ -1,26 +1,18 @@ #coding:utf-8 -# -# id: functional.tabloid.dbp_1940_20040130_1740 -# title: Common SQL. Check correctness of the results -# decription: -# tracker_id: -# min_versions: ['2.5.3'] -# versions: 2.5.3 -# qmid: None + +""" +ID: tabloid.dbp-1940-20040130-1740 +TITLE: Common SQL. Check correctness of the results +DESCRIPTION: +FBTEST: functional.tabloid.dbp_1940_20040130_1740 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5.3 -# resources: None +db = db_factory(from_backup='tabloid-dbp-1940.fbk') -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(from_backup='tabloid-dbp-1940.fbk', init=init_script_1) - -test_script_1 = """ +test_script = """ set list on; with recursive a1 as ( @@ -74,9 +66,9 @@ test_script_1 = """ ; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ F01 2003-01-01 01:11:00.0000 F02 1 F01 2003-01-01 01:11:01.0000 @@ -139,9 +131,8 @@ expected_stdout_1 = """ F02 4 """ -@pytest.mark.version('>=2.5.3') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/tabloid/test_dbp_1940_20061108_2141.py b/tests/functional/tabloid/test_dbp_1940_20061108_2141.py index 9fb78c8a..354dd2da 100644 --- a/tests/functional/tabloid/test_dbp_1940_20061108_2141.py +++ b/tests/functional/tabloid/test_dbp_1940_20061108_2141.py @@ -1,26 +1,18 @@ #coding:utf-8 -# -# id: functional.tabloid.dbp_1940_20061108_2141 -# title: Common SQL. Check correctness of the results -# decription: -# tracker_id: -# min_versions: ['3.0'] -# versions: 3.0 -# qmid: None + +""" +ID: tabloid.dbp-1940-20061108-2141 +TITLE: Common SQL. 
Check correctness of the results +DESCRIPTION: +FBTEST: functional.tabloid.dbp_1940_20061108_2141 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory(from_backup='tabloid-dbp-1940.fbk') -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(from_backup='tabloid-dbp-1940.fbk', init=init_script_1) - -test_script_1 = """ +test_script = """ set list on; select dateadd(n-1 second to dat) f01 @@ -46,9 +38,9 @@ test_script_1 = """ ; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ F01 2003-01-01 01:11:00.0000 F02 1 F01 2003-01-01 01:11:01.0000 @@ -112,8 +104,7 @@ expected_stdout_1 = """ """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/tabloid/test_dbp_1940_20080912_1005.py b/tests/functional/tabloid/test_dbp_1940_20080912_1005.py index 74ac8c24..e9db40a4 100644 --- a/tests/functional/tabloid/test_dbp_1940_20080912_1005.py +++ b/tests/functional/tabloid/test_dbp_1940_20080912_1005.py @@ -1,26 +1,18 @@ #coding:utf-8 -# -# id: functional.tabloid.dbp_1940_20080912_1005 -# title: Common SQL. Check correctness of the results -# decription: -# tracker_id: -# min_versions: ['3.0'] -# versions: 3.0 -# qmid: None + +""" +ID: tabloid.dbp-1940-20080912-1005 +TITLE: Common SQL. Check correctness of the results +DESCRIPTION: +FBTEST: functional.tabloid.dbp_1940_20080912_1005 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory(from_backup='tabloid-dbp-1940.fbk') -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(from_backup='tabloid-dbp-1940.fbk', init=init_script_1) - -test_script_1 = """ +test_script = """ set list on; select x.t as f01 @@ -55,9 +47,9 @@ test_script_1 = """ order by 1,2; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ F01 2003-01-01 01:11:00.0000 F02 1 F01 2003-01-01 01:11:01.0000 @@ -121,8 +113,7 @@ expected_stdout_1 = """ """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/tabloid/test_dbp_1940_20090521_1520.py b/tests/functional/tabloid/test_dbp_1940_20090521_1520.py index 93d0260d..90a49ce2 100644 --- a/tests/functional/tabloid/test_dbp_1940_20090521_1520.py +++ b/tests/functional/tabloid/test_dbp_1940_20090521_1520.py @@ -1,26 +1,18 @@ #coding:utf-8 -# -# id: functional.tabloid.dbp_1940_20090521_1520 -# title: Common SQL. Check correctness of the results -# decription: -# tracker_id: -# min_versions: ['3.0'] -# versions: 3.0 -# qmid: None + +""" +ID: tabloid.dbp-1940-20090521-1520 +TITLE: Common SQL. 
Check correctness of the results +DESCRIPTION: +FBTEST: functional.tabloid.dbp_1940_20090521_1520 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory(from_backup='tabloid-dbp-1940.fbk') -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(from_backup='tabloid-dbp-1940.fbk', init=init_script_1) - -test_script_1 = """ +test_script = """ set list on; with recursive n as ( @@ -49,9 +41,9 @@ test_script_1 = """ ; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ F01 2003-01-01 01:11:00.0000 F02 1 F01 2003-01-01 01:11:01.0000 @@ -115,8 +107,7 @@ expected_stdout_1 = """ """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/tabloid/test_dbp_1940_20110426_1906.py b/tests/functional/tabloid/test_dbp_1940_20110426_1906.py index ecd42dd6..39466a1e 100644 --- a/tests/functional/tabloid/test_dbp_1940_20110426_1906.py +++ b/tests/functional/tabloid/test_dbp_1940_20110426_1906.py @@ -1,26 +1,18 @@ #coding:utf-8 -# -# id: functional.tabloid.dbp_1940_20110426_1906 -# title: Common SQL. Check correctness of the results -# decription: -# tracker_id: -# min_versions: ['3.0'] -# versions: 3.0 -# qmid: None + +""" +ID: tabloid.dbp-1940-20110426-1906 +TITLE: Common SQL. Check correctness of the results +DESCRIPTION: +FBTEST: functional.tabloid.dbp_1940_20110426_1906 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory(from_backup='tabloid-dbp-1940.fbk') -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(from_backup='tabloid-dbp-1940.fbk', init=init_script_1) - -test_script_1 = """ +test_script = """ set list on; with recursive n as ( @@ -66,9 +58,9 @@ test_script_1 = """ ; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ F01 2003-01-01 01:11:00.0000 F02 1 F01 2003-01-01 01:11:01.0000 @@ -132,8 +124,7 @@ expected_stdout_1 = """ """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/tabloid/test_dbp_1940_20110725_1159.py b/tests/functional/tabloid/test_dbp_1940_20110725_1159.py index 405f6a6f..a020a195 100644 --- a/tests/functional/tabloid/test_dbp_1940_20110725_1159.py +++ b/tests/functional/tabloid/test_dbp_1940_20110725_1159.py @@ -1,26 +1,18 @@ #coding:utf-8 -# -# id: functional.tabloid.dbp_1940_20110725_1159 -# title: Common SQL. Check correctness of the results -# decription: -# tracker_id: -# min_versions: ['3.0'] -# versions: 3.0 -# qmid: None + +""" +ID: tabloid.dbp-1940-20110725-1159 +TITLE: Common SQL. 
Check correctness of the results +DESCRIPTION: +FBTEST: functional.tabloid.dbp_1940_20110725_1159 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory(from_backup='tabloid-dbp-1940.fbk') -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(from_backup='tabloid-dbp-1940.fbk', init=init_script_1) - -test_script_1 = """ +test_script = """ set list on; with a as ( @@ -73,9 +65,9 @@ test_script_1 = """ ; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ F01 2003-01-01 01:11:00.0000 F02 1 F01 2003-01-01 01:11:01.0000 @@ -139,8 +131,7 @@ expected_stdout_1 = """ """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/tabloid/test_dbp_1940_20130412_1008.py b/tests/functional/tabloid/test_dbp_1940_20130412_1008.py index a247b129..a07c09ea 100644 --- a/tests/functional/tabloid/test_dbp_1940_20130412_1008.py +++ b/tests/functional/tabloid/test_dbp_1940_20130412_1008.py @@ -1,26 +1,18 @@ #coding:utf-8 -# -# id: functional.tabloid.dbp_1940_20130412_1008 -# title: Common SQL. Check correctness of the results -# decription: -# tracker_id: -# min_versions: ['3.0'] -# versions: 3.0 -# qmid: None + +""" +ID: tabloid.dbp-1940-20130412-1008 +TITLE: Common SQL. Check correctness of the results +DESCRIPTION: +FBTEST: functional.tabloid.dbp_1940_20130412_1008 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory(from_backup='tabloid-dbp-1940.fbk') -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(from_backup='tabloid-dbp-1940.fbk', init=init_script_1) - -test_script_1 = """ +test_script = """ set list on; with recursive s as ( @@ -65,9 +57,9 @@ test_script_1 = """ ; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ F01 2003-01-01 01:11:00.0000 F02 1 F01 2003-01-01 01:11:01.0000 @@ -131,8 +123,7 @@ expected_stdout_1 = """ """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/tabloid/test_dbp_1940_20130831_2039.py b/tests/functional/tabloid/test_dbp_1940_20130831_2039.py index df8e65bf..929a2ecb 100644 --- a/tests/functional/tabloid/test_dbp_1940_20130831_2039.py +++ b/tests/functional/tabloid/test_dbp_1940_20130831_2039.py @@ -1,26 +1,18 @@ #coding:utf-8 -# -# id: functional.tabloid.dbp_1940_20130831_2039 -# title: Common SQL. Check correctness of the results -# decription: -# tracker_id: -# min_versions: ['3.0'] -# versions: 3.0 -# qmid: None + +""" +ID: tabloid.dbp-1940-20130831-2039 +TITLE: Common SQL. 
Check correctness of the results +DESCRIPTION: +FBTEST: functional.tabloid.dbp_1940_20130831_2039 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory(from_backup='tabloid-dbp-1940.fbk') -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(from_backup='tabloid-dbp-1940.fbk', init=init_script_1) - -test_script_1 = """ +test_script = """ set list on; with recursive n as ( @@ -54,9 +46,9 @@ test_script_1 = """ ; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ F01 2003-01-01 01:11:00.0000 F02 1 F01 2003-01-01 01:11:01.0000 @@ -120,8 +112,7 @@ expected_stdout_1 = """ """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/tabloid/test_dbp_1940_20130912_1908.py b/tests/functional/tabloid/test_dbp_1940_20130912_1908.py index 668b79f8..c4d02ba2 100644 --- a/tests/functional/tabloid/test_dbp_1940_20130912_1908.py +++ b/tests/functional/tabloid/test_dbp_1940_20130912_1908.py @@ -1,26 +1,18 @@ #coding:utf-8 -# -# id: functional.tabloid.dbp_1940_20130912_1908 -# title: Common SQL. Check correctness of the results -# decription: -# tracker_id: -# min_versions: ['3.0'] -# versions: 3.0 -# qmid: None + +""" +ID: tabloid.dbp-1940-20130912-1908 +TITLE: Common SQL. Check correctness of the results +DESCRIPTION: +FBTEST: functional.tabloid.dbp_1940_20130912_1908 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory(from_backup='tabloid-dbp-1940.fbk') -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(from_backup='tabloid-dbp-1940.fbk', init=init_script_1) - -test_script_1 = """ +test_script = """ set list on; with bx as( select tm,sum(bv)over(partition by vi order by tm)s from bbb) @@ -51,9 +43,9 @@ test_script_1 = """ order by 1,2; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ F01 2003-01-01 01:11:00.0000 F02 1 F01 2003-01-01 01:11:01.0000 @@ -117,8 +109,7 @@ expected_stdout_1 = """ """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/tabloid/test_dbp_2146_distinct_not_in.py b/tests/functional/tabloid/test_dbp_2146_distinct_not_in.py index c5098c62..19e4fc71 100644 --- a/tests/functional/tabloid/test_dbp_2146_distinct_not_in.py +++ b/tests/functional/tabloid/test_dbp_2146_distinct_not_in.py @@ -1,26 +1,18 @@ #coding:utf-8 -# -# id: functional.tabloid.dbp_2146_distinct_not_in -# title: Common SQL. Check correctness of the results -# decription: -# tracker_id: -# min_versions: ['2.5'] -# versions: 2.5 -# qmid: None + +""" +ID: tabloid.dbp-2146-distinct-not-in +TITLE: Common SQL. 
Check correctness of the results +DESCRIPTION: +FBTEST: functional.tabloid.dbp_2146_distinct_not_in +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory(from_backup='tabloid-dbp-2146.fbk') -substitutions_1 = [('=.*', '')] - -init_script_1 = """""" - -db_1 = db_factory(from_backup='tabloid-dbp-2146.fbk', init=init_script_1) - -test_script_1 = """ +test_script = """ set list on; with eset @@ -88,17 +80,16 @@ test_script_1 = """ ; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('=.*', '')]) -expected_stdout_1 = """ +expected_stdout = """ ARI 6 TBI 10 CNT 3 """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/tabloid/test_dbp_4137_combo_full_join_and_windowed_funcs.py b/tests/functional/tabloid/test_dbp_4137_combo_full_join_and_windowed_funcs.py index ea4be571..61740cad 100644 --- a/tests/functional/tabloid/test_dbp_4137_combo_full_join_and_windowed_funcs.py +++ b/tests/functional/tabloid/test_dbp_4137_combo_full_join_and_windowed_funcs.py @@ -1,26 +1,18 @@ #coding:utf-8 -# -# id: functional.tabloid.dbp_4137_combo_full_join_and_windowed_funcs -# title: Common SQL. Check correctness of the results -# decription: -# tracker_id: -# min_versions: ['3.0'] -# versions: 3.0 -# qmid: None + +""" +ID: tabloid.dbp-4137-combo-full-join-and-windowed-funcs +TITLE: Common SQL. Check correctness of the results +DESCRIPTION: +FBTEST: functional.tabloid.dbp_4137_combo_full_join_and_windowed_funcs +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory(from_backup='tabloid-dbp-4137.fbk') -substitutions_1 = [('=.*', '')] - -init_script_1 = """""" - -db_1 = db_factory(from_backup='tabloid-dbp-4137.fbk', init=init_script_1) - -test_script_1 = """ +test_script = """ select x.ari ari_x, y.ari ari_y, z.ari ari_z from( select ari @@ -71,9 +63,9 @@ test_script_1 = """ ; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('=.*', '')]) -expected_stdout_1 = """ +expected_stdout = """ ARI_X ARI_Y ARI_Z 2 2 2 53 53 53 @@ -81,8 +73,7 @@ expected_stdout_1 = """ """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/tabloid/test_dbp_4391_combo_full_join_and_windowed_funcs.py b/tests/functional/tabloid/test_dbp_4391_combo_full_join_and_windowed_funcs.py index 4856c474..3ba50e38 100644 --- a/tests/functional/tabloid/test_dbp_4391_combo_full_join_and_windowed_funcs.py +++ b/tests/functional/tabloid/test_dbp_4391_combo_full_join_and_windowed_funcs.py @@ -1,26 +1,18 @@ #coding:utf-8 -# -# id: functional.tabloid.dbp_4391_combo_full_join_and_windowed_funcs -# title: Common SQL. 
Check correctness of the results -# decription: -# tracker_id: -# min_versions: ['3.0'] -# versions: 3.0 -# qmid: None + +""" +ID: tabloid.dbp-4391-combo-full-join-and-windowed-funcs +TITLE: Common SQL. Check correctness of the results +DESCRIPTION: +FBTEST: functional.tabloid.dbp_4391_combo_full_join_and_windowed_funcs +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory(from_backup='tabloid-dbp-4391.fbk') -substitutions_1 = [('=.*', '')] - -init_script_1 = """""" - -db_1 = db_factory(from_backup='tabloid-dbp-4391.fbk', init=init_script_1) - -test_script_1 = """ +test_script = """ set list on; with n as ( @@ -69,15 +61,14 @@ test_script_1 = """ ; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('=.*', '')]) -expected_stdout_1 = """ +expected_stdout = """ CNT 0 """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/tabloid/test_dbp_4391_combo_full_join_and_windowed_funcs2.py b/tests/functional/tabloid/test_dbp_4391_combo_full_join_and_windowed_funcs2.py index bd99d77c..2239fadd 100644 --- a/tests/functional/tabloid/test_dbp_4391_combo_full_join_and_windowed_funcs2.py +++ b/tests/functional/tabloid/test_dbp_4391_combo_full_join_and_windowed_funcs2.py @@ -1,26 +1,18 @@ #coding:utf-8 -# -# id: functional.tabloid.dbp_4391_combo_full_join_and_windowed_funcs2 -# title: Common SQL. Check correctness of the results -# decription: -# tracker_id: -# min_versions: ['3.0'] -# versions: 3.0 -# qmid: None + +""" +ID: tabloid.dbp-4391-combo-full-join-and-windowed-funcs2 +TITLE: Common SQL. Check correctness of the results +DESCRIPTION: +FBTEST: functional.tabloid.dbp_4391_combo_full_join_and_windowed_funcs2 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory(from_backup='tabloid-dbp-4391.fbk') -substitutions_1 = [('=.*', '')] - -init_script_1 = """""" - -db_1 = db_factory(from_backup='tabloid-dbp-4391.fbk', init=init_script_1) - -test_script_1 = """ +test_script = """ set list on; with recursive nx as (select 0 x from rdb$database union all select r.x+1 from nx r where r.x<2) @@ -85,15 +77,14 @@ test_script_1 = """ ; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('=.*', '')]) -expected_stdout_1 = """ +expected_stdout = """ CNT 0 """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/tabloid/test_dbp_5125_windowed_funcs.py b/tests/functional/tabloid/test_dbp_5125_windowed_funcs.py index d56a4725..7c6ea022 100644 --- a/tests/functional/tabloid/test_dbp_5125_windowed_funcs.py +++ b/tests/functional/tabloid/test_dbp_5125_windowed_funcs.py @@ -1,26 +1,18 @@ #coding:utf-8 -# -# id: functional.tabloid.dbp_5125_windowed_funcs -# title: Common SQL. 
Check correctness of the results -# decription: -# tracker_id: -# min_versions: ['3.0'] -# versions: 3.0 -# qmid: None + +""" +ID: tabloid.dbp-5125-windowed-funcs +TITLE: Common SQL. Check correctness of the results +DESCRIPTION: +FBTEST: functional.tabloid.dbp_5125_windowed_funcs +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory(from_backup='tabloid-dbp-5125.fbk') -substitutions_1 = [('=.*', '')] - -init_script_1 = """""" - -db_1 = db_factory(from_backup='tabloid-dbp-5125.fbk', init=init_script_1) - -test_script_1 = """ +test_script = """ with cx as( select ari, tbi, @@ -56,9 +48,9 @@ test_script_1 = """ ; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('=.*', '')]) -expected_stdout_1 = """ +expected_stdout = """ VX Q1 Q2 26 10 9 39 14 13 @@ -69,8 +61,7 @@ expected_stdout_1 = """ """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/tabloid/test_dbp_7029_heavy_test_for_windowed_funcs.py b/tests/functional/tabloid/test_dbp_7029_heavy_test_for_windowed_funcs.py index d382c21d..35157506 100644 --- a/tests/functional/tabloid/test_dbp_7029_heavy_test_for_windowed_funcs.py +++ b/tests/functional/tabloid/test_dbp_7029_heavy_test_for_windowed_funcs.py @@ -1,26 +1,18 @@ #coding:utf-8 -# -# id: functional.tabloid.dbp_7029_heavy_test_for_windowed_funcs -# title: Common SQL. Check correctness of the results -# decription: -# tracker_id: -# min_versions: ['3.0'] -# versions: 3.0 -# qmid: None + +""" +ID: tabloid.dbp-7029-heavy-test-for-windowed-funcs +TITLE: Common SQL. Check correctness of the results +DESCRIPTION: +FBTEST: functional.tabloid.dbp_7029_heavy_test_for_windowed_funcs +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory(from_backup='tabloid-dbp-7029.fbk') -substitutions_1 = [('=.*', '')] - -init_script_1 = """""" - -db_1 = db_factory(from_backup='tabloid-dbp-7029.fbk', init=init_script_1) - -test_script_1 = """ +test_script = """ set list on; with b as @@ -137,9 +129,9 @@ test_script_1 = """ order by 1,2,3; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('=.*', '')]) -expected_stdout_1 = """ +expected_stdout = """ F01 0,0,0 F02 9 F03 9,10,58,59,60,108,109,110,1158 @@ -386,8 +378,7 @@ expected_stdout_1 = """ """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/tabloid/test_dbp_7114_windowed_funcs.py b/tests/functional/tabloid/test_dbp_7114_windowed_funcs.py index 4b66ab04..7408890a 100644 --- a/tests/functional/tabloid/test_dbp_7114_windowed_funcs.py +++ b/tests/functional/tabloid/test_dbp_7114_windowed_funcs.py @@ -1,26 +1,18 @@ #coding:utf-8 -# -# id: functional.tabloid.dbp_7114_windowed_funcs -# title: Common SQL. 
Check correctness of the results -# decription: -# tracker_id: -# min_versions: ['3.0'] -# versions: 3.0 -# qmid: None + +""" +ID: tabloid.dbp-7114-windowed-funcs +TITLE: Common SQL. Check correctness of the results +DESCRIPTION: +FBTEST: functional.tabloid.dbp_7114_windowed_funcs +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory(from_backup='tabloid-dbp-7114.fbk') -substitutions_1 = [('=.*', '')] - -init_script_1 = """""" - -db_1 = db_factory(from_backup='tabloid-dbp-7114.fbk', init=init_script_1) - -test_script_1 = """ +test_script = """ delete from tresult; insert into tresult(ip_a, ip_b, cnt) select @@ -117,15 +109,14 @@ test_script_1 = """ set list off; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('=.*', '')]) -expected_stdout_1 = """ +expected_stdout = """ CNT_MISM 0 """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/tabloid/test_dml_privileges_sufficiency.py b/tests/functional/tabloid/test_dml_privileges_sufficiency.py index 1921da9b..291ebab4 100644 --- a/tests/functional/tabloid/test_dml_privileges_sufficiency.py +++ b/tests/functional/tabloid/test_dml_privileges_sufficiency.py @@ -1,37 +1,29 @@ #coding:utf-8 -# -# id: functional.tabloid.dml_privileges_sufficiency -# title: Verify sufficiency of privileges for performing DML actions. Verify that RETURNING clause can not be used without SELECT privilege. -# decription: -# Test creates three users (for I/U/D) and gives them initial privileges for INSERT, UPDATE and DELETE (but without SELECT). -# Then we check that each user: -# 1) can do "his" DML without using RETURNING clause and this action must pass; -# 2) can NOT do "his" DML with using RETURNING clause because of absense of SELETC privilege. -# After this we add SELECT privilege for all of them and repeat. All actions must pased in this case. -# -# Created by request of dimitr, letter 16.06.2020 13:54. -# Checked on 4.0.0.2066. -# ::: NB ::: -# Do NOT use this test on 4.0.0.2046 and 4.0.0.2048 - these snapshots have bug and will crash on this test. -# -# tracker_id: -# min_versions: ['4.0.0'] -# versions: 4.0 -# qmid: None + +""" +ID: tabloid.dml-privileges-sufficiency +TITLE: Verify sufficiency of privileges for performing DML actions. Verify that + RETURNING clause can not be used without SELECT privilege. +DESCRIPTION: + Test creates three users (for I/U/D) and gives them initial privileges for INSERT, UPDATE and DELETE (but without SELECT). + Then we check that each user: + 1) can do "his" DML without using RETURNING clause and this action must pass; + 2) can NOT do "his" DML with using RETURNING clause because of absense of SELETC privilege. + After this we add SELECT privilege for all of them and repeat. All actions must pased in this case. + + Created by request of dimitr, letter 16.06.2020 13:54. + Checked on 4.0.0.2066. + ::: NB ::: + Do NOT use this test on 4.0.0.2046 and 4.0.0.2048 - these snapshots have bug and will crash on this test. 
+FBTEST: functional.tabloid.dml_privileges_sufficiency +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() -substitutions_1 = [('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set wng off; set list on; @@ -42,12 +34,12 @@ test_script_1 = """ execute statement 'drop user tmp$modifier_ins' with autonomous transaction; when any do begin end end - + begin execute statement 'drop user tmp$modifier_upd' with autonomous transaction; when any do begin end end - + begin execute statement 'drop user tmp$modifier_del' with autonomous transaction; when any do begin end @@ -112,7 +104,7 @@ test_script_1 = """ rollback; - -- must PASS: we have privilege to add record and RETURNING clause + -- must PASS: we have privilege to add record and RETURNING clause -- does NOT contain anything from *table*: insert into test(id,x) values(gen_id(g,1), 1234567) returning pi(); rollback; @@ -145,7 +137,7 @@ test_script_1 = """ update test set x = 111 where id = 1; rollback; - -- must PASS: we have privilege to change column and RETURNING clause + -- must PASS: we have privilege to change column and RETURNING clause -- does NOT contain anything from *table*: update test set x = 0 returning pi(); rollback; @@ -252,9 +244,9 @@ test_script_1 = """ commit; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ WHOAMI TMP$MODIFIER_INS MSG Has only INSERT privilege @@ -293,7 +285,8 @@ expected_stdout_1 = """ ID 1 """ -expected_stderr_1 = """ + +expected_stderr = """ Statement failed, SQLSTATE = 28000 no permission for SELECT access to TABLE TEST -Effective user is TMP$MODIFIER_INS @@ -340,11 +333,9 @@ expected_stderr_1 = """ """ @pytest.mark.version('>=4.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.expected_stderr = expected_stderr + act.execute() + assert (act.clean_stdout == act.clean_expected_stdout and + act.clean_stderr == act.clean_expected_stderr) diff --git a/tests/functional/tabloid/test_eqc_136030.py b/tests/functional/tabloid/test_eqc_136030.py index b5f8c4eb..128b02c8 100644 --- a/tests/functional/tabloid/test_eqc_136030.py +++ b/tests/functional/tabloid/test_eqc_136030.py @@ -1,101 +1,34 @@ #coding:utf-8 -# -# id: functional.tabloid.eqc_136030 -# title: Check ability for preparing and then run query with parameters. Query should use ORDER-BY clause. -# decription: -# 02.02.2019: removed from DB metadata calls to UDFs - they are not used in this test but can not be used in FB 4.0 by default. -# Removed triggers because they have no deal here. -# Checked on: -# 3.0.5.33097: OK, 2.782s. -# 4.0.0.1421: OK, 3.642s. -# -# tracker_id: -# min_versions: ['3.0'] -# versions: 3.0 -# qmid: + +""" +ID: tabloid.eqc-136030 +TITLE: Check ability for preparing and then run query with parameters. Query should use ORDER-BY clause. +DESCRIPTION: +NOTES: +[02.02.2019] + removed from DB metadata calls to UDFs - they are not used in this test but can not be used in FB 4.0 by default. 
+ Removed triggers because they have no deal here. + Checked on: + 3.0.5.33097: OK, 2.782s. + 4.0.0.1421: OK, 3.642s. +FBTEST: functional.tabloid.eqc_136030 +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory() -substitutions_1 = [] +act = python_act('db') -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -# test_script_1 -#--- -# import os -# import zipfile -# -# os.environ["ISC_USER"] = 'SYSDBA' -# os.environ["ISC_PASSWORD"] = 'masterkey' -# -# db_conn.close() -# zf = zipfile.ZipFile( os.path.join(context['files_location'],'eqc136030.zip') ) -# zf.extractall( context['temp_directory'] ) -# zf.close() -# -# fbk = os.path.join(context['temp_directory'],'eqc136030.fbk') -# -# runProgram('gbak',['-rep',fbk, dsn]) -# -# script=""" -# set list on; -# set sqlda_display on; -# set planonly; -# -# select -# a.csoc, a.nreserc , a.coddoc , a.codgio , -# a.macchina, a.rec_upd, a.utente_upd, -# cast(a.fblc as integer) fblc, -# cast(a.fdel as integer) fdel, -# b.tipdoc, b.desdoc, b.fblc , c.tipgio, c.desgio , c.fblc -# from docgio a -# left join doctip (a.csoc, a.nreserc) b on ( a.coddoc = b.coddoc ) -# left join giotip (a.csoc, a.nreserc) c on (a.codgio = c.codgio) -# where -# a.csoc = ? -# and a.nreserc = ? -# order by a.codgio, a.coddoc; -# -# set planonly; -# set plan off; -# set sqlda_display off; -# -# select -# a.csoc, a.nreserc , a.coddoc , a.codgio , -# a.macchina, a.rec_upd, a.utente_upd, -# cast(a.fblc as integer) fblc, -# cast(a.fdel as integer) fdel, -# b.tipdoc, b.desdoc, b.fblc , c.tipgio, c.desgio , c.fblc -# from docgio a -# left join doctip (a.csoc, a.nreserc) b on ( a.coddoc = b.coddoc ) -# left join giotip (a.csoc, a.nreserc) c on (a.codgio = c.codgio) -# where -# a.csoc = 'DEM1' -- :csoc -# and a.nreserc = '' -- :nreserc -# order by a.codgio, a.coddoc; -# """ -# runProgram('isql',[dsn,'-q'],script) -# -# ############################### -# # Cleanup. 
-# os.remove(fbk) -#--- -act_1 = python_act('db_1', substitutions=substitutions_1) - -expected_stdout_1 = """ +expected_stdout = """ INPUT message field count: 2 01: sqltype: 448 VARYING scale: 0 subtype: 0 len: 4 charset: 0 NONE - : name: alias: - : table: owner: + : name: alias: + : table: owner: 02: sqltype: 448 VARYING scale: 0 subtype: 0 len: 2 charset: 0 NONE - : name: alias: - : table: owner: + : name: alias: + : table: owner: PLAN JOIN (JOIN (A ORDER DOCGIO_PK, B NATURAL), C NATURAL) @@ -123,10 +56,10 @@ expected_stdout_1 = """ : table: DOCGIO owner: SYSDBA 08: sqltype: 496 LONG scale: 0 subtype: 0 len: 4 : name: CAST alias: FBLC - : table: owner: + : table: owner: 09: sqltype: 496 LONG scale: 0 subtype: 0 len: 4 : name: CAST alias: FDEL - : table: owner: + : table: owner: 10: sqltype: 448 VARYING Nullable scale: 0 subtype: 0 len: 3 charset: 0 NONE : name: TIPDOC alias: TIPDOC : table: DOCTIP owner: SYSDBA @@ -148,7 +81,7 @@ expected_stdout_1 = """ CSOC DEM1 - NRESERC + NRESERC CODDOC AUT CODGIO CGB MACCHINA VAIO-ADAL @@ -164,7 +97,7 @@ expected_stdout_1 = """ FBLC CSOC DEM1 - NRESERC + NRESERC CODDOC CGE CODGIO CGB MACCHINA VAIO-ADAL @@ -180,7 +113,7 @@ expected_stdout_1 = """ FBLC CSOC DEM1 - NRESERC + NRESERC CODDOC CTI CODGIO CGB MACCHINA VAIO-ADAL @@ -196,7 +129,7 @@ expected_stdout_1 = """ FBLC CSOC DEM1 - NRESERC + NRESERC CODDOC FAA CODGIO CGB MACCHINA VAIO-ADAL @@ -212,7 +145,7 @@ expected_stdout_1 = """ FBLC CSOC DEM1 - NRESERC + NRESERC CODDOC FAV CODGIO CGB MACCHINA VAIO-ADAL @@ -228,7 +161,7 @@ expected_stdout_1 = """ FBLC CSOC DEM1 - NRESERC + NRESERC CODDOC FTA CODGIO CGB MACCHINA VAIO-ADAL @@ -244,7 +177,7 @@ expected_stdout_1 = """ FBLC CSOC DEM1 - NRESERC + NRESERC CODDOC FTV CODGIO CGB MACCHINA VAIO-ADAL @@ -260,7 +193,7 @@ expected_stdout_1 = """ FBLC CSOC DEM1 - NRESERC + NRESERC CODDOC FAA CODGIO RAC MACCHINA VAIO-ADAL @@ -276,7 +209,7 @@ expected_stdout_1 = """ FBLC CSOC DEM1 - NRESERC + NRESERC CODDOC FTA CODGIO RAC MACCHINA VAIO-ADAL @@ -292,7 +225,7 @@ expected_stdout_1 = """ FBLC CSOC DEM1 - NRESERC + NRESERC CODDOC NCA CODGIO RAC MACCHINA VAIO-ADAL @@ -308,7 +241,7 @@ expected_stdout_1 = """ FBLC CSOC DEM1 - NRESERC + NRESERC CODDOC NDA CODGIO RAC MACCHINA VAIO-ADAL @@ -324,7 +257,7 @@ expected_stdout_1 = """ FBLC CSOC DEM1 - NRESERC + NRESERC CODDOC FAV CODGIO RFV MACCHINA VAIO-ADAL @@ -340,7 +273,7 @@ expected_stdout_1 = """ FBLC CSOC DEM1 - NRESERC + NRESERC CODDOC FTV CODGIO RFV MACCHINA VAIO-ADAL @@ -356,7 +289,7 @@ expected_stdout_1 = """ FBLC CSOC DEM1 - NRESERC + NRESERC CODDOC NCV CODGIO RFV MACCHINA VAIO-ADAL @@ -372,9 +305,68 @@ expected_stdout_1 = """ FBLC """ +@pytest.mark.skip('FIXME: Not IMPLEMENTED') @pytest.mark.version('>=3.0') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") +# Original python code for this test: +# ----------------------------------- +# import os +# import zipfile +# +# os.environ["ISC_USER"] = 'SYSDBA' +# os.environ["ISC_PASSWORD"] = 'masterkey' +# +# db_conn.close() +# zf = zipfile.ZipFile( os.path.join(context['files_location'],'eqc136030.zip') ) +# zf.extractall( context['temp_directory'] ) +# zf.close() +# +# fbk = os.path.join(context['temp_directory'],'eqc136030.fbk') +# +# runProgram('gbak',['-rep',fbk, dsn]) +# +# script=""" +# set list on; +# set sqlda_display on; +# set planonly; +# +# select +# a.csoc, a.nreserc , a.coddoc , a.codgio , +# a.macchina, a.rec_upd, a.utente_upd, +# cast(a.fblc as integer) fblc, +# 
cast(a.fdel as integer) fdel, +# b.tipdoc, b.desdoc, b.fblc , c.tipgio, c.desgio , c.fblc +# from docgio a +# left join doctip (a.csoc, a.nreserc) b on ( a.coddoc = b.coddoc ) +# left join giotip (a.csoc, a.nreserc) c on (a.codgio = c.codgio) +# where +# a.csoc = ? +# and a.nreserc = ? +# order by a.codgio, a.coddoc; +# +# set planonly; +# set plan off; +# set sqlda_display off; +# +# select +# a.csoc, a.nreserc , a.coddoc , a.codgio , +# a.macchina, a.rec_upd, a.utente_upd, +# cast(a.fblc as integer) fblc, +# cast(a.fdel as integer) fdel, +# b.tipdoc, b.desdoc, b.fblc , c.tipgio, c.desgio , c.fblc +# from docgio a +# left join doctip (a.csoc, a.nreserc) b on ( a.coddoc = b.coddoc ) +# left join giotip (a.csoc, a.nreserc) c on (a.codgio = c.codgio) +# where +# a.csoc = 'DEM1' -- :csoc +# and a.nreserc = '' -- :nreserc +# order by a.codgio, a.coddoc; +# """ +# runProgram('isql',[dsn,'-q'],script) +# +# ############################### +# # Cleanup. +# os.remove(fbk) +# ----------------------------------- diff --git a/tests/functional/tabloid/test_eqc_141347.py b/tests/functional/tabloid/test_eqc_141347.py index d7db8489..0c53fcb2 100644 --- a/tests/functional/tabloid/test_eqc_141347.py +++ b/tests/functional/tabloid/test_eqc_141347.py @@ -1,26 +1,19 @@ #coding:utf-8 -# -# id: functional.tabloid.eqc_141347 -# title: Check correctness of LEFT JOIN result when right source has two FK and one of fields from FK present both in ON and WHERE clauses. -# decription: -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: tabloid.eqc-141347 +TITLE: Check correctness of LEFT JOIN result when right source has two FK and one of + fields from FK present both in ON and WHERE clauses. +DESCRIPTION: +FBTEST: functional.tabloid.eqc_141347 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set list on; recreate table t2 ( id int primary key, @@ -29,33 +22,33 @@ test_script_1 = """ ); recreate table t1 (id int primary key); recreate table t0 (id int primary key); - + alter table t2 add constraint fk_t0 foreign key (pid0) references t0 ,add constraint fk_t1 foreign key (pid1) references t1 ; commit; - + insert into t0 (id) values (1); - + insert into t1 (id) values (1); insert into t1 (id) values (2); insert into t1 (id) values (3); insert into t1 (id) values (4); - + insert into t2 (id, pid1, pid0) values (1, 1, 1); insert into t2 (id, pid1, pid0) values (2, 4, 1); commit; - + select a.id, b.pid1 from t1 a left join t2 b on ( a.id = b.pid1 ) and ( b.pid0 = 1 ) where (b.pid1 is null); """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ ID 2 PID1 @@ -63,9 +56,8 @@ expected_stdout_1 = """ PID1 """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/tabloid/test_eqc_160757.py b/tests/functional/tabloid/test_eqc_160757.py index e029c44a..44d3ff13 100644 --- a/tests/functional/tabloid/test_eqc_160757.py +++ 
b/tests/functional/tabloid/test_eqc_160757.py @@ -1,34 +1,27 @@ #coding:utf-8 -# -# id: functional.tabloid.eqc_160757 -# title: Check correctness of LEFT JOIN result when left source is table with several -vs- single rows and right source is SP. -# decription: -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: tabloid.eqc-160757 +TITLE: Check correctness of LEFT JOIN result when left source is table with several + -vs- single rows and right source is SP. +DESCRIPTION: +FBTEST: functional.tabloid.eqc_160757 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory(page_size=4096) -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ create or alter procedure sp_test as begin end; - + recreate table test ( id int primary key using index test_pk, val_a int, val_b int ); - + set term ^; create or alter procedure sp_test( a_id int, @@ -48,21 +41,21 @@ test_script_1 = """ ^ set term ;^ commit; - + insert into test (id, val_a, val_b ) values (1, 0, 0); insert into test (id, val_a, val_b ) values (2, 1, 0); insert into test (id, val_a, val_b ) values (3, 0, 1); - + set list on; - + --set echo on; - + select 'test_1' as msg, t.*, p.* from test t left join sp_test(t.id,t.val_a,t.val_b) p on p.o_id=t.id ; - + select 'test_2' as msg, t.*, p.* from test t left join sp_test (t.id,t.val_a,t.val_b) p on p.o_id=t.id @@ -70,23 +63,23 @@ test_script_1 = """ ; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ MSG test_1 ID 1 VAL_A 0 VAL_B 0 O_ID 1 O_IS_EQU Passed. - + MSG test_1 ID 2 VAL_A 1 VAL_B 0 O_ID 2 O_IS_EQU Failed. - + MSG test_1 ID 3 VAL_A 0 @@ -102,9 +95,8 @@ expected_stdout_1 = """ O_IS_EQU Failed. 
""" -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/tabloid/test_eqc_166663.py b/tests/functional/tabloid/test_eqc_166663.py index 433b27c1..4d44e826 100644 --- a/tests/functional/tabloid/test_eqc_166663.py +++ b/tests/functional/tabloid/test_eqc_166663.py @@ -1,26 +1,20 @@ #coding:utf-8 -# -# id: functional.tabloid.eqc_166663 -# title: Index(es) should not become corrupted after two updates and raising exception in one Tx, doing inside SP -# decription: -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: tabloid.eqc-166663 +TITLE: Index(es) should not become corrupted after two updates and raising exception in one Tx, doing inside SP +DESCRIPTION: +FBTEST: functional.tabloid.eqc_166663 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +substitutions = [('exception .*', 'exception'), ('line: .*', 'line')] -substitutions_1 = [('exception .*', 'exception'), ('line: .*', 'line')] +db = db_factory(page_size=4096) -init_script_1 = """""" - -db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ -- NB: changed expected value of SQLSTATE from 42000 to HY000, see: -- "Prevent stack trace (line/column info) from overriding the real error's SQLSTATE", 30-apr-2016 -- https://github.com/FirebirdSQL/firebird/commit/d1d8b36a07d4f11d98d2c8ec16fb8ec073da442b // FB 4.0 @@ -93,9 +87,9 @@ test_script_1 = """ select * from tmain; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=substitutions) -expected_stdout_1 = """ +expected_stdout = """ select * from tdetl where id >= 0; PLAN (TDETL INDEX (TDETL_PK)) ID 1 @@ -126,7 +120,8 @@ expected_stdout_1 = """ NAME qwerty Records affected: 1 """ -expected_stderr_1 = """ + +expected_stderr = """ Statement failed, SQLSTATE = HY000 exception 2 -EX_FOO @@ -134,12 +129,10 @@ expected_stderr_1 = """ -At procedure 'SP_TEST' line: 6, col: 8 """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.expected_stderr = expected_stderr + act.execute() + assert (act.clean_stdout == act.clean_expected_stdout and + act.clean_stderr == act.clean_expected_stderr) diff --git a/tests/functional/tabloid/test_eqc_200762.py b/tests/functional/tabloid/test_eqc_200762.py index e8ed8808..169255a6 100644 --- a/tests/functional/tabloid/test_eqc_200762.py +++ b/tests/functional/tabloid/test_eqc_200762.py @@ -1,26 +1,18 @@ #coding:utf-8 -# -# id: functional.tabloid.eqc_200762 -# title: Check results of CONTAINING when search pattern can span on one or several blob segments -# decription: -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: tabloid.eqc-200762 +TITLE: Check results of CONTAINING when search pattern can span on one or several blob segments 
+DESCRIPTION: +FBTEST: functional.tabloid.eqc_200762 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory(page_size=8192) -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(page_size=8192, sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set list on; set term ^; execute block as @@ -105,9 +97,9 @@ test_script_1 = """ commit; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ ID 1 PTRN_LEN 36 BLOB_LEN 1036 @@ -144,9 +136,8 @@ expected_stdout_1 = """ PTRN_POS 1007 """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/tabloid/test_eqc_306263.py b/tests/functional/tabloid/test_eqc_306263.py index 3b9608e8..ebfbedc2 100644 --- a/tests/functional/tabloid/test_eqc_306263.py +++ b/tests/functional/tabloid/test_eqc_306263.py @@ -1,107 +1,20 @@ #coding:utf-8 -# -# id: functional.tabloid.eqc_306263 -# title: Check ability to run complex query -# decription: -# -# tracker_id: -# min_versions: ['2.5.5'] -# versions: 2.5.5 -# qmid: + +""" +ID: tabloid.eqc-306263 +TITLE: Check ability to run complex query +DESCRIPTION: +FBTEST: functional.tabloid.eqc_306263 +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 2.5.5 -# resources: None +db = db_factory() -substitutions_1 = [('0.000000000000000.*', '0.0000000000000000')] +act = python_act('db', substitutions=[('0.000000000000000.*', '0.0000000000000000')]) -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -# test_script_1 -#--- -# import os -# import zipfile -# os.environ["ISC_USER"] = 'SYSDBA' -# os.environ["ISC_PASSWORD"] = 'masterkey' -# -# db_conn.close() -# -# zf = zipfile.ZipFile( os.path.join(context['files_location'],'eqc306263.zip') ) -# zf.extractall( context['temp_directory'] ) -# zf.close() -# -# fbk = os.path.join(context['temp_directory'],'eqc306263.fbk') -# -# runProgram('gbak',['-rep', fbk, dsn]) -# -# script="""set list on; -# select -# objeschlue.obsch_schl, -# schluedef.schld_bzg, -# objeschlue.obj_id, -# darlehen.vertrag, -# darlkond.dlko_annui, -# zuschkond.zuko_wrt, -# darlkond.dlko_unom, -# darlgeber.darlg_bzg, -# objeschlue.obsch_gb, -# objeschlue.obsch_gabd, -# darlkond.flgk_kz, -# zuschkond.faelligkeit, -# darl_obper.top_id, -# darlkond.dlko_gvond, -# darlkond.dlko_gbisd, -# zuschkond.zuko_id, -# zuschkond.zuko_gvond, -# zuschkond.zuko_gbid, -# darl_obper.obj_id -# from -# ( -# ( -# ( -# ( -# ( -# ( -# darl_obper darl_obper -# inner join darlehen darlehen on darl_obper.darl_id=darlehen.darl_id -# ) -# inner join objeschlue objeschlue on darlehen.darl_id=objeschlue.darl_id -# ) -# inner join darlgeber darlgeber on darlehen.darlg_id=darlgeber.darlg_id -# ) -# inner join darlkond darlkond on darlehen.darl_id=darlkond.darl_id -# ) -# left outer join schluedef schluedef on objeschlue.schld_id=schluedef.schld_id -# ) -# left outer join zuschkond zuschkond on darlehen.darl_id=zuschkond.darl_id -# ) -# where -# darl_obper.obj_id=3759 -# and darlkond.dlko_gvond<'12/02/2011 
00:00:00' -# and darlkond.dlko_gbisd>='12/01/2011 00:00:00' -# and objeschlue.obj_id=3759 -# and objeschlue.obsch_gb>='12/02/2011 00:00:00' -# and objeschlue.obsch_gabd<'12/02/2011 00:00:00' -# and darl_obper.top_id is null -# and ( -# zuschkond.zuko_id is null -# or zuschkond.zuko_gvond<'12/02/2011 00:00:00' and zuschkond.zuko_gbid>='12/01/2011 00:00:00' -# ); -# commit; -# """ -# runProgram('isql',[dsn,'-q'],script) -# -# ############################### -# # Cleanup. -# os.remove(fbk) -#--- -act_1 = python_act('db_1', substitutions=substitutions_1) - -expected_stdout_1 = """ +expected_stdout = """ OBSCH_SCHL 1.000000000000000 SCHLD_BZG OBJ_ID 3759 @@ -123,9 +36,86 @@ expected_stdout_1 = """ OBJ_ID 3759 """ -@pytest.mark.version('>=2.5.5') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - +@pytest.mark.skip('FIXME: Not IMPLEMENTED') +@pytest.mark.version('>=3.0') +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") +# Original python code for this test: +# ----------------------------------- +# import os +# import zipfile +# os.environ["ISC_USER"] = 'SYSDBA' +# os.environ["ISC_PASSWORD"] = 'masterkey' +# +# db_conn.close() +# +# zf = zipfile.ZipFile( os.path.join(context['files_location'],'eqc306263.zip') ) +# zf.extractall( context['temp_directory'] ) +# zf.close() +# +# fbk = os.path.join(context['temp_directory'],'eqc306263.fbk') +# +# runProgram('gbak',['-rep', fbk, dsn]) +# +# script="""set list on; +# select +# objeschlue.obsch_schl, +# schluedef.schld_bzg, +# objeschlue.obj_id, +# darlehen.vertrag, +# darlkond.dlko_annui, +# zuschkond.zuko_wrt, +# darlkond.dlko_unom, +# darlgeber.darlg_bzg, +# objeschlue.obsch_gb, +# objeschlue.obsch_gabd, +# darlkond.flgk_kz, +# zuschkond.faelligkeit, +# darl_obper.top_id, +# darlkond.dlko_gvond, +# darlkond.dlko_gbisd, +# zuschkond.zuko_id, +# zuschkond.zuko_gvond, +# zuschkond.zuko_gbid, +# darl_obper.obj_id +# from +# ( +# ( +# ( +# ( +# ( +# ( +# darl_obper darl_obper +# inner join darlehen darlehen on darl_obper.darl_id=darlehen.darl_id +# ) +# inner join objeschlue objeschlue on darlehen.darl_id=objeschlue.darl_id +# ) +# inner join darlgeber darlgeber on darlehen.darlg_id=darlgeber.darlg_id +# ) +# inner join darlkond darlkond on darlehen.darl_id=darlkond.darl_id +# ) +# left outer join schluedef schluedef on objeschlue.schld_id=schluedef.schld_id +# ) +# left outer join zuschkond zuschkond on darlehen.darl_id=zuschkond.darl_id +# ) +# where +# darl_obper.obj_id=3759 +# and darlkond.dlko_gvond<'12/02/2011 00:00:00' +# and darlkond.dlko_gbisd>='12/01/2011 00:00:00' +# and objeschlue.obj_id=3759 +# and objeschlue.obsch_gb>='12/02/2011 00:00:00' +# and objeschlue.obsch_gabd<'12/02/2011 00:00:00' +# and darl_obper.top_id is null +# and ( +# zuschkond.zuko_id is null +# or zuschkond.zuko_gvond<'12/02/2011 00:00:00' and zuschkond.zuko_gbid>='12/01/2011 00:00:00' +# ); +# commit; +# """ +# runProgram('isql',[dsn,'-q'],script) +# +# ############################### +# # Cleanup. 
+# os.remove(fbk) +# ----------------------------------- diff --git a/tests/functional/tabloid/test_eqc_343715.py b/tests/functional/tabloid/test_eqc_343715.py index a63d6bc3..b878a2e8 100644 --- a/tests/functional/tabloid/test_eqc_343715.py +++ b/tests/functional/tabloid/test_eqc_343715.py @@ -1,24 +1,18 @@ #coding:utf-8 -# -# id: functional.tabloid.eqc_343715 -# title: Checking PK index is used when table is joined as driven (right) source in LEFT OUTER join from VIEW -# decription: -# Number of index reads per TABLE can be watched only in 3.0 by using mon$table_stats. -# We have to ensure that table TEST1 in following queries is accessed only by its PK index, i.e. NO natural reads for it can occur. -# tracker_id: -# min_versions: ['3.0'] -# versions: 3.0 -# qmid: None + +""" +ID: tabloid.eqc-343715 +TITLE: Checking PK index is used when table is joined as driven (right) source in LEFT OUTER join from VIEW +DESCRIPTION: + Number of index reads per TABLE can be watched only in 3.0 by using mon$table_stats. + We have to ensure that table TEST1 in following queries is accessed only by its PK index, i.e. NO natural reads for it can occur. +FBTEST: functional.tabloid.eqc_343715 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """ +init_script = """ create or alter view vlast_test1_for_test3_a as select 1 id from rdb$database; create or alter view vlast_test1_for_test3_b as select 1 id from rdb$database; create or alter procedure get_last_test1_for_test3 as begin end; @@ -79,11 +73,12 @@ init_script_1 = """ from test3 t3 ; commit; - """ + +""" -db_1 = db_factory(from_backup='mon-stat-gathering-3_0.fbk', init=init_script_1) +db = db_factory(from_backup='mon-stat-gathering-3_0.fbk', init=init_script) -test_script_1 = """ +test_script = """ insert into test3(id) values(1); insert into test3(id) values(2); insert into test3(id) values(3); @@ -151,17 +146,16 @@ test_script_1 = """ where table_name = upper('TEST1'); """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ TABLE_NAME TEST1 NATURAL_READS 0 INDEXED_READS 6 """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/tabloid/test_eqc_344124.py b/tests/functional/tabloid/test_eqc_344124.py index 4ea3ecc2..565d43cb 100644 --- a/tests/functional/tabloid/test_eqc_344124.py +++ b/tests/functional/tabloid/test_eqc_344124.py @@ -1,30 +1,24 @@ #coding:utf-8 -# -# id: functional.tabloid.eqc_344124 -# title: Check ability to run selectable SP with input parameter which inserts into GTT (on commit DELETE rows) and then does suspend -# decription: NB: if either a_id, suspend or the insert is removed, or if gtt_test is changed to on commit preserve rows - no crash -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: tabloid.eqc-344124 +TITLE: Check ability to run selectable SP with input parameter which inserts into GTT + (on commit DELETE rows) and then does suspend +DESCRIPTION: + NB: if either a_id, suspend or the insert is removed, or if gtt_test is changed to on commit preserve rows - no crash 
+FBTEST: functional.tabloid.eqc_344124 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ recreate global temporary table gtt_test ( id integer ) on commit delete rows; - + set term ^; create procedure test returns ( @@ -38,20 +32,19 @@ test_script_1 = """ ^ set term ;^ commit; - + set list on; select * from test; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ O_ID 1 """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/tabloid/test_gtcs_proc_isql_14.py b/tests/functional/tabloid/test_gtcs_proc_isql_14.py index dcdb63f3..a3bf4ddf 100644 --- a/tests/functional/tabloid/test_gtcs_proc_isql_14.py +++ b/tests/functional/tabloid/test_gtcs_proc_isql_14.py @@ -1,35 +1,26 @@ #coding:utf-8 -# -# id: functional.tabloid.gtcs_proc_isql_14 -# title: gtcs-proc-isql-14 -# decription: -# Original test see in: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROC_ISQL_14.script -# SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: -# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script -# Checked on: -# 4.0.0.1803 SS: 1.822s. -# 3.0.6.33265 SS: 0.849s. -# 2.5.9.27149 SC: 0.313s. -# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: tabloid.gtcs-proc-isql-14 +TITLE: gtcs-proc-isql-14 +DESCRIPTION: + Original test see in: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROC_ISQL_14.script + SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data: + https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script + Checked on: + 4.0.0.1803 SS: 1.822s. + 3.0.6.33265 SS: 0.849s. + 2.5.9.27149 SC: 0.313s. 
+FBTEST: functional.tabloid.gtcs_proc_isql_14 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory(from_backup='gtcs_sp1.fbk') -substitutions_1 = [('=', ''), ('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(from_backup='gtcs_sp1.fbk', init=init_script_1) - -test_script_1 = """ +test_script = """ set bail on; set term ^; create procedure proc14 returns (a varchar(20), b integer) as @@ -56,9 +47,9 @@ test_script_1 = """ select 'point-7' msg, p.a, p.b from proc14 p where p.b > (select avg(x.b) from proc14 x); """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('=', ''), ('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ A B Nut 12 @@ -95,9 +86,8 @@ expected_stdout_1 = """ point-7 Screw 14 """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/tabloid/test_join_on_position_function_result.py b/tests/functional/tabloid/test_join_on_position_function_result.py index 10af9b53..46cc3153 100644 --- a/tests/functional/tabloid/test_join_on_position_function_result.py +++ b/tests/functional/tabloid/test_join_on_position_function_result.py @@ -1,28 +1,19 @@ #coding:utf-8 -# -# id: functional.tabloid.join_on_position_function_result -# title: Records with NULLs could be lost from resultset. -# decription: -# http://www.sql.ru/forum/actualutils.aspx?action=gotomsg&tid=1009792&msg=14032086 -# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: tabloid.join-on-position-function-result +TITLE: Records with NULLs could be lost from resultset. 
+DESCRIPTION: + http://www.sql.ru/forum/actualutils.aspx?action=gotomsg&tid=1009792&msg=14032086 +FBTEST: functional.tabloid.join_on_position_function_result +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ recreate table t(id int, s varchar(30)); commit; insert into t values(1, 'aaa'); @@ -41,9 +32,9 @@ test_script_1 = """ ; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ ID 1 S aaa P @@ -70,9 +61,8 @@ expected_stdout_1 = """ K """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/tabloid/test_join_transformation_001.py b/tests/functional/tabloid/test_join_transformation_001.py index 51ec3b3b..d8ed37be 100644 --- a/tests/functional/tabloid/test_join_transformation_001.py +++ b/tests/functional/tabloid/test_join_transformation_001.py @@ -1,26 +1,19 @@ #coding:utf-8 -# -# id: functional.tabloid.join_transformation_001 -# title: Check ability of outer join simplification. -# decription: Use null-rejected predicate, trivial case. -# tracker_id: -# min_versions: ['3.0'] -# versions: 3.0 -# qmid: + +""" +ID: tabloid.join-transformation-01 +TITLE: Check ability of outer join simplification. +DESCRIPTION: + Use null-rejected predicate, trivial case. +FBTEST: functional.tabloid.join_transformation_001 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory(from_backup='join-transformations.fbk') -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(from_backup='join-transformations.fbk', init=init_script_1) - -test_script_1 = """ +test_script = """ execute procedure sp_fill( 25, 30 ); -- ^ ^- probability of assign each field on each row to NULL (percent). -- +- number of rows in each of tables t1...t6 @@ -28,7 +21,7 @@ test_script_1 = """ commit; execute procedure sp_recalc_idx_stat; commit; - + set list on; set term ^; execute block returns(result varchar(50)) as @@ -56,9 +49,9 @@ test_script_1 = """ suspend; if ( result not containing 'Passed' ) then - -- this context variable serves as 'flag' to show + -- this context variable serves as 'flag' to show -- problematic data (see following EB): - rdb$set_context('USER_SESSION', 'FAULT', '1'); + rdb$set_context('USER_SESSION', 'FAULT', '1'); end ^ execute block returns( failed_on varchar(255) ) as @@ -67,8 +60,8 @@ test_script_1 = """ -- rows from all tables in order to reproduce this trouble later: if ( rdb$get_context('USER_SESSION', 'FAULT') = '1' ) then begin - for - select dml from sp_show_data + for + select dml from sp_show_data into failed_on do suspend; @@ -80,15 +73,14 @@ test_script_1 = """ """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ RESULT Passed. 
""" @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/tabloid/test_join_transformation_002.py b/tests/functional/tabloid/test_join_transformation_002.py index 7ff9b0ce..5aa476c8 100644 --- a/tests/functional/tabloid/test_join_transformation_002.py +++ b/tests/functional/tabloid/test_join_transformation_002.py @@ -1,26 +1,19 @@ #coding:utf-8 -# -# id: functional.tabloid.join_transformation_002 -# title: Check ability of outer join simplification. -# decription: Use null-rejected predicate in WHERE filtering leads to replacement of TWO outer joins. -# tracker_id: -# min_versions: ['3.0'] -# versions: 3.0 -# qmid: + +""" +ID: tabloid.join-transformation-02 +TITLE: Check ability of outer join simplification. +DESCRIPTION: + Use null-rejected predicate in WHERE filtering leads to replacement of TWO outer joins. +FBTEST: functional.tabloid.join_transformation_002 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory(from_backup='join-transformations.fbk') -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(from_backup='join-transformations.fbk', init=init_script_1) - -test_script_1 = """ +test_script = """ execute procedure sp_fill( 50, 30 ); -- ^ ^- probability of assign each field on each row to NULL (percent). -- +- number of rows in each of tables t1...t6 @@ -28,7 +21,7 @@ test_script_1 = """ commit; execute procedure sp_recalc_idx_stat; commit; - + set list on; set term ^; execute block returns(result varchar(50)) as @@ -54,9 +47,9 @@ test_script_1 = """ suspend; if ( result not containing 'Passed' ) then - -- this context variable serves as 'flag' to show + -- this context variable serves as 'flag' to show -- problematic data (see following EB): - rdb$set_context('USER_SESSION', 'FAULT', '1'); + rdb$set_context('USER_SESSION', 'FAULT', '1'); end ^ execute block returns( failed_on varchar(255) ) as @@ -65,8 +58,8 @@ test_script_1 = """ -- rows from all tables in order to reproduce this trouble later: if ( rdb$get_context('USER_SESSION', 'FAULT') = '1' ) then begin - for - select dml from sp_show_data + for + select dml from sp_show_data into failed_on do suspend; @@ -78,15 +71,14 @@ test_script_1 = """ """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ RESULT Passed. """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/tabloid/test_join_transformation_003.py b/tests/functional/tabloid/test_join_transformation_003.py index 541507d4..95b065ce 100644 --- a/tests/functional/tabloid/test_join_transformation_003.py +++ b/tests/functional/tabloid/test_join_transformation_003.py @@ -1,26 +1,19 @@ #coding:utf-8 -# -# id: functional.tabloid.join_transformation_003 -# title: Check ability of outer join simplification. 
-# decription: Null-rejected expr in WHERE clause refers to most right datasource, and this leads to replacement of TWO outer joins. -# tracker_id: -# min_versions: ['3.0'] -# versions: 3.0 -# qmid: + +""" +ID: tabloid.join-transformation-03 +TITLE: Check ability of outer join simplification. +DESCRIPTION: + Null-rejected expr in WHERE clause refers to most right datasource, and this leads to replacement of TWO outer joins. +FBTEST: functional.tabloid.join_transformation_003 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory(from_backup='join-transformations.fbk') -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(from_backup='join-transformations.fbk', init=init_script_1) - -test_script_1 = """ +test_script = """ execute procedure sp_fill( 50, 30 ); -- ^ ^- probability of assign each field on each row to NULL (percent). -- +- number of rows in each of tables t1...t6 @@ -28,7 +21,7 @@ test_script_1 = """ commit; execute procedure sp_recalc_idx_stat; commit; - + set list on; set term ^; execute block returns(result varchar(50)) as @@ -40,30 +33,30 @@ test_script_1 = """ -- if query returns less columns: 'select a.id, b.id, c.id, d.id, null, null from ( - t1 a - left - join t2 b using(x,y) + t1 a + left + join t2 b using(x,y) ) - left + left join ( - t3 c - left - join t4 d using(x,y) + t3 c + left + join t4 d using(x,y) ) using(x,y) where d.x>0' -- NB: 'd' is alias for t4 which is most right data source. , ---------------------- Query-2 (simplified and we assume that it ALWAYS produces the same result as Q1) 'select a.id, b.id, c.id, d.id, null, null from ( - t1 a + t1 a left -- NB: this can NOT be replaced with "inner"! - join t2 b using(x,y) + join t2 b using(x,y) ) inner -- [1] join ( - t3 c + t3 c inner -- [2] - join t4 d using(x,y) + join t4 d using(x,y) ) using(x,y) where d.x>0' , 0 ------------------------------------ nr_total: when 0 then do NOT run sp_fill because we already do have data for checking @@ -72,9 +65,9 @@ test_script_1 = """ suspend; if ( result not containing 'Passed' ) then - -- this context variable serves as 'flag' to show + -- this context variable serves as 'flag' to show -- problematic data (see following EB): - rdb$set_context('USER_SESSION', 'FAULT', '1'); + rdb$set_context('USER_SESSION', 'FAULT', '1'); end ^ execute block returns( failed_on varchar(255) ) as @@ -83,8 +76,8 @@ test_script_1 = """ -- rows from all tables in order to reproduce this trouble later: if ( rdb$get_context('USER_SESSION', 'FAULT') = '1' ) then begin - for - select dml from sp_show_data + for + select dml from sp_show_data into failed_on do suspend; @@ -96,15 +89,14 @@ test_script_1 = """ """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ RESULT Passed. 
""" @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/tabloid/test_join_transformation_004.py b/tests/functional/tabloid/test_join_transformation_004.py index 8f93f8ff..0288ddcb 100644 --- a/tests/functional/tabloid/test_join_transformation_004.py +++ b/tests/functional/tabloid/test_join_transformation_004.py @@ -1,30 +1,22 @@ #coding:utf-8 -# -# id: functional.tabloid.join_transformation_004 -# title: Check ability of outer join simplification. -# decription: -# Two datasources are involved in the null-rejecting predicate in the WHERE-filtering. -# Because of DISJUNCTION usage ('where ... OR ...'), replacement of outer join with -# inner one is possible only for part of query that is BEFORE (left-side) of first DS. -# This means that we can not simplify LOJ of 'c' and 'd' datasources - see code below. -# tracker_id: -# min_versions: ['3.0'] -# versions: 3.0 -# qmid: + +""" +ID: tabloid.join-transformation-04 +TITLE: Check ability of outer join simplification. +DESCRIPTION: + Two datasources are involved in the null-rejecting predicate in the WHERE-filtering. + Because of DISJUNCTION usage ('where ... OR ...'), replacement of outer join with + inner one is possible only for part of query that is BEFORE (left-side) of first DS. + This means that we can not simplify LOJ of 'c' and 'd' datasources - see code below. +FBTEST: functional.tabloid.join_transformation_004 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory(from_backup='join-transformations.fbk') -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(from_backup='join-transformations.fbk', init=init_script_1) - -test_script_1 = """ +test_script = """ execute procedure sp_fill( 35, 30 ); -- ^ ^- probability of assign each field on each row to NULL (percent). -- +- number of rows in each of tables t1...t6 @@ -32,7 +24,7 @@ test_script_1 = """ commit; execute procedure sp_recalc_idx_stat; commit; - + set list on; set term ^; execute block returns(result varchar(50)) as @@ -46,9 +38,9 @@ test_script_1 = """ from (t1 a left join t2 b using(x,y) ) left ----------------------------------------- [1] join ( - t3 c + t3 c left ------------------------------------- [2]: this can NOT be replaced with INNER because of disjunction expr in WHERE-filtering - join t4 d using(x,y) + join t4 d using(x,y) ) using(x,y) where c.u>0 or d.v>0' -- ^-------------------------------------- !!! 
@@ -56,11 +48,11 @@ test_script_1 = """ ---------------------- Query-2 (simplified and we assume that it ALWAYS produces the same result as Q1) 'select a.id, b.id, c.id, d.id, null, null from (t1 a left join t2 b using(x,y) ) - INNER + INNER join ( - t3 c - left - join t4 d using(x,y) + t3 c + left + join t4 d using(x,y) ) using(x,y) where c.u>0 or d.v>0' , 0 ------------------------------------ nr_total: when 0 then do NOT run sp_fill because we already do have data for checking @@ -69,9 +61,9 @@ test_script_1 = """ suspend; if ( result not containing 'Passed' ) then - -- this context variable serves as 'flag' to show + -- this context variable serves as 'flag' to show -- problematic data (see following EB): - rdb$set_context('USER_SESSION', 'FAULT', '1'); + rdb$set_context('USER_SESSION', 'FAULT', '1'); end ^ execute block returns( failed_on varchar(255) ) as @@ -80,8 +72,8 @@ test_script_1 = """ -- rows from all tables in order to reproduce this trouble later: if ( rdb$get_context('USER_SESSION', 'FAULT') = '1' ) then begin - for - select dml from sp_show_data + for + select dml from sp_show_data into failed_on do suspend; @@ -93,15 +85,14 @@ test_script_1 = """ """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ RESULT Passed. """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/tabloid/test_join_transformation_005.py b/tests/functional/tabloid/test_join_transformation_005.py index 168ba35d..59571c71 100644 --- a/tests/functional/tabloid/test_join_transformation_005.py +++ b/tests/functional/tabloid/test_join_transformation_005.py @@ -1,31 +1,22 @@ #coding:utf-8 -# -# id: functional.tabloid.join_transformation_005 -# title: Check ability of outer join simplification. -# decription: -# Query like this: -# A inner join B left join C can be simplified to: -# A inner join B inner join C -# -- if join-expression of 'B left join C' is null-rejecting. -# -# tracker_id: -# min_versions: ['3.0'] -# versions: 3.0 -# qmid: + +""" +ID: tabloid.join-transformation-05 +TITLE: Check ability of outer join simplification. +DESCRIPTION: + Query like this: + A inner join B left join C can be simplified to: + A inner join B inner join C + -- if join-expression of 'B left join C' is null-rejecting. +FBTEST: functional.tabloid.join_transformation_005 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory(from_backup='join-transformations.fbk') -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(from_backup='join-transformations.fbk', init=init_script_1) - -test_script_1 = """ +test_script = """ execute procedure sp_fill( 35, 30 ); -- ^ ^- probability of assign each field on each row to NULL (percent). 
-- +- number of rows in each of tables t1...t6 @@ -33,7 +24,7 @@ test_script_1 = """ commit; execute procedure sp_recalc_idx_stat; commit; - + set list on; set term ^; execute block returns(result varchar(50)) as @@ -53,7 +44,7 @@ test_script_1 = """ ---------------------- Query-2 (simplified and we assume that it ALWAYS produces the same result as Q1) 'select a.id, b.id, c.id, null, null, null from t1 a - inner + inner join t2 b on a.x = b.y join t3 c on b.y = c.z' , 0 ------------------------------------ nr_total: when 0 then do NOT run sp_fill because we already do have data for checking @@ -62,9 +53,9 @@ test_script_1 = """ suspend; if ( result not containing 'Passed' ) then - -- this context variable serves as 'flag' to show + -- this context variable serves as 'flag' to show -- problematic data (see following EB): - rdb$set_context('USER_SESSION', 'FAULT', '1'); + rdb$set_context('USER_SESSION', 'FAULT', '1'); end ^ execute block returns( failed_on varchar(255) ) as @@ -73,8 +64,8 @@ test_script_1 = """ -- rows from all tables in order to reproduce this trouble later: if ( rdb$get_context('USER_SESSION', 'FAULT') = '1' ) then begin - for - select dml from sp_show_data + for + select dml from sp_show_data into failed_on do suspend; @@ -86,15 +77,14 @@ test_script_1 = """ """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ RESULT Passed. """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/tabloid/test_join_transformation_006.py b/tests/functional/tabloid/test_join_transformation_006.py index 032a4377..107d51b8 100644 --- a/tests/functional/tabloid/test_join_transformation_006.py +++ b/tests/functional/tabloid/test_join_transformation_006.py @@ -1,31 +1,22 @@ #coding:utf-8 -# -# id: functional.tabloid.join_transformation_006 -# title: Check ability of outer join simplification. -# decription: -# We can replace 'A left join B' with 'A inner join B' if: -# 1. Both of these datasources are referred later in the expression of INNER join with some else datasource, and -# 2. There are no disjunction in this expression, i.e. its parts aren't linked by "OR", and -# 3. Each part of this expression is null-rejected. -# -# tracker_id: -# min_versions: ['3.0'] -# versions: 3.0 -# qmid: + +""" +ID: tabloid.join-transformation-06 +TITLE: Check ability of outer join simplification. +DESCRIPTION: + We can replace 'A left join B' with 'A inner join B' if: + 1. Both of these datasources are referred later in the expression of INNER join with some else datasource, and + 2. There are no disjunction in this expression, i.e. its parts aren't linked by "OR", and + 3. Each part of this expression is null-rejected. 
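+
+    A distilled sketch of the rule on hypothetical tables a, b, c (illustration only; the check
+    below compares two equivalent six-table queries generated over t1...t6):
+
+        -- original:
+        select a.id, b.id, c.id
+        from a left join b on a.x = b.x
+        inner join c on c.y = a.y and c.z = b.z;
+        -- both AND-ed parts of the INNER condition are null-rejecting and refer to a and b,
+        -- so the LEFT join can be replaced with INNER without changing the result:
+        select a.id, b.id, c.id
+        from a inner join b on a.x = b.x
+        inner join c on c.y = a.y and c.z = b.z;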
+FBTEST: functional.tabloid.join_transformation_006 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory(from_backup='join-transformations.fbk') -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(from_backup='join-transformations.fbk', init=init_script_1) - -test_script_1 = """ +test_script = """ execute procedure sp_fill( 40, 30 ); -- ^ ^- probability of assign each field on each row to NULL (percent). -- +- number of rows in each of tables t1...t6 @@ -33,7 +24,7 @@ test_script_1 = """ commit; execute procedure sp_recalc_idx_stat; commit; - + set list on; set term ^; execute block returns(result varchar(50)) as @@ -44,31 +35,31 @@ test_script_1 = """ -- NB: we have to make "padding" of null literals up to 6 fields -- if query returns less columns: 'select a.id, b.id, c.id, d.id, null, null - from - ( - ( t1 a - LEFT - join t2 b on a.x = b.y - ) - left join t3 c on a.y = c.z - ) + from + ( + ( t1 a + LEFT + join t2 b on a.x = b.y + ) + left join t3 c on a.y = c.z + ) inner join t4 d on a.v = d.w and b.z = d.u' -- ^-- INNER clause + absence of disjunction ("or"-parts) + null-rejecting of each parts ("a.v = d.w"; "b.z = d.u") -- makes this expression as whole null-rejecting. , ---------------------- Query-2 (simplified and we assume that it ALWAYS produces the same result as Q1) 'select a.id, b.id, c.id, d.id, null, null - from - ( - ( t1 a - INNER - join t2 b on a.x = b.y - ) - left join t3 c on a.y = c.z - ) + from + ( + ( t1 a + INNER + join t2 b on a.x = b.y + ) + left join t3 c on a.y = c.z + ) inner join t4 d on a.v = d.w and b.z = d.u' -- ^ ^ - -- +-------------+------> these datasources can be INNER joined because they + -- +-------------+------> these datasources can be INNER joined because they -- BOTH participate in null-rejecting expression. , 0 ------------------------------------ nr_total: when 0 then do NOT run sp_fill because we already do have data for checking @@ -77,9 +68,9 @@ test_script_1 = """ suspend; if ( result not containing 'Passed' ) then - -- this context variable serves as 'flag' to show + -- this context variable serves as 'flag' to show -- problematic data (see following EB): - rdb$set_context('USER_SESSION', 'FAULT', '1'); + rdb$set_context('USER_SESSION', 'FAULT', '1'); end ^ execute block returns( failed_on varchar(255) ) as @@ -88,8 +79,8 @@ test_script_1 = """ -- rows from all tables in order to reproduce this trouble later: if ( rdb$get_context('USER_SESSION', 'FAULT') = '1' ) then begin - for - select dml from sp_show_data + for + select dml from sp_show_data into failed_on do suspend; @@ -101,15 +92,14 @@ test_script_1 = """ """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ RESULT Passed. 
""" @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/tabloid/test_join_transformation_007.py b/tests/functional/tabloid/test_join_transformation_007.py index 3f9d55fc..d3b9ba63 100644 --- a/tests/functional/tabloid/test_join_transformation_007.py +++ b/tests/functional/tabloid/test_join_transformation_007.py @@ -1,33 +1,23 @@ #coding:utf-8 -# -# id: functional.tabloid.join_transformation_007 -# title: Check ability of outer join simplification. -# decription: -# For two datasources (S1, S2) and some complex query Q which has inside itself: "... left join S2 [...]" -# we can replace LEFT OUTER joins inside Q with INNER if S1 and S2 are involved in INNER join with -# null-rejected expression, i.e.: -# -# S1 join ( Q left join S2 ) on null_rej (S1.b,S2.f) ==> S1 join ( Q join S2 ) on null_rej (S1.b,S2.f) -# -# -# tracker_id: -# min_versions: ['3.0'] -# versions: 3.0 -# qmid: + +""" +ID: tabloid.join-transformation-07 +TITLE: Check ability of outer join simplification. +DESCRIPTION: + For two datasources (S1, S2) and some complex query Q which has inside itself: "... left join S2 [...]" + we can replace LEFT OUTER joins inside Q with INNER if S1 and S2 are involved in INNER join with + null-rejected expression, i.e.: + + S1 join ( Q left join S2 ) on null_rej (S1.b,S2.f) ==> S1 join ( Q join S2 ) on null_rej (S1.b,S2.f) +FBTEST: functional.tabloid.join_transformation_007 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory(from_backup='join-transformations.fbk') -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(from_backup='join-transformations.fbk', init=init_script_1) - -test_script_1 = """ +test_script = """ execute procedure sp_fill( 40, 30 ); -- ^ ^- probability of assign each field on each row to NULL (percent). 
-- +- number of rows in each of tables t1...t6 @@ -35,7 +25,7 @@ test_script_1 = """ commit; execute procedure sp_recalc_idx_stat; commit; - + set list on; set term ^; execute block returns(result varchar(50)) as @@ -51,11 +41,11 @@ test_script_1 = """ (t2 b ------------------------------------------------------ source_1 join (t3 c - left - join (t4 d - left - join (t5 e - left + left + join (t4 d + left + join (t5 e + left join t6 f ----------------------------- source_2 on e.y=f.z ) on d.x = e.y and f.y = d.z @@ -72,11 +62,11 @@ test_script_1 = """ join (t3 c inner - join (t4 d + join (t4 d inner - join (t5 e + join (t5 e inner - join t6 f + join t6 f on e.y=f.z ) on d.x = e.y and f.y = d.z ) on c.y = e.z @@ -88,9 +78,9 @@ test_script_1 = """ suspend; if ( result not containing 'Passed' ) then - -- this context variable serves as 'flag' to show + -- this context variable serves as 'flag' to show -- problematic data (see following EB): - rdb$set_context('USER_SESSION', 'FAULT', '1'); + rdb$set_context('USER_SESSION', 'FAULT', '1'); end ^ execute block returns( failed_on varchar(255) ) as @@ -99,8 +89,8 @@ test_script_1 = """ -- rows from all tables in order to reproduce this trouble later: if ( rdb$get_context('USER_SESSION', 'FAULT') = '1' ) then begin - for - select dml from sp_show_data + for + select dml from sp_show_data into failed_on do suspend; @@ -112,15 +102,14 @@ test_script_1 = """ """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ RESULT Passed. """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/tabloid/test_join_transformation_008.py b/tests/functional/tabloid/test_join_transformation_008.py index f12c3bee..479ac322 100644 --- a/tests/functional/tabloid/test_join_transformation_008.py +++ b/tests/functional/tabloid/test_join_transformation_008.py @@ -1,40 +1,31 @@ #coding:utf-8 -# -# id: functional.tabloid.join_transformation_008 -# title: Check ability of outer join simplification. -# decription: -# From join-transformation-008.fbt: -# === -# For two sources, S and T, which: -# 1) are separated by at least one "intermediate" source G (e.g. which is just "after" S and is "before" T), and -# 2) are involved into left join predicate P(S,T) which does not participate in disjunction ("OR"ed) expression -# -- one may to replace all LEFT joins starting from G and up to T with INNER ones. -# Join condition between S and its adjacent datasource (G) should be preserved as it is in original query. -# === -# Additional case here: when a query has several predicates {P1, P2,..., Pn} that involves non-adjacent datasources -# and are null-rejected then we can replace left-outer joins with inner ones separately for each of {P1, P2,..., Pn}. -# Moreover, if some pair of them (say, Px and Py) have common "affecting area" (affects on the same datasources) then -# result of replacement for Px can be preserved even if some of aliases (affected by Px) are starting pair for Py -# (which could NOT be replaced if query would have only one Py) - this effect looks like "bin_or". 
-# -# tracker_id: -# min_versions: ['3.0'] -# versions: 3.0 -# qmid: + +""" +ID: tabloid.join-transformation-08 +TITLE: Check ability of outer join simplification. +DESCRIPTION: + From join-transformation-008.fbt: + === + For two sources, S and T, which: + 1) are separated by at least one "intermediate" source G (e.g. which is just "after" S and is "before" T), and + 2) are involved into left join predicate P(S,T) which does not participate in disjunction ("OR"ed) expression + -- one may to replace all LEFT joins starting from G and up to T with INNER ones. + Join condition between S and its adjacent datasource (G) should be preserved as it is in original query. + === + Additional case here: when a query has several predicates {P1, P2,..., Pn} that involves non-adjacent datasources + and are null-rejected then we can replace left-outer joins with inner ones separately for each of {P1, P2,..., Pn}. + Moreover, if some pair of them (say, Px and Py) have common "affecting area" (affects on the same datasources) then + result of replacement for Px can be preserved even if some of aliases (affected by Px) are starting pair for Py + (which could NOT be replaced if query would have only one Py) - this effect looks like "bin_or". +FBTEST: functional.tabloid.join_transformation_008 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory(from_backup='join-transformations.fbk') -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(from_backup='join-transformations.fbk', init=init_script_1) - -test_script_1 = """ +test_script = """ execute procedure sp_fill( 20, 20 ); -- ^ ^- probability of assign each field on each row to NULL (percent). -- +- number of rows in each of tables t1...t6 @@ -42,7 +33,7 @@ test_script_1 = """ commit; execute procedure sp_recalc_idx_stat; commit; - + set list on; set term ^; execute block returns(result varchar(50)) as @@ -56,7 +47,7 @@ test_script_1 = """ -- Here we have two predicates that "jumps" over more than one datasource, -- i.e. they involve relations which are NOT adjacent in the query. -- Hereafter such predicates are called "jumpers". - -- First such predicate is "a.z = d.x" - it involve relations A & D and + -- First such predicate is "a.z = d.x" - it involve relations A & D and -- its affecting area is marked below as "::::::::::::::". -- Second is "b.w = e.y" - it involve relations B &E and its affecting area -- is marked as "%%%%%%%%%%%%%". @@ -80,14 +71,14 @@ test_script_1 = """ from t1 a LEFT join t2 b - left + left join t3 c left join t4 d left -- +-- this alias is NOT afffected by any of "jumpers"! join t5 e -- | left -- | - join t6 F + join t6 F on e.x = f.u on d.z = e.y on c.y = e.x @@ -98,8 +89,8 @@ test_script_1 = """ , 'select a.id, b.id, c.id, d.id, e.id, f.id from t1 a - LEFT -- this should be preserved anyway; explanation see in "join-transformation-008.fbt" - join t2 b + LEFT -- this should be preserved anyway; explanation see in "join-transformation-008.fbt" + join t2 b INNER -- "BIN_OR" here! This could NOT be done if we have only 2nd "jumper" (`b.w = e.y`) which STARTS from `b`. -- -- -- can be replaced because of jumper-1 ("a.z = d.x"); and this result will be PRESERVED despite of jumper-2. join t3 c @@ -108,7 +99,7 @@ test_script_1 = """ inner -- can be replaced because of jumper-2 ("b.w = e.y") join t5 e LEFT -- <<< !! this should be preserved as OUTER join !! 
- join t6 F + join t6 F on e.x = f.u on d.z = e.y on c.y = e.x @@ -120,9 +111,9 @@ test_script_1 = """ suspend; if ( result not containing 'Passed' ) then - -- this context variable serves as 'flag' to show + -- this context variable serves as 'flag' to show -- problematic data (see following EB): - rdb$set_context('USER_SESSION', 'FAULT', '1'); + rdb$set_context('USER_SESSION', 'FAULT', '1'); end ^ execute block returns( failed_on varchar(255) ) as @@ -131,8 +122,8 @@ test_script_1 = """ -- rows from all tables in order to reproduce this trouble later: if ( rdb$get_context('USER_SESSION', 'FAULT') = '1' ) then begin - for - select dml from sp_show_data + for + select dml from sp_show_data into failed_on do suspend; @@ -144,15 +135,14 @@ test_script_1 = """ """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ RESULT Passed. """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/tabloid/test_loose_record_when_relief_where_expr.py b/tests/functional/tabloid/test_loose_record_when_relief_where_expr.py index 54a04a00..7db7d55a 100644 --- a/tests/functional/tabloid/test_loose_record_when_relief_where_expr.py +++ b/tests/functional/tabloid/test_loose_record_when_relief_where_expr.py @@ -1,29 +1,20 @@ #coding:utf-8 -# -# id: functional.tabloid.loose_record_when_relief_where_expr -# title: Uncomment of one part of OR'ed expression led to loose of record from resultset. -# decription: -# "OR" could __reduce__ number of rows in resultset. -# See leters to/from dimitr 23.09.2010 (probably this bug was due to occasional typo). -# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: tabloid.loose-record-when-relief-where-expr +TITLE: Uncomment of one part of OR'ed expression led to loose of record from resultset. +DESCRIPTION: + "OR" could __reduce__ number of rows in resultset. + See leters to/from dimitr 23.09.2010 (probably this bug was due to occasional typo). 
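+
+    The invariant behind the check, on a hypothetical table t (illustration only; the real check
+    below uses a CTE-based query instead of a physical table):
+
+        select count(*) from t where t.x = 1;             -- returns N rows
+        select count(*) from t where t.x = 1 or t.y = 2;  -- must return at least N rows, because
+                                                          -- every row matching the first branch
+                                                          -- also matches the OR'ed predicate
+
+    The bug was the opposite: enabling the second, OR'ed branch made a row vanish from the result.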
+FBTEST: functional.tabloid.loose_record_when_relief_where_expr +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set list on; with tset_init as @@ -60,9 +51,9 @@ test_script_1 = """ ; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ SRVT_ID 61844284 ROW_ID 1 SELECTED_CASE A @@ -71,9 +62,8 @@ expected_stdout_1 = """ SELECTED_CASE A """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/tabloid/test_max_path_length.py b/tests/functional/tabloid/test_max_path_length.py index 14d1989e..f99bdf45 100644 --- a/tests/functional/tabloid/test_max_path_length.py +++ b/tests/functional/tabloid/test_max_path_length.py @@ -1,586 +1,85 @@ #coding:utf-8 -# -# id: functional.tabloid.max_path_length -# title: Check ability to create database with total length of path and file name about maximal allowed limit in Windows -# decription: -# Maximal TOTAL length of :\\\\ on Windows is about 260 characters. -# Firebird can *not* create DB file with such length, attempt to do this will failed with message: -# SQLSTATE = 08001 / I/O error during "CreateFile (create)" ... / -Error while trying to create file ... -# -# Firebird *allows* to create database with path length = 259 but this DB can not be referred -# neither using rdb$get_context('SYSTEM','DB_NAME') nor vis MON$DATABASE. -# -# Firebird database with path+name length >= 253 can not be subject of NBACKUP -L because of ".delta" suffix -# which always is added to the full path+name of database and this will raise: -# PROBLEM ON "begin backup: commit". -# I/O error during "CreateFile (create)" operation for file "<...>.FDB.delta" -# -Error while trying to create file -# - // localized message here -# SQLCODE:-902 -# -# Firebird database with length >= 246 can be backed up but can not be restored (if DB name will be such length). -# Maximal length of database that is created on Windows (:\\\\) is 245. -# -# Test uses this length (see below, MAX_FILE_NAME_SIZE) and cheks that one may to do following: -# * create such database and add some DB objects in it (table with index and procedure); -# * use encrypt and decrypt actions against this DB; SHOW database must display actual state of this DB; -# * extract metadata from this database; -# * call fb_lock_print utility with requirement to show Lock Manager info about this database; -# * invoke utilities: gstat -r; gfix; fbsvcmgr -# -# See also: http://tracker.firebirdsql.org/browse/CORE-6248 -# -# All these actions are performed with active trace user session, which registers database and services activity. -# Finally we check that: -# * all above mentioned actions did not failed, i.e. they did not issue somewhat into STDERR; -# * trace log has records about all services which did start. -# * trace log does NOT contain 'FAILED START_SERVICE' -# -# Note-1. 
-# Test database that is created by fbtest framework will be encrypted here using IBSurgeon Demo Encryption package -# ( https://ib-aid.com/download-demo-firebird-encryption-plugin/ ; https://ib-aid.com/download/crypt/CryptTest.zip ) -# License file plugins\\dbcrypt.conf with unlimited expiration was provided by IBSurgeon to Firebird Foundation (FF). -# This file was preliminary stored in FF Test machine. -# Test assumes that this file and all neccessary libraries already were stored into FB_HOME and %FB_HOME%\\plugins. -# -# Anyone who wants to run this test on his own machine must -# 1) download https://ib-aid.com/download/crypt/CryptTest.zip AND -# 2) PURCHASE LICENSE and get from IBSurgeon file plugins\\dbcrypt.conf with apropriate expiration date and other info. -# -# ################################################ ! ! ! N O T E ! ! ! ############################################## -# FF tests storage (aka "fbt-repo") does not (and will not) contain any license file for IBSurgeon Demo Encryption package! -# ######################################################################################################################### -# -# Note-2. -# Encryption/decryption is executed as separate server thread and can not be done instantly thus requiring some pause. -# This delay is implemented by starting transaction with LOCK TIMEOUT and making attempt to insert duplicate into -# table with unique index. Though error can be supressed in PSQL code, it *does* appear in the trace log as two lines: -# Error in the trace: 335544665 : violation of PRIMARY or UNIQUE KEY constraint "TEST_UNQ" on table "TEST" -# Error in the trace: 335545072 : Problematic key value is ("S" = '000000000000000000000000000000000000') -# We have to ignore these errors because it is expected (they must appear twise: after start of encryption and decruption). -# -# Checked on: -# 4.0.0.1767 SS: 15.266s. -# 4.0.0.1712 SC: 16.438s. -# 4.0.0.1763 CS: 17.719s. -# 3.0.6.33246 SS: 10.656s. -# 3.0.5.33084 SC: 14.563s. -# 3.0.6.33246 CS: 14.595s. -# -# 21.01.2021: added check for trace STDERR because of crash FB 4.0.0.2335 SS/CS. Refactored code for saving logs. -# -# -# tracker_id: -# min_versions: ['3.0.0'] -# versions: 3.0 -# qmid: None + +""" +ID: tabloid.max-path-length +TITLE: Check ability to create database with total length of path and file name about maximal allowed limit in Windows +DESCRIPTION: + Maximal TOTAL length of :// on Windows is about 260 characters. + Firebird can *not* create DB file with such length, attempt to do this will failed with message: + SQLSTATE = 08001 / I/O error during "CreateFile (create)" ... / -Error while trying to create file ... + + Firebird *allows* to create database with path length = 259 but this DB can not be referred + neither using rdb$get_context('SYSTEM','DB_NAME') nor vis MON$DATABASE. + + Firebird database with path+name length >= 253 can not be subject of NBACKUP -L because of ".delta" suffix + which always is added to the full path+name of database and this will raise: + PROBLEM ON "begin backup: commit". + I/O error during "CreateFile (create)" operation for file "<...>.FDB.delta" + -Error while trying to create file + - // localized message here + SQLCODE:-902 + + Firebird database with length >= 246 can be backed up but can not be restored (if DB name will be such length). + Maximal length of database that is created on Windows (://) is 245. 
+ + Test uses this length (see below, MAX_FILE_NAME_SIZE) and cheks that one may to do following: + * create such database and add some DB objects in it (table with index and procedure); + * use encrypt and decrypt actions against this DB; SHOW database must display actual state of this DB; + * extract metadata from this database; + * call fb_lock_print utility with requirement to show Lock Manager info about this database; + * invoke utilities: gstat -r; gfix; fbsvcmgr + + See also: http://tracker.firebirdsql.org/browse/CORE-6248 + + All these actions are performed with active trace user session, which registers database and services activity. + Finally we check that: + * all above mentioned actions did not failed, i.e. they did not issue somewhat into STDERR; + * trace log has records about all services which did start. + * trace log does NOT contain 'FAILED START_SERVICE' + + Note-1. + Test database that is created by fbtest framework will be encrypted here using IBSurgeon Demo Encryption package + ( https://ib-aid.com/download-demo-firebird-encryption-plugin/ ; https://ib-aid.com/download/crypt/CryptTest.zip ) + License file plugins/dbcrypt.conf with unlimited expiration was provided by IBSurgeon to Firebird Foundation (FF). + This file was preliminary stored in FF Test machine. + Test assumes that this file and all neccessary libraries already were stored into FB_HOME and %FB_HOME%/plugins. + + Anyone who wants to run this test on his own machine must + 1) download https://ib-aid.com/download/crypt/CryptTest.zip AND + 2) PURCHASE LICENSE and get from IBSurgeon file plugins/dbcrypt.conf with apropriate expiration date and other info. + + ################################################ ! ! ! N O T E ! ! ! ############################################## + FF tests storage (aka "fbt-repo") does not (and will not) contain any license file for IBSurgeon Demo Encryption package! + ######################################################################################################################### + + Note-2. + Encryption/decryption is executed as separate server thread and can not be done instantly thus requiring some pause. + This delay is implemented by starting transaction with LOCK TIMEOUT and making attempt to insert duplicate into + table with unique index. Though error can be supressed in PSQL code, it *does* appear in the trace log as two lines: + Error in the trace: 335544665 : violation of PRIMARY or UNIQUE KEY constraint "TEST_UNQ" on table "TEST" + Error in the trace: 335545072 : Problematic key value is ("S" = '000000000000000000000000000000000000') + We have to ignore these errors because it is expected (they must appear twise: after start of encryption and decruption). + + Checked on: + 4.0.0.1767 SS: 15.266s. + 4.0.0.1712 SC: 16.438s. + 4.0.0.1763 CS: 17.719s. + 3.0.6.33246 SS: 10.656s. + 3.0.5.33084 SC: 14.563s. + 3.0.6.33246 CS: 14.595s. + + 21.01.2021: added check for trace STDERR because of crash FB 4.0.0.2335 SS/CS. Refactored code for saving logs. 
+FBTEST: functional.tabloid.max_path_length +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory() -substitutions_1 = [] +act = python_act('db') -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -# test_script_1 -#--- -# -# import os -# import sys -# import time -# import re -# import subprocess -# from fdb import services -# -# MAX_FILE_NAME_SIZE=245 # last value with result = ok -# #MAX_FILE_NAME_SIZE=246 # 246 --> get error in gbak; 254 --> can not run nbackup -L -# #MAX_FILE_NAME_SIZE=254 -# -# os.environ["ISC_USER"] = user_name -# os.environ["ISC_PASSWORD"] = user_password -# -# db_conn.close() -# -# #-------------------------------------------- -# -# def flush_and_close(file_handle): -# # https://docs.python.org/2/library/os.html#os.fsync -# # If you're starting with a Python file object f, -# # first do f.flush(), and -# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. -# global os -# -# file_handle.flush() -# if file_handle.mode not in ('r', 'rb'): -# # otherwise: "OSError: [Errno 9] Bad file descriptor"! -# os.fsync(file_handle.fileno()) -# file_handle.close() -# -# #-------------------------------------------- -# -# def cleanup( f_names_list ): -# global os -# for i in range(len( f_names_list )): -# if os.path.isfile( f_names_list[i]): -# os.remove( f_names_list[i] ) -# -# #-------------------------------------------- -# -# -# fb_home = services.connect(host='localhost', user= user_name, password= user_password).get_home_directory() -# -# db_folder=context['temp_directory'] -# db_ext = '.fdb' -# bk_ext = '.fbk' -# db_longest_name = ('1234567890' * 1000) [ : MAX_FILE_NAME_SIZE - len( db_folder ) - len(db_ext) ] + db_ext -# bk_longest_name = db_longest_name[ : -len(db_ext )] + bk_ext -# db_longest_repl = db_longest_name[ : -len(db_ext )] + '.tmp' -# nb_longest_name = db_longest_name[ : -len(db_ext )] + '.nb0' -# -# # --------------------------- generate trace config and launch trace ------------------------------------ -# -# txt = ''' database=%%[\\\\\\\\/]%(db_longest_name)s -# { -# enabled = true -# time_threshold = 0 -# log_initfini = false -# log_connections = true -# # log_transactions = true -# log_errors = true -# log_sweep = true -# log_statement_finish = true -# } -# -# services -# { -# enabled = true -# log_errors = true -# log_initfini = false -# log_services = true -# exclude_filter = "%%((List Trace Sessions)|(Start Trace Session)|(Stop Trace Session))%%" -# # include_filter = "%%((backup database)|(restore database)|(repair database)|(validate database)|(incremental backup database)|(incremental restore database))%%" -# } -# ''' % locals() -# -# trc_cfg=open( os.path.join(context['temp_directory'],'trc_maxpath.cfg'), 'w') -# trc_cfg.write(txt) -# trc_cfg.close() -# -# ##################################################################### -# # Async. launch of trace session using FBSVCMGR action_trace_start: -# -# trc_log = open( os.path.join(context['temp_directory'],'trc_maxpath.log'), "w") -# trc_err = open( os.path.join(context['temp_directory'],'trc_maxpath.err'), "w") -# # Execute a child program in a new process, redirecting STDERR to the same target as of STDOUT: -# p_svcmgr = subprocess.Popen( [ "fbsvcmgr", "localhost:service_mgr", -# "action_trace_start", -# "trc_cfg", trc_cfg.name -# ], -# stdout=trc_log, -# stderr=trc_err -# ) -# -# # 08.01.2020. 
This delay is mandatory, otherwise file with trace session info can remain (sometimes) -# # empty when we will read it at the next step: -# time.sleep(2) -# -# -# # Determine active trace session ID (for further stop): -# ######################## -# trc_lst=open( os.path.join(context['temp_directory'],'trc_maxpath.lst'), 'w') -# subprocess.call(["fbsvcmgr", "localhost:service_mgr", -# "action_trace_list"], -# stdout=trc_lst, stderr=subprocess.STDOUT -# ) -# flush_and_close( trc_lst ) -# -# # Session ID: 5 -# # user: -# # date: 2015-08-27 15:24:14 -# # flags: active, trace -# -# sid_pattern = re.compile('Session\\s+ID[:]{0,1}\\s+\\d+', re.IGNORECASE) -# -# trc_ssn=0 -# with open( trc_lst.name,'r') as f: -# for line in f: -# if sid_pattern.search( line ) and len( line.split() ) == 3: -# trc_ssn = line.split()[2] -# break -# -# # Result: `trc_ssn` is ID of active trace session. -# # We have to terminate trace session that is running on server BEFORE we termitane process `p_svcmgr` -# -# -# # ---------------------- generating .SQL and create databse with long name ------------------------------ -# -# if os.path.isfile( db_folder + db_longest_name ): -# os.remove( db_folder + db_longest_name ) -# -# sql_ddl=''' -# -- set bail on; -# set list on; -# set names utf8; -# shell del %(db_folder)s%(db_longest_name)s 2>nul; -# create database 'localhost:%(db_folder)s%(db_longest_name)s' default character set utf8; -# create table test( s varchar(36), constraint test_unq unique(s) ); -# commit; -# insert into test(s) values( lpad('', 36, '0') ); -# commit; -# -# alter database set linger to 0; -# commit; -# insert into test select uuid_to_char(gen_uuid()) from rdb$types; -# commit; -# set term ^; -# create procedure sp_pause as -# declare s1 varchar(36); -# begin -# update test set s = s -# order by s rows 1 -# returning s into s1; -# execute statement ( 'insert into test(s) values(?)' ) ( s1 ) -# on external -# 'localhost:' || rdb$get_context('SYSTEM','DB_NAME') -# as user 'SYSDBA' password 'masterkey' role left(replace( uuid_to_char(gen_uuid()), '-', ''), 31); -# when any do -# begin -# -# end -# end -# ^ -# set term ;^ -# commit; -# -# alter database encrypt with dbcrypt key Red; -# commit; -# set transaction lock timeout 1; -# execute procedure sp_pause; -# show database; -# -# alter database decrypt; -# commit; -# set transaction lock timeout 1; -# execute procedure sp_pause; -# show database; -# -# shell %(fb_home)sfb_lock_print -c -d %(db_folder)s%(db_longest_name)s; -# -# ''' % locals() -# -# f_isql_cmd=open( os.path.join(context['temp_directory'],'tmp_maxpath_test.sql'), 'w', buffering = 0) -# f_isql_cmd.write(sql_ddl) -# flush_and_close( f_isql_cmd ) -# -# # ---------------------- generating .bat for execute actions with DB via Firebird utilities ------------------------- -# -# f_isql_name=f_isql_cmd.name -# tmp_bat_text=''' -# @echo off -# setlocal enabledelayedexpansion enableextensions -# set tmplog=%%~dpn0.log -# set tmperr=%%~dpn0.err -# -# del !tmplog! 2>nul -# del !tmperr! 2>nul -# -# set ISC_USER=%(user_name)s -# set ISC_PASSWORD=%(user_password)s -# -# set run_cmd=%(fb_home)sisql -q -i %(f_isql_name)s -# echo. >>!tmplog! -# echo !run_cmd! 1>>!tmplog! -# cmd /c !run_cmd! 1>>!tmplog! 2>!tmperr! -# @rem --------------------------------------------------------------------------------------- -# -# set run_cmd=%(fb_home)sisql localhost:%(db_folder)s%(db_longest_name)s -x -ch utf8 -# echo. >>!tmplog! -# echo !run_cmd! 1>>!tmplog! -# cmd /c !run_cmd! 1>>!tmplog! 2>!tmperr! 
-# -# @rem --------------------------------------------------------------------------------------- -# -# set run_cmd=%(fb_home)sgstat -r localhost:%(db_folder)s%(db_longest_name)s -# echo. >>!tmplog! -# echo !run_cmd! 1>>!tmplog! -# cmd /c !run_cmd! 1>>!tmplog! 2>!tmperr! -# @rem --------------------------------------------------------------------------------------- -# -# set run_cmd=%(fb_home)sgfix -shut full -force 0 localhost:%(db_folder)s%(db_longest_name)s -# echo. >>!tmplog! -# echo !run_cmd! 1>>!tmplog! -# cmd /c !run_cmd! 1>>!tmplog! 2>!tmperr! -# @rem --------------------------------------------------------------------------------------- -# -# set run_cmd=%(fb_home)sgfix -online localhost:%(db_folder)s%(db_longest_name)s -# echo. >>!tmplog! -# echo !run_cmd! 1>>!tmplog! -# cmd /c !run_cmd! 1>>!tmplog! 2>!tmperr! -# @rem --------------------------------------------------------------------------------------- -# -# set run_cmd=%(fb_home)sgfix -v -full localhost:%(db_folder)s%(db_longest_name)s -# echo. >>!tmplog! -# echo !run_cmd! 1>>!tmplog! -# cmd /c !run_cmd! 1>>!tmplog! 2>!tmperr! -# @rem --------------------------------------------------------------------------------------- -# -# set run_cmd=%(fb_home)sgfix -w async localhost:%(db_folder)s%(db_longest_name)s -# echo. >>!tmplog! -# echo !run_cmd! 1>>!tmplog! -# cmd /c !run_cmd! 1>>!tmplog! 2>!tmperr! -# @rem --------------------------------------------------------------------------------------- -# -# -# set run_cmd=%(fb_home)sgfix -sweep localhost:%(db_folder)s%(db_longest_name)s -# echo. >>!tmplog! -# echo !run_cmd! 1>>!tmplog! -# cmd /c !run_cmd! 1>>!tmplog! 2>!tmperr! -# @rem --------------------------------------------------------------------------------------- -# -# set run_cmd=%(fb_home)snbackup -L %(db_folder)s%(db_longest_name)s -# echo. >>!tmplog! -# echo !run_cmd! 1>>!tmplog! -# cmd /c !run_cmd! 1>>!tmplog! 2>!tmperr! -# @rem --------------------------------------------------------------------------------------- -# -# set run_cmd=%(fb_home)snbackup -N %(db_folder)s%(db_longest_name)s -# echo. >>!tmplog! -# echo !run_cmd! 1>>!tmplog! -# cmd /c !run_cmd! 1>>!tmplog! 2>!tmperr! -# @rem --------------------------------------------------------------------------------------- -# -# set run_cmd=%(fb_home)sfbsvcmgr localhost:service_mgr action_repair rpr_sweep_db dbname %(db_folder)s%(db_longest_name)s -# echo. >>!tmplog! -# echo !run_cmd! 1>>!tmplog! -# cmd /c !run_cmd! 1>>!tmplog! 2>!tmperr! -# -# @rem --------------------------------------------------------------------------------------- -# -# -# set run_cmd=%(fb_home)sfbsvcmgr localhost:service_mgr action_validate dbname %(db_folder)s%(db_longest_name)s -# echo. >>!tmplog! -# echo !run_cmd! 1>>!tmplog! -# cmd /c !run_cmd! 1>>!tmplog! 2>!tmperr! -# -# @rem --------------------------------------------------------------------------------------- -# -# set run_cmd=%(fb_home)sfbsvcmgr localhost:service_mgr action_properties dbname %(db_folder)s%(db_longest_name)s prp_sweep_interval 12345 -# echo. >>!tmplog! -# echo !run_cmd! 1>>!tmplog! -# cmd /c !run_cmd! 1>>!tmplog! 2>!tmperr! -# -# @rem --------------------------------------------------------------------------------------- -# -# set run_cmd=%(fb_home)sfbsvcmgr localhost:service_mgr action_backup dbname %(db_folder)s%(db_longest_name)s bkp_file %(db_folder)s%(bk_longest_name)s -# echo. >>!tmplog! -# echo !run_cmd! 1>>!tmplog! -# cmd /c !run_cmd! 1>>!tmplog! 2>!tmperr! 
-# -# if exist %(db_folder)s%(bk_longest_name)s ( -# set run_cmd=%(fb_home)sfbsvcmgr localhost:service_mgr action_restore res_replace bkp_file %(db_folder)s%(bk_longest_name)s dbname %(db_folder)s%(db_longest_repl)s -# echo. >>!tmplog! -# echo !run_cmd! 1>>!tmplog! -# cmd /c !run_cmd! 1>>!tmplog! 2>!tmperr! -# ) else ( -# ( -# echo Record-level backup via FB services API FAILED, target file: -# echo %(db_folder)s%(bk_longest_name)s -# echo -- does not exist. -# echo Subsequent restore from this file has been SKIPPED. -# ) >>!tmplog! -# ) -# -# del %(db_folder)s%(bk_longest_name)s 2>nul -# -# -# @rem --------------------------------------------------------------------------------------- -# -# @rem Drop TARGET file for ongoing INCREMENTAL BACKUP operation: -# @rem ~~~~~~~~~~~~~~~~ -# del %(db_folder)s%(nb_longest_name)s 2>nul -# -# set run_cmd=%(fb_home)sfbsvcmgr localhost:service_mgr action_nbak dbname %(db_folder)s%(db_longest_name)s nbk_file %(db_folder)s%(nb_longest_name)s nbk_level 0 -# echo. >>!tmplog! -# echo !run_cmd! 1>>!tmplog! -# cmd /c !run_cmd! 1>>!tmplog! 2>!tmperr! -# -# @rem --------------------------------------------------------------------------------------- -# -# if exist %(db_folder)s%(nb_longest_name)s ( -# -# @rem Drop TARGET file for ongoing INCREMENTAL RESTORE operation: -# @rem ~~~~~~~~~~~~~~~~ -# del %(db_folder)s%(db_longest_repl)s 2>nul -# -# set run_cmd=%(fb_home)sfbsvcmgr localhost:service_mgr action_nrest nbk_file %(db_folder)s%(nb_longest_name)s dbname %(db_folder)s%(db_longest_repl)s -# echo. >>!tmplog! -# echo !run_cmd! 1>>!tmplog! -# cmd /c !run_cmd! 1>>!tmplog! 2>!tmperr! -# ) else ( -# ( -# echo Physical-level backup via FB services API FAILED, target file: -# echo %(db_folder)s%(nb_longest_name)s -# echo -- does not exist. -# echo Subsequent incremental restore from this file has been SKIPPED. -# ) >>!tmplog! -# ) -# -# -# @rem --------------------------------------------------------------------------------------- -# -# set run_cmd=%(fb_home)sgbak -b -v localhost:%(db_folder)s%(db_longest_name)s %(db_folder)s%(bk_longest_name)s -# echo. >>!tmplog! -# echo !run_cmd! 1>>!tmplog! -# cmd /c !run_cmd! 1>>!tmplog! 2>!tmperr! -# @rem --------------------------------------------------------------------------------------- -# -# if exist %(db_folder)s%(bk_longest_name)s ( -# -# ( -# echo. -# echo Check backup file from which we will perform record-level restore: -# dir /-c %(db_folder)s%(bk_longest_name)s | findstr /i /c:"%(bk_ext)s" -# echo. -# ) 1>>!tmplog! -# -# set run_cmd=%(fb_home)sgbak -rep -v %(db_folder)s%(bk_longest_name)s localhost:%(db_folder)s%(db_longest_repl)s -# echo. >>!tmplog! -# echo !run_cmd! 1>>!tmplog! -# cmd /c !run_cmd! 1>>!tmplog! 2>!tmperr! -# @rem --------------------------------------------------------------------------------------- -# -# @rem set run_cmd=%(fb_home)sgbak -b localhost:%(db_folder)s%(db_longest_name)s stdout ^| %(fb_home)sgbak -rep -v stdin localhost:%(db_folder)s%(db_longest_repl)s -# @rem echo. >>!tmplog! -# @rem echo !run_cmd! 1>>!tmplog! -# @rem cmd /c !run_cmd! 1>>!tmplog! 2>!tmperr! -# @rem --------------------------------------------------------------------------------------- -# ) else ( -# -# ( -# echo Record-level backup using gbak utility FAILED, target file: -# echo %(db_folder)s%(bk_longest_name)s -# echo -- does not exist. -# echo Subsequent restore from this file has been SKIPPED. -# ) >>!tmplog! 
-# ) -# -# del %(db_folder)s%(nb_longest_name)s 2>nul -# del %(db_folder)s%(bk_longest_name)s 2>nul -# del %(db_folder)s%(db_longest_name)s 2>nul -# del %(db_folder)s%(db_longest_repl)s 2>nul -# -# ''' % dict(globals(), **locals()) -# -# -# # ---------------------- execute .bat ------------------------- -# -# f_tmp_bat=open( os.path.join(context['temp_directory'],'tmp_maxpath_test.bat'), 'w', buffering = 0) -# f_tmp_bat.write( tmp_bat_text ) -# flush_and_close( f_tmp_bat ) -# -# subprocess.call( [ f_tmp_bat.name ] ) -# -# # ::: NB ::: Here we have to be idle at least 2s (two seconds) otherwise trace log will -# # not contain some or all of messages about create DB, start Tx, ES, Tx and drop DB. -# # See also discussion with hvlad, 08.01.2020 15:16 -# # (subj: "action_trace_stop does not flush trace log (fully or partially)") -# time.sleep(2) -# -# # Stop trace session: -# ##################### -# -# trc_lst=open(trc_lst.name, "a") -# trc_lst.seek(0,2) -# -# subprocess.call( [ "fbsvcmgr", "localhost:service_mgr", -# "action_trace_stop", -# "trc_id",trc_ssn -# ], -# stdout=trc_lst, -# stderr=subprocess.STDOUT -# ) -# flush_and_close( trc_lst ) -# -# p_svcmgr.terminate() -# flush_and_close( trc_log ) -# flush_and_close( trc_err ) -# -# f_bat_log = '.'.join( ( os.path.splitext( f_tmp_bat.name )[0], 'log') ) -# f_bat_err = '.'.join( ( os.path.splitext( f_tmp_bat.name )[0], 'err') ) -# -# with open(f_bat_err,'r', buffering = 0) as f: -# for line in f: -# if line.split(): -# print( 'Unexpected STDERR: ' + line ) -# -# -# runtime_error_ptn = re.compile( '\\d{8}\\s+:\\s+.' ) -# services_patterns = { -# '1. DB_REPAIR' : re.compile('"Repair\\s+Database"', re.IGNORECASE) -# ,'2. DB_VALIDATE' : re.compile('"Validate\\s+Database"', re.IGNORECASE) -# ,'3. DB_PROPS' : re.compile('"Database\\s+Properties"', re.IGNORECASE) -# ,'4. DB_BACKUP' : re.compile('"Backup\\s+Database"', re.IGNORECASE) -# ,'5. DB_RESTORE' : re.compile('"Restore\\s+Database"', re.IGNORECASE) -# ,'6. DB_NBACKUP' : re.compile('"Incremental\\s+Backup\\s+Database"', re.IGNORECASE) -# ,'7. DB_NRESTORE' : re.compile('"Incremental\\s+Restore\\s+Database"', re.IGNORECASE) -# } -# -# found_patterns={} -# -# with open( trc_err.name,'r') as f: -# for line in f: -# if line.rstrip(): -# print( 'UNEXPECTED error in the trace: ' + line ) -# -# with open( trc_log.name,'r') as f: -# for line in f: -# if line.rstrip().split(): -# if 'FAILED START_SERVICE' in line: -# print( 'UNEXPECTED error with FB SERVICES: ' + line ) -# if runtime_error_ptn.search(line): -# if '335544528 :' in line: -# # ::: NOTE ::: -# # Change DB state to full shutdown (by gfix) produced in the trace log -# # failed_attach event that is issued by secondary attachment of gfix.exe: -# # FAILED ATTACH_DATABASE -# # -# # ERROR AT JProvider::attachDatabase -# # -# # 335544528 : database ... shutdown -# # This is actutal at least for Firebird 3.0.6, so we have to IGNORE this line. 
-# pass -# else: -# print( 'Expected error in the trace: ' + line ) -# -# for k,v in services_patterns.items(): -# if v.search(line): -# found_patterns[k] = 'FOUND in the trace log' -# -# -# for k,v in sorted( found_patterns.items() ): -# print( 'Pattern', k, ':', v) -# -# ################################################ -# -# # 02.04.2020, WindowsError: 32 The process cannot access the file because it is being used by another process -# ############# -# time.sleep(2) -# -# #cleanup: -# f_list = [ i.name for i in (trc_cfg, trc_log, trc_err, trc_lst, f_isql_cmd, f_tmp_bat) ] -# f_list += [ db_longest_name,bk_longest_name,db_longest_repl,nb_longest_name, f_bat_log, f_bat_err ] -# -# cleanup( f_list ) -# -#--- -act_1 = python_act('db_1', substitutions=substitutions_1) - -expected_stdout_1 = """ +expected_stdout = """ Expected error in the trace: 335544665 : violation of PRIMARY or UNIQUE KEY constraint "TEST_UNQ" on table "TEST" Expected error in the trace: 335545072 : Problematic key value is ("S" = '000000000000000000000000000000000000') Expected error in the trace: 335544665 : violation of PRIMARY or UNIQUE KEY constraint "TEST_UNQ" on table "TEST" @@ -594,10 +93,500 @@ expected_stdout_1 = """ Pattern 7. DB_NRESTORE : FOUND in the trace log """ +@pytest.mark.skip('FIXME: Not IMPLEMENTED') @pytest.mark.version('>=3.0') @pytest.mark.platform('Windows') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") +# Original python code for this test: +# ----------------------------------- +# +# import os +# import sys +# import time +# import re +# import subprocess +# from fdb import services +# +# MAX_FILE_NAME_SIZE=245 # last value with result = ok +# #MAX_FILE_NAME_SIZE=246 # 246 --> get error in gbak; 254 --> can not run nbackup -L +# #MAX_FILE_NAME_SIZE=254 +# +# os.environ["ISC_USER"] = user_name +# os.environ["ISC_PASSWORD"] = user_password +# +# db_conn.close() +# +# #-------------------------------------------- +# +# def flush_and_close(file_handle): +# # https://docs.python.org/2/library/os.html#os.fsync +# # If you're starting with a Python file object f, +# # first do f.flush(), and +# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. +# global os +# +# file_handle.flush() +# if file_handle.mode not in ('r', 'rb'): +# # otherwise: "OSError: [Errno 9] Bad file descriptor"! 
+# os.fsync(file_handle.fileno()) +# file_handle.close() +# +# #-------------------------------------------- +# +# def cleanup( f_names_list ): +# global os +# for i in range(len( f_names_list )): +# if os.path.isfile( f_names_list[i]): +# os.remove( f_names_list[i] ) +# +# #-------------------------------------------- +# +# +# fb_home = services.connect(host='localhost', user= user_name, password= user_password).get_home_directory() +# +# db_folder=context['temp_directory'] +# db_ext = '.fdb' +# bk_ext = '.fbk' +# db_longest_name = ('1234567890' * 1000) [ : MAX_FILE_NAME_SIZE - len( db_folder ) - len(db_ext) ] + db_ext +# bk_longest_name = db_longest_name[ : -len(db_ext )] + bk_ext +# db_longest_repl = db_longest_name[ : -len(db_ext )] + '.tmp' +# nb_longest_name = db_longest_name[ : -len(db_ext )] + '.nb0' +# +# # --------------------------- generate trace config and launch trace ------------------------------------ +# +# txt = ''' database=%%[\\\\\\\\/]%(db_longest_name)s +# { +# enabled = true +# time_threshold = 0 +# log_initfini = false +# log_connections = true +# # log_transactions = true +# log_errors = true +# log_sweep = true +# log_statement_finish = true +# } +# +# services +# { +# enabled = true +# log_errors = true +# log_initfini = false +# log_services = true +# exclude_filter = "%%((List Trace Sessions)|(Start Trace Session)|(Stop Trace Session))%%" +# # include_filter = "%%((backup database)|(restore database)|(repair database)|(validate database)|(incremental backup database)|(incremental restore database))%%" +# } +# ''' % locals() +# +# trc_cfg=open( os.path.join(context['temp_directory'],'trc_maxpath.cfg'), 'w') +# trc_cfg.write(txt) +# trc_cfg.close() +# +# ##################################################################### +# # Async. launch of trace session using FBSVCMGR action_trace_start: +# +# trc_log = open( os.path.join(context['temp_directory'],'trc_maxpath.log'), "w") +# trc_err = open( os.path.join(context['temp_directory'],'trc_maxpath.err'), "w") +# # Execute a child program in a new process, redirecting STDERR to the same target as of STDOUT: +# p_svcmgr = subprocess.Popen( [ "fbsvcmgr", "localhost:service_mgr", +# "action_trace_start", +# "trc_cfg", trc_cfg.name +# ], +# stdout=trc_log, +# stderr=trc_err +# ) +# +# # 08.01.2020. This delay is mandatory, otherwise file with trace session info can remain (sometimes) +# # empty when we will read it at the next step: +# time.sleep(2) +# +# +# # Determine active trace session ID (for further stop): +# ######################## +# trc_lst=open( os.path.join(context['temp_directory'],'trc_maxpath.lst'), 'w') +# subprocess.call(["fbsvcmgr", "localhost:service_mgr", +# "action_trace_list"], +# stdout=trc_lst, stderr=subprocess.STDOUT +# ) +# flush_and_close( trc_lst ) +# +# # Session ID: 5 +# # user: +# # date: 2015-08-27 15:24:14 +# # flags: active, trace +# +# sid_pattern = re.compile('Session\\s+ID[:]{0,1}\\s+\\d+', re.IGNORECASE) +# +# trc_ssn=0 +# with open( trc_lst.name,'r') as f: +# for line in f: +# if sid_pattern.search( line ) and len( line.split() ) == 3: +# trc_ssn = line.split()[2] +# break +# +# # Result: `trc_ssn` is ID of active trace session. 
+# # We have to terminate trace session that is running on server BEFORE we termitane process `p_svcmgr` +# +# +# # ---------------------- generating .SQL and create databse with long name ------------------------------ +# +# if os.path.isfile( db_folder + db_longest_name ): +# os.remove( db_folder + db_longest_name ) +# +# sql_ddl=''' +# -- set bail on; +# set list on; +# set names utf8; +# shell del %(db_folder)s%(db_longest_name)s 2>nul; +# create database 'localhost:%(db_folder)s%(db_longest_name)s' default character set utf8; +# create table test( s varchar(36), constraint test_unq unique(s) ); +# commit; +# insert into test(s) values( lpad('', 36, '0') ); +# commit; +# +# alter database set linger to 0; +# commit; +# insert into test select uuid_to_char(gen_uuid()) from rdb$types; +# commit; +# set term ^; +# create procedure sp_pause as +# declare s1 varchar(36); +# begin +# update test set s = s +# order by s rows 1 +# returning s into s1; +# execute statement ( 'insert into test(s) values(?)' ) ( s1 ) +# on external +# 'localhost:' || rdb$get_context('SYSTEM','DB_NAME') +# as user 'SYSDBA' password 'masterkey' role left(replace( uuid_to_char(gen_uuid()), '-', ''), 31); +# when any do +# begin +# +# end +# end +# ^ +# set term ;^ +# commit; +# +# alter database encrypt with dbcrypt key Red; +# commit; +# set transaction lock timeout 1; +# execute procedure sp_pause; +# show database; +# +# alter database decrypt; +# commit; +# set transaction lock timeout 1; +# execute procedure sp_pause; +# show database; +# +# shell %(fb_home)sfb_lock_print -c -d %(db_folder)s%(db_longest_name)s; +# +# ''' % locals() +# +# f_isql_cmd=open( os.path.join(context['temp_directory'],'tmp_maxpath_test.sql'), 'w', buffering = 0) +# f_isql_cmd.write(sql_ddl) +# flush_and_close( f_isql_cmd ) +# +# # ---------------------- generating .bat for execute actions with DB via Firebird utilities ------------------------- +# +# f_isql_name=f_isql_cmd.name +# tmp_bat_text=''' +# @echo off +# setlocal enabledelayedexpansion enableextensions +# set tmplog=%%~dpn0.log +# set tmperr=%%~dpn0.err +# +# del !tmplog! 2>nul +# del !tmperr! 2>nul +# +# set ISC_USER=%(user_name)s +# set ISC_PASSWORD=%(user_password)s +# +# set run_cmd=%(fb_home)sisql -q -i %(f_isql_name)s +# echo. >>!tmplog! +# echo !run_cmd! 1>>!tmplog! +# cmd /c !run_cmd! 1>>!tmplog! 2>!tmperr! +# @rem --------------------------------------------------------------------------------------- +# +# set run_cmd=%(fb_home)sisql localhost:%(db_folder)s%(db_longest_name)s -x -ch utf8 +# echo. >>!tmplog! +# echo !run_cmd! 1>>!tmplog! +# cmd /c !run_cmd! 1>>!tmplog! 2>!tmperr! +# +# @rem --------------------------------------------------------------------------------------- +# +# set run_cmd=%(fb_home)sgstat -r localhost:%(db_folder)s%(db_longest_name)s +# echo. >>!tmplog! +# echo !run_cmd! 1>>!tmplog! +# cmd /c !run_cmd! 1>>!tmplog! 2>!tmperr! +# @rem --------------------------------------------------------------------------------------- +# +# set run_cmd=%(fb_home)sgfix -shut full -force 0 localhost:%(db_folder)s%(db_longest_name)s +# echo. >>!tmplog! +# echo !run_cmd! 1>>!tmplog! +# cmd /c !run_cmd! 1>>!tmplog! 2>!tmperr! +# @rem --------------------------------------------------------------------------------------- +# +# set run_cmd=%(fb_home)sgfix -online localhost:%(db_folder)s%(db_longest_name)s +# echo. >>!tmplog! +# echo !run_cmd! 1>>!tmplog! +# cmd /c !run_cmd! 1>>!tmplog! 2>!tmperr! 
+# @rem --------------------------------------------------------------------------------------- +# +# set run_cmd=%(fb_home)sgfix -v -full localhost:%(db_folder)s%(db_longest_name)s +# echo. >>!tmplog! +# echo !run_cmd! 1>>!tmplog! +# cmd /c !run_cmd! 1>>!tmplog! 2>!tmperr! +# @rem --------------------------------------------------------------------------------------- +# +# set run_cmd=%(fb_home)sgfix -w async localhost:%(db_folder)s%(db_longest_name)s +# echo. >>!tmplog! +# echo !run_cmd! 1>>!tmplog! +# cmd /c !run_cmd! 1>>!tmplog! 2>!tmperr! +# @rem --------------------------------------------------------------------------------------- +# +# +# set run_cmd=%(fb_home)sgfix -sweep localhost:%(db_folder)s%(db_longest_name)s +# echo. >>!tmplog! +# echo !run_cmd! 1>>!tmplog! +# cmd /c !run_cmd! 1>>!tmplog! 2>!tmperr! +# @rem --------------------------------------------------------------------------------------- +# +# set run_cmd=%(fb_home)snbackup -L %(db_folder)s%(db_longest_name)s +# echo. >>!tmplog! +# echo !run_cmd! 1>>!tmplog! +# cmd /c !run_cmd! 1>>!tmplog! 2>!tmperr! +# @rem --------------------------------------------------------------------------------------- +# +# set run_cmd=%(fb_home)snbackup -N %(db_folder)s%(db_longest_name)s +# echo. >>!tmplog! +# echo !run_cmd! 1>>!tmplog! +# cmd /c !run_cmd! 1>>!tmplog! 2>!tmperr! +# @rem --------------------------------------------------------------------------------------- +# +# set run_cmd=%(fb_home)sfbsvcmgr localhost:service_mgr action_repair rpr_sweep_db dbname %(db_folder)s%(db_longest_name)s +# echo. >>!tmplog! +# echo !run_cmd! 1>>!tmplog! +# cmd /c !run_cmd! 1>>!tmplog! 2>!tmperr! +# +# @rem --------------------------------------------------------------------------------------- +# +# +# set run_cmd=%(fb_home)sfbsvcmgr localhost:service_mgr action_validate dbname %(db_folder)s%(db_longest_name)s +# echo. >>!tmplog! +# echo !run_cmd! 1>>!tmplog! +# cmd /c !run_cmd! 1>>!tmplog! 2>!tmperr! +# +# @rem --------------------------------------------------------------------------------------- +# +# set run_cmd=%(fb_home)sfbsvcmgr localhost:service_mgr action_properties dbname %(db_folder)s%(db_longest_name)s prp_sweep_interval 12345 +# echo. >>!tmplog! +# echo !run_cmd! 1>>!tmplog! +# cmd /c !run_cmd! 1>>!tmplog! 2>!tmperr! +# +# @rem --------------------------------------------------------------------------------------- +# +# set run_cmd=%(fb_home)sfbsvcmgr localhost:service_mgr action_backup dbname %(db_folder)s%(db_longest_name)s bkp_file %(db_folder)s%(bk_longest_name)s +# echo. >>!tmplog! +# echo !run_cmd! 1>>!tmplog! +# cmd /c !run_cmd! 1>>!tmplog! 2>!tmperr! +# +# if exist %(db_folder)s%(bk_longest_name)s ( +# set run_cmd=%(fb_home)sfbsvcmgr localhost:service_mgr action_restore res_replace bkp_file %(db_folder)s%(bk_longest_name)s dbname %(db_folder)s%(db_longest_repl)s +# echo. >>!tmplog! +# echo !run_cmd! 1>>!tmplog! +# cmd /c !run_cmd! 1>>!tmplog! 2>!tmperr! +# ) else ( +# ( +# echo Record-level backup via FB services API FAILED, target file: +# echo %(db_folder)s%(bk_longest_name)s +# echo -- does not exist. +# echo Subsequent restore from this file has been SKIPPED. +# ) >>!tmplog! 
+# ) +# +# del %(db_folder)s%(bk_longest_name)s 2>nul +# +# +# @rem --------------------------------------------------------------------------------------- +# +# @rem Drop TARGET file for ongoing INCREMENTAL BACKUP operation: +# @rem ~~~~~~~~~~~~~~~~ +# del %(db_folder)s%(nb_longest_name)s 2>nul +# +# set run_cmd=%(fb_home)sfbsvcmgr localhost:service_mgr action_nbak dbname %(db_folder)s%(db_longest_name)s nbk_file %(db_folder)s%(nb_longest_name)s nbk_level 0 +# echo. >>!tmplog! +# echo !run_cmd! 1>>!tmplog! +# cmd /c !run_cmd! 1>>!tmplog! 2>!tmperr! +# +# @rem --------------------------------------------------------------------------------------- +# +# if exist %(db_folder)s%(nb_longest_name)s ( +# +# @rem Drop TARGET file for ongoing INCREMENTAL RESTORE operation: +# @rem ~~~~~~~~~~~~~~~~ +# del %(db_folder)s%(db_longest_repl)s 2>nul +# +# set run_cmd=%(fb_home)sfbsvcmgr localhost:service_mgr action_nrest nbk_file %(db_folder)s%(nb_longest_name)s dbname %(db_folder)s%(db_longest_repl)s +# echo. >>!tmplog! +# echo !run_cmd! 1>>!tmplog! +# cmd /c !run_cmd! 1>>!tmplog! 2>!tmperr! +# ) else ( +# ( +# echo Physical-level backup via FB services API FAILED, target file: +# echo %(db_folder)s%(nb_longest_name)s +# echo -- does not exist. +# echo Subsequent incremental restore from this file has been SKIPPED. +# ) >>!tmplog! +# ) +# +# +# @rem --------------------------------------------------------------------------------------- +# +# set run_cmd=%(fb_home)sgbak -b -v localhost:%(db_folder)s%(db_longest_name)s %(db_folder)s%(bk_longest_name)s +# echo. >>!tmplog! +# echo !run_cmd! 1>>!tmplog! +# cmd /c !run_cmd! 1>>!tmplog! 2>!tmperr! +# @rem --------------------------------------------------------------------------------------- +# +# if exist %(db_folder)s%(bk_longest_name)s ( +# +# ( +# echo. +# echo Check backup file from which we will perform record-level restore: +# dir /-c %(db_folder)s%(bk_longest_name)s | findstr /i /c:"%(bk_ext)s" +# echo. +# ) 1>>!tmplog! +# +# set run_cmd=%(fb_home)sgbak -rep -v %(db_folder)s%(bk_longest_name)s localhost:%(db_folder)s%(db_longest_repl)s +# echo. >>!tmplog! +# echo !run_cmd! 1>>!tmplog! +# cmd /c !run_cmd! 1>>!tmplog! 2>!tmperr! +# @rem --------------------------------------------------------------------------------------- +# +# @rem set run_cmd=%(fb_home)sgbak -b localhost:%(db_folder)s%(db_longest_name)s stdout ^| %(fb_home)sgbak -rep -v stdin localhost:%(db_folder)s%(db_longest_repl)s +# @rem echo. >>!tmplog! +# @rem echo !run_cmd! 1>>!tmplog! +# @rem cmd /c !run_cmd! 1>>!tmplog! 2>!tmperr! +# @rem --------------------------------------------------------------------------------------- +# ) else ( +# +# ( +# echo Record-level backup using gbak utility FAILED, target file: +# echo %(db_folder)s%(bk_longest_name)s +# echo -- does not exist. +# echo Subsequent restore from this file has been SKIPPED. +# ) >>!tmplog! 
+# ) +# +# del %(db_folder)s%(nb_longest_name)s 2>nul +# del %(db_folder)s%(bk_longest_name)s 2>nul +# del %(db_folder)s%(db_longest_name)s 2>nul +# del %(db_folder)s%(db_longest_repl)s 2>nul +# +# ''' % dict(globals(), **locals()) +# +# +# # ---------------------- execute .bat ------------------------- +# +# f_tmp_bat=open( os.path.join(context['temp_directory'],'tmp_maxpath_test.bat'), 'w', buffering = 0) +# f_tmp_bat.write( tmp_bat_text ) +# flush_and_close( f_tmp_bat ) +# +# subprocess.call( [ f_tmp_bat.name ] ) +# +# # ::: NB ::: Here we have to be idle at least 2s (two seconds) otherwise trace log will +# # not contain some or all of messages about create DB, start Tx, ES, Tx and drop DB. +# # See also discussion with hvlad, 08.01.2020 15:16 +# # (subj: "action_trace_stop does not flush trace log (fully or partially)") +# time.sleep(2) +# +# # Stop trace session: +# ##################### +# +# trc_lst=open(trc_lst.name, "a") +# trc_lst.seek(0,2) +# +# subprocess.call( [ "fbsvcmgr", "localhost:service_mgr", +# "action_trace_stop", +# "trc_id",trc_ssn +# ], +# stdout=trc_lst, +# stderr=subprocess.STDOUT +# ) +# flush_and_close( trc_lst ) +# +# p_svcmgr.terminate() +# flush_and_close( trc_log ) +# flush_and_close( trc_err ) +# +# f_bat_log = '.'.join( ( os.path.splitext( f_tmp_bat.name )[0], 'log') ) +# f_bat_err = '.'.join( ( os.path.splitext( f_tmp_bat.name )[0], 'err') ) +# +# with open(f_bat_err,'r', buffering = 0) as f: +# for line in f: +# if line.split(): +# print( 'Unexpected STDERR: ' + line ) +# +# +# runtime_error_ptn = re.compile( '\\d{8}\\s+:\\s+.' ) +# services_patterns = { +# '1. DB_REPAIR' : re.compile('"Repair\\s+Database"', re.IGNORECASE) +# ,'2. DB_VALIDATE' : re.compile('"Validate\\s+Database"', re.IGNORECASE) +# ,'3. DB_PROPS' : re.compile('"Database\\s+Properties"', re.IGNORECASE) +# ,'4. DB_BACKUP' : re.compile('"Backup\\s+Database"', re.IGNORECASE) +# ,'5. DB_RESTORE' : re.compile('"Restore\\s+Database"', re.IGNORECASE) +# ,'6. DB_NBACKUP' : re.compile('"Incremental\\s+Backup\\s+Database"', re.IGNORECASE) +# ,'7. DB_NRESTORE' : re.compile('"Incremental\\s+Restore\\s+Database"', re.IGNORECASE) +# } +# +# found_patterns={} +# +# with open( trc_err.name,'r') as f: +# for line in f: +# if line.rstrip(): +# print( 'UNEXPECTED error in the trace: ' + line ) +# +# with open( trc_log.name,'r') as f: +# for line in f: +# if line.rstrip().split(): +# if 'FAILED START_SERVICE' in line: +# print( 'UNEXPECTED error with FB SERVICES: ' + line ) +# if runtime_error_ptn.search(line): +# if '335544528 :' in line: +# # ::: NOTE ::: +# # Change DB state to full shutdown (by gfix) produced in the trace log +# # failed_attach event that is issued by secondary attachment of gfix.exe: +# # FAILED ATTACH_DATABASE +# # +# # ERROR AT JProvider::attachDatabase +# # +# # 335544528 : database ... shutdown +# # This is actutal at least for Firebird 3.0.6, so we have to IGNORE this line. 
+# pass +# else: +# print( 'Expected error in the trace: ' + line ) +# +# for k,v in services_patterns.items(): +# if v.search(line): +# found_patterns[k] = 'FOUND in the trace log' +# +# +# for k,v in sorted( found_patterns.items() ): +# print( 'Pattern', k, ':', v) +# +# ################################################ +# +# # 02.04.2020, WindowsError: 32 The process cannot access the file because it is being used by another process +# ############# +# time.sleep(2) +# +# #cleanup: +# f_list = [ i.name for i in (trc_cfg, trc_log, trc_err, trc_lst, f_isql_cmd, f_tmp_bat) ] +# f_list += [ db_longest_name,bk_longest_name,db_longest_repl,nb_longest_name, f_bat_log, f_bat_err ] +# +# cleanup( f_list ) +# +# ----------------------------------- diff --git a/tests/functional/tabloid/test_no_dups_in_call_stack.py b/tests/functional/tabloid/test_no_dups_in_call_stack.py index a5842070..27980556 100644 --- a/tests/functional/tabloid/test_no_dups_in_call_stack.py +++ b/tests/functional/tabloid/test_no_dups_in_call_stack.py @@ -1,22 +1,18 @@ #coding:utf-8 -# -# id: functional.tabloid.no_dups_in_call_stack -# title: Avoid info duplication when statements in call stack attached to different transactions (for example: monitoring snapshot is created in autonomous transaction) -# decription: Fixed in rev. 59971 for 3.0; rev. 59972 for 2.5 (backporting) -- 12-aug-2014 -# tracker_id: -# min_versions: ['2.5.4'] -# versions: 2.5.4 -# qmid: None + +""" +ID: tabloid.no-dups-in-call-stack +TITLE: Avoid info duplication when statements in call stack attached to different + transactions (for example: monitoring snapshot is created in autonomous transaction) +DESCRIPTION: + Fixed in rev. 59971 for 3.0; rev. 59972 for 2.5 (backporting) -- 12-aug-2014 +FBTEST: functional.tabloid.no_dups_in_call_stack +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5.4 -# resources: None - -substitutions_1 = [] - -init_script_1 = """ +init_script = """ -- sql.ru/forum/actualutils.aspx?action=gotomsg&tid=1109867&msg=16438071 -- run: fbt_run -b functional.tabloid.no-dups-in-call-stack -o localhost/ set term ^; @@ -154,11 +150,12 @@ init_script_1 = """ ^ set term ;^ commit; - """ -db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1) +""" -test_script_1 = """ +db = db_factory(page_size=4096, init=init_script) + +test_script = """ delete from dbg_stack; commit; execute procedure p_01; @@ -169,9 +166,9 @@ test_script_1 = """ from dbg_stack s; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ WHOAMI CALL_LEVEL OBJECT_NAME OBJECT_TYPE SOURCE_LINE ====== ============ =============== =========== ============ p_04 1 P_01 5 6 @@ -181,9 +178,8 @@ p_04 4 P_04 5 8 p_04 5 DBG_GET_STACK 5 13 """ -@pytest.mark.version('>=2.5.4') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/tabloid/test_oltp_emul_30_compiler_check.py b/tests/functional/tabloid/test_oltp_emul_30_compiler_check.py index e4cf2579..72c61bee 100644 --- a/tests/functional/tabloid/test_oltp_emul_30_compiler_check.py +++ b/tests/functional/tabloid/test_oltp_emul_30_compiler_check.py @@ -1,26 +1,20 @@ 
#coding:utf-8 -# -# id: functional.tabloid.oltp_emul_30_compiler_check -# title: Compiler check. Test ability to compile source code of OLTP-EMUL test. -# decription: -# tracker_id: -# min_versions: ['3.0'] -# versions: 3.0 -# qmid: None + +""" +ID: tabloid.oltp-emul-30-compiler-check +TITLE: Compiler check. Test ability to compile source code of OLTP-EMUL test. +DESCRIPTION: +FBTEST: functional.tabloid.oltp_emul_30_compiler_check +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +substitutions = [('start at .*', 'start at'), ('finish at .*', 'finish at')] -substitutions_1 = [('start at .*', 'start at'), ('finish at .*', 'finish at')] +db = db_factory() -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ -- This test was created only for daily checking of FB compiler: there were several times -- in the past when DDL of OLTP-EMUL test could not be compiled because of regressions. -- Discuss with dimitr: letter for 11-apr-2016 15:34. @@ -17527,9 +17521,9 @@ set list off; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=substitutions) -expected_stdout_1 = """ +expected_stdout = """ MSG oltp30_DDL.sql start at 2016-04-11 15:50:59.5620 MSG oltp30_DDL.sql finish at 2016-04-11 15:51:01.0780 MSG oltp30_sp.sql start at 2016-04-11 15:51:01.0780 @@ -17551,8 +17545,7 @@ expected_stdout_1 = """ """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/tabloid/test_optimizer_index_navigation.py b/tests/functional/tabloid/test_optimizer_index_navigation.py index 3f4ec48b..995f8942 100644 --- a/tests/functional/tabloid/test_optimizer_index_navigation.py +++ b/tests/functional/tabloid/test_optimizer_index_navigation.py @@ -1,31 +1,22 @@ #coding:utf-8 -# -# id: functional.tabloid.optimizer_index_navigation -# title: Check that optimizer takes in account presense of index and does navigation instead of external sort. -# decription: -# Verified commit: https://github.com/FirebirdSQL/firebird/actions/runs/176006556 -# Source message to dimitr: 20.07.2020 20:00. -# -# Checked on 3.0.7.33340 and 4.0.0.2114 (intermediate build with timestamp 20.07.2020 17:45) -# -# tracker_id: -# min_versions: ['3.0'] -# versions: 3.0 -# qmid: None + +""" +ID: tabloid.optimizer-index-navigation +TITLE: Check that optimizer takes into account the presence of an index and does navigation instead of external sort. +DESCRIPTION: + Verified commit: https://github.com/FirebirdSQL/firebird/actions/runs/176006556 + Source message to dimitr: 20.07.2020 20:00.
+ + Checked on 3.0.7.33340 and 4.0.0.2114 (intermediate build with timestamp 20.07.2020 17:45) +FBTEST: functional.tabloid.optimizer_index_navigation +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ recreate table t(x int); create index t_x_asc on t(x); create descending index t_x_dec on t(x); @@ -45,9 +36,9 @@ test_script_1 = """ select * from t as t4 where x<=0.5 order by x desc; -- here PLAN ORDER is much efficient than bitmap + PLAN SORT """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ PLAN (T1 INDEX (T_X_ASC)) PLAN (T2 ORDER T_X_ASC) @@ -58,8 +49,7 @@ expected_stdout_1 = """ """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/tabloid/test_ora_4475843.py b/tests/functional/tabloid/test_ora_4475843.py index 4ce07a10..7883dc97 100644 --- a/tests/functional/tabloid/test_ora_4475843.py +++ b/tests/functional/tabloid/test_ora_4475843.py @@ -1,34 +1,25 @@ #coding:utf-8 -# -# id: functional.tabloid.ora_4475843 -# title: Wrong (empty) result of query in Oracle 19 -# decription: -# Original issue: -# https://community.oracle.com/tech/developers/discussion/4475843/wrong-result-on-a-simple-sql-statement -# According to message, Oracle return no rows for the query that follows. -# Could not check because of problems with install Oracle XE. -# -# Checked on 4.0.0.2416, 3.0.8.33445 - results OK (one row). -# Checked also on SQL Server XE and Postgres 13 -# -# tracker_id: -# min_versions: ['3.0'] -# versions: 3.0 -# qmid: None + +""" +ID: tabloid.ora-4475843 +TITLE: Wrong (empty) result of query in Oracle 19 +DESCRIPTION: + Original issue: + https://community.oracle.com/tech/developers/discussion/4475843/wrong-result-on-a-simple-sql-statement + According to the message, Oracle returns no rows for the query that follows. + Could not check because of problems with installing Oracle XE. + + Checked on 4.0.0.2416, 3.0.8.33445 - results OK (one row).
+ Checked also on SQL Server XE and Postgres 13 +FBTEST: functional.tabloid.ora_4475843 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ recreate table test( p_c_id int, v_id int, @@ -79,9 +70,9 @@ test_script_1 = """ """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ SUPPLIER 1363 R_M_S_ID 1363 M_C_ID 10 @@ -90,8 +81,7 @@ expected_stdout_1 = """ """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/tabloid/test_pg_13918.py b/tests/functional/tabloid/test_pg_13918.py index 2f8e5c2b..2f8485e1 100644 --- a/tests/functional/tabloid/test_pg_13918.py +++ b/tests/functional/tabloid/test_pg_13918.py @@ -1,38 +1,29 @@ #coding:utf-8 -# -# id: functional.tabloid.pg_13918 -# title: Some _TRIVIAL_ queries allow to specify HAVING without group by, and for such case one may to get record from EMPTY source rowset(!) -# decription: -# Original issue: -# http://www.postgresql.org/message-id/flat/CAKFQuwYSa5Dzvw8KdxhiUAY+fjbO4DRQ-sDqQXPVexvVoTkvQA@mail.gmail.com#CAKFQuwYSa5Dzvw8KdxhiUAY+fjbO4DRQ-sDqQXPVexvVoTkvQA@mail.gmail.com -# -# See also http://www.postgresql.org/docs/9.5/interactive/sql-select.html -# === -# The presence of HAVING turns a query into a grouped query even if there is no GROUP BY clause <...> -# Such a query will emit a single row if the HAVING condition is true, zero rows if it is not true. -# === -# -# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: tabloid.pg-13918 +TITLE: Some _TRIVIAL_ queries allow specifying HAVING without GROUP BY, and in such + case one may get a record from an EMPTY source rowset(!) +DESCRIPTION: + Original issue: + http://www.postgresql.org/message-id/flat/CAKFQuwYSa5Dzvw8KdxhiUAY+fjbO4DRQ-sDqQXPVexvVoTkvQA@mail.gmail.com#CAKFQuwYSa5Dzvw8KdxhiUAY+fjbO4DRQ-sDqQXPVexvVoTkvQA@mail.gmail.com + + See also http://www.postgresql.org/docs/9.5/interactive/sql-select.html + === + The presence of HAVING turns a query into a grouped query even if there is no GROUP BY clause <...> + Such a query will emit a single row if the HAVING condition is true, zero rows if it is not true. + === +FBTEST: functional.tabloid.pg_13918 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set list on; - set count on; + set count on; -- This compiles OK and, moreover, outputs one record: select 123456789 as "Yeah!" from rdb$database where 1=0 having 1=1; select 987654321 as "Waw!!"
from rdb$database where 1=0 having 1=2; @@ -41,17 +32,16 @@ test_script_1 = """ -- select i from (select 1 i from rdb$database) where i<0 having 1=0; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ Yeah! 123456789 Records affected: 1 Records affected: 0 """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/tabloid/test_pg_14105.py b/tests/functional/tabloid/test_pg_14105.py index 776780cf..724889f6 100644 --- a/tests/functional/tabloid/test_pg_14105.py +++ b/tests/functional/tabloid/test_pg_14105.py @@ -1,51 +1,42 @@ #coding:utf-8 -# -# id: functional.tabloid.pg_14105 -# title: Check ability to compile query with combination of full and right join. Taken from PG bug library. -# decription: -# Original issue ( http://www.postgresql.org/message-id/20160420194758.22924.80319@wrigleys.postgresql.org ): -# === -# create table a as (select 1 as id); -# select * -# from (( -# a as a1 -# full join (select 1 as id) as tt -# on (a1.id = tt.id) -# ) -# right join (select 1 as id) as tt2 -# on (coalesce(tt.id) = tt2.id) -# ) -# ; -# ERROR: XX000: failed to build any 2-way joins -# LOCATION: standard_join_search, allpaths.c:1832 -# -# -# It works on PostgreSQL 9.2.13., returning: -# id | id | id -# ----+----+---- -# 1 | 1 | 1 -# (1 row) -# === -# PS. NOTE on strange form of COALESCE: "coalesce(tt.id)" - it has only single argument. -# -# tracker_id: -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: None + +""" +ID: tabloid.pg-14105 +TITLE: Check ability to compile query with combination of full and right join. Taken from PG bug library. +DESCRIPTION: + Original issue ( http://www.postgresql.org/message-id/20160420194758.22924.80319@wrigleys.postgresql.org ): + === + create table a as (select 1 as id); + select * + from (( + a as a1 + full join (select 1 as id) as tt + on (a1.id = tt.id) + ) + right join (select 1 as id) as tt2 + on (coalesce(tt.id) = tt2.id) + ) + ; + ERROR: XX000: failed to build any 2-way joins + LOCATION: standard_join_search, allpaths.c:1832 + + + It works on PostgreSQL 9.2.13., returning: + id | id | id + ----+----+---- + 1 | 1 | 1 + (1 row) + === + PS. NOTE on strange form of COALESCE: "coalesce(tt.id)" - it has only single argument. 
+FBTEST: functional.tabloid.pg_14105 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ recreate table a(id int); commit; @@ -72,17 +63,16 @@ test_script_1 = """ ; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ A1_ID 1 TT_ID 1 TT2_ID 1 """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/tabloid/test_pg_14421.py b/tests/functional/tabloid/test_pg_14421.py index b0258bfa..bdc00ae0 100644 --- a/tests/functional/tabloid/test_pg_14421.py +++ b/tests/functional/tabloid/test_pg_14421.py @@ -1,29 +1,20 @@ #coding:utf-8 -# -# id: functional.tabloid.pg_14421 -# title: UPDATE/DETERE RETURNING should issue only one row when applying to table with self-referencing FK -# decription: -# Original issue: -# https://www.postgresql.org/message-id/cakfquwyrb5iyfqs6o9mmtbxp96l40bxpnfgosj8xm88ag%2b5_aa%40mail.gmail.com -# -# tracker_id: -# min_versions: ['3.0'] -# versions: 3.0 -# qmid: None + +""" +ID: tabloid.pg-14421 +TITLE: UPDATE/DELETE RETURNING should issue only one row when applying to table with self-referencing FK +DESCRIPTION: + Original issue: + https://www.postgresql.org/message-id/cakfquwyrb5iyfqs6o9mmtbxp96l40bxpnfgosj8xm88ag%2b5_aa%40mail.gmail.com +FBTEST: functional.tabloid.pg_14421 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set list on; recreate table test( id int primary key, @@ -52,9 +43,9 @@ test_script_1 = """ rollback; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ OLD_ID 2 @@ -64,8 +55,7 @@ expected_stdout_1 = """ """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/tabloid/test_remote_access_to_security_db.py b/tests/functional/tabloid/test_remote_access_to_security_db.py index 584e3b2d..e7738166 100644 --- a/tests/functional/tabloid/test_remote_access_to_security_db.py +++ b/tests/functional/tabloid/test_remote_access_to_security_db.py @@ -1,56 +1,46 @@ #coding:utf-8 -# -# id: functional.tabloid.remote_access_to_security_db -# title: Verify ability to make REMOTE connect to security.db -# decription: -# This test verifies only ability to make REMOTE connect to security.db -# Line "RemoteAccess = false" in file $FB_HOME/databases.conf should be COMMENTED.
-# On the host that run tests this must is done BEFORE launch all testsby calling -# batch file "upd_databases_conf.bat" (see \\FirebirdQA\\qa3x.bat; qa4x.bat). -# Checked 28.06.2016 on 4.0.0.267 -# -# tracker_id: -# min_versions: ['3.0'] -# versions: 3.0 -# qmid: None + +""" +ID: tabloid.remote-access-to-security-db +TITLE: Verify ability to make REMOTE connect to security.db +DESCRIPTION: + This test verifies only the ability to make a REMOTE connect to security.db + Line "RemoteAccess = false" in file $FB_HOME/databases.conf should be COMMENTED. + On the host that runs the tests this must be done BEFORE launching all tests by calling + batch file "upd_databases_conf.bat" (see \FirebirdQA\qa3x.bat; qa4x.bat). + Checked 28.06.2016 on 4.0.0.267 +FBTEST: functional.tabloid.remote_access_to_security_db +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory() -substitutions_1 = [('TCPv4', 'TCP'), ('TCPv6', 'TCP')] +act = python_act('db', substitutions=[('TCPv4', 'TCP'), ('TCPv6', 'TCP')]) -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -# test_script_1 -#--- -# -# import os -# os.environ["ISC_USER"] = user_name -# os.environ["ISC_PASSWORD"] = user_password -# -# db_conn.close() -# sql_chk=''' -# connect 'localhost:security.db'; -# set list on; -# select mon$attachment_name,mon$remote_protocol from mon$attachments where mon$attachment_id = current_connection; -# ''' -# runProgram('isql',['-q'],sql_chk) -#--- -act_1 = python_act('db_1', substitutions=substitutions_1) - -expected_stdout_1 = """ +expected_stdout = """ MON$ATTACHMENT_NAME security.db MON$REMOTE_PROTOCOL TCP """ +@pytest.mark.skip('FIXME: Not IMPLEMENTED') @pytest.mark.version('>=3.0') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") +# Original python code for this test: +# ----------------------------------- +# +# import os +# os.environ["ISC_USER"] = user_name +# os.environ["ISC_PASSWORD"] = user_password +# +# db_conn.close() +# sql_chk=''' +# connect 'localhost:security.db'; +# set list on; +# select mon$attachment_name,mon$remote_protocol from mon$attachments where mon$attachment_id = current_connection; +# ''' +# runProgram('isql',['-q'],sql_chk) +# ----------------------------------- diff --git a/tests/functional/tabloid/test_request_of_expr_index.py b/tests/functional/tabloid/test_request_of_expr_index.py index a8e243b4..de85c02f 100644 --- a/tests/functional/tabloid/test_request_of_expr_index.py +++ b/tests/functional/tabloid/test_request_of_expr_index.py @@ -1,35 +1,26 @@ #coding:utf-8 -# -# id: functional.tabloid.request_of_expr_index -# title: request of expression index could run outside of main request's snapshot. -# decription: -# Test verifies fix that is described here: -# https://github.com/FirebirdSQL/firebird/commit/26ee42e69d0a381c166877e3c2a17893d85317e0 -# Thanks Vlad for example of implementation and suggestions. -# ::: NOTE ::: -# It is crusial that final SELECT must run in TIL = read committed read consistency. -# -# Confirmed bug on 4.0.0.1810. -# Checked on 4.0.0.1812 (SS/CS) - all OK. -# -# tracker_id: -# min_versions: ['4.0.0'] -# versions: 4.0 -# qmid: None + +""" +ID: tabloid.request-of-expr-index +TITLE: request of expression index could run outside of main request's snapshot.
+DESCRIPTION: + Test verifies fix that is described here: + https://github.com/FirebirdSQL/firebird/commit/26ee42e69d0a381c166877e3c2a17893d85317e0 + Thanks Vlad for example of implementation and suggestions. + ::: NOTE ::: + It is crucial that final SELECT must run in TIL = read committed read consistency. + + Confirmed bug on 4.0.0.1810. + Checked on 4.0.0.1812 (SS/CS) - all OK. +FBTEST: functional.tabloid.request_of_expr_index +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() -substitutions_1 = [('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ recreate global temporary table gtt_snap (id bigint) on commit delete rows; set term ^; @@ -66,15 +57,14 @@ test_script_1 = """ """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ RESULT Expected: values are equal. """ @pytest.mark.version('>=4.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/tabloid/test_snd_7795_20120706_1249.py b/tests/functional/tabloid/test_snd_7795_20120706_1249.py index c3c8751d..a0a73742 100644 --- a/tests/functional/tabloid/test_snd_7795_20120706_1249.py +++ b/tests/functional/tabloid/test_snd_7795_20120706_1249.py @@ -1,34 +1,25 @@ #coding:utf-8 -# -# id: functional.tabloid.snd_7795_20120706_1249 -# title: Common SQL. Check correctness of the results -# decription: -# NB: new datatype in FB 4.0 was introduces: numeric(38,0). -# It leads to additional ident of values when we show them in form "SET LIST ON", -# so we have to ignore all internal spaces - see added 'substitution' section below. -# -# Checked on: -# 4.0.0.1635 SS: 1.824s. -# 3.0.5.33182 SS: 1.387s. -# -# tracker_id: -# min_versions: ['3.0'] -# versions: 3.0 -# qmid: None + +""" +ID: tabloid.snd-7795-20120706-1249 +TITLE: Common SQL. Check correctness of the results +DESCRIPTION: + NB: a new datatype was introduced in FB 4.0: numeric(38,0). + It leads to additional indentation of values when we show them in form "SET LIST ON", + so we have to ignore all internal spaces - see added 'substitution' section below. + + Checked on: + 4.0.0.1635 SS: 1.824s. + 3.0.5.33182 SS: 1.387s.
+FBTEST: functional.tabloid.snd_7795_20120706_1249 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory(from_backup='tabloid-snd-7795.fbk') -substitutions_1 = [('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(from_backup='tabloid-snd-7795.fbk', init=init_script_1) - -test_script_1 = """ +test_script = """ set list on; with recursive n as(select -1 i from rdb$database union all select n.i+1 from n where n.i<1), @@ -215,9 +206,9 @@ test_script_1 = """ order by 1,2; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ GATE 1,1 DTS 2001-03-14 RETAIL_SUM 80778.08 @@ -260,8 +251,7 @@ expected_stdout_1 = """ """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/transactions/test_read_consist_statement_delete_undone_01.py b/tests/functional/transactions/test_read_consist_statement_delete_undone_01.py index 17501caa..ba38d047 100644 --- a/tests/functional/transactions/test_read_consist_statement_delete_undone_01.py +++ b/tests/functional/transactions/test_read_consist_statement_delete_undone_01.py @@ -1,392 +1,380 @@ #coding:utf-8 -# -# id: functional.transactions.read_consist_statement_delete_undone_01 -# title: READ CONSISTENCY. Changes produced by DELETE statement must be UNDONE when cursor resultset becomes empty after this statement start. Test-01 -# decription: -# Initial article for reading: -# https://asktom.oracle.com/pls/asktom/f?p=100:11:::::P11_QUESTION_ID:11504247549852 -# Note on terms which are used there: "BLOCKER", "LONG" and "FIRSTLAST" - their names are slightly changed here -# to: LOCKER-1, WORKER and LOCKER-2 respectively. -# See also: doc\\README.read_consistency.md -# -# ********************************************** -# -# ::: NB ::: -# This test uses script %FBT_REPO% -# iles -# ead-consist-sttm-restart-DDL.sql which contains common DDL for all other such tests. -# Particularly, it contains two TRIGGERS (TLOG_WANT and TLOG_DONE) which are used for logging of planned actions and actual -# results against table TEST. These triggers launched AUTONOMOUS transactions in order to have ability to see results in any -# outcome of test. -# -# ############### -# Following scenario if executed here: -# * five rows are inserted into the table TEST, with IDs: 1...5. -# -# * session 'locker-1' ("BLOCKER" in Tom Kyte's article ): -# update test set id = id where id=1; -# -# * session 'worker' ("LONG" in TK article) has mission: -# delete from test where not exists(select * from test where id >= 10) order by id desc; // using TIL = read committed read consistency -# -# // Execution will have PLAN ORDER . 
-# // It will delete rows starting with ID = 5 and down to ID = 2, but hang on row with ID = 1 because of locker-1; -# -# * session 'locker-2' ("FIRSTLAST" in TK article): -# (1) insert into test(id) values(6); -# (2) commit; -# (3) update test set id=id where id = 6; -# -# // session-'worker' remains waiting at this point because row with ID = 5 is still occupied by by locker-1 -# // but worker must further see record with (new) id = 6 because its TIL was changed to RC NO RECORD_VERSION. -# -# * session 'locker-1': commit (and allows lead session-worker to delete row with ID = 1). -# (1) commit; -# (2) insert into test(id) values(7); -# (3) commit; -# (4) update test set id=id where id = 7; -# -# // This: '(1) commit' - will release record with ID = 1. Worker sees this record and put write-lock on it. -# // [DOC]: "b) engine put write lock on conflicted record" -# // Because of TIL = RC NRV session-'worker' must see all committed records regardless on its own snapshot. -# // Worker resumes search for any rows which with taking in account required order of its DML (i.e. 'ORDER BY ID DESC'). -# // [DOC]: "c) engine continue to evaluate remaining records of update\\delete cursor and put write locks on it too" -# // Worker starts to search records which must be involved in its DML and *found* first sucn row: it has ID = 7. -# // Then it goes on and stops on ID=6 because id is occupied by locker-2. -# // BECAUSE OF FACT THAT AT LEAST ONE ROW *WAS FOUND* - STATEMENT-LEVEL RESTART *NOT* YET OCCURS HERE. -# // [DOC]: "d) when there is *no more* records to fetch, engine start to undo all actions performed since -# // top-level statement execution starts and preserve already taken write locks -# // e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*, -# // creates new statement-level snapshot and restart execution of top-level statement." -# -# -# * session 'locker-2': -# (1) commit; -# (2) insert into test(id) values(8); -# (3) commit; -# (4) update test set id=id where id = 8; -# -# // This: '(1) commit' - will release record with ID = 6. Worker sees this record and put write-lock on it. -# // [DOC]: "b) engine put write lock on conflicted record" -# // Because of TIL = RC NRV session-'worker' must see all committed records regardless on its own snapshot. -# // Worker resumes search for any rows which with taking in account required order of its DML (i.e. 'ORDER BY ID DESC') -# // [DOC]: "c) engine continue to evaluate remaining records of update\\delete cursor and put write locks on it too" -# // Worker starts to search records which must be involved in its DML and *found* first sucn row: it has ID = 8. -# // Then it goes on stops on ID=7 because id is occupied by locker-1. -# // BECAUSE OF FACT THAT AT LEAST ONE ROW *WAS FOUND* - STATEMENT-LEVEL RESTART *NOT* YET OCCURS HERE. -# // [DOC]: "d) when there is *no more* records to fetch, engine start to undo all actions performed since -# // top-level statement execution starts and preserve already taken write locks -# // e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*, -# // creates new statement-level snapshot and restart execution of top-level statement." -# -# * session 'locker-1': commit (this allows session-worker to delete row with ID = 7). -# (1) commit; -# (2) insert into test(id) values(9); -# (3) commit; -# (4) update test set id=id where id = 9; -# -# // Comments here are similar to previous one: STATEMENT-LEVEL RESTART *NOT* YET OCCURS HERE. 
-# -# * session 'locker-2': commit (this allows session-worker to delete row with ID = 6). -# (1) commit; -# (2) insert into test(id) values(10); -# (3) commit; -# (4) update test set id=id where id = 10; -# -# // This will made this row visible to session-worker when it will resume its DML. -# // NOTE: this record will cause session-worker immediately UNDO all changes that it was performed before - see "WHERE NOT EXISTS(...)" in its DML expression. -# -# -# Expected result: -# * session-'worker' must be cancelled. No rows must be deleted, PLUS new rows must remain (with ID = 6 ... 10). -# * we must NOT see statement-level restart because no rows actually were affected by session-worker statement. -# Column TLOG_DONE.SNAP_NO must contain only one unique value that relates to start of DELETE statement. -# -# ################ -# -# Additional comments for this case - see letter from Vlad, 05-aug-2020 00:51. -# -# Checked on 4.0.0.2151 SS/CS -# -# tracker_id: -# min_versions: ['4.0'] -# versions: 4.0 -# qmid: + +""" +ID: transactions.read-consist-statement-delete-undone-01 +TITLE: READ CONSISTENCY. Changes produced by DELETE statement must be UNDONE when cursor resultset becomes empty after this statement start. Test-01 +DESCRIPTION: + Initial article for reading: + https://asktom.oracle.com/pls/asktom/f?p=100:11:::::P11_QUESTION_ID:11504247549852 + Note on terms which are used there: "BLOCKER", "LONG" and "FIRSTLAST" - their names are slightly changed here + to: LOCKER-1, WORKER and LOCKER-2 respectively. + See also: doc/README.read_consistency.md + + ********************************************** + + ::: NB ::: + This test uses script %FBT_REPO%/files/read-consist-sttm-restart-DDL.sql which contains common DDL for all other such tests. + Particularly, it contains two TRIGGERS (TLOG_WANT and TLOG_DONE) which are used for logging of planned actions and actual + results against table TEST. These triggers launched AUTONOMOUS transactions in order to have ability to see results in any + outcome of test. + + ############### + Following scenario if executed here: + * five rows are inserted into the table TEST, with IDs: 1...5. + + * session 'locker-1' ("BLOCKER" in Tom Kyte's article ): + update test set id = id where id=1; + + * session 'worker' ("LONG" in TK article) has mission: + delete from test where not exists(select * from test where id >= 10) order by id desc; // using TIL = read committed read consistency + + // Execution will have PLAN ORDER . + // It will delete rows starting with ID = 5 and down to ID = 2, but hang on row with ID = 1 because of locker-1; + + * session 'locker-2' ("FIRSTLAST" in TK article): + (1) insert into test(id) values(6); + (2) commit; + (3) update test set id=id where id = 6; + + // session-'worker' remains waiting at this point because row with ID = 5 is still occupied by by locker-1 + // but worker must further see record with (new) id = 6 because its TIL was changed to RC NO RECORD_VERSION. + + * session 'locker-1': commit (and allows lead session-worker to delete row with ID = 1). + (1) commit; + (2) insert into test(id) values(7); + (3) commit; + (4) update test set id=id where id = 7; + + // This: '(1) commit' - will release record with ID = 1. Worker sees this record and put write-lock on it. + // [DOC]: "b) engine put write lock on conflicted record" + // Because of TIL = RC NRV session-'worker' must see all committed records regardless on its own snapshot. 
+        // Worker resumes its search for rows, taking into account the required order of its DML (i.e. 'ORDER BY ID DESC').
+        // [DOC]: "c) engine continue to evaluate remaining records of update/delete cursor and put write locks on it too"
+        // Worker starts to search records which must be involved in its DML and *found* the first such row: it has ID = 7.
+        // Then it goes on and stops on ID=6 because this id is occupied by locker-2.
+        // BECAUSE OF FACT THAT AT LEAST ONE ROW *WAS FOUND* - STATEMENT-LEVEL RESTART *NOT* YET OCCURS HERE.
+        // [DOC]: "d) when there is *no more* records to fetch, engine start to undo all actions performed since
+        //        top-level statement execution starts and preserve already taken write locks
+        //        e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*,
+        //        creates new statement-level snapshot and restart execution of top-level statement."
+
+
+    * session 'locker-2':
+        (1) commit;
+        (2) insert into test(id) values(8);
+        (3) commit;
+        (4) update test set id=id where id = 8;
+
+        // This: '(1) commit' - will release record with ID = 6. Worker sees this record and puts a write-lock on it.
+        // [DOC]: "b) engine put write lock on conflicted record"
+        // Because of TIL = RC NRV session-'worker' must see all committed records regardless of its own snapshot.
+        // Worker resumes its search for rows, taking into account the required order of its DML (i.e. 'ORDER BY ID DESC').
+        // [DOC]: "c) engine continue to evaluate remaining records of update/delete cursor and put write locks on it too"
+        // Worker starts to search records which must be involved in its DML and *found* the first such row: it has ID = 8.
+        // Then it goes on and stops on ID=7 because this id is occupied by locker-1.
+        // BECAUSE OF FACT THAT AT LEAST ONE ROW *WAS FOUND* - STATEMENT-LEVEL RESTART *NOT* YET OCCURS HERE.
+        // [DOC]: "d) when there is *no more* records to fetch, engine start to undo all actions performed since
+        //        top-level statement execution starts and preserve already taken write locks
+        //        e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*,
+        //        creates new statement-level snapshot and restart execution of top-level statement."
+
+    * session 'locker-1': commit (this allows session-worker to delete row with ID = 7).
+        (1) commit;
+        (2) insert into test(id) values(9);
+        (3) commit;
+        (4) update test set id=id where id = 9;
+
+        // Comments here are similar to the previous step: STATEMENT-LEVEL RESTART *NOT* YET OCCURS HERE.
+
+    * session 'locker-2': commit (this allows session-worker to delete row with ID = 6).
+        (1) commit;
+        (2) insert into test(id) values(10);
+        (3) commit;
+        (4) update test set id=id where id = 10;
+
+        // This will make this row visible to session-worker when it resumes its DML.
+        // NOTE: this record will cause session-worker to immediately UNDO all changes that it has performed before - see "WHERE NOT EXISTS(...)" in its DML expression.
+
+
+    Expected result:
+    * session-'worker' must be cancelled. No rows must be deleted, PLUS new rows must remain (with ID = 6 ... 10).
+    * we must NOT see statement-level restart because no rows actually were affected by the session-worker statement.
+      Column TLOG_DONE.SNAP_NO must contain only one unique value that relates to the start of the DELETE statement.
+
+    ################
+
+    Additional comments for this case - see letter from Vlad, 05-aug-2020 00:51.
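+
+    For reference, a condensed sketch of the worker session as it is driven via ISQL (paraphrased from the
+    archived implementation preserved in the comment block at the end of this module; table TEST and view
+    V_WORKER_LOG are assumed to be created by read-consist-sttm-restart-DDL.sql):
+
+        SET KEEP_TRAN_PARAMS ON;
+        set transaction read committed read consistency;
+        set count on;
+        delete from test where not exists(select * from test where id >= 10) order by id desc; -- blocks on ID = 1, which is held by locker-1
+        select id from test order by id;
+        select v.old_id, v.op, v.snap_no_rank from v_worker_log v where v.op = 'del';
+        rollback;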
+ + Checked on 4.0.0.2151 SS/CS +FBTEST: functional.transactions.read_consist_statement_delete_undone_01 +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() -substitutions_1 = [('=', ''), ('[ \t]+', ' ')] +act = python_act('db', substitutions=[('=', ''), ('[ \t]+', ' ')]) -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -# test_script_1 -#--- -# -# import os -# import sys -# import subprocess -# from subprocess import Popen -# import re -# import difflib -# from fdb import services -# import time -# -# os.environ["ISC_USER"] = user_name -# os.environ["ISC_PASSWORD"] = user_password -# -# db_conn.close() -# -# #-------------------------------------------- -# -# def flush_and_close( file_handle ): -# # https://docs.python.org/2/library/os.html#os.fsync -# # If you're starting with a Python file object f, -# # first do f.flush(), and -# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. -# global os -# -# file_handle.flush() -# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: -# # otherwise: "OSError: [Errno 9] Bad file descriptor"! -# os.fsync(file_handle.fileno()) -# file_handle.close() -# -# #-------------------------------------------- -# -# def cleanup( f_names_list ): -# global os -# for f in f_names_list: -# if type(f) == file: -# del_name = f.name -# elif type(f) == str: -# del_name = f -# else: -# print('Unrecognized type of element:', f, ' - can not be treated as file.') -# del_name = None -# -# if del_name and os.path.isfile( del_name ): -# os.remove( del_name ) -# -# #-------------------------------------------- -# -# -# sql_init_ddl = os.path.join(context['files_location'],'read-consist-sttm-restart-DDL.sql') -# -# f_init_log=open( os.path.join(context['temp_directory'],'read-consist-sttm-restart-DDL.log'), 'w') -# f_init_err=open( ''.join( ( os.path.splitext(f_init_log.name)[0], '.err') ), 'w') -# -# subprocess.call( [context['isql_path'], dsn, '-q', '-i', sql_init_ddl], stdout=f_init_log, stderr=f_init_err ) -# flush_and_close(f_init_log) -# flush_and_close(f_init_err) -# -# # add rows with ID = 1,2,3,4,5: -# sql_addi=''' -# set term ^; -# execute block as -# begin -# rdb$set_context('USER_SESSION', 'WHO', 'INIT_DATA'); -# end -# ^ -# set term ;^ -# insert into test(id, x) -# select row_number()over(),row_number()over() -# from rdb$types rows 5; -# commit; -# ''' -# runProgram('isql', [ dsn, '-q' ], sql_addi) -# -# -# con_lock_1 = fdb.connect( dsn = dsn ) -# con_lock_2 = fdb.connect( dsn = dsn ) -# con_lock_1.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #1'); end" ) -# con_lock_2.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #2'); end" ) -# -# -# ######################### -# ### L O C K E R - 1 ### -# ######################### -# -# con_lock_1.execute_immediate( 'update test set id=id where id = 1' ) -# -# sql_text=''' -# connect '%(dsn)s'; -# set list on; -# set autoddl off; -# set term ^; -# execute block returns (whoami varchar(30)) as -# begin -# whoami = 'WORKER'; -- , ATT#' || current_connection; -# rdb$set_context('USER_SESSION','WHO', whoami); -# -- suspend; -# end -# ^ -# set term ;^ -# commit; -# SET KEEP_TRAN_PARAMS ON; -# set transaction read committed read consistency; -# --select current_connection, current_transaction from rdb$database; -# set list off; -# set 
wng off; -# --set plan on; -# set count on; -# -# delete from test where not exists(select * from test where id >= 10) order by id desc; -- THIS MUST BE LOCKED -# -# -- check results: -# -- ############### -# -# select id from test order by id; -- this will produce output only after all lockers do their commit/rollback -# -# select v.old_id, v.op, v.snap_no_rank -# from v_worker_log v -# where v.op = 'del'; -# -# set width who 10; -# -- DO NOT check this! Values can differ here from one run to another! -# --select id, trn, who, old_id, new_id, op, rec_vers, global_cn, snap_no from tlog_done order by id; -# -# rollback; -# -# ''' % dict(globals(), **locals()) -# -# f_worker_sql=open( os.path.join(context['temp_directory'],'tmp_read_consist_statement_undone_delete_01.sql'), 'w') -# f_worker_sql.write(sql_text) -# flush_and_close(f_worker_sql) -# -# -# f_worker_log=open( ''.join( ( os.path.splitext(f_worker_sql.name)[0], '.log') ), 'w') -# f_worker_err=open( ''.join( ( os.path.splitext(f_worker_log.name)[0], '.err') ), 'w') -# -# ############################################################################ -# ### L A U N C H W O R K E R U S I N G I S Q L, A S Y N C. ### -# ############################################################################ -# -# p_worker = Popen( [ context['isql_path'], '-pag', '999999', '-q', '-i', f_worker_sql.name ],stdout=f_worker_log, stderr=f_worker_err) -# time.sleep(1) -# -# -# ######################### -# ### L O C K E R - 2 ### -# ######################### -# # Add record so that it **will* be included in the set of rows that must be affected by session-worker: -# con_lock_2.execute_immediate( 'insert into test(id, x) values(6, 6);' ) -# con_lock_2.commit() -# con_lock_2.execute_immediate( 'update test set id = id where id = 6;' ) -# -# ######################### -# ### L O C K E R - 1 ### -# ######################### -# con_lock_1.commit() # releases record with ID=1 (allow it to be deleted by session-worker) -# # Add record so that it **will* be included in the set of rows that must be affected by session-worker: -# con_lock_1.execute_immediate( 'insert into test(id, x) values(7, 7);' ) -# con_lock_1.commit() -# con_lock_1.execute_immediate( 'update test set id = id where id = 7;' ) -# -# ######################### -# ### L O C K E R - 2 ### -# ######################### -# con_lock_2.commit() # releases record with ID = 6, but session-worker is waiting for record with ID = 7 (that was added by locker-1). -# con_lock_2.execute_immediate( 'insert into test(id, x) values(8, 8);' ) -# con_lock_2.commit() -# con_lock_2.execute_immediate( 'update test set id = id where id = 8;' ) -# -# -# ######################### -# ### L O C K E R - 1 ### -# ######################### -# con_lock_1.commit() # releases record with ID = 7, but session-worker is waiting for record with ID = 8 (that was added by locker-2). -# con_lock_1.execute_immediate( 'insert into test(id, x) values(9, 9);' ) -# con_lock_1.commit() -# con_lock_1.execute_immediate( 'update test set id = id where id = 9;' ) -# -# -# ######################### -# ### L O C K E R - 2 ### -# ######################### -# con_lock_2.commit() # releases record with ID = 8, but session-worker is waiting for record with ID = 9 (that was added by locker-1). 
-# con_lock_2.execute_immediate( 'insert into test(id, x) values(10, 10);' ) -# con_lock_2.commit() -# con_lock_2.execute_immediate( 'update test set id = id where id = 10;' ) -# -# -# ######################### -# ### L O C K E R - 1 ### -# ######################### -# con_lock_1.commit() # <<< THIS MUST CANCEL ALL PERFORMED DELETIONS OF SESSION-WORKER -# -# con_lock_2.commit() -# -# # Here we wait for ISQL complete its mission: -# p_worker.wait() -# -# flush_and_close(f_worker_log) -# flush_and_close(f_worker_err) -# -# # Close lockers: -# ################ -# for c in (con_lock_1, con_lock_2): -# c.close() -# -# -# # CHECK RESULTS -# ############### -# -# for f in (f_init_err, f_worker_err): -# with open(f.name,'r') as g: -# for line in g: -# if line: -# print( 'UNEXPECTED STDERR IN ' + g.name + ':' + line) -# -# with open(f_worker_log.name,'r') as f: -# for line in f: -# print(line) -# -# -# # Cleanup. -# ########## -# time.sleep(1) -# cleanup( (f_init_log, f_init_err, f_worker_sql, f_worker_log, f_worker_err) ) -# -#--- -act_1 = python_act('db_1', substitutions=substitutions_1) - -expected_stdout_1 = """ +expected_stdout = """ Records affected: 0 - ID - ======= - 1 - 2 - 3 - 4 - 5 - 6 - 7 - 8 - 9 - 10 + ID + ======= + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 Records affected: 10 - OLD_ID OP SNAP_NO_RANK - ======= ====== ===================== - 5 DEL 1 - 4 DEL 1 - 3 DEL 1 - 2 DEL 1 + OLD_ID OP SNAP_NO_RANK + ======= ====== ===================== + 5 DEL 1 + 4 DEL 1 + 3 DEL 1 + 2 DEL 1 Records affected: 4 """ +@pytest.mark.skip('FIXME: Not IMPLEMENTED') @pytest.mark.version('>=4.0') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") +# Original python code for this test: +# ----------------------------------- +# +# import os +# import sys +# import subprocess +# from subprocess import Popen +# import re +# import difflib +# from fdb import services +# import time +# +# os.environ["ISC_USER"] = user_name +# os.environ["ISC_PASSWORD"] = user_password +# +# db_conn.close() +# +# #-------------------------------------------- +# +# def flush_and_close( file_handle ): +# # https://docs.python.org/2/library/os.html#os.fsync +# # If you're starting with a Python file object f, +# # first do f.flush(), and +# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. +# global os +# +# file_handle.flush() +# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: +# # otherwise: "OSError: [Errno 9] Bad file descriptor"! 
+# os.fsync(file_handle.fileno()) +# file_handle.close() +# +# #-------------------------------------------- +# +# def cleanup( f_names_list ): +# global os +# for f in f_names_list: +# if type(f) == file: +# del_name = f.name +# elif type(f) == str: +# del_name = f +# else: +# print('Unrecognized type of element:', f, ' - can not be treated as file.') +# del_name = None +# +# if del_name and os.path.isfile( del_name ): +# os.remove( del_name ) +# +# #-------------------------------------------- +# +# +# sql_init_ddl = os.path.join(context['files_location'],'read-consist-sttm-restart-DDL.sql') +# +# f_init_log=open( os.path.join(context['temp_directory'],'read-consist-sttm-restart-DDL.log'), 'w') +# f_init_err=open( ''.join( ( os.path.splitext(f_init_log.name)[0], '.err') ), 'w') +# +# subprocess.call( [context['isql_path'], dsn, '-q', '-i', sql_init_ddl], stdout=f_init_log, stderr=f_init_err ) +# flush_and_close(f_init_log) +# flush_and_close(f_init_err) +# +# # add rows with ID = 1,2,3,4,5: +# sql_addi=''' +# set term ^; +# execute block as +# begin +# rdb$set_context('USER_SESSION', 'WHO', 'INIT_DATA'); +# end +# ^ +# set term ;^ +# insert into test(id, x) +# select row_number()over(),row_number()over() +# from rdb$types rows 5; +# commit; +# ''' +# runProgram('isql', [ dsn, '-q' ], sql_addi) +# +# +# con_lock_1 = fdb.connect( dsn = dsn ) +# con_lock_2 = fdb.connect( dsn = dsn ) +# con_lock_1.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #1'); end" ) +# con_lock_2.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #2'); end" ) +# +# +# ######################### +# ### L O C K E R - 1 ### +# ######################### +# +# con_lock_1.execute_immediate( 'update test set id=id where id = 1' ) +# +# sql_text=''' +# connect '%(dsn)s'; +# set list on; +# set autoddl off; +# set term ^; +# execute block returns (whoami varchar(30)) as +# begin +# whoami = 'WORKER'; -- , ATT#' || current_connection; +# rdb$set_context('USER_SESSION','WHO', whoami); +# -- suspend; +# end +# ^ +# set term ;^ +# commit; +# SET KEEP_TRAN_PARAMS ON; +# set transaction read committed read consistency; +# --select current_connection, current_transaction from rdb$database; +# set list off; +# set wng off; +# --set plan on; +# set count on; +# +# delete from test where not exists(select * from test where id >= 10) order by id desc; -- THIS MUST BE LOCKED +# +# -- check results: +# -- ############### +# +# select id from test order by id; -- this will produce output only after all lockers do their commit/rollback +# +# select v.old_id, v.op, v.snap_no_rank +# from v_worker_log v +# where v.op = 'del'; +# +# set width who 10; +# -- DO NOT check this! Values can differ here from one run to another! +# --select id, trn, who, old_id, new_id, op, rec_vers, global_cn, snap_no from tlog_done order by id; +# +# rollback; +# +# ''' % dict(globals(), **locals()) +# +# f_worker_sql=open( os.path.join(context['temp_directory'],'tmp_read_consist_statement_undone_delete_01.sql'), 'w') +# f_worker_sql.write(sql_text) +# flush_and_close(f_worker_sql) +# +# +# f_worker_log=open( ''.join( ( os.path.splitext(f_worker_sql.name)[0], '.log') ), 'w') +# f_worker_err=open( ''.join( ( os.path.splitext(f_worker_log.name)[0], '.err') ), 'w') +# +# ############################################################################ +# ### L A U N C H W O R K E R U S I N G I S Q L, A S Y N C. 
### +# ############################################################################ +# +# p_worker = Popen( [ context['isql_path'], '-pag', '999999', '-q', '-i', f_worker_sql.name ],stdout=f_worker_log, stderr=f_worker_err) +# time.sleep(1) +# +# +# ######################### +# ### L O C K E R - 2 ### +# ######################### +# # Add record so that it **will* be included in the set of rows that must be affected by session-worker: +# con_lock_2.execute_immediate( 'insert into test(id, x) values(6, 6);' ) +# con_lock_2.commit() +# con_lock_2.execute_immediate( 'update test set id = id where id = 6;' ) +# +# ######################### +# ### L O C K E R - 1 ### +# ######################### +# con_lock_1.commit() # releases record with ID=1 (allow it to be deleted by session-worker) +# # Add record so that it **will* be included in the set of rows that must be affected by session-worker: +# con_lock_1.execute_immediate( 'insert into test(id, x) values(7, 7);' ) +# con_lock_1.commit() +# con_lock_1.execute_immediate( 'update test set id = id where id = 7;' ) +# +# ######################### +# ### L O C K E R - 2 ### +# ######################### +# con_lock_2.commit() # releases record with ID = 6, but session-worker is waiting for record with ID = 7 (that was added by locker-1). +# con_lock_2.execute_immediate( 'insert into test(id, x) values(8, 8);' ) +# con_lock_2.commit() +# con_lock_2.execute_immediate( 'update test set id = id where id = 8;' ) +# +# +# ######################### +# ### L O C K E R - 1 ### +# ######################### +# con_lock_1.commit() # releases record with ID = 7, but session-worker is waiting for record with ID = 8 (that was added by locker-2). +# con_lock_1.execute_immediate( 'insert into test(id, x) values(9, 9);' ) +# con_lock_1.commit() +# con_lock_1.execute_immediate( 'update test set id = id where id = 9;' ) +# +# +# ######################### +# ### L O C K E R - 2 ### +# ######################### +# con_lock_2.commit() # releases record with ID = 8, but session-worker is waiting for record with ID = 9 (that was added by locker-1). +# con_lock_2.execute_immediate( 'insert into test(id, x) values(10, 10);' ) +# con_lock_2.commit() +# con_lock_2.execute_immediate( 'update test set id = id where id = 10;' ) +# +# +# ######################### +# ### L O C K E R - 1 ### +# ######################### +# con_lock_1.commit() # <<< THIS MUST CANCEL ALL PERFORMED DELETIONS OF SESSION-WORKER +# +# con_lock_2.commit() +# +# # Here we wait for ISQL complete its mission: +# p_worker.wait() +# +# flush_and_close(f_worker_log) +# flush_and_close(f_worker_err) +# +# # Close lockers: +# ################ +# for c in (con_lock_1, con_lock_2): +# c.close() +# +# +# # CHECK RESULTS +# ############### +# +# for f in (f_init_err, f_worker_err): +# with open(f.name,'r') as g: +# for line in g: +# if line: +# print( 'UNEXPECTED STDERR IN ' + g.name + ':' + line) +# +# with open(f_worker_log.name,'r') as f: +# for line in f: +# print(line) +# +# +# # Cleanup. 
+# ########## +# time.sleep(1) +# cleanup( (f_init_log, f_init_err, f_worker_sql, f_worker_log, f_worker_err) ) +# +# ----------------------------------- diff --git a/tests/functional/transactions/test_read_consist_statement_delete_undone_02.py b/tests/functional/transactions/test_read_consist_statement_delete_undone_02.py index 10b14df3..8f5f1b9a 100644 --- a/tests/functional/transactions/test_read_consist_statement_delete_undone_02.py +++ b/tests/functional/transactions/test_read_consist_statement_delete_undone_02.py @@ -1,349 +1,337 @@ #coding:utf-8 -# -# id: functional.transactions.read_consist_statement_delete_undone_02 -# title: READ CONSISTENCY. Changes produced by DELETE statement must be UNDONE when cursor resultset becomes empty after this statement start. Test-02 -# decription: -# Initial article for reading: -# https://asktom.oracle.com/pls/asktom/f?p=100:11:::::P11_QUESTION_ID:11504247549852 -# Note on terms which are used there: "BLOCKER", "LONG" and "FIRSTLAST" - their names are slightly changed here -# to: LOCKER-1, WORKER and LOCKER-2 respectively. -# See also: doc\\README.read_consistency.md -# -# ********************************************** -# -# ::: NB ::: -# This test uses script %FBT_REPO% -# iles -# ead-consist-sttm-restart-DDL.sql which contains common DDL for all other such tests. -# Particularly, it contains two TRIGGERS (TLOG_WANT and TLOG_DONE) which are used for logging of planned actions and actual -# results against table TEST. These triggers launched AUTONOMOUS transactions in order to have ability to see results in any -# outcome of test. -# -# ############### -# Following scenario if executed here: -# * five rows are inserted into the table TEST, with IDs: 1...5. -# -# * session 'locker-1' ("BLOCKER" in Tom Kyte's article ): -# update test set id = id where id = 1; -# -# * session 'worker' ("LONG" in TK article) has mission: -# delete from test where x not in (select x from test where id >= 4) order by id desc; // using TIL = read committed read consistency -# -# // Execution will have PLAN ORDER . -# // It will delete rows starting with ID = 5 and down to ID = 2, but hang on row with ID = 1 because of locker-1; -# -# * session 'locker-2' ("FIRSTLAST" in TK article): -# (1) insert into test(id) values(-1); -# (2) commit; -# (3) update test set id=id where id = -1; -# -# // session-'worker' remains waiting at this point because row with ID = 1 is still occupied by by locker-1 -# // but worker must further see record with (new) id = -1 because its TIL was changed to RC NO RECORD_VERSION. -# -# * session 'locker-1': -# (1) commit; -# (2) insert into test(id) values(-2); -# (3) commit; -# (4) update test set id=id where id = -2; -# -# // This: '(1) commit' - will release record with ID = 1. Worker sees this record and put write-lock on it. -# // [DOC]: "b) engine put write lock on conflicted record" -# // Because of TIL = RC NRV session-'worker' must see all committed records regardless on its own snapshot. -# // Worker resumes search for any rows which with taking in account required order of its DML (i.e. 'ORDER BY ID DESC'). -# // [DOC]: "c) engine continue to evaluate remaining records of update\\delete cursor and put write locks on it too" -# // Worker starts to search records which must be involved in its DML and *found* row that was not yet locked: it has ID = 1. -# // Then it goes on and stops on ID = -1 because id is occupied by locker-2. -# // BECAUSE OF FACT THAT AT LEAST ONE ROW *WAS FOUND* - STATEMENT-LEVEL RESTART *NOT* YET OCCURS HERE. 
-# // [DOC]: "d) when there is *no more* records to fetch, engine start to undo all actions performed since -# // top-level statement execution starts and preserve already taken write locks -# // e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*, -# // creates new statement-level snapshot and restart execution of top-level statement." -# -# * session 'locker-2': -# (1) commit; -# (2) insert into test(id, x) values(10, NULL); -- ::: NB ::: X is NULL here! -# (3) update test set id=id where id = 10; -# -# // This: '(1) commit' - will release record with ID = -1. Worker sees this record and put write-lock on it. -# // [DOC]: "b) engine put write lock on conflicted record" -# // Because of TIL = RC NRV session-'worker' must see all committed records regardless on its own snapshot. -# // Worker resumes search for any rows which with taking in account required order of its DML (i.e. 'ORDER BY ID DESC'). -# // [DOC]: "c) engine continue to evaluate remaining records of update\\delete cursor and put write locks on it too" -# // Worker starts to search records which must be involved in its DML and *found* row that was not yet locked: it has ID = -1. -# // Then it goes on and stops on ID = -2 because id is occupied by locker-1. -# // BECAUSE OF FACT THAT AT LEAST ONE ROW *WAS FOUND* - STATEMENT-LEVEL RESTART *NOT* YET OCCURS HERE. -# // [DOC]: "d) when there is *no more* records to fetch, engine start to undo all actions performed since -# // top-level statement execution starts and preserve already taken write locks -# // e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*, -# // creates new statement-level snapshot and restart execution of top-level statement." -# // ::: NB ::: -# // Expression "where x not in (select x from test where id >= 4)" will be evaluated as FALSE since this point -# // because one of its records has NULL in 'X' column. -# -# * session 'locker-1': -# commit; -# -# // this allows session-worker to delete row with ID = -2. -# // session-worker must immediately cancel its DML because now it sees record with ID = 10 and X is NULL which not meets NOT-IN requirements. -# -# Expected result: -# * session-'worker' must be cancelled. No rows must be deleted, PLUS new rows must remain (with ID = -1, -2 and 10). -# * we must NOT see statement-level restart because no rows actually were affected by session-worker statement. -# Column TLOG_DONE.SNAP_NO must contain only one unique value that relates to start of DELETE statement. -# ################ -# -# Checked on 4.0.0.2151 SS/CS -# -# tracker_id: -# min_versions: ['4.0'] -# versions: 4.0 -# qmid: + +""" +ID: transactions.read-consist-statement-delete-undone-02 +TITLE: READ CONSISTENCY. Changes produced by DELETE statement must be UNDONE when cursor resultset becomes empty after this statement start. Test-02 +DESCRIPTION: + Initial article for reading: + https://asktom.oracle.com/pls/asktom/f?p=100:11:::::P11_QUESTION_ID:11504247549852 + Note on terms which are used there: "BLOCKER", "LONG" and "FIRSTLAST" - their names are slightly changed here + to: LOCKER-1, WORKER and LOCKER-2 respectively. + See also: doc/README.read_consistency.md + + ********************************************** + + ::: NB ::: + This test uses script %FBT_REPO%/files/read-consist-sttm-restart-DDL.sql which contains common DDL for all other such tests. 
+    Particularly, it contains two TRIGGERS (TLOG_WANT and TLOG_DONE) which are used for logging of planned actions and actual
+    results against table TEST. These triggers launch AUTONOMOUS transactions in order to have the ability to see results in any
+    outcome of the test.
+
+    ###############
+    The following scenario is executed here:
+    * five rows are inserted into the table TEST, with IDs: 1...5.
+
+    * session 'locker-1' ("BLOCKER" in Tom Kyte's article):
+        update test set id = id where id = 1;
+
+    * session 'worker' ("LONG" in TK article) has mission:
+        delete from test where x not in (select x from test where id >= 4) order by id desc; // using TIL = read committed read consistency
+
+        // Execution will have PLAN ORDER.
+        // It will delete rows starting with ID = 5 and down to ID = 2, but hang on row with ID = 1 because of locker-1;
+
+    * session 'locker-2' ("FIRSTLAST" in TK article):
+        (1) insert into test(id) values(-1);
+        (2) commit;
+        (3) update test set id=id where id = -1;
+
+        // session-'worker' remains waiting at this point because row with ID = 1 is still occupied by locker-1,
+        // but worker must further see record with (new) id = -1 because its TIL was changed to RC NO RECORD_VERSION.
+
+    * session 'locker-1':
+        (1) commit;
+        (2) insert into test(id) values(-2);
+        (3) commit;
+        (4) update test set id=id where id = -2;
+
+        // This: '(1) commit' - will release record with ID = 1. Worker sees this record and puts a write-lock on it.
+        // [DOC]: "b) engine put write lock on conflicted record"
+        // Because of TIL = RC NRV session-'worker' must see all committed records regardless of its own snapshot.
+        // Worker resumes its search for rows, taking into account the required order of its DML (i.e. 'ORDER BY ID DESC').
+        // [DOC]: "c) engine continue to evaluate remaining records of update/delete cursor and put write locks on it too"
+        // Worker starts to search records which must be involved in its DML and *found* a row that was not yet locked: it has ID = 1.
+        // Then it goes on and stops on ID = -1 because this id is occupied by locker-2.
+        // BECAUSE OF FACT THAT AT LEAST ONE ROW *WAS FOUND* - STATEMENT-LEVEL RESTART *NOT* YET OCCURS HERE.
+ // [DOC]: "d) when there is *no more* records to fetch, engine start to undo all actions performed since + // top-level statement execution starts and preserve already taken write locks + // e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*, + // creates new statement-level snapshot and restart execution of top-level statement." + // ::: NB ::: + // Expression "where x not in (select x from test where id >= 4)" will be evaluated as FALSE since this point + // because one of its records has NULL in 'X' column. + + * session 'locker-1': + commit; + + // this allows session-worker to delete row with ID = -2. + // session-worker must immediately cancel its DML because now it sees record with ID = 10 and X is NULL which not meets NOT-IN requirements. + + Expected result: + * session-'worker' must be cancelled. No rows must be deleted, PLUS new rows must remain (with ID = -1, -2 and 10). + * we must NOT see statement-level restart because no rows actually were affected by session-worker statement. + Column TLOG_DONE.SNAP_NO must contain only one unique value that relates to start of DELETE statement. + ################ + + Checked on 4.0.0.2151 SS/CS +FBTEST: functional.transactions.read_consist_statement_delete_undone_02 +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() -substitutions_1 = [('=', ''), ('[ \t]+', ' ')] +act = python_act('db', substitutions=[('=', ''), ('[ \t]+', ' ')]) -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -# test_script_1 -#--- -# -# import os -# import sys -# import subprocess -# from subprocess import Popen -# import re -# import difflib -# from fdb import services -# import time -# -# os.environ["ISC_USER"] = user_name -# os.environ["ISC_PASSWORD"] = user_password -# -# db_conn.close() -# -# #-------------------------------------------- -# -# def flush_and_close( file_handle ): -# # https://docs.python.org/2/library/os.html#os.fsync -# # If you're starting with a Python file object f, -# # first do f.flush(), and -# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. -# global os -# -# file_handle.flush() -# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: -# # otherwise: "OSError: [Errno 9] Bad file descriptor"! 
-# os.fsync(file_handle.fileno()) -# file_handle.close() -# -# #-------------------------------------------- -# -# def cleanup( f_names_list ): -# global os -# for f in f_names_list: -# if type(f) == file: -# del_name = f.name -# elif type(f) == str: -# del_name = f -# else: -# print('Unrecognized type of element:', f, ' - can not be treated as file.') -# del_name = None -# -# if del_name and os.path.isfile( del_name ): -# os.remove( del_name ) -# -# #-------------------------------------------- -# -# sql_init_ddl = os.path.join(context['files_location'],'read-consist-sttm-restart-DDL.sql') -# -# f_init_log=open( os.path.join(context['temp_directory'],'read-consist-sttm-restart-DDL.log'), 'w') -# f_init_err=open( ''.join( ( os.path.splitext(f_init_log.name)[0], '.err') ), 'w') -# -# subprocess.call( [context['isql_path'], dsn, '-q', '-i', sql_init_ddl], stdout=f_init_log, stderr=f_init_err ) -# flush_and_close(f_init_log) -# flush_and_close(f_init_err) -# -# # add rows with ID = 1,2,3,4,5: -# sql_addi=''' -# set term ^; -# execute block as -# begin -# rdb$set_context('USER_SESSION', 'WHO', 'INIT_DATA'); -# end -# ^ -# set term ;^ -# insert into test(id, x) -# select row_number()over(),row_number()over() -# from rdb$types rows 5; -# commit; -# ''' -# runProgram('isql', [ dsn, '-q' ], sql_addi) -# -# con_lock_1 = fdb.connect( dsn = dsn ) -# con_lock_2 = fdb.connect( dsn = dsn ) -# con_lock_1.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #1'); end" ) -# con_lock_2.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #2'); end" ) -# -# ######################### -# ### L O C K E R - 1 ### -# ######################### -# -# con_lock_1.execute_immediate( 'update test set id=id where id = 1' ) -# -# sql_text=''' -# connect '%(dsn)s'; -# set list on; -# set autoddl off; -# set term ^; -# execute block returns (whoami varchar(30)) as -# begin -# whoami = 'WORKER'; -- , ATT#' || current_connection; -# rdb$set_context('USER_SESSION','WHO', whoami); -# -- suspend; -# end -# ^ -# set term ;^ -# commit; -# SET KEEP_TRAN_PARAMS ON; -# set transaction read committed read consistency; -# --select current_connection, current_transaction from rdb$database; -# set list off; -# set wng off; -# --set plan on; -# set count on; -# -# delete from test where x not in (select x from test where id >= 4) order by id desc; -- THIS MUST BE LOCKED -# -# -- check results: -# -- ############### -# -# -# select id from test order by id; -- this will produce output only after all lockers do their commit/rollback -# -# select v.old_id, v.op, v.snap_no_rank -# from v_worker_log v -# where v.op = 'del'; -# -# set width who 10; -# -- DO NOT check this! Values can differ here from one run to another! -# --select id, trn, who, old_id, new_id, op, rec_vers, global_cn, snap_no from tlog_done order by id; -# -# rollback; -# -# ''' % dict(globals(), **locals()) -# -# f_worker_sql=open( os.path.join(context['temp_directory'],'tmp_read_consist_statement_undone_delete_02.sql'), 'w') -# f_worker_sql.write(sql_text) -# flush_and_close(f_worker_sql) -# -# -# f_worker_log=open( ''.join( ( os.path.splitext(f_worker_sql.name)[0], '.log') ), 'w') -# f_worker_err=open( ''.join( ( os.path.splitext(f_worker_log.name)[0], '.err') ), 'w') -# -# ############################################################################ -# ### L A U N C H W O R K E R U S I N G I S Q L, A S Y N C. 
### -# ############################################################################ -# -# p_worker = Popen( [ context['isql_path'], '-pag', '999999', '-q', '-i', f_worker_sql.name ],stdout=f_worker_log, stderr=f_worker_err) -# time.sleep(1) -# -# -# ######################### -# ### L O C K E R - 2 ### -# ######################### -# con_lock_2.execute_immediate( 'insert into test(id,x) values( -1, -1 )') -# con_lock_2.commit() -# con_lock_2.execute_immediate( 'update test set id=id where id=-1' ) -# -# ######################### -# ### L O C K E R - 1 ### -# ######################### -# con_lock_1.commit() # releases record with ID=1 (allow it to be deleted by session-worker) -# con_lock_1.execute_immediate( 'insert into test(id, x) values(-2, -2);' ) -# con_lock_1.commit() -# con_lock_1.execute_immediate( 'update test set id = id where id = -2' ) -# -# -# ######################### -# ### L O C K E R - 2 ### -# ######################### -# con_lock_2.commit() -# con_lock_2.execute_immediate( 'insert into test(id, x) values(10, null)' ) -# con_lock_2.commit() -# -# ######################### -# ### L O C K E R - 1 ### -# ######################### -# con_lock_1.commit() # <<< THIS MUST CANCEL ALL PERFORMED DELETIONS OF SESSION-WORKER -# -# -# # Here we wait for ISQL complete its mission: -# p_worker.wait() -# -# flush_and_close(f_worker_log) -# flush_and_close(f_worker_err) -# -# # Close lockers: -# ################ -# for c in (con_lock_1, con_lock_2): -# c.close() -# -# -# # CHECK RESULTS -# ############### -# -# for f in (f_init_err, f_worker_err): -# with open(f.name,'r') as g: -# for line in g: -# if line: -# print( 'UNEXPECTED STDERR IN ' + g.name + ':' + line) -# -# with open(f_worker_log.name,'r') as f: -# for line in f: -# print(line) -# -# # Cleanup. -# ########## -# time.sleep(1) -# cleanup( (f_init_log, f_init_err, f_worker_sql, f_worker_log, f_worker_err) ) -# -#--- -act_1 = python_act('db_1', substitutions=substitutions_1) - -expected_stdout_1 = """ +expected_stdout = """ Records affected: 0 - ID - ======= - -2 - -1 - 1 - 2 - 3 - 4 - 5 - 10 + ID + ======= + -2 + -1 + 1 + 2 + 3 + 4 + 5 + 10 Records affected: 8 - OLD_ID OP SNAP_NO_RANK - ======= ====== ===================== - 3 DEL 1 - 2 DEL 1 + OLD_ID OP SNAP_NO_RANK + ======= ====== ===================== + 3 DEL 1 + 2 DEL 1 Records affected: 2 """ +@pytest.mark.skip('FIXME: Not IMPLEMENTED') @pytest.mark.version('>=4.0') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") +# Original python code for this test: +# ----------------------------------- +# +# import os +# import sys +# import subprocess +# from subprocess import Popen +# import re +# import difflib +# from fdb import services +# import time +# +# os.environ["ISC_USER"] = user_name +# os.environ["ISC_PASSWORD"] = user_password +# +# db_conn.close() +# +# #-------------------------------------------- +# +# def flush_and_close( file_handle ): +# # https://docs.python.org/2/library/os.html#os.fsync +# # If you're starting with a Python file object f, +# # first do f.flush(), and +# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. +# global os +# +# file_handle.flush() +# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: +# # otherwise: "OSError: [Errno 9] Bad file descriptor"! 
+# os.fsync(file_handle.fileno()) +# file_handle.close() +# +# #-------------------------------------------- +# +# def cleanup( f_names_list ): +# global os +# for f in f_names_list: +# if type(f) == file: +# del_name = f.name +# elif type(f) == str: +# del_name = f +# else: +# print('Unrecognized type of element:', f, ' - can not be treated as file.') +# del_name = None +# +# if del_name and os.path.isfile( del_name ): +# os.remove( del_name ) +# +# #-------------------------------------------- +# +# sql_init_ddl = os.path.join(context['files_location'],'read-consist-sttm-restart-DDL.sql') +# +# f_init_log=open( os.path.join(context['temp_directory'],'read-consist-sttm-restart-DDL.log'), 'w') +# f_init_err=open( ''.join( ( os.path.splitext(f_init_log.name)[0], '.err') ), 'w') +# +# subprocess.call( [context['isql_path'], dsn, '-q', '-i', sql_init_ddl], stdout=f_init_log, stderr=f_init_err ) +# flush_and_close(f_init_log) +# flush_and_close(f_init_err) +# +# # add rows with ID = 1,2,3,4,5: +# sql_addi=''' +# set term ^; +# execute block as +# begin +# rdb$set_context('USER_SESSION', 'WHO', 'INIT_DATA'); +# end +# ^ +# set term ;^ +# insert into test(id, x) +# select row_number()over(),row_number()over() +# from rdb$types rows 5; +# commit; +# ''' +# runProgram('isql', [ dsn, '-q' ], sql_addi) +# +# con_lock_1 = fdb.connect( dsn = dsn ) +# con_lock_2 = fdb.connect( dsn = dsn ) +# con_lock_1.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #1'); end" ) +# con_lock_2.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #2'); end" ) +# +# ######################### +# ### L O C K E R - 1 ### +# ######################### +# +# con_lock_1.execute_immediate( 'update test set id=id where id = 1' ) +# +# sql_text=''' +# connect '%(dsn)s'; +# set list on; +# set autoddl off; +# set term ^; +# execute block returns (whoami varchar(30)) as +# begin +# whoami = 'WORKER'; -- , ATT#' || current_connection; +# rdb$set_context('USER_SESSION','WHO', whoami); +# -- suspend; +# end +# ^ +# set term ;^ +# commit; +# SET KEEP_TRAN_PARAMS ON; +# set transaction read committed read consistency; +# --select current_connection, current_transaction from rdb$database; +# set list off; +# set wng off; +# --set plan on; +# set count on; +# +# delete from test where x not in (select x from test where id >= 4) order by id desc; -- THIS MUST BE LOCKED +# +# -- check results: +# -- ############### +# +# +# select id from test order by id; -- this will produce output only after all lockers do their commit/rollback +# +# select v.old_id, v.op, v.snap_no_rank +# from v_worker_log v +# where v.op = 'del'; +# +# set width who 10; +# -- DO NOT check this! Values can differ here from one run to another! +# --select id, trn, who, old_id, new_id, op, rec_vers, global_cn, snap_no from tlog_done order by id; +# +# rollback; +# +# ''' % dict(globals(), **locals()) +# +# f_worker_sql=open( os.path.join(context['temp_directory'],'tmp_read_consist_statement_undone_delete_02.sql'), 'w') +# f_worker_sql.write(sql_text) +# flush_and_close(f_worker_sql) +# +# +# f_worker_log=open( ''.join( ( os.path.splitext(f_worker_sql.name)[0], '.log') ), 'w') +# f_worker_err=open( ''.join( ( os.path.splitext(f_worker_log.name)[0], '.err') ), 'w') +# +# ############################################################################ +# ### L A U N C H W O R K E R U S I N G I S Q L, A S Y N C. 
### +# ############################################################################ +# +# p_worker = Popen( [ context['isql_path'], '-pag', '999999', '-q', '-i', f_worker_sql.name ],stdout=f_worker_log, stderr=f_worker_err) +# time.sleep(1) +# +# +# ######################### +# ### L O C K E R - 2 ### +# ######################### +# con_lock_2.execute_immediate( 'insert into test(id,x) values( -1, -1 )') +# con_lock_2.commit() +# con_lock_2.execute_immediate( 'update test set id=id where id=-1' ) +# +# ######################### +# ### L O C K E R - 1 ### +# ######################### +# con_lock_1.commit() # releases record with ID=1 (allow it to be deleted by session-worker) +# con_lock_1.execute_immediate( 'insert into test(id, x) values(-2, -2);' ) +# con_lock_1.commit() +# con_lock_1.execute_immediate( 'update test set id = id where id = -2' ) +# +# +# ######################### +# ### L O C K E R - 2 ### +# ######################### +# con_lock_2.commit() +# con_lock_2.execute_immediate( 'insert into test(id, x) values(10, null)' ) +# con_lock_2.commit() +# +# ######################### +# ### L O C K E R - 1 ### +# ######################### +# con_lock_1.commit() # <<< THIS MUST CANCEL ALL PERFORMED DELETIONS OF SESSION-WORKER +# +# +# # Here we wait for ISQL complete its mission: +# p_worker.wait() +# +# flush_and_close(f_worker_log) +# flush_and_close(f_worker_err) +# +# # Close lockers: +# ################ +# for c in (con_lock_1, con_lock_2): +# c.close() +# +# +# # CHECK RESULTS +# ############### +# +# for f in (f_init_err, f_worker_err): +# with open(f.name,'r') as g: +# for line in g: +# if line: +# print( 'UNEXPECTED STDERR IN ' + g.name + ':' + line) +# +# with open(f_worker_log.name,'r') as f: +# for line in f: +# print(line) +# +# # Cleanup. +# ########## +# time.sleep(1) +# cleanup( (f_init_log, f_init_err, f_worker_sql, f_worker_log, f_worker_err) ) +# +# ----------------------------------- diff --git a/tests/functional/transactions/test_read_consist_sttm_merge_deny_multiple_matches.py b/tests/functional/transactions/test_read_consist_sttm_merge_deny_multiple_matches.py index 29f030be..b9a69a5d 100644 --- a/tests/functional/transactions/test_read_consist_sttm_merge_deny_multiple_matches.py +++ b/tests/functional/transactions/test_read_consist_sttm_merge_deny_multiple_matches.py @@ -1,351 +1,79 @@ #coding:utf-8 -# -# id: functional.transactions.read_consist_sttm_merge_deny_multiple_matches -# title: READ CONSISTENCY. MERGE must reject multiple matches, regardless on statement-level restart. -# decription: -# Initial article for reading: -# https://asktom.oracle.com/pls/asktom/f?p=100:11:::::P11_QUESTION_ID:11504247549852 -# Note on terms which are used there: "BLOCKER", "LONG" and "FIRSTLAST" - their names are slightly changed here -# to: LOCKER-1, WORKER and LOCKER-2 respectively. -# -# See also: doc\\README.read_consistency.md -# Letter from Vlad: 15.09.2020 20:04 // subj "read consistency // addi test(s)" -# -# ::: NB ::: -# This test uses script %FBT_REPO% -# iles -# ead-consist-sttm-restart-DDL.sql which contains common DDL for all other such tests. -# Particularly, it contains two TRIGGERS (TLOG_WANT and TLOG_DONE) which are used for logging of planned actions and actual -# results against table TEST. These triggers use AUTONOMOUS transactions in order to have ability to see results in any -# outcome of test. 
-# -# Test verifies DENIAL of multiple matches when MERGE encounteres them, but this statement works in read committed read consistency TIL -# and forced to do several statement-level restarts before such condition will occur. -# -# Scenario: -# * add initial data to the table TEST: six rows with ID and X = (0, ..., 5); -# * launch LOCKER-1 and catch record with ID = 0: update, then commit and once again update this record (without commit); -# * launch WORKER which tries to do: -# merge into test t -# using ( -# select s.id, s.x from test as s -# where s.id <= 1 -# order by s.id DESC -# ) s -# on abs(t.id) = abs(s.id) -# when matched then -# update set t.x = t.x * s.x -# ; -# This statement will update record with ID = 1 but then hanging because rows with ID = 0 is locked by LOCKER-1. -# At this point WORKER changes its TIL to RC NO RECORD_VERSION. This allows WORKER to see all records which will be committed later; -# NOTE: records with ID = 2...5 will not be subect for this statement (because they will not returned by data source marked as 's'). -# -# * LOCKER-2 updates row with ID = 5 by reverting sign of this field (i.e. set ID to -5), then issues commit and updates this row again (without commit); -# * LOCKER-1 updates row with ID = 4 and set ID to -4, then issues commit and updates this row again (without commit); -# * LOCKER-2 updates row with ID = 3 and set ID to -3, then issues commit and updates this row again (without commit); -# * LOCKER-1 updates row with ID = 2 and set ID to -2, then issues commit and updates this row again (without commit); -# * LOCKER-2 inserts row (ID,X) = (-1, 1), commit and updates this row again (without commit); -# * LOCKER-1 issues commit; -# * LOCKER-2 issues commit; -# -# Each of updates/inserts which are performed by LOCKERs lead to new record be appeared in the data source 's' of MERGE statement. -# But note that last statement: insert into test(id,x) values(-1,1) -- creates record that will match TWISE when ON-expression of MERGE -# will evaluates "abs(t.id) = abs(s.id)": first match will be found for record with ID = +1 and second - for newly added rows with ID=-1. -# -# At this point MERGE must fail with message -# Statement failed, SQLSTATE = 21000 -# Multiple source records cannot match the same target during MERGE -# -# All changes that was performed by MERGE must be rolled back. -# ISQL which did MERGE must issue "Records affected: 2" because MERGE was actually could process records with ID = 1 and 0 (and failed on row with ID=-1). -# -# Above mentioned actions are performed two times: first for TABLE and second for naturally-updatable VIEW (v_test), see 'target_object_type'. -# -# Checked on 4.0.0.2214 SS/CS. -# -# -# tracker_id: -# min_versions: ['4.0'] -# versions: 4.0 -# qmid: + +""" +ID: transactions.read-consist-sttm-merge-deny-multiple-matches +TITLE: READ CONSISTENCY. MERGE must reject multiple matches, regardless on statement-level restart. +DESCRIPTION: + Initial article for reading: + https://asktom.oracle.com/pls/asktom/f?p=100:11:::::P11_QUESTION_ID:11504247549852 + Note on terms which are used there: "BLOCKER", "LONG" and "FIRSTLAST" - their names are slightly changed here + to: LOCKER-1, WORKER and LOCKER-2 respectively. + + See also: doc/README.read_consistency.md + Letter from Vlad: 15.09.2020 20:04 // subj "read consistency // addi test(s)" + + ::: NB ::: + This test uses script %FBT_REPO%/files/read-consist-sttm-restart-DDL.sql which contains common DDL for all other such tests. 
+    Particularly, it contains two TRIGGERS (TLOG_WANT and TLOG_DONE) which are used for logging of planned actions and actual
+    results against table TEST. These triggers use AUTONOMOUS transactions in order to have the ability to see results in any
+    outcome of the test.
+
+    Test verifies DENIAL of multiple matches when MERGE encounters them, while this statement works in read committed read consistency TIL
+    and is forced to perform several statement-level restarts before such a condition occurs.
+
+    Scenario:
+    * add initial data to the table TEST: six rows with ID and X = (0, ..., 5);
+    * launch LOCKER-1 and catch record with ID = 0: update, then commit and once again update this record (without commit);
+    * launch WORKER which tries to do:
+          merge into test t
+          using (
+              select s.id, s.x from test as s
+              where s.id <= 1
+              order by s.id DESC
+          ) s
+          on abs(t.id) = abs(s.id)
+          when matched then
+              update set t.x = t.x * s.x
+          ;
+      This statement will update the record with ID = 1 but then hang because the row with ID = 0 is locked by LOCKER-1.
+      At this point WORKER changes its TIL to RC NO RECORD_VERSION. This allows WORKER to see all records which will be committed later;
+      NOTE: records with ID = 2...5 will not be subject to this statement (because they will not be returned by the data source marked as 's').
+
+    * LOCKER-2 updates row with ID = 5 by reverting the sign of this field (i.e. setting ID to -5), then issues commit and updates this row again (without commit);
+    * LOCKER-1 updates row with ID = 4, setting ID to -4, then issues commit and updates this row again (without commit);
+    * LOCKER-2 updates row with ID = 3, setting ID to -3, then issues commit and updates this row again (without commit);
+    * LOCKER-1 updates row with ID = 2, setting ID to -2, then issues commit and updates this row again (without commit);
+    * LOCKER-2 inserts row (ID,X) = (-1, 1), commits and updates this row again (without commit);
+    * LOCKER-1 issues commit;
+    * LOCKER-2 issues commit;
+
+    Each update/insert performed by the LOCKERs leads to a new record appearing in the data source 's' of the MERGE statement.
+    But note that the last statement: insert into test(id,x) values(-1,1) -- creates a record that will match TWICE when the ON-expression of MERGE
+    evaluates "abs(t.id) = abs(s.id)": the first match will be found for the record with ID = +1 and the second - for the newly added row with ID = -1.
+
+    At this point MERGE must fail with the message
+        Statement failed, SQLSTATE = 21000
+        Multiple source records cannot match the same target during MERGE
+
+    All changes that were performed by MERGE must be rolled back.
+    ISQL which did the MERGE must issue "Records affected: 2" because MERGE actually could process records with ID = 1 and 0 (and failed on the row with ID = -1).
+
+    The above mentioned actions are performed twice: first for the TABLE and second for the naturally-updatable VIEW (v_test), see 'target_object_type'.
+
+    Checked on 4.0.0.2214 SS/CS.
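+
+    For reference, a condensed sketch of the worker session as it is driven via ISQL (paraphrased from the
+    archived implementation preserved in the comment block at the end of this module; on the second pass
+    'test' is replaced by the updatable view 'v_test', according to 'target_object_type'):
+
+        SET KEEP_TRAN_PARAMS ON;
+        set transaction read committed read consistency;
+        set count on;
+        merge into test t
+        using (
+            select s.id, s.x from test as s
+            where s.id <= 1
+            order by s.id DESC
+        ) s
+        on abs(t.id) = abs(s.id)
+        when matched then
+            update set t.x = t.x * s.x;
+        -- expected after all lockers have committed:
+        -- Statement failed, SQLSTATE = 21000
+        -- Multiple source records cannot match the same target during MERGE
+        select id, x from test order by id;
+        rollback;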
+FBTEST: functional.transactions.read_consist_sttm_merge_deny_multiple_matches +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +substitutions = [('=', ''), ('[ \t]+', ' '), ('.After\\s+line\\s+\\d+\\s+.*', '')] -substitutions_1 = [('=', ''), ('[ \t]+', ' '), ('.After\\s+line\\s+\\d+\\s+.*', '')] +db = db_factory() -init_script_1 = """""" +act = python_act('db', substitutions=substitutions) -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -# test_script_1 -#--- -# -# import os -# import sys -# import subprocess -# from subprocess import Popen -# import shutil -# from fdb import services -# import time -# -# os.environ["ISC_USER"] = user_name -# os.environ["ISC_PASSWORD"] = user_password -# -# # How long LOCKER must wait before raise update-conflict error -# # (useful for debug in case os some error in this test algorithm): -# LOCKER_LOCK_TIMEOUT = 5 -# -# ############################## -# # Temply, for debug obly: -# this_fdb=db_conn.database_name -# this_dbg=os.path.splitext(this_fdb)[0] + '.4debug.fdb' -# ############################## -# -# db_conn.close() -# -# #-------------------------------------------- -# -# def flush_and_close( file_handle ): -# # https://docs.python.org/2/library/os.html#os.fsync -# # If you're starting with a Python file object f, -# # first do f.flush(), and -# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. -# global os -# -# file_handle.flush() -# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: -# # otherwise: "OSError: [Errno 9] Bad file descriptor"! -# os.fsync(file_handle.fileno()) -# file_handle.close() -# -# #-------------------------------------------- -# -# def cleanup( f_names_list ): -# global os -# for f in f_names_list: -# if type(f) == file: -# del_name = f.name -# elif type(f) == str: -# del_name = f -# else: -# print('Unrecognized type of element:', f, ' - can not be treated as file.') -# del_name = None -# -# if del_name and os.path.isfile( del_name ): -# os.remove( del_name ) -# -# #-------------------------------------------- -# -# sql_init_ddl = os.path.join(context['files_location'],'read-consist-sttm-restart-DDL.sql') -# -# for target_object_type in('table', 'view'): -# -# -# target_obj = 'test' if target_object_type == 'table' else 'v_test' -# -# f_init_log=open( os.path.join(context['temp_directory'],'read-consist-sttm-merge-deny-multiple-matches-DDL.log'), 'w') -# f_init_err=open( ''.join( ( os.path.splitext(f_init_log.name)[0], '.err') ), 'w') -# -# # RECREATION OF ALL DB OBJECTS: -# # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -# subprocess.call( [context['isql_path'], dsn, '-q', '-i', sql_init_ddl], stdout=f_init_log, stderr=f_init_err ) -# -# flush_and_close(f_init_log) -# flush_and_close(f_init_err) -# -# sql_addi=''' -# set term ^; -# execute block as -# begin -# rdb$set_context('USER_SESSION', 'WHO', 'INIT_DATA'); -# end -# ^ -# set term ;^ -# -# -- INITIAL DATA: add rows with ID = 0...6 -# -- ############# -# insert into %(target_obj)s(id, x) -# select row_number()over()-1, row_number()over()-1 -# from rdb$types rows 6; -# -# commit; -# ''' % locals() -# -# runProgram('isql', [ dsn, '-q' ], sql_addi) -# -# locker_tpb = fdb.TPB() -# locker_tpb.lock_timeout = LOCKER_LOCK_TIMEOUT -# locker_tpb.lock_resolution = fdb.isc_tpb_wait -# -# con_lock_1 = fdb.connect( dsn = dsn, isolation_level=locker_tpb ) -# con_lock_2 = fdb.connect( dsn = dsn, isolation_level=locker_tpb ) 
-# -# con_lock_1.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #1'); end" ) -# con_lock_2.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #2'); end" ) -# -# ######################### -# ### L O C K E R - 1 ### -# ######################### -# -# con_lock_1.execute_immediate( 'update %(target_obj)s set id=id where id = 0' % locals() ) -# -# sql_text=''' -# connect '%(dsn)s'; -# set list on; -# set autoddl off; -# set term ^; -# execute block as -# begin -# rdb$set_context('USER_SESSION','WHO', 'WORKER'); -# end -# ^ -# set term ;^ -# commit; -# SET KEEP_TRAN_PARAMS ON; -# set transaction read committed read consistency; -# set list off; -# set wng off; -# -# set count on; -# -# merge into %(target_obj)s t -# using ( -# select s.id, s.x from %(target_obj)s as s -# where s.id <= 1 -# order by s.id DESC -# ) s -# on abs(t.id) = abs(s.id) -# when matched then -# update set t.x = t.x * s.x -# ; -# -# -- check results: -# -- ############### -# -# select id,x from %(target_obj)s order by id; -# -# select v.old_id, v.op, v.snap_no_rank -# from v_worker_log v -# where v.op = 'upd'; -# -# -# --set width who 10; -# -- DO NOT check this! Values can differ here from one run to another! -# -- select id, trn, who, old_id, new_id, op, rec_vers, global_cn, snap_no from tlog_done order by id; -# rollback; -# -# ''' % dict(globals(), **locals()) -# -# f_worker_sql=open( os.path.join(context['temp_directory'],'read-consist-sttm-merge-deny-multiple-matches.sql'), 'w') -# f_worker_sql.write(sql_text) -# flush_and_close(f_worker_sql) -# -# -# f_worker_log=open( ''.join( ( os.path.splitext(f_worker_sql.name)[0], '.log') ), 'w') -# f_worker_err=open( ''.join( ( os.path.splitext(f_worker_log.name)[0], '.err') ), 'w') -# -# ############################################################################ -# ### L A U N C H W O R K E R U S I N G I S Q L, A S Y N C. ### -# ############################################################################ -# -# p_worker = Popen( [ context['isql_path'], '-pag', '9999999', '-q', '-i', f_worker_sql.name ],stdout=f_worker_log, stderr=f_worker_err) -# time.sleep(1) -# -# cur_lock_1 = con_lock_1.cursor() -# cur_lock_2 = con_lock_2.cursor() -# -# -# sttm = 'update %(target_obj)s set id = ? where abs( id ) = ?' % locals() -# -# ######################### -# ### L O C K E R - 2 ### -# ######################### -# cur_lock_2.execute( sttm, ( -5, 5, ) ) -# con_lock_2.commit() -# cur_lock_2.execute( sttm, ( -5, 5, ) ) -# -# ######################### -# ### L O C K E R - 1 ### -# ######################### -# con_lock_1.commit() -# cur_lock_1.execute( sttm, ( -4, 4, ) ) -# con_lock_1.commit() -# cur_lock_1.execute( sttm, ( -4, 4, ) ) -# -# ######################### -# ### L O C K E R - 2 ### -# ######################### -# con_lock_2.commit() -# cur_lock_2.execute( sttm, ( -3, 3, ) ) -# con_lock_2.commit() -# cur_lock_2.execute( sttm, ( -3, 3, ) ) -# -# ######################### -# ### L O C K E R - 1 ### -# ######################### -# con_lock_1.commit() -# cur_lock_1.execute( sttm, ( -2, 2, ) ) -# con_lock_1.commit() -# cur_lock_1.execute( sttm, ( -2, 2, ) ) -# -# ######################### -# ### L O C K E R - 2 ### -# ######################### -# con_lock_2.commit() -# cur_lock_2.execute( 'insert into %(target_obj)s(id,x) values(?, ?)' % locals(), ( -1, 1, ) ) -# con_lock_2.commit() -# cur_lock_2.execute( 'update %(target_obj)s set id = id where id = ?' 
% locals(), ( -1, ) ) -# -# ######################### -# ### L O C K E R - 1 ### -# ######################### -# con_lock_1.commit() -# -# ######################### -# ### L O C K E R - 2 ### -# ######################### -# con_lock_2.commit() # At this point merge can complete its job but it must FAIL because of multiple matches for abs(t.id) = abs(s.id), i.e. when ID = -1 and 1 -# -# # Close lockers: -# ################ -# for c in (con_lock_1, con_lock_2): -# c.close() -# -# # Here we wait for ISQL complete its mission: -# p_worker.wait() -# -# flush_and_close(f_worker_log) -# flush_and_close(f_worker_err) -# -# # CHECK RESULTS -# ############### -# with open(f_init_err.name,'r') as f: -# for line in f: -# if line: -# print( 'target_object_type: %(target_object_type)s, checked_DML = %(checked_DML)s, UNEXPECTED STDERR for initial SQL: %(line)s' % locals() ) -# -# for f in (f_worker_log, f_worker_err): -# with open(f.name,'r') as g: -# for line in g: -# if line: -# logname = 'STDLOG' if f.name == f_worker_log.name else 'STDERR' -# print( 'target_object_type: %(target_object_type)s, worker %(logname)s: %(line)s' % locals() ) -# -# -# # < for target_object_type in ('table', 'view') -# -# # Cleanup. -# ########## -# time.sleep(1) -# cleanup( (f_init_log, f_init_err, f_worker_sql, f_worker_log, f_worker_err) ) -#--- -act_1 = python_act('db_1', substitutions=substitutions_1) - -expected_stdout_1 = """ +expected_stdout = """ target_object_type: table, worker STDLOG: Records affected: 2 target_object_type: table, worker STDLOG: target_object_type: table, worker STDLOG: ID X @@ -412,9 +140,270 @@ bt-repo mp mp_sttm_restart_max_limit.sql bt-repo mp mp_sttm_restart_max_limit.sql """ +@pytest.mark.skip('FIXME: Not IMPLEMENTED') @pytest.mark.version('>=4.0') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") +# Original python code for this test: +# ----------------------------------- +# +# import os +# import sys +# import subprocess +# from subprocess import Popen +# import shutil +# from fdb import services +# import time +# +# os.environ["ISC_USER"] = user_name +# os.environ["ISC_PASSWORD"] = user_password +# +# # How long LOCKER must wait before raise update-conflict error +# # (useful for debug in case os some error in this test algorithm): +# LOCKER_LOCK_TIMEOUT = 5 +# +# ############################## +# # Temply, for debug obly: +# this_fdb=db_conn.database_name +# this_dbg=os.path.splitext(this_fdb)[0] + '.4debug.fdb' +# ############################## +# +# db_conn.close() +# +# #-------------------------------------------- +# +# def flush_and_close( file_handle ): +# # https://docs.python.org/2/library/os.html#os.fsync +# # If you're starting with a Python file object f, +# # first do f.flush(), and +# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. +# global os +# +# file_handle.flush() +# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: +# # otherwise: "OSError: [Errno 9] Bad file descriptor"! 
+# os.fsync(file_handle.fileno()) +# file_handle.close() +# +# #-------------------------------------------- +# +# def cleanup( f_names_list ): +# global os +# for f in f_names_list: +# if type(f) == file: +# del_name = f.name +# elif type(f) == str: +# del_name = f +# else: +# print('Unrecognized type of element:', f, ' - can not be treated as file.') +# del_name = None +# +# if del_name and os.path.isfile( del_name ): +# os.remove( del_name ) +# +# #-------------------------------------------- +# +# sql_init_ddl = os.path.join(context['files_location'],'read-consist-sttm-restart-DDL.sql') +# +# for target_object_type in('table', 'view'): +# +# +# target_obj = 'test' if target_object_type == 'table' else 'v_test' +# +# f_init_log=open( os.path.join(context['temp_directory'],'read-consist-sttm-merge-deny-multiple-matches-DDL.log'), 'w') +# f_init_err=open( ''.join( ( os.path.splitext(f_init_log.name)[0], '.err') ), 'w') +# +# # RECREATION OF ALL DB OBJECTS: +# # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# subprocess.call( [context['isql_path'], dsn, '-q', '-i', sql_init_ddl], stdout=f_init_log, stderr=f_init_err ) +# +# flush_and_close(f_init_log) +# flush_and_close(f_init_err) +# +# sql_addi=''' +# set term ^; +# execute block as +# begin +# rdb$set_context('USER_SESSION', 'WHO', 'INIT_DATA'); +# end +# ^ +# set term ;^ +# +# -- INITIAL DATA: add rows with ID = 0...6 +# -- ############# +# insert into %(target_obj)s(id, x) +# select row_number()over()-1, row_number()over()-1 +# from rdb$types rows 6; +# +# commit; +# ''' % locals() +# +# runProgram('isql', [ dsn, '-q' ], sql_addi) +# +# locker_tpb = fdb.TPB() +# locker_tpb.lock_timeout = LOCKER_LOCK_TIMEOUT +# locker_tpb.lock_resolution = fdb.isc_tpb_wait +# +# con_lock_1 = fdb.connect( dsn = dsn, isolation_level=locker_tpb ) +# con_lock_2 = fdb.connect( dsn = dsn, isolation_level=locker_tpb ) +# +# con_lock_1.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #1'); end" ) +# con_lock_2.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #2'); end" ) +# +# ######################### +# ### L O C K E R - 1 ### +# ######################### +# +# con_lock_1.execute_immediate( 'update %(target_obj)s set id=id where id = 0' % locals() ) +# +# sql_text=''' +# connect '%(dsn)s'; +# set list on; +# set autoddl off; +# set term ^; +# execute block as +# begin +# rdb$set_context('USER_SESSION','WHO', 'WORKER'); +# end +# ^ +# set term ;^ +# commit; +# SET KEEP_TRAN_PARAMS ON; +# set transaction read committed read consistency; +# set list off; +# set wng off; +# +# set count on; +# +# merge into %(target_obj)s t +# using ( +# select s.id, s.x from %(target_obj)s as s +# where s.id <= 1 +# order by s.id DESC +# ) s +# on abs(t.id) = abs(s.id) +# when matched then +# update set t.x = t.x * s.x +# ; +# +# -- check results: +# -- ############### +# +# select id,x from %(target_obj)s order by id; +# +# select v.old_id, v.op, v.snap_no_rank +# from v_worker_log v +# where v.op = 'upd'; +# +# +# --set width who 10; +# -- DO NOT check this! Values can differ here from one run to another! 
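The worker MERGE above joins on abs(t.id) = abs(s.id), and the lockers deliberately negate IDs 2..5 and insert -1 so that, on the final restart, one target row is matched by two source rows. A small pure-Python sketch (no database needed) of the row sets described in these comments makes the conflict visible; Firebird must then deny the MERGE because several source records match the same target row.

# Pure-Python illustration of why the MERGE above must fail.
# By the time the worker resumes, the table holds these IDs: the lockers have
# negated 2..5 and inserted -1, while 0 and 1 keep their original values.
target_ids = [-5, -4, -3, -2, -1, 0, 1]

# The worker's MERGE uses:  using (select ... where s.id <= 1)  on abs(t.id) = abs(s.id)
source_ids = [i for i in target_ids if i <= 1]

for t in target_ids:
    matches = [s for s in source_ids if abs(t) == abs(s)]
    if len(matches) > 1:
        # Two source rows hit one target row for |id| = 1, which a MERGE may not do.
        print(f'target id={t} is matched by source rows {matches} -> MERGE must fail')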
+# -- select id, trn, who, old_id, new_id, op, rec_vers, global_cn, snap_no from tlog_done order by id; +# rollback; +# +# ''' % dict(globals(), **locals()) +# +# f_worker_sql=open( os.path.join(context['temp_directory'],'read-consist-sttm-merge-deny-multiple-matches.sql'), 'w') +# f_worker_sql.write(sql_text) +# flush_and_close(f_worker_sql) +# +# +# f_worker_log=open( ''.join( ( os.path.splitext(f_worker_sql.name)[0], '.log') ), 'w') +# f_worker_err=open( ''.join( ( os.path.splitext(f_worker_log.name)[0], '.err') ), 'w') +# +# ############################################################################ +# ### L A U N C H W O R K E R U S I N G I S Q L, A S Y N C. ### +# ############################################################################ +# +# p_worker = Popen( [ context['isql_path'], '-pag', '9999999', '-q', '-i', f_worker_sql.name ],stdout=f_worker_log, stderr=f_worker_err) +# time.sleep(1) +# +# cur_lock_1 = con_lock_1.cursor() +# cur_lock_2 = con_lock_2.cursor() +# +# +# sttm = 'update %(target_obj)s set id = ? where abs( id ) = ?' % locals() +# +# ######################### +# ### L O C K E R - 2 ### +# ######################### +# cur_lock_2.execute( sttm, ( -5, 5, ) ) +# con_lock_2.commit() +# cur_lock_2.execute( sttm, ( -5, 5, ) ) +# +# ######################### +# ### L O C K E R - 1 ### +# ######################### +# con_lock_1.commit() +# cur_lock_1.execute( sttm, ( -4, 4, ) ) +# con_lock_1.commit() +# cur_lock_1.execute( sttm, ( -4, 4, ) ) +# +# ######################### +# ### L O C K E R - 2 ### +# ######################### +# con_lock_2.commit() +# cur_lock_2.execute( sttm, ( -3, 3, ) ) +# con_lock_2.commit() +# cur_lock_2.execute( sttm, ( -3, 3, ) ) +# +# ######################### +# ### L O C K E R - 1 ### +# ######################### +# con_lock_1.commit() +# cur_lock_1.execute( sttm, ( -2, 2, ) ) +# con_lock_1.commit() +# cur_lock_1.execute( sttm, ( -2, 2, ) ) +# +# ######################### +# ### L O C K E R - 2 ### +# ######################### +# con_lock_2.commit() +# cur_lock_2.execute( 'insert into %(target_obj)s(id,x) values(?, ?)' % locals(), ( -1, 1, ) ) +# con_lock_2.commit() +# cur_lock_2.execute( 'update %(target_obj)s set id = id where id = ?' % locals(), ( -1, ) ) +# +# ######################### +# ### L O C K E R - 1 ### +# ######################### +# con_lock_1.commit() +# +# ######################### +# ### L O C K E R - 2 ### +# ######################### +# con_lock_2.commit() # At this point merge can complete its job but it must FAIL because of multiple matches for abs(t.id) = abs(s.id), i.e. when ID = -1 and 1 +# +# # Close lockers: +# ################ +# for c in (con_lock_1, con_lock_2): +# c.close() +# +# # Here we wait for ISQL complete its mission: +# p_worker.wait() +# +# flush_and_close(f_worker_log) +# flush_and_close(f_worker_err) +# +# # CHECK RESULTS +# ############### +# with open(f_init_err.name,'r') as f: +# for line in f: +# if line: +# print( 'target_object_type: %(target_object_type)s, checked_DML = %(checked_DML)s, UNEXPECTED STDERR for initial SQL: %(line)s' % locals() ) +# +# for f in (f_worker_log, f_worker_err): +# with open(f.name,'r') as g: +# for line in g: +# if line: +# logname = 'STDLOG' if f.name == f_worker_log.name else 'STDERR' +# print( 'target_object_type: %(target_object_type)s, worker %(logname)s: %(line)s' % locals() ) +# +# +# # < for target_object_type in ('table', 'view') +# +# # Cleanup. 
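The orchestration above follows one pattern used throughout these tests: write the worker script to disk, start isql on it asynchronously, interleave the locker actions, then wait for the worker and read its logs. The following is a stripped-down sketch of that pattern built only on the standard library; the isql binary location and file paths are placeholder assumptions, and the worker script is expected to contain its own CONNECT statement, as in the code above.

# Sketch of the "launch worker via isql, asynchronously" pattern used above.
import subprocess
from pathlib import Path

ISQL = 'isql'                          # assumption: Firebird isql is on PATH
worker_sql = Path('/tmp/worker.sql')   # assumption: script with the DML that must hang
worker_log = Path('/tmp/worker.log')
worker_err = Path('/tmp/worker.err')

with worker_log.open('w') as out, worker_err.open('w') as err:
    # The worker script performs its own CONNECT, so no DSN is passed here.
    p_worker = subprocess.Popen([ISQL, '-pag', '9999999', '-q', '-i', str(worker_sql)],
                                stdout=out, stderr=err)
    # ... interleave the locker commits/updates here, as in the commented code ...
    p_worker.wait()   # the worker finishes only after the lockers release all rows

print(worker_log.read_text())
print(worker_err.read_text())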
+# ########## +# time.sleep(1) +# cleanup( (f_init_log, f_init_err, f_worker_sql, f_worker_log, f_worker_err) ) +# ----------------------------------- diff --git a/tests/functional/transactions/test_read_consist_sttm_restart_max_limit.py b/tests/functional/transactions/test_read_consist_sttm_restart_max_limit.py index 512c2e03..49da6c07 100644 --- a/tests/functional/transactions/test_read_consist_sttm_restart_max_limit.py +++ b/tests/functional/transactions/test_read_consist_sttm_restart_max_limit.py @@ -1,335 +1,65 @@ #coding:utf-8 -# -# id: functional.transactions.read_consist_sttm_restart_max_limit -# title: READ CONSISTENCY. Maximal number of statement-level restarts must be 10. -# decription: -# Initial article for reading: -# https://asktom.oracle.com/pls/asktom/f?p=100:11:::::P11_QUESTION_ID:11504247549852 -# Note on terms which are used there: "BLOCKER", "LONG" and "FIRSTLAST" - their names are slightly changed here -# to: LOCKER-1, WORKER and LOCKER-2 respectively. -# -# See also: doc\\README.read_consistency.md -# Letter from Vlad: 15.09.2020 20:04 // subj "read consistency // addi test(s)" -# -# ::: NB ::: -# This test uses script %FBT_REPO% -# iles -# ead-consist-sttm-restart-DDL.sql which contains common DDL for all other such tests. -# Particularly, it contains two TRIGGERS (TLOG_WANT and TLOG_DONE) which are used for logging of planned actions and actual -# results against table TEST. These triggers use AUTONOMOUS transactions in order to have ability to see results in any -# outcome of test. -# -# Detailed description can be found in "read-consist-sttm-restart-on-update-04.fbt", this test is based on the same ideas: -# * initial script add records with ID = 1...12 and does commit; -# * start locker-1 which catch record with ID = 1 that is to be involved futher in cursor of worker; -# * start worker DML which must change records in descending order of ID, starting with ID=2; worker must write ID = ID * 100 for each row; -# * start locker-2 which changes record with ID=12 by assigning this ID to -12, makes COMMIT and locks this record again (makes UPDATE w/o commit); -# * locker-1 releases record with ID=1, then changes record with ID=11 by assigning this ID to -11, makes COMMIT and locks this record again; -# * locker-2 releases record with ID=-12, then changes record with ID=10 by assigning this ID to -10, makes COMMIT and locks this record again; -# * ... and so on, until number of such actions iterations less 10 or 11 (see below) ... -# -# Each UPDATE that is performed by lockers (starting from ID=11) produces new ID (-11, -10, -9, ...) that was not present in the scope which worker -# could see before this action. This forces worker to make statement-level restart. -# -# When number of such new IDs is less than 10 then worker must finish its job successfully. -# But if this number if 11 then worker must raise exception (SQLSTATE = 40001 / deadlock / update conflicts) and rollback all changes. -# -# Test verifies both cases, using loop with TWO iterations (see 'main_iter' below): first for 10 and second to 11 records that are to be updated. -# After each iteration we do queries to the table TEST and to the view V_WORKER_LOG which contains data generated by trigger TLOG_DONE for logging. -# -# Test verifies restart number for three modes of WORKER job: UPDATE, MERGE, DELETE and SELECT WITH LOCK (see loop for checked_DML: 'upd', 'mer', 'del', 'lok'). -# NOTE-1. 
-# For 'SELECT WITH LOCK' we must provide that no rows will be returned to client while worker is waiting for records.
-# EXECUTE BLOCK with for-select which does nothing is used for this.
-#
-# NOTE-2.
-# SELECT WITH LOCK does not allow to use VIEW as subject of query (raises "-WITH LOCK can be used only with a single physical table").
-# This error is expected in current FB versions and its text presents in expected_std* section.
-#
-# Checked on 4.0.0.2195 SS/CS.
-# 29.09.2020: added for-loop in order to check different target objects: TABLE ('test') and VIEW ('v_test'), see 'target_object_type'.
-#
-#
-# tracker_id:
-# min_versions: ['4.0']
-# versions: 4.0
-# qmid:
+
+"""
+ID: transactions.read-consist-sttm-restart-max-limit
+TITLE: READ CONSISTENCY. Maximal number of statement-level restarts must be 10.
+DESCRIPTION:
+    Initial article for reading:
+    https://asktom.oracle.com/pls/asktom/f?p=100:11:::::P11_QUESTION_ID:11504247549852
+    Note on terms which are used there: "BLOCKER", "LONG" and "FIRSTLAST" - their names are slightly changed here
+    to: LOCKER-1, WORKER and LOCKER-2 respectively.
+
+    See also: doc/README.read_consistency.md
+    Letter from Vlad: 15.09.2020 20:04 // subj "read consistency // addi test(s)"
+
+    ::: NB :::
+    This test uses script %FBT_REPO%/files/read-consist-sttm-restart-DDL.sql which contains common DDL for all other such tests.
+    Particularly, it contains two TRIGGERS (TLOG_WANT and TLOG_DONE) which are used for logging of planned actions and actual
+    results against table TEST. These triggers use AUTONOMOUS transactions in order to have ability to see results in any
+    outcome of test.
+
+    Detailed description can be found in "read-consist-sttm-restart-on-update-04.fbt"; this test is based on the same ideas:
+    * initial script adds records with ID = 1...12 and does commit;
+    * start locker-1 which catches record with ID = 1 that is to be involved further in the cursor of worker;
+    * start worker DML which must change records in descending order of ID, starting with ID=2; worker must write ID = ID * 100 for each row;
+    * start locker-2 which changes record with ID=12 by assigning this ID to -12, makes COMMIT and locks this record again (makes UPDATE w/o commit);
+    * locker-1 releases record with ID=1, then changes record with ID=11 by assigning this ID to -11, makes COMMIT and locks this record again;
+    * locker-2 releases record with ID=-12, then changes record with ID=10 by assigning this ID to -10, makes COMMIT and locks this record again;
+    * ... and so on, until the number of such iterations reaches 10 or 11 (see below) ...
+
+    Each UPDATE that is performed by lockers (starting from ID=11) produces new ID (-11, -10, -9, ...) that was not present in the scope which worker
+    could see before this action. This forces worker to make statement-level restart.
+
+    When the number of such new IDs is less than 10 then worker must finish its job successfully.
+    But if this number is 11 then worker must raise an exception (SQLSTATE = 40001 / deadlock / update conflicts) and rollback all changes.
+
+    Test verifies both cases, using loop with TWO iterations (see 'main_iter' below): first for 10 and second for 11 records that are to be updated.
+    After each iteration we do queries to the table TEST and to the view V_WORKER_LOG which contains data generated by trigger TLOG_DONE for logging.
+
+    Test verifies restart number for four modes of WORKER job: UPDATE, MERGE, DELETE and SELECT WITH LOCK (see loop for checked_DML: 'upd', 'mer', 'del', 'lok').
+    NOTE-1.
+ For 'SELECT WITH LOCK' we must provide that no rows will be returned to client while worker is waiting for records. + EXECUTE BLOCK with for-select which does nothing is used for this. + + NOTE-2. + SELECT WITH LOCK does not allow to use VIEW as subject of query (raises "-WITH LOCK can be used only with a single physical table"). + This error is expected in current FB versions and its text presents in expected_std* section. + + Checked on 4.0.0.2195 SS/CS. + 29.09.2020: added for-loop in order to check different target objects: TABLE ('test') and VIEW ('v_test'), see 'target_object_type'. +FBTEST: functional.transactions.read_consist_sttm_restart_max_limit +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +substitutions = [('=', ''), ('[ \t]+', ' '), ('.*After line \\d+.*', ''), ('.*[\\-]?concurrent transaction number is \\d+', 'concurrent transaction number is'), ('.*At\\s+block\\s+line(:)?\\s+\\d+(,)?\\s+col(:)?\\s+\\d+', ''), ('.After\\s+line\\s+\\d+\\s+.*', '')] -substitutions_1 = [('=', ''), ('[ \t]+', ' '), ('.*After line \\d+.*', ''), ('.*[\\-]?concurrent transaction number is \\d+', 'concurrent transaction number is'), ('.*At\\s+block\\s+line(:)?\\s+\\d+(,)?\\s+col(:)?\\s+\\d+', ''), ('.After\\s+line\\s+\\d+\\s+.*', '')] +db = db_factory() -init_script_1 = """""" +act = python_act('db', substitutions=substitutions) -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -# test_script_1 -#--- -# -# import os -# import sys -# import subprocess -# from subprocess import Popen -# import shutil -# from fdb import services -# import time -# -# os.environ["ISC_USER"] = user_name -# os.environ["ISC_PASSWORD"] = user_password -# -# # How long LOCKER must wait before raise update-conflict error -# # (useful for debug in case os some error in this test algorithm): -# LOCKER_LOCK_TIMEOUT = 5 -# -# ############################## -# # Temply, for debug obly: -# this_fdb=db_conn.database_name -# this_dbg=os.path.splitext(this_fdb)[0] + '.4debug.fdb' -# ############################## -# -# db_conn.close() -# fb_home = services.connect(host='localhost').get_home_directory() -# -# #-------------------------------------------- -# -# def flush_and_close( file_handle ): -# # https://docs.python.org/2/library/os.html#os.fsync -# # If you're starting with a Python file object f, -# # first do f.flush(), and -# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. -# global os -# -# file_handle.flush() -# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: -# # otherwise: "OSError: [Errno 9] Bad file descriptor"! 
-# os.fsync(file_handle.fileno()) -# file_handle.close() -# -# #-------------------------------------------- -# -# def cleanup( f_names_list ): -# global os -# for f in f_names_list: -# if type(f) == file: -# del_name = f.name -# elif type(f) == str: -# del_name = f -# else: -# print('Unrecognized type of element:', f, ' - can not be treated as file.') -# del_name = None -# -# if del_name and os.path.isfile( del_name ): -# os.remove( del_name ) -# -# #-------------------------------------------- -# -# sql_init_ddl = os.path.join(context['files_location'],'read-consist-sttm-restart-DDL.sql') -# -# for target_object_type in('table', 'view'): -# -# target_obj = 'test' if target_object_type == 'table' else 'v_test' -# -# for checked_DML in('upd', 'mer', 'del', 'lok'): -# #for checked_DML in('lok',): -# worker_dml = "select 'UNKNOWN MODE' as msg from rdb$database" -# if checked_DML == 'upd': -# worker_dml = 'update %(target_obj)s set id = id * 100 where id <= 2 order by id DESC;' % locals() -# elif checked_DML == 'mer': -# worker_dml = 'merge into %(target_obj)s t using (select x.id from %(target_obj)s x where x.id <= 2 order by id DESC) s on t.id = s.id when matched then update set t.id = s.id * 100;' % locals() -# elif checked_DML == 'del': -# worker_dml = 'delete from %(target_obj)s where id <= 2 order by id DESC;' % locals() -# elif checked_DML == 'lok': -# # ::: NB ::: -# # We must SUPRESS sending record to client for SELECT WITH LOCK, otherwise error -# # deadlock/update conflist will raise immediately! Because of this, we enclose -# # such select into execute block which returns nothing: -# worker_dml = 'set term ^; execute block as declare c int; begin for select id from %(target_obj)s where id<=2 order by id desc with lock into c do begin end end^ set term ;^' % locals() -# -# for main_iter in (0,1): -# #for main_iter in (1,): -# -# ################################################################################### -# ### H O W M A N Y R E S T A R T S W E W A N T T O C H E C K ### -# ################################################################################### -# ROWS_TO_ADD = 10 + 2 * main_iter -# -# -# f_init_log=open( os.path.join(context['temp_directory'],'read-consist-sttm-restart-DDL.log'), 'w') -# f_init_err=open( ''.join( ( os.path.splitext(f_init_log.name)[0], '.err') ), 'w') -# -# # RECREATION OF ALL DB OBJECTS: -# # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -# subprocess.call( [context['isql_path'], dsn, '-q', '-i', sql_init_ddl], stdout=f_init_log, stderr=f_init_err ) -# -# flush_and_close(f_init_log) -# flush_and_close(f_init_err) -# -# sql_addi=''' -# set term ^; -# execute block as -# begin -# rdb$set_context('USER_SESSION', 'WHO', 'INIT_DATA'); -# end -# ^ -# set term ;^ -# insert into %(target_obj)s(id, x) select row_number()over(),row_number()over() from rdb$types rows (2 + %(ROWS_TO_ADD)s); -- <<< INITIAL DATA -# commit; -# ''' % locals() -# -# runProgram('isql', [ dsn, '-q' ], sql_addi) -# -# locker_tpb = fdb.TPB() -# locker_tpb.lock_timeout = LOCKER_LOCK_TIMEOUT -# locker_tpb.lock_resolution = fdb.isc_tpb_wait -# -# con_lock_1 = fdb.connect( dsn = dsn, isolation_level=locker_tpb ) -# con_lock_2 = fdb.connect( dsn = dsn, isolation_level=locker_tpb ) -# -# con_lock_1.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #1'); end" ) -# con_lock_2.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #2'); end" ) -# -# ######################### -# ### L O C K E R - 1 ### -# ######################### -# 
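The checked_DML branching shown above selects one worker statement per mode ('upd', 'mer', 'del', 'lok'). A compact sketch of the same mapping, with the SQL templates copied from that code (target_obj is 'test' or 'v_test'):

# Sketch: the four worker DML variants built by the checked_DML branching above.
def worker_statement(checked_dml: str, target_obj: str) -> str:
    templates = {
        'upd': "update {t} set id = id * 100 where id <= 2 order by id DESC;",
        'mer': "merge into {t} t using (select x.id from {t} x where x.id <= 2 "
               "order by id DESC) s on t.id = s.id when matched then update set t.id = s.id * 100;",
        'del': "delete from {t} where id <= 2 order by id DESC;",
        # SELECT WITH LOCK is wrapped in EXECUTE BLOCK so that no row reaches the
        # client while the worker is still waiting on locked records:
        'lok': "set term ^; execute block as declare c int; begin "
               "for select id from {t} where id<=2 order by id desc with lock into c "
               "do begin end end^ set term ;^",
    }
    return templates[checked_dml].format(t=target_obj)

print(worker_statement('mer', 'v_test'))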
-# con_lock_1.execute_immediate( 'update %(target_obj)s set id=id where id = 1' % locals() ) -# -# sql_text=''' -# connect '%(dsn)s'; -# set list on; -# set autoddl off; -# set term ^; -# execute block as -# begin -# rdb$set_context('USER_SESSION','WHO', 'WORKER'); -# end -# ^ -# set term ;^ -# commit; -# SET KEEP_TRAN_PARAMS ON; -# set transaction read committed read consistency; -# set list off; -# set wng off; -# -# set count on; -# %(worker_dml)s -- UPDATE or DELETE or SELECT WITH LOCK; all ORDER BY ID DESC; MUST HANG BECAUSE OF LOCKERs -# -# -- check results: -# -- ############### -# -# select id from %(target_obj)s order by id; -# -# select v.old_id, v.op, v.snap_no_rank -# from v_worker_log v -# where v.op = iif( '%(checked_DML)s' = 'mer', 'upd', '%(checked_DML)s'); -- 'UPD' or 'DEL'; for 'SELECT WITH LOCK' no records will be in v_worker_log. -# -# -# --set width who 10; -# -- DO NOT check this! Values can differ here from one run to another! -# -- select id, trn, who, old_id, new_id, op, rec_vers, global_cn, snap_no from tlog_done order by id; -# rollback; -# -# ''' % dict(globals(), **locals()) -# -# f_worker_sql=open( os.path.join(context['temp_directory'],'tmp_sttm_restart_max_limit.sql'), 'w') -# f_worker_sql.write(sql_text) -# flush_and_close(f_worker_sql) -# -# -# f_worker_log=open( ''.join( ( os.path.splitext(f_worker_sql.name)[0], '.log') ), 'w') -# f_worker_err=open( ''.join( ( os.path.splitext(f_worker_log.name)[0], '.err') ), 'w') -# -# ############################################################################ -# ### L A U N C H W O R K E R U S I N G I S Q L, A S Y N C. ### -# ############################################################################ -# -# p_worker = Popen( [ context['isql_path'], '-pag', '9999999', '-q', '-i', f_worker_sql.name ],stdout=f_worker_log, stderr=f_worker_err) -# time.sleep(1) -# -# cur_lock_1 = con_lock_1.cursor() -# cur_lock_2 = con_lock_2.cursor() -# sttm = 'update %(target_obj)s set id = ? where abs( id ) = ?' 
% locals() -# -# -# for i in range(0,ROWS_TO_ADD): -# v_id = 2 + ROWS_TO_ADD-i -# if i % 2 == 0: -# cur_lock_2.execute( sttm, ( -abs( v_id ), v_id, ) ) -# con_lock_2.commit() -# cur_lock_2.execute( sttm, ( -abs( v_id ), v_id, ) ) -# con_lock_1.commit() -# else: -# cur_lock_1.execute( sttm, ( -abs( v_id ), v_id, ) ) -# con_lock_1.commit() -# cur_lock_1.execute( sttm, ( -abs( v_id ), v_id, ) ) -# con_lock_2.commit() -# -# cur_lock_1.close() -# cur_lock_2.close() -# -# if ROWS_TO_ADD % 2 == 0: -# con_lock_2.commit() -# con_lock_1.commit() -# else: -# con_lock_1.commit() -# con_lock_2.commit() -# -# # Close lockers: -# ################ -# for c in (con_lock_1, con_lock_2): -# c.close() -# -# # Here we wait for ISQL complete its mission: -# p_worker.wait() -# -# flush_and_close(f_worker_log) -# flush_and_close(f_worker_err) -# -# # CHECK RESULTS -# ############### -# -# print( 'target_object_type: %(target_object_type)s, checked_DML = %(checked_DML)s, iter = %(main_iter)s, restarts number to be tested: %(ROWS_TO_ADD)s' % locals() ) -# -# with open(f_init_err.name,'r') as f: -# for line in f: -# if line: -# print( 'target_object_type: %(target_object_type)s, checked_DML = %(checked_DML)s, iter = %(main_iter)s, UNEXPECTED STDERR for initial SQL: %(line)s' % locals() ) -# -# for f in (f_worker_log, f_worker_err): -# with open(f.name,'r') as g: -# for line in g: -# if line: -# logname = 'STDLOG' if f.name == f_worker_log.name else 'STDERR' -# print( 'target_object_type: %(target_object_type)s, checked_DML = %(checked_DML)s, iter = %(main_iter)s, worker %(logname)s: %(line)s' % locals() ) -# -# -# #< for main_iter in (0,1) -# # < for checked_DML in ('upd', 'mer', 'del', 'lok') -# # < for target_object_type in ('table', 'view') -# # Cleanup. -# ########## -# time.sleep(1) -# cleanup( (f_init_log, f_init_err, f_worker_sql, f_worker_log, f_worker_err) ) -# -# ''' -# 'substitutions':[ -# ('=','') -# ,('[ ]+',' ') -# ,('.*After line \\d+.*', '') -# ,('.*[\\-]?concurrent transaction number is \\d+', 'concurrent transaction number is') -# ,('.*At\\s+block\\s+line(:)?\\s+\\d+(,)?\\s+col(:)?\\s+\\d+', '') -# ] -# -# ''' -# -#--- -act_1 = python_act('db_1', substitutions=substitutions_1) - -expected_stdout_1 = """ +expected_stdout = """ target_object_type: table, checked_DML = upd, iter = 0, restarts number to be tested: 10 target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG: Records affected: 12 target_object_type: table, checked_DML = upd, iter = 0, worker STDLOG: @@ -1062,9 +792,268 @@ bt-repo mp mp_sttm_restart_max_limit.sql bt-repo mp mp_sttm_restart_max_limit.sql """ +@pytest.mark.skip('FIXME: Not IMPLEMENTED') @pytest.mark.version('>=4.0') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") +# Original python code for this test: +# ----------------------------------- +# +# import os +# import sys +# import subprocess +# from subprocess import Popen +# import shutil +# from fdb import services +# import time +# +# os.environ["ISC_USER"] = user_name +# os.environ["ISC_PASSWORD"] = user_password +# +# # How long LOCKER must wait before raise update-conflict error +# # (useful for debug in case os some error in this test algorithm): +# LOCKER_LOCK_TIMEOUT = 5 +# +# ############################## +# # Temply, for debug obly: +# this_fdb=db_conn.database_name +# this_dbg=os.path.splitext(this_fdb)[0] + '.4debug.fdb' +# ############################## +# +# db_conn.close() +# fb_home = 
services.connect(host='localhost').get_home_directory() +# +# #-------------------------------------------- +# +# def flush_and_close( file_handle ): +# # https://docs.python.org/2/library/os.html#os.fsync +# # If you're starting with a Python file object f, +# # first do f.flush(), and +# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. +# global os +# +# file_handle.flush() +# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: +# # otherwise: "OSError: [Errno 9] Bad file descriptor"! +# os.fsync(file_handle.fileno()) +# file_handle.close() +# +# #-------------------------------------------- +# +# def cleanup( f_names_list ): +# global os +# for f in f_names_list: +# if type(f) == file: +# del_name = f.name +# elif type(f) == str: +# del_name = f +# else: +# print('Unrecognized type of element:', f, ' - can not be treated as file.') +# del_name = None +# +# if del_name and os.path.isfile( del_name ): +# os.remove( del_name ) +# +# #-------------------------------------------- +# +# sql_init_ddl = os.path.join(context['files_location'],'read-consist-sttm-restart-DDL.sql') +# +# for target_object_type in('table', 'view'): +# +# target_obj = 'test' if target_object_type == 'table' else 'v_test' +# +# for checked_DML in('upd', 'mer', 'del', 'lok'): +# #for checked_DML in('lok',): +# worker_dml = "select 'UNKNOWN MODE' as msg from rdb$database" +# if checked_DML == 'upd': +# worker_dml = 'update %(target_obj)s set id = id * 100 where id <= 2 order by id DESC;' % locals() +# elif checked_DML == 'mer': +# worker_dml = 'merge into %(target_obj)s t using (select x.id from %(target_obj)s x where x.id <= 2 order by id DESC) s on t.id = s.id when matched then update set t.id = s.id * 100;' % locals() +# elif checked_DML == 'del': +# worker_dml = 'delete from %(target_obj)s where id <= 2 order by id DESC;' % locals() +# elif checked_DML == 'lok': +# # ::: NB ::: +# # We must SUPRESS sending record to client for SELECT WITH LOCK, otherwise error +# # deadlock/update conflist will raise immediately! 
Because of this, we enclose +# # such select into execute block which returns nothing: +# worker_dml = 'set term ^; execute block as declare c int; begin for select id from %(target_obj)s where id<=2 order by id desc with lock into c do begin end end^ set term ;^' % locals() +# +# for main_iter in (0,1): +# #for main_iter in (1,): +# +# ################################################################################### +# ### H O W M A N Y R E S T A R T S W E W A N T T O C H E C K ### +# ################################################################################### +# ROWS_TO_ADD = 10 + 2 * main_iter +# +# +# f_init_log=open( os.path.join(context['temp_directory'],'read-consist-sttm-restart-DDL.log'), 'w') +# f_init_err=open( ''.join( ( os.path.splitext(f_init_log.name)[0], '.err') ), 'w') +# +# # RECREATION OF ALL DB OBJECTS: +# # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# subprocess.call( [context['isql_path'], dsn, '-q', '-i', sql_init_ddl], stdout=f_init_log, stderr=f_init_err ) +# +# flush_and_close(f_init_log) +# flush_and_close(f_init_err) +# +# sql_addi=''' +# set term ^; +# execute block as +# begin +# rdb$set_context('USER_SESSION', 'WHO', 'INIT_DATA'); +# end +# ^ +# set term ;^ +# insert into %(target_obj)s(id, x) select row_number()over(),row_number()over() from rdb$types rows (2 + %(ROWS_TO_ADD)s); -- <<< INITIAL DATA +# commit; +# ''' % locals() +# +# runProgram('isql', [ dsn, '-q' ], sql_addi) +# +# locker_tpb = fdb.TPB() +# locker_tpb.lock_timeout = LOCKER_LOCK_TIMEOUT +# locker_tpb.lock_resolution = fdb.isc_tpb_wait +# +# con_lock_1 = fdb.connect( dsn = dsn, isolation_level=locker_tpb ) +# con_lock_2 = fdb.connect( dsn = dsn, isolation_level=locker_tpb ) +# +# con_lock_1.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #1'); end" ) +# con_lock_2.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #2'); end" ) +# +# ######################### +# ### L O C K E R - 1 ### +# ######################### +# +# con_lock_1.execute_immediate( 'update %(target_obj)s set id=id where id = 1' % locals() ) +# +# sql_text=''' +# connect '%(dsn)s'; +# set list on; +# set autoddl off; +# set term ^; +# execute block as +# begin +# rdb$set_context('USER_SESSION','WHO', 'WORKER'); +# end +# ^ +# set term ;^ +# commit; +# SET KEEP_TRAN_PARAMS ON; +# set transaction read committed read consistency; +# set list off; +# set wng off; +# +# set count on; +# %(worker_dml)s -- UPDATE or DELETE or SELECT WITH LOCK; all ORDER BY ID DESC; MUST HANG BECAUSE OF LOCKERs +# +# -- check results: +# -- ############### +# +# select id from %(target_obj)s order by id; +# +# select v.old_id, v.op, v.snap_no_rank +# from v_worker_log v +# where v.op = iif( '%(checked_DML)s' = 'mer', 'upd', '%(checked_DML)s'); -- 'UPD' or 'DEL'; for 'SELECT WITH LOCK' no records will be in v_worker_log. +# +# +# --set width who 10; +# -- DO NOT check this! Values can differ here from one run to another! 
+# -- select id, trn, who, old_id, new_id, op, rec_vers, global_cn, snap_no from tlog_done order by id; +# rollback; +# +# ''' % dict(globals(), **locals()) +# +# f_worker_sql=open( os.path.join(context['temp_directory'],'tmp_sttm_restart_max_limit.sql'), 'w') +# f_worker_sql.write(sql_text) +# flush_and_close(f_worker_sql) +# +# +# f_worker_log=open( ''.join( ( os.path.splitext(f_worker_sql.name)[0], '.log') ), 'w') +# f_worker_err=open( ''.join( ( os.path.splitext(f_worker_log.name)[0], '.err') ), 'w') +# +# ############################################################################ +# ### L A U N C H W O R K E R U S I N G I S Q L, A S Y N C. ### +# ############################################################################ +# +# p_worker = Popen( [ context['isql_path'], '-pag', '9999999', '-q', '-i', f_worker_sql.name ],stdout=f_worker_log, stderr=f_worker_err) +# time.sleep(1) +# +# cur_lock_1 = con_lock_1.cursor() +# cur_lock_2 = con_lock_2.cursor() +# sttm = 'update %(target_obj)s set id = ? where abs( id ) = ?' % locals() +# +# +# for i in range(0,ROWS_TO_ADD): +# v_id = 2 + ROWS_TO_ADD-i +# if i % 2 == 0: +# cur_lock_2.execute( sttm, ( -abs( v_id ), v_id, ) ) +# con_lock_2.commit() +# cur_lock_2.execute( sttm, ( -abs( v_id ), v_id, ) ) +# con_lock_1.commit() +# else: +# cur_lock_1.execute( sttm, ( -abs( v_id ), v_id, ) ) +# con_lock_1.commit() +# cur_lock_1.execute( sttm, ( -abs( v_id ), v_id, ) ) +# con_lock_2.commit() +# +# cur_lock_1.close() +# cur_lock_2.close() +# +# if ROWS_TO_ADD % 2 == 0: +# con_lock_2.commit() +# con_lock_1.commit() +# else: +# con_lock_1.commit() +# con_lock_2.commit() +# +# # Close lockers: +# ################ +# for c in (con_lock_1, con_lock_2): +# c.close() +# +# # Here we wait for ISQL complete its mission: +# p_worker.wait() +# +# flush_and_close(f_worker_log) +# flush_and_close(f_worker_err) +# +# # CHECK RESULTS +# ############### +# +# print( 'target_object_type: %(target_object_type)s, checked_DML = %(checked_DML)s, iter = %(main_iter)s, restarts number to be tested: %(ROWS_TO_ADD)s' % locals() ) +# +# with open(f_init_err.name,'r') as f: +# for line in f: +# if line: +# print( 'target_object_type: %(target_object_type)s, checked_DML = %(checked_DML)s, iter = %(main_iter)s, UNEXPECTED STDERR for initial SQL: %(line)s' % locals() ) +# +# for f in (f_worker_log, f_worker_err): +# with open(f.name,'r') as g: +# for line in g: +# if line: +# logname = 'STDLOG' if f.name == f_worker_log.name else 'STDERR' +# print( 'target_object_type: %(target_object_type)s, checked_DML = %(checked_DML)s, iter = %(main_iter)s, worker %(logname)s: %(line)s' % locals() ) +# +# +# #< for main_iter in (0,1) +# # < for checked_DML in ('upd', 'mer', 'del', 'lok') +# # < for target_object_type in ('table', 'view') +# # Cleanup. 
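The loop above alternates the two lockers so that each round renames one more ID to its negative value and forces another statement-level restart of the worker. A pure-Python sketch of that schedule (no database work; it only prints which locker acts on which ID in each round):

# Sketch of the locker "ping-pong" schedule driven by the loop above.
for main_iter in (0, 1):
    ROWS_TO_ADD = 10 + 2 * main_iter          # how many lock/commit rounds to run
    print(f'main_iter={main_iter}: {ROWS_TO_ADD} rounds')
    for i in range(ROWS_TO_ADD):
        v_id = 2 + ROWS_TO_ADD - i            # IDs are renamed from the top downwards
        locker = 'LOCKER #2' if i % 2 == 0 else 'LOCKER #1'
        other = 'LOCKER #1' if locker == 'LOCKER #2' else 'LOCKER #2'
        # Each round: <locker> renames v_id to -v_id, commits and re-locks it,
        # while <other> commits and thereby releases the row it held before.
        print(f'  round {i:2d}: {locker} sets id {v_id} -> {-v_id}; {other} commits')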
+# ########## +# time.sleep(1) +# cleanup( (f_init_log, f_init_err, f_worker_sql, f_worker_log, f_worker_err) ) +# +# ''' +# 'substitutions':[ +# ('=','') +# ,('[ ]+',' ') +# ,('.*After line \\d+.*', '') +# ,('.*[\\-]?concurrent transaction number is \\d+', 'concurrent transaction number is') +# ,('.*At\\s+block\\s+line(:)?\\s+\\d+(,)?\\s+col(:)?\\s+\\d+', '') +# ] +# +# ''' +# +# ----------------------------------- diff --git a/tests/functional/transactions/test_read_consist_sttm_restart_on_delete_01.py b/tests/functional/transactions/test_read_consist_sttm_restart_on_delete_01.py index 359ec403..13aa51d7 100644 --- a/tests/functional/transactions/test_read_consist_sttm_restart_on_delete_01.py +++ b/tests/functional/transactions/test_read_consist_sttm_restart_on_delete_01.py @@ -1,368 +1,144 @@ #coding:utf-8 -# -# id: functional.transactions.read_consist_sttm_restart_on_delete_01 -# title: READ CONSISTENCY. Check creation of new statement-level snapshot and restarting changed caused by DELETE. Test-01. -# decription: -# Initial article for reading: -# https://asktom.oracle.com/pls/asktom/f?p=100:11:::::P11_QUESTION_ID:11504247549852 -# Note on terms which are used there: "BLOCKER", "LONG" and "FIRSTLAST" - their names are slightly changed here -# to: LOCKER-1, WORKER and LOCKER-2 respectively. -# -# ********************************************** -# -# This test verifies that statement-level snapshot and restart will be performed when "main" session ("worker") -# performs DELETE statement and is involved in update conflicts. -# ("When update conflict is detected <...> then engine <...> creates new statement-level snapshot and restart execution...") -# -# ::: NB ::: -# This test uses script %FBT_REPO% -# iles -# ead-consist-sttm-restart-DDL.sql which contains common DDL for all other such tests. -# Particularly, it contains two TRIGGERS (TLOG_WANT and TLOG_DONE) which are used for logging of planned actions and actual -# results against table TEST. These triggers use AUTONOMOUS transactions in order to have ability to see results in any -# outcome of test. -# -# ############### -# Following scenario if executed here (see also: "doc\\README.read_consistency.md"; hereafer is marked as "DOC"): -# -# * five rows are inserted into the table TEST, with IDs: 1,2,3,4,5 -# * session 'locker-1' ("BLOCKER" in Tom Kyte's article ): -# update test set id = id where id = 5; -# -# * session 'worker' ("LONG" in TK article) has mission: -# delete from test order by id rows ( iif(exists(select 1 from test where id < 0), 3, 8 ) ) // using TIL = read committed read consistency -# -# // Execution will have PLAN ORDER . NOTE: NUMBER OF ROWS TO BE DELETED DEPENDS ON EXISTENSE OF ID < 0! -# // Worker starts with deletion (avaliable for its cursor) rows with ID = 1...4 but can not change row with ID = 5 because of locker-1. -# -# * session 'locker-2' ("FIRSTLAST" in TK article): -# (1) insert into test(id) values(6); -# (2) commit; -# (3) update test set id=id where id = 6; -# // session-'worker' remains waiting at this point because row with ID = 5 is still occupied by by locker-1 -# // but worker must further see record with (new) id = 6 because its TIL was changed to RC NO RECORD_VERSION. -# -# * session 'locker-1': -# (1) commit; -# (2) insert into test(id) values(7); -# (3) commit; -# (4) update test set id=id where id = 7; -# -# // This: '(1) commit' - will release record with ID = 5. Worker sees this record and put write-lock on it. 
-# // [DOC]: "b) engine put write lock on conflicted record" -# // Because of TIL = RC NRV session-'worker' must see all committed records regardless on its own snapshot. -# // Worker resumes search for any rows which with taking in account required order of its DML (i.e. 'ORDER BY ID') -# // and number of records to be deleted: 'rows ( iif(exists(select 1 from test where id < 0), 3, 8 ) )' -# // [DOC]: "c) engine continue to evaluate remaining records of update\\delete cursor and put write locks on it too" -# // Worker starts to search records which must be involved in its DML and *found* first sucn row: it has ID = 1. -# // Then it goes on through IDs = 2...5 and stops on ID=6 because id is occupied by locker-2. -# // BECAUSE OF FACT THAT AT LEAST ONE ROW *WAS FOUND* - STATEMENT-LEVEL RESTART *NOT* YET OCCURS HERE. -# // [DOC]: "d) when there is *no more* records to fetch, engine start to undo all actions performed since -# // top-level statement execution starts and preserve already taken write locks -# // e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*, -# // creates new statement-level snapshot and restart execution of top-level statement." -# -# * session 'locker-2': -# (1) commit; -# (2) insert into test(id) values(8); -# (3) commit; -# (4) update test set id=id where id = 8; -# -# // This: '(1) commit' - will release record with ID = 6. Worker sees this record and put write-lock on it. -# // [DOC]: "b) engine put write lock on conflicted record" -# // Because of TIL = RC NRV session-'worker' must see all committed records regardless on its own snapshot. -# // Worker resumes search for any rows which with taking in account required order of its DML (i.e. 'ORDER BY ID') -# // and number of records to be deleted: 'rows ( iif(exists(select 1 from test where id < 0), 3, 8 ) )' -# // [DOC]: "c) engine continue to evaluate remaining records of update\\delete cursor and put write locks on it too" -# // Worker starts to search records which must be involved in its DML and *found* first sucn row: it has ID = 1. -# // Then it goes on through IDs = 2...6 and stops on ID=7 because id is occupied by locker-1. -# // BECAUSE OF FACT THAT AT LEAST ONE ROW *WAS FOUND* - STATEMENT-LEVEL RESTART *NOT* YET OCCURS HERE. -# // [DOC]: "d) when there is *no more* records to fetch, engine start to undo all actions performed since -# // top-level statement execution starts and preserve already taken write locks -# // e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*, -# // creates new statement-level snapshot and restart execution of top-level statement." -# -# -# * session 'locker-1': -# (1) commit; -# (2) insert into test(id) values(-1); -# (3) commit; -# (4) update test set id=id where id = -1; -# -# // This: '(1) commit' - will release record with ID = 8. Worker sees this record and put write-lock on it. -# // [DOC]: "b) engine put write lock on conflicted record" -# // Because of TIL = RC NRV session-'worker' must see all committed records regardless on its own snapshot. -# // Worker resumes search for any rows which with taking in account required order of its DML (i.e. 
'ORDER BY ID') -# // and number of records to be deleted: 'rows ( iif(exists(select 1 from test where id < 0), 3, 8 ) )' -# // Here worker starts from row with ID = -1 but can not go on because it is occupied by locker-1 -# -# * session 'locker-2': -# (1) commit; -# // This releases record with ID = 8 but worker can not do somewhat here: it is still waiting for row with ID = -1. -# -# * session 'locker-1': -# (1) commit; -# -# // This releases row with ID = -1. Worker sees this record and put write-lock on it. -# // [DOC]: "b) engine put write lock on conflicted record" -# // Because of TIL = RC NRV session-'worker' must see all committed records regardless on its own snapshot. -# // Worker resumes search for any rows which with taking in account required order of its DML (i.e. 'ORDER BY ID') -# // and number of records to be deleted: 'rows ( iif(exists(select 1 from test where id < 0), 3, 8 ) )' -# // One row *exists* with ID < 0 thus worker must delete only 3 rows (according to 'ROWS ...' clause) rather than 8. -# // At this point there are no more records to be locked (by worker) that meet cursor condition: worker did put -# // write locks on all rows that meet its cursor conditions. -# // BECAUSE OF FACT THAT NO MORE RECORDS FOUND TO BE LOCKED, WORKER DOES UNDO BUT KEEP LOCKS AND THEN -# // MAKES FIRST STATEMENT-LEVEL RESTART. This restart is also the last in this test. -# -# -# Expected result: -# * session-'worker' must *successfully* complete deletion of THREE rows with ID = -1, 1 and 2. Other rows (ID=3...8) must remain. -# -# * Two unique values must be in the column TLOG_DONE.SNAP_NO for session-'worker' when it performed DELETE statement: first of them -# was created by initial statement start and second reflect SINGLE restart (this column has values which are evaluated using -# rdb$get_context('SYSTEM', 'SNAPSHOT_NUMBER') -- see trigger TEST_AIUD). -# It is enough to count these values using COUNT(*) or enumarate them by DENSE_RANK() function. -# -# NOTE: concrete values of fields TRN, GLOBAL_CN and SNAP_NO in the TLOG_DONE can differ from one to another run! -# This is because of concurrent nature of connections that work against database. We must not assume that these values will be constant. -# -# ################ -# -# Checked on 4.0.0.2144 -# -# tracker_id: -# min_versions: ['4.0'] -# versions: 4.0 -# qmid: + +""" +ID: transactions.read-consist-sttm-restart-on-delete-01 +TITLE: READ CONSISTENCY. Check creation of new statement-level snapshot and restarting changed caused by DELETE. Test-01. +DESCRIPTION: + Initial article for reading: + https://asktom.oracle.com/pls/asktom/f?p=100:11:::::P11_QUESTION_ID:11504247549852 + Note on terms which are used there: "BLOCKER", "LONG" and "FIRSTLAST" - their names are slightly changed here + to: LOCKER-1, WORKER and LOCKER-2 respectively. + + ********************************************** + + This test verifies that statement-level snapshot and restart will be performed when "main" session ("worker") + performs DELETE statement and is involved in update conflicts. + ("When update conflict is detected <...> then engine <...> creates new statement-level snapshot and restart execution...") + + ::: NB ::: + This test uses script %FBT_REPO%/files/read-consist-sttm-restart-DDL.sql which contains common DDL for all other such tests. + Particularly, it contains two TRIGGERS (TLOG_WANT and TLOG_DONE) which are used for logging of planned actions and actual + results against table TEST. 
These triggers use AUTONOMOUS transactions in order to have ability to see results in any + outcome of test. + + ############### + Following scenario if executed here (see also: "doc/README.read_consistency.md"; hereafer is marked as "DOC"): + + * five rows are inserted into the table TEST, with IDs: 1,2,3,4,5 + * session 'locker-1' ("BLOCKER" in Tom Kyte's article ): + update test set id = id where id = 5; + + * session 'worker' ("LONG" in TK article) has mission: + delete from test order by id rows ( iif(exists(select 1 from test where id < 0), 3, 8 ) ) // using TIL = read committed read consistency + + // Execution will have PLAN ORDER . NOTE: NUMBER OF ROWS TO BE DELETED DEPENDS ON EXISTENSE OF ID < 0! + // Worker starts with deletion (avaliable for its cursor) rows with ID = 1...4 but can not change row with ID = 5 because of locker-1. + + * session 'locker-2' ("FIRSTLAST" in TK article): + (1) insert into test(id) values(6); + (2) commit; + (3) update test set id=id where id = 6; + // session-'worker' remains waiting at this point because row with ID = 5 is still occupied by by locker-1 + // but worker must further see record with (new) id = 6 because its TIL was changed to RC NO RECORD_VERSION. + + * session 'locker-1': + (1) commit; + (2) insert into test(id) values(7); + (3) commit; + (4) update test set id=id where id = 7; + + // This: '(1) commit' - will release record with ID = 5. Worker sees this record and put write-lock on it. + // [DOC]: "b) engine put write lock on conflicted record" + // Because of TIL = RC NRV session-'worker' must see all committed records regardless on its own snapshot. + // Worker resumes search for any rows which with taking in account required order of its DML (i.e. 'ORDER BY ID') + // and number of records to be deleted: 'rows ( iif(exists(select 1 from test where id < 0), 3, 8 ) )' + // [DOC]: "c) engine continue to evaluate remaining records of update/delete cursor and put write locks on it too" + // Worker starts to search records which must be involved in its DML and *found* first sucn row: it has ID = 1. + // Then it goes on through IDs = 2...5 and stops on ID=6 because id is occupied by locker-2. + // BECAUSE OF FACT THAT AT LEAST ONE ROW *WAS FOUND* - STATEMENT-LEVEL RESTART *NOT* YET OCCURS HERE. + // [DOC]: "d) when there is *no more* records to fetch, engine start to undo all actions performed since + // top-level statement execution starts and preserve already taken write locks + // e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*, + // creates new statement-level snapshot and restart execution of top-level statement." + + * session 'locker-2': + (1) commit; + (2) insert into test(id) values(8); + (3) commit; + (4) update test set id=id where id = 8; + + // This: '(1) commit' - will release record with ID = 6. Worker sees this record and put write-lock on it. + // [DOC]: "b) engine put write lock on conflicted record" + // Because of TIL = RC NRV session-'worker' must see all committed records regardless on its own snapshot. + // Worker resumes search for any rows which with taking in account required order of its DML (i.e. 'ORDER BY ID') + // and number of records to be deleted: 'rows ( iif(exists(select 1 from test where id < 0), 3, 8 ) )' + // [DOC]: "c) engine continue to evaluate remaining records of update/delete cursor and put write locks on it too" + // Worker starts to search records which must be involved in its DML and *found* first sucn row: it has ID = 1. 
+ // Then it goes on through IDs = 2...6 and stops on ID=7 because id is occupied by locker-1. + // BECAUSE OF FACT THAT AT LEAST ONE ROW *WAS FOUND* - STATEMENT-LEVEL RESTART *NOT* YET OCCURS HERE. + // [DOC]: "d) when there is *no more* records to fetch, engine start to undo all actions performed since + // top-level statement execution starts and preserve already taken write locks + // e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*, + // creates new statement-level snapshot and restart execution of top-level statement." + + + * session 'locker-1': + (1) commit; + (2) insert into test(id) values(-1); + (3) commit; + (4) update test set id=id where id = -1; + + // This: '(1) commit' - will release record with ID = 8. Worker sees this record and put write-lock on it. + // [DOC]: "b) engine put write lock on conflicted record" + // Because of TIL = RC NRV session-'worker' must see all committed records regardless on its own snapshot. + // Worker resumes search for any rows which with taking in account required order of its DML (i.e. 'ORDER BY ID') + // and number of records to be deleted: 'rows ( iif(exists(select 1 from test where id < 0), 3, 8 ) )' + // Here worker starts from row with ID = -1 but can not go on because it is occupied by locker-1 + + * session 'locker-2': + (1) commit; + // This releases record with ID = 8 but worker can not do somewhat here: it is still waiting for row with ID = -1. + + * session 'locker-1': + (1) commit; + + // This releases row with ID = -1. Worker sees this record and put write-lock on it. + // [DOC]: "b) engine put write lock on conflicted record" + // Because of TIL = RC NRV session-'worker' must see all committed records regardless on its own snapshot. + // Worker resumes search for any rows which with taking in account required order of its DML (i.e. 'ORDER BY ID') + // and number of records to be deleted: 'rows ( iif(exists(select 1 from test where id < 0), 3, 8 ) )' + // One row *exists* with ID < 0 thus worker must delete only 3 rows (according to 'ROWS ...' clause) rather than 8. + // At this point there are no more records to be locked (by worker) that meet cursor condition: worker did put + // write locks on all rows that meet its cursor conditions. + // BECAUSE OF FACT THAT NO MORE RECORDS FOUND TO BE LOCKED, WORKER DOES UNDO BUT KEEP LOCKS AND THEN + // MAKES FIRST STATEMENT-LEVEL RESTART. This restart is also the last in this test. + + + Expected result: + * session-'worker' must *successfully* complete deletion of THREE rows with ID = -1, 1 and 2. Other rows (ID=3...8) must remain. + + * Two unique values must be in the column TLOG_DONE.SNAP_NO for session-'worker' when it performed DELETE statement: first of them + was created by initial statement start and second reflect SINGLE restart (this column has values which are evaluated using + rdb$get_context('SYSTEM', 'SNAPSHOT_NUMBER') -- see trigger TEST_AIUD). + It is enough to count these values using COUNT(*) or enumarate them by DENSE_RANK() function. + + NOTE: concrete values of fields TRN, GLOBAL_CN and SNAP_NO in the TLOG_DONE can differ from one to another run! + This is because of concurrent nature of connections that work against database. We must not assume that these values will be constant. 
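The expected result above follows directly from the ROWS clause: as soon as a row with a negative ID exists, only three rows qualify for deletion. A small pure-Python check of that arithmetic, with the ID set taken from the scenario described above:

# Pure-Python sketch of the ROWS clause arithmetic (no database needed).
# After the final restart the worker sees these IDs (lockers added 6, 7, 8 and -1):
ids_after_restart = [-1, 1, 2, 3, 4, 5, 6, 7, 8]

# delete from test order by id rows ( iif(exists(select 1 from test where id < 0), 3, 8) )
rows_to_delete = 3 if any(i < 0 for i in ids_after_restart) else 8
deleted = sorted(ids_after_restart)[:rows_to_delete]     # ORDER BY ID, first N rows
remaining = sorted(ids_after_restart)[rows_to_delete:]

print('deleted:  ', deleted)     # [-1, 1, 2]  -- matches the expected result above
print('remaining:', remaining)   # [3, 4, 5, 6, 7, 8]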
+ + ################ + + Checked on 4.0.0.2144 +FBTEST: functional.transactions.read_consist_sttm_restart_on_delete_01 +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() -substitutions_1 = [('=', ''), ('[ \t]+', ' ')] +act = python_act('db', substitutions=[('=', ''), ('[ \t]+', ' ')]) -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -# test_script_1 -#--- -# -# import os -# import sys -# import subprocess -# from subprocess import Popen -# from fdb import services -# import time -# -# os.environ["ISC_USER"] = user_name -# os.environ["ISC_PASSWORD"] = user_password -# -# db_conn.close() -# -# #-------------------------------------------- -# -# def flush_and_close( file_handle ): -# # https://docs.python.org/2/library/os.html#os.fsync -# # If you're starting with a Python file object f, -# # first do f.flush(), and -# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. -# global os -# -# file_handle.flush() -# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: -# # otherwise: "OSError: [Errno 9] Bad file descriptor"! -# os.fsync(file_handle.fileno()) -# file_handle.close() -# -# #-------------------------------------------- -# -# def cleanup( f_names_list ): -# global os -# for f in f_names_list: -# if type(f) == file: -# del_name = f.name -# elif type(f) == str: -# del_name = f -# else: -# print('Unrecognized type of element:', f, ' - can not be treated as file.') -# del_name = None -# -# if del_name and os.path.isfile( del_name ): -# os.remove( del_name ) -# -# #-------------------------------------------- -# -# sql_init_ddl = os.path.join(context['files_location'],'read-consist-sttm-restart-DDL.sql') -# -# for checked_mode in('table', 'view'): -# -# target_obj = 'test' if checked_mode == 'table' else 'v_test' -# -# f_init_log=open( os.path.join(context['temp_directory'],'read-consist-sttm-restart-DDL.log'), 'w') -# f_init_err=open( ''.join( ( os.path.splitext(f_init_log.name)[0], '.err') ), 'w') -# -# subprocess.call( [context['isql_path'], dsn, '-q', '-i', sql_init_ddl], stdout=f_init_log, stderr=f_init_err ) -# flush_and_close(f_init_log) -# flush_and_close(f_init_err) -# -# # add rows with ID = 1,2,3,4,5: -# sql_addi=''' -# set term ^; -# execute block as -# begin -# rdb$set_context('USER_SESSION', 'WHO', 'INIT_DATA'); -# end -# ^ -# set term ;^ -# insert into %(target_obj)s(id, x) -# select row_number()over(),row_number()over() -# from rdb$types rows 5; -# commit; -# ''' % locals() -# runProgram('isql', [ dsn, '-q' ], sql_addi) -# -# con_lock_1 = fdb.connect( dsn = dsn ) -# con_lock_2 = fdb.connect( dsn = dsn ) -# con_lock_1.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #1'); end" ) -# con_lock_2.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #2'); end" ) -# -# -# ######################### -# ### L O C K E R - 1 ### -# ######################### -# -# con_lock_1.execute_immediate( 'update %(target_obj)s set id=id where id = 5' % locals() ) -# -# sql_text=''' -# connect '%(dsn)s'; -# set list on; -# set autoddl off; -# set term ^; -# execute block returns (whoami varchar(30)) as -# begin -# whoami = 'WORKER'; -- , ATT#' || current_connection; -# rdb$set_context('USER_SESSION','WHO', whoami); -# -- suspend; -# end -# ^ -# set term ;^ -# commit; -# --set echo on; -# SET KEEP_TRAN_PARAMS ON; 
-# set transaction read committed read consistency; -# --select current_connection, current_transaction from rdb$database; -# set list off; -# set wng off; -# --set plan on; -# set count on; -# -# delete from %(target_obj)s order by id rows ( iif(exists(select 1 from %(target_obj)s where id < 0), 3, 8 ) ); -- THIS MUST BE LOCKED -# -# -- check results: -# -- ############### -# -# select id from %(target_obj)s order by id; -- this will produce output only after all lockers do their commit/rollback -# -# select v.old_id, v.op, v.snap_no_rank -# from v_worker_log v -# where v.op = 'del'; -# -# set width who 10; -# -- DO NOT check this! Values can differ here from one run to another! -# --select id, trn, who, old_id, new_id, op, rec_vers, global_cn, snap_no from tlog_done order by id; -# -# rollback; -# -# ''' % dict(globals(), **locals()) -# -# f_worker_sql=open( os.path.join(context['temp_directory'],'tmp_sttm_restart_on_delete_01.sql'), 'w') -# f_worker_sql.write(sql_text) -# flush_and_close(f_worker_sql) -# -# -# f_worker_log=open( ''.join( ( os.path.splitext(f_worker_sql.name)[0], '.log') ), 'w') -# f_worker_err=open( ''.join( ( os.path.splitext(f_worker_log.name)[0], '.err') ), 'w') -# -# ############################################################################ -# ### L A U N C H W O R K E R U S I N G I S Q L, A S Y N C. ### -# ############################################################################ -# -# p_worker = Popen( [ context['isql_path'], '-pag', '999999', '-q', '-i', f_worker_sql.name ],stdout=f_worker_log, stderr=f_worker_err) -# time.sleep(1) -# -# ######################### -# ### L O C K E R - 2 ### -# ######################### -# con_lock_2.execute_immediate( 'insert into %(target_obj)s(id) values(6)' % locals() ) -# con_lock_2.commit() -# con_lock_2.execute_immediate( 'update %(target_obj)s set id=id where id = 6' % locals() ) -# -# ######################### -# ### L O C K E R - 1 ### -# ######################### -# con_lock_1.commit() -# con_lock_1.execute_immediate( 'insert into %(target_obj)s(id) values(7)' % locals() ) -# con_lock_1.commit() -# con_lock_1.execute_immediate( 'update %(target_obj)s set id=id where id = 7' % locals() ) -# -# -# ######################### -# ### L O C K E R - 2 ### -# ######################### -# con_lock_2.commit() -# con_lock_2.execute_immediate( 'insert into %(target_obj)s(id) values(8)' % locals() ) -# con_lock_2.commit() -# con_lock_2.execute_immediate( 'update %(target_obj)s set id=id where id = 8' % locals() ) -# -# ######################### -# ### L O C K E R - 1 ### -# ######################### -# con_lock_1.commit() -# con_lock_1.execute_immediate( 'insert into %(target_obj)s(id) values(-1)' % locals() ) -# con_lock_1.commit() -# -# con_lock_2.commit() -# -# con_lock_1.commit() -# -# # Here we wait for ISQL complete its mission: -# p_worker.wait() -# -# flush_and_close(f_worker_log) -# flush_and_close(f_worker_err) -# -# # Close lockers: -# ################ -# for c in (con_lock_1, con_lock_2): -# c.close() -# -# -# # CHECK RESULTS -# ############### -# with open(f_worker_log.name,'r') as f: -# for line in f: -# if line.strip(): -# print('checked_mode: %(checked_mode)s, STDLOG: %(line)s' % locals()) -# -# for f in (f_init_err, f_worker_err): -# with open(f.name,'r') as g: -# for line in g: -# if line.strip(): -# print( 'checked_mode: ', checked_mode, ' UNEXPECTED STDERR IN ' + g.name + ':', line) -# -# #=4.0') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - +def test_1(act: Action): + 
pytest.fail("Not IMPLEMENTED") +# Original python code for this test: +# ----------------------------------- +# +# import os +# import sys +# import subprocess +# from subprocess import Popen +# from fdb import services +# import time +# +# os.environ["ISC_USER"] = user_name +# os.environ["ISC_PASSWORD"] = user_password +# +# db_conn.close() +# +# #-------------------------------------------- +# +# def flush_and_close( file_handle ): +# # https://docs.python.org/2/library/os.html#os.fsync +# # If you're starting with a Python file object f, +# # first do f.flush(), and +# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. +# global os +# +# file_handle.flush() +# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: +# # otherwise: "OSError: [Errno 9] Bad file descriptor"! +# os.fsync(file_handle.fileno()) +# file_handle.close() +# +# #-------------------------------------------- +# +# def cleanup( f_names_list ): +# global os +# for f in f_names_list: +# if type(f) == file: +# del_name = f.name +# elif type(f) == str: +# del_name = f +# else: +# print('Unrecognized type of element:', f, ' - can not be treated as file.') +# del_name = None +# +# if del_name and os.path.isfile( del_name ): +# os.remove( del_name ) +# +# #-------------------------------------------- +# +# sql_init_ddl = os.path.join(context['files_location'],'read-consist-sttm-restart-DDL.sql') +# +# for checked_mode in('table', 'view'): +# +# target_obj = 'test' if checked_mode == 'table' else 'v_test' +# +# f_init_log=open( os.path.join(context['temp_directory'],'read-consist-sttm-restart-DDL.log'), 'w') +# f_init_err=open( ''.join( ( os.path.splitext(f_init_log.name)[0], '.err') ), 'w') +# +# subprocess.call( [context['isql_path'], dsn, '-q', '-i', sql_init_ddl], stdout=f_init_log, stderr=f_init_err ) +# flush_and_close(f_init_log) +# flush_and_close(f_init_err) +# +# # add rows with ID = 1,2,3,4,5: +# sql_addi=''' +# set term ^; +# execute block as +# begin +# rdb$set_context('USER_SESSION', 'WHO', 'INIT_DATA'); +# end +# ^ +# set term ;^ +# insert into %(target_obj)s(id, x) +# select row_number()over(),row_number()over() +# from rdb$types rows 5; +# commit; +# ''' % locals() +# runProgram('isql', [ dsn, '-q' ], sql_addi) +# +# con_lock_1 = fdb.connect( dsn = dsn ) +# con_lock_2 = fdb.connect( dsn = dsn ) +# con_lock_1.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #1'); end" ) +# con_lock_2.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #2'); end" ) +# +# +# ######################### +# ### L O C K E R - 1 ### +# ######################### +# +# con_lock_1.execute_immediate( 'update %(target_obj)s set id=id where id = 5' % locals() ) +# +# sql_text=''' +# connect '%(dsn)s'; +# set list on; +# set autoddl off; +# set term ^; +# execute block returns (whoami varchar(30)) as +# begin +# whoami = 'WORKER'; -- , ATT#' || current_connection; +# rdb$set_context('USER_SESSION','WHO', whoami); +# -- suspend; +# end +# ^ +# set term ;^ +# commit; +# --set echo on; +# SET KEEP_TRAN_PARAMS ON; +# set transaction read committed read consistency; +# --select current_connection, current_transaction from rdb$database; +# set list off; +# set wng off; +# --set plan on; +# set count on; +# +# delete from %(target_obj)s order by id rows ( iif(exists(select 1 from %(target_obj)s where id < 0), 3, 8 ) ); -- THIS MUST BE LOCKED +# +# -- check results: +# -- ############### +# +# 
select id from %(target_obj)s order by id; -- this will produce output only after all lockers do their commit/rollback +# +# select v.old_id, v.op, v.snap_no_rank +# from v_worker_log v +# where v.op = 'del'; +# +# set width who 10; +# -- DO NOT check this! Values can differ here from one run to another! +# --select id, trn, who, old_id, new_id, op, rec_vers, global_cn, snap_no from tlog_done order by id; +# +# rollback; +# +# ''' % dict(globals(), **locals()) +# +# f_worker_sql=open( os.path.join(context['temp_directory'],'tmp_sttm_restart_on_delete_01.sql'), 'w') +# f_worker_sql.write(sql_text) +# flush_and_close(f_worker_sql) +# +# +# f_worker_log=open( ''.join( ( os.path.splitext(f_worker_sql.name)[0], '.log') ), 'w') +# f_worker_err=open( ''.join( ( os.path.splitext(f_worker_log.name)[0], '.err') ), 'w') +# +# ############################################################################ +# ### L A U N C H W O R K E R U S I N G I S Q L, A S Y N C. ### +# ############################################################################ +# +# p_worker = Popen( [ context['isql_path'], '-pag', '999999', '-q', '-i', f_worker_sql.name ],stdout=f_worker_log, stderr=f_worker_err) +# time.sleep(1) +# +# ######################### +# ### L O C K E R - 2 ### +# ######################### +# con_lock_2.execute_immediate( 'insert into %(target_obj)s(id) values(6)' % locals() ) +# con_lock_2.commit() +# con_lock_2.execute_immediate( 'update %(target_obj)s set id=id where id = 6' % locals() ) +# +# ######################### +# ### L O C K E R - 1 ### +# ######################### +# con_lock_1.commit() +# con_lock_1.execute_immediate( 'insert into %(target_obj)s(id) values(7)' % locals() ) +# con_lock_1.commit() +# con_lock_1.execute_immediate( 'update %(target_obj)s set id=id where id = 7' % locals() ) +# +# +# ######################### +# ### L O C K E R - 2 ### +# ######################### +# con_lock_2.commit() +# con_lock_2.execute_immediate( 'insert into %(target_obj)s(id) values(8)' % locals() ) +# con_lock_2.commit() +# con_lock_2.execute_immediate( 'update %(target_obj)s set id=id where id = 8' % locals() ) +# +# ######################### +# ### L O C K E R - 1 ### +# ######################### +# con_lock_1.commit() +# con_lock_1.execute_immediate( 'insert into %(target_obj)s(id) values(-1)' % locals() ) +# con_lock_1.commit() +# +# con_lock_2.commit() +# +# con_lock_1.commit() +# +# # Here we wait for ISQL complete its mission: +# p_worker.wait() +# +# flush_and_close(f_worker_log) +# flush_and_close(f_worker_err) +# +# # Close lockers: +# ################ +# for c in (con_lock_1, con_lock_2): +# c.close() +# +# +# # CHECK RESULTS +# ############### +# with open(f_worker_log.name,'r') as f: +# for line in f: +# if line.strip(): +# print('checked_mode: %(checked_mode)s, STDLOG: %(line)s' % locals()) +# +# for f in (f_init_err, f_worker_err): +# with open(f.name,'r') as g: +# for line in g: +# if line.strip(): +# print( 'checked_mode: ', checked_mode, ' UNEXPECTED STDERR IN ' + g.name + ':', line) +# +# # then engine <...> creates new statement-level snapshot and restart execution...") -# -# ::: NB ::: -# This test uses script %FBT_REPO% -# iles -# ead-consist-sttm-restart-DDL.sql which contains common DDL for all other such tests. -# Particularly, it contains two TRIGGERS (TLOG_WANT and TLOG_DONE) which are used for logging of planned actions and actual -# results against table TEST. These triggers use AUTONOMOUS transactions in order to have ability to see results in any -# outcome of test. 
-# -# -# ############### -# Following scenario if executed here (see also: "doc\\README.read_consistency.md"; hereafer is marked as "DOC"): -# -# * five rows are inserted into the table TEST, with IDs: 1...5 -# * session 'locker-1' ("BLOCKER" in Tom Kyte's article ); -# update set id=id where id = 5; -# -# * session 'worker' ("LONG" in TK article) has mission: -# delete from test where id < 0 or id >= 3 order by id; // using TIL = read committed read consistency -# -# // Execution will have PLAN ORDER . -# // It will delete rows with ID = 3 and 4 but hang on row with ID = 5 because of locker-1; -# // Update conflict appears here and, because of this, worker temporary changes its TIL to RC no record_version (RC NRV). -# // [DOC]: "a) transaction isolation mode temporarily switched to the READ COMMITTED *NO RECORD VERSION MODE*" -# // This (new) TIL allows worker further to see all committed versions, regardless of its own snapshot. -# -# * session 'locker-2' ("FIRSTLAST" in TK article): -# (1) insert into test(id) values(-1); // i.e. LESS than min(id)=1 that existed at the start of session-worker statement -# (2) commit; -# (3) update test set id=id where id = -1; -# // Session-worker must still hang because row with ID = 5 is occupied by locker-1. -# // But worker must further see record with (new) id = -1 because its TIL was changed to RC NO RECORD_VERSION. -# -# * session 'locker-1': -# (1) commit; -# (2) insert into test(id) values(-2); // i.e. LESS than min(id)=-1 that existed before this -# (3) commit; -# (4) update test set id=id where id = -2; -# // This: '(1) commit' - will release record with ID = 5. Worker sees this record and put write-lock on it. -# // [DOC]: "b) engine put write lock on conflicted record" -# // Because of TIL = RC NRV session-'worker' must see all committed records regardless on its own snapshot. -# // Worker resumes search for any rows which meet condition: "id < 0 or id >= 3", and it does this with taking in account -# // required order of its DML (i.e. 'ORDER BY ID') -# // [DOC]: "c) engine continue to evaluate remaining records of update\\delete cursor and put write locks on it too" -# // Worker starts to search records which must be involved in its DML and *found* first sucn row: it has ID = -1. -# // NB. This row currently can NOT be deleted by worker because locker-2 has uncommitted update of it. -# // BECAUSE OF FACT THAT AT LEAST ONE ROW *WAS FOUND* - STATEMENT-LEVEL RESTART *NOT* YET OCCURS HERE. -# // :::!! NB, AGAIN !! ::: restart NOT occurs here because at least one records found, see: -# // [DOC]: "d) when there is *no more* records to fetch, engine start to undo all actions performed since -# // top-level statement execution starts and preserve already taken write locks -# // e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*, -# // creates new statement-level snapshot and restart execution of top-level statement." -# -# -# * session 'locker-2': -# (1) commit; -# (2) insert into test(id) values(-3); // i.e. LESS than min(id)=-1 that existed before this -# (3) commit; -# (4) update test set id=id where id = -3; -# -# // This: '(1) commit' - will release record with ID = -1. Worker sees this record and put write-lock on it. -# // [DOC]: "b) engine put write lock on conflicted record" -# // Because of TIL = RC NRV session-'worker' must see all committed records regardless on its own snapshot. 
-# // Worker resumes search for any rows which meet condition: "id < 0 or id >= 3", and it does this with taking in account -# // required order of its DML (i.e. 'ORDER BY ID') -# // [DOC]: "c) engine continue to evaluate remaining records of update\\delete cursor and put write locks on it too" -# // Worker starts to search records which must be involved in its DML and *found* first sucn row: it has ID = -2. -# // NB. This row currently can NOT be deleted by worker because locker-1 has uncommitted update of it. -# // BECAUSE OF FACT THAT AT LEAST ONE ROW *WAS FOUND* - STATEMENT-LEVEL RESTART *NOT* YET OCCURS HERE. -# -# * session 'locker-1': -# commit; -# // This: '(1) commit' - will release record with ID = -2. Worker sees this record and put write-lock on it. -# // Because of worker TIL = RC NRV, he must see all committed records regardless on its own snapshot. -# // Worker resumes search for any rows with ID < 0, and it does this with taking in account required order -# // of its DML (i.e. 'ORDER BY ID') -# // Worker starts to search records which must be involved in its DML and *found* first sucn row with ID = -3. -# // NB. This row currently can NOT be deleted by worker because locker-2 has uncommitted update of it. -# // BECAUSE OF FACT THAT AT LEAST ONE ROW *WAS FOUND* - STATEMENT-LEVEL RESTART *NOT* YET OCCURS HERE. -# -# * session 'locker-2': -# commit; -# // This will release record with ID=-3. Worker sees this record and put write-lock on it. -# // Because of worker TIL = RC NRV, he must see all committed records regardless on its own snapshot. -# // Worker resumes search for any rows with ID < 0, and it does this with taking in account required order -# // of its DML (i.e. 'ORDER BY ID'). -# // At this point there are no more records to be locked (by worker) that meet cursor condition: worker did put -# // write locks on all rows that meet its cursor conditions (ID < 0 or ID>= 3). -# // BECAUSE OF FACT THAT NO MORE RECORDS FOUND TO BE LOCKED, WORKER DOES UNDO BUT KEEP LOCKS AND THEN -# // MAKES FIRST STATEMENT-LEVEL RESTART. This restart is also the last in this test. -# -# Expected result: -# * session-'worker' must *successfully* complete deletion of all rows with ID < 0 or ID >= 3. Rows with ID = 1 and 2 must remain. -# -# * Two unique values must be in the column TLOG_DONE.SNAP_NO for session-'worker' when it performed DELETE statement: first of them -# was created by initial statement start and second reflect SINGLE restart (this column has values which are evaluated using -# rdb$get_context('SYSTEM', 'SNAPSHOT_NUMBER') -- see trigger TEST_AIUD). -# It is enough to count these values using COUNT(*) or enumarate them by DENSE_RANK() function. -# -# NOTE: concrete values of fields TRN, GLOBAL_CN and SNAP_NO in the TLOG_DONE can differ from one to another run! -# This is because of concurrent nature of connections that work against database. We must not assume that these values will be constant. -# -# ################ -# -# -# Checked on 4.0.0.2144 SS/CS -# -# tracker_id: -# min_versions: ['4.0'] -# versions: 4.0 -# qmid: + +""" +ID: transactions.read-consist-sttm-restart-on-delete-02 +TITLE: READ CONSISTENCY. Check creation of new statement-level snapshot and restarting changed caused by DELETE. Test-02. 
+DESCRIPTION:
+    Initial article for reading:
+        https://asktom.oracle.com/pls/asktom/f?p=100:11:::::P11_QUESTION_ID:11504247549852
+    Note on terms which are used there: "BLOCKER", "LONG" and "FIRSTLAST" - their names are slightly changed here
+    to: LOCKER-1, WORKER and LOCKER-2 respectively.
+    See also: doc/README.read_consistency.md
+
+    **********************************************
+
+    This test verifies that a statement-level snapshot and restart will be performed when the "main" session ("worker")
+    performs a DELETE statement and is involved in update conflicts.
+    ("When update conflict is detected <...> then engine <...> creates new statement-level snapshot and restart execution...")
+
+    ::: NB :::
+    This test uses script %FBT_REPO%/files/read-consist-sttm-restart-DDL.sql which contains common DDL for all other such tests.
+    Particularly, it contains two TRIGGERS (TLOG_WANT and TLOG_DONE) which are used for logging of planned actions and actual
+    results against table TEST. These triggers use AUTONOMOUS transactions in order to have the ability to see results in any
+    outcome of the test.
+
+
+    ###############
+    The following scenario is executed here (see also: "doc/README.read_consistency.md"; hereafter marked as "DOC"):
+
+    * five rows are inserted into the table TEST, with IDs: 1...5
+    * session 'locker-1' ("BLOCKER" in Tom Kyte's article):
+          update test set id=id where id = 5;
+
+    * session 'worker' ("LONG" in TK article) has the mission:
+          delete from test where id < 0 or id >= 3 order by id; // using TIL = read committed read consistency
+
+      // Execution will have PLAN ORDER.
+      // It will delete rows with ID = 3 and 4 but hang on the row with ID = 5 because of locker-1;
+      // An update conflict appears here and, because of this, worker temporarily changes its TIL to RC no record_version (RC NRV).
+      // [DOC]: "a) transaction isolation mode temporarily switched to the READ COMMITTED *NO RECORD VERSION MODE*"
+      // This (new) TIL further allows worker to see all committed versions, regardless of its own snapshot.
+
+    * session 'locker-2' ("FIRSTLAST" in TK article):
+          (1) insert into test(id) values(-1); // i.e. LESS than min(id)=1 that existed at the start of the session-worker statement
+          (2) commit;
+          (3) update test set id=id where id = -1;
+          // Session-worker must still hang because the row with ID = 5 is occupied by locker-1.
+          // But worker must further see the record with (new) id = -1 because its TIL was changed to RC NO RECORD_VERSION.
+
+    * session 'locker-1':
+          (1) commit;
+          (2) insert into test(id) values(-2); // i.e. LESS than min(id)=-1 that existed before this
+          (3) commit;
+          (4) update test set id=id where id = -2;
+          // This: '(1) commit' - will release the record with ID = 5. Worker sees this record and puts a write-lock on it.
+          // [DOC]: "b) engine put write lock on conflicted record"
+          // Because of TIL = RC NRV, session-'worker' must see all committed records regardless of its own snapshot.
+          // Worker resumes its search for any rows which meet the condition "id < 0 or id >= 3", and it does this taking into account
+          // the required order of its DML (i.e. 'ORDER BY ID').
+          // [DOC]: "c) engine continue to evaluate remaining records of update/delete cursor and put write locks on it too"
+          // Worker starts to search for records which must be involved in its DML and *finds* the first such row: it has ID = -1.
+          // NB. This row currently can NOT be deleted by worker because locker-2 has an uncommitted update of it.
+          // BECAUSE AT LEAST ONE ROW *WAS FOUND*, THE STATEMENT-LEVEL RESTART DOES *NOT* YET OCCUR HERE.
+          // :::!! NB, AGAIN !! ::: the restart does NOT occur here because at least one record was found, see:
+          // [DOC]: "d) when there is *no more* records to fetch, engine start to undo all actions performed since
+          //          top-level statement execution starts and preserve already taken write locks
+          //      e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*,
+          //         creates new statement-level snapshot and restart execution of top-level statement."
+
+
+    * session 'locker-2':
+          (1) commit;
+          (2) insert into test(id) values(-3); // i.e. LESS than min(id)=-1 that existed before this
+          (3) commit;
+          (4) update test set id=id where id = -3;
+
+          // This: '(1) commit' - will release the record with ID = -1. Worker sees this record and puts a write-lock on it.
+          // [DOC]: "b) engine put write lock on conflicted record"
+          // Because of TIL = RC NRV, session-'worker' must see all committed records regardless of its own snapshot.
+          // Worker resumes its search for any rows which meet the condition "id < 0 or id >= 3", and it does this taking into account
+          // the required order of its DML (i.e. 'ORDER BY ID').
+          // [DOC]: "c) engine continue to evaluate remaining records of update/delete cursor and put write locks on it too"
+          // Worker starts to search for records which must be involved in its DML and *finds* the first such row: it has ID = -2.
+          // NB. This row currently can NOT be deleted by worker because locker-1 has an uncommitted update of it.
+          // BECAUSE AT LEAST ONE ROW *WAS FOUND*, THE STATEMENT-LEVEL RESTART DOES *NOT* YET OCCUR HERE.
+
+    * session 'locker-1':
+          commit;
+          // This commit will release the record with ID = -2. Worker sees this record and puts a write-lock on it.
+          // Because of worker TIL = RC NRV, it must see all committed records regardless of its own snapshot.
+          // Worker resumes its search for any rows with ID < 0, and it does this taking into account the required order
+          // of its DML (i.e. 'ORDER BY ID').
+          // Worker starts to search for records which must be involved in its DML and *finds* the first such row with ID = -3.
+          // NB. This row currently can NOT be deleted by worker because locker-2 has an uncommitted update of it.
+          // BECAUSE AT LEAST ONE ROW *WAS FOUND*, THE STATEMENT-LEVEL RESTART DOES *NOT* YET OCCUR HERE.
+
+    * session 'locker-2':
+          commit;
+          // This will release the record with ID = -3. Worker sees this record and puts a write-lock on it.
+          // Because of worker TIL = RC NRV, it must see all committed records regardless of its own snapshot.
+          // Worker resumes its search for any rows with ID < 0, and it does this taking into account the required order
+          // of its DML (i.e. 'ORDER BY ID').
+          // At this point there are no more records to be locked (by worker) that meet the cursor condition: worker has already put
+          // write locks on all rows that meet its cursor conditions (ID < 0 or ID >= 3).
+          // BECAUSE NO MORE RECORDS WERE FOUND TO BE LOCKED, WORKER DOES UNDO BUT KEEPS THE LOCKS AND THEN
+          // MAKES THE FIRST STATEMENT-LEVEL RESTART. This restart is also the last one in this test.
+
+    Expected result:
+    * session-'worker' must *successfully* complete the deletion of all rows with ID < 0 or ID >= 3. Rows with ID = 1 and 2 must remain.
+
+    * Two unique values must be in the column TLOG_DONE.SNAP_NO for session-'worker' when it performed the DELETE statement: the first of them
+      was created by the initial statement start and the second reflects the SINGLE restart (this column has values which are evaluated using
+      rdb$get_context('SYSTEM', 'SNAPSHOT_NUMBER') -- see trigger TEST_AIUD).
+ It is enough to count these values using COUNT(*) or enumarate them by DENSE_RANK() function. + + NOTE: concrete values of fields TRN, GLOBAL_CN and SNAP_NO in the TLOG_DONE can differ from one to another run! + This is because of concurrent nature of connections that work against database. We must not assume that these values will be constant. + + ################ + + + Checked on 4.0.0.2144 SS/CS +FBTEST: functional.transactions.read_consist_sttm_restart_on_delete_02 +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() -substitutions_1 = [('=', ''), ('[ \t]+', ' ')] +act = python_act('db', substitutions=[('=', ''), ('[ \t]+', ' ')]) -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -# test_script_1 -#--- -# -# import os -# import sys -# import subprocess -# from subprocess import Popen -# from fdb import services -# import time -# -# os.environ["ISC_USER"] = user_name -# os.environ["ISC_PASSWORD"] = user_password -# -# db_conn.close() -# -# #-------------------------------------------- -# -# def flush_and_close( file_handle ): -# # https://docs.python.org/2/library/os.html#os.fsync -# # If you're starting with a Python file object f, -# # first do f.flush(), and -# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. -# global os -# -# file_handle.flush() -# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: -# # otherwise: "OSError: [Errno 9] Bad file descriptor"! -# os.fsync(file_handle.fileno()) -# file_handle.close() -# -# #-------------------------------------------- -# -# def cleanup( f_names_list ): -# global os -# for f in f_names_list: -# if type(f) == file: -# del_name = f.name -# elif type(f) == str: -# del_name = f -# else: -# print('Unrecognized type of element:', f, ' - can not be treated as file.') -# del_name = None -# -# if del_name and os.path.isfile( del_name ): -# os.remove( del_name ) -# -# #-------------------------------------------- -# -# sql_init_ddl = os.path.join(context['files_location'],'read-consist-sttm-restart-DDL.sql') -# -# for checked_mode in('table', 'view'): -# -# target_obj = 'test' if checked_mode == 'table' else 'v_test' -# -# f_init_log=open( os.path.join(context['temp_directory'],'read-consist-sttm-restart-DDL.log'), 'w') -# f_init_err=open( ''.join( ( os.path.splitext(f_init_log.name)[0], '.err') ), 'w') -# -# subprocess.call( [context['isql_path'], dsn, '-q', '-i', sql_init_ddl], stdout=f_init_log, stderr=f_init_err ) -# flush_and_close(f_init_log) -# flush_and_close(f_init_err) -# -# # add rows with ID = 1,2,3,4,5: -# sql_addi=''' -# set term ^; -# execute block as -# begin -# rdb$set_context('USER_SESSION', 'WHO', 'INIT_DATA'); -# end -# ^ -# set term ;^ -# insert into %(target_obj)s(id, x) -# select row_number()over(),row_number()over() -# from rdb$types rows 5; -# commit; -# ''' % locals() -# runProgram('isql', [ dsn, '-q' ], sql_addi) -# -# con_lock_1 = fdb.connect( dsn = dsn ) -# con_lock_2 = fdb.connect( dsn = dsn ) -# con_lock_1.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #1'); end" ) -# con_lock_2.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #2'); end" ) -# -# ######################### -# ### L O C K E R - 1 ### -# ######################### -# -# con_lock_1.execute_immediate( 'update %(target_obj)s set id=id where id = 5' % locals() ) 
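+# Editor's note: an illustrative sketch (not the original author's code) of how the
+# LOCKER-1 / WORKER / LOCKER-2 interplay from DESCRIPTION could be driven once this test
+# is implemented with the new firebird-qa API. It is deliberately kept as a comment;
+# names such as act.vars['isql'] and act.db.dsn are assumptions about the plugin/driver
+# and may need adjustment, while execute_immediate()/commit() are firebird-driver calls.
+#
+#   import subprocess, time
+#
+#   def run_scenario(act: Action, target_obj: str, worker_sql: str):
+#       with act.db.connect() as con_lock_1, act.db.connect() as con_lock_2:
+#           # LOCKER-1 occupies the row with ID = 5 before the worker starts.
+#           con_lock_1.execute_immediate(f'update {target_obj} set id=id where id = 5')
+#           # WORKER: run the DELETE asynchronously via isql so that it can wait on that lock.
+#           p_worker = subprocess.Popen([act.vars['isql'], act.db.dsn, '-q', '-i', worker_sql])
+#           try:
+#               time.sleep(1)
+#               # LOCKER-2 / LOCKER-1: the commit / insert / update ping-pong from DESCRIPTION;
+#               # only the first step of locker-2 is shown, remaining steps follow the same pattern.
+#               con_lock_2.execute_immediate(f'insert into {target_obj}(id) values(-1)')
+#               con_lock_2.commit()
+#               con_lock_2.execute_immediate(f'update {target_obj} set id=id where id = -1')
+#           finally:
+#               p_worker.wait()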
-# -# sql_text=''' -# connect '%(dsn)s'; -# set list on; -# set autoddl off; -# set term ^; -# execute block returns (whoami varchar(30)) as -# begin -# whoami = 'WORKER'; -- , ATT#' || current_connection; -# rdb$set_context('USER_SESSION','WHO', whoami); -# -- suspend; -# end -# ^ -# set term ;^ -# commit; -# --set echo on; -# SET KEEP_TRAN_PARAMS ON; -# set transaction read committed read consistency; -# --select current_connection, current_transaction from rdb$database; -# set list off; -# set wng off; -# --set plan on; -# set count on; -# -# delete from %(target_obj)s where id < 0 or id >= 3 order by id; -- THIS MUST BE LOCKED -# -# -- check results: -# -- ############### -# -# select id from %(target_obj)s order by id; -- this will produce output only after all lockers do their commit/rollback -# -# select v.old_id, v.op, v.snap_no_rank -# from v_worker_log v -# where v.op = 'del'; -# -# set width who 10; -# -- DO NOT check this! Values can differ here from one run to another! -# -- select id, trn, who, old_id, new_id, op, rec_vers, global_cn, snap_no from tlog_done order by id; -# -# rollback; -# -# ''' % dict(globals(), **locals()) -# -# f_worker_sql=open( os.path.join(context['temp_directory'],'tmp_sttm_restart_on_delete_02.sql'), 'w') -# f_worker_sql.write(sql_text) -# flush_and_close(f_worker_sql) -# -# -# f_worker_log=open( ''.join( ( os.path.splitext(f_worker_sql.name)[0], '.log') ), 'w') -# f_worker_err=open( ''.join( ( os.path.splitext(f_worker_log.name)[0], '.err') ), 'w') -# -# ############################################################################ -# ### L A U N C H W O R K E R U S I N G I S Q L, A S Y N C. ### -# ############################################################################ -# -# p_worker = Popen( [ context['isql_path'], '-pag', '999999', '-q', '-i', f_worker_sql.name ],stdout=f_worker_log, stderr=f_worker_err) -# time.sleep(1) -# -# -# ######################### -# ### L O C K E R - 2 ### -# ######################### -# # Insert ID value that is less than previous min(id). -# # Session-worker is executing its statement using PLAN ORDER, -# # and it should see this new value and restart its statement: -# con_lock_2.execute_immediate( 'insert into %(target_obj)s(id) values(-1)' % locals() ) -# con_lock_2.commit() -# con_lock_2.execute_immediate( 'update %(target_obj)s set id=id where id = -1' % locals() ) -# -# ######################### -# ### L O C K E R - 1 ### -# ######################### -# con_lock_1.commit() -# con_lock_1.execute_immediate( 'insert into %(target_obj)s(id) values(-2)' % locals() ) -# con_lock_1.commit() -# con_lock_1.execute_immediate( 'update %(target_obj)s set id=id where id = -2' % locals() ) -# -# -# ######################### -# ### L O C K E R - 2 ### -# ######################### -# # Insert ID value that is less than previous min(id). 
-# # Session-worker is executing its statement using PLAN ORDER, -# # and it should see this new value and restart its statement: -# con_lock_2.commit() -# con_lock_2.execute_immediate( 'insert into %(target_obj)s(id) values(-3)' % locals() ) -# con_lock_2.commit() -# con_lock_2.execute_immediate( 'update %(target_obj)s set id=id where id = -3' % locals() ) -# -# con_lock_1.commit() -# con_lock_2.commit() -# -# # Here we wait for ISQL complete its mission: -# p_worker.wait() -# -# flush_and_close(f_worker_log) -# flush_and_close(f_worker_err) -# -# # Close lockers: -# ################ -# for c in (con_lock_1, con_lock_2): -# c.close() -# -# -# # CHECK RESULTS -# ############### -# with open(f_worker_log.name,'r') as f: -# for line in f: -# if line.strip(): -# print('checked_mode: %(checked_mode)s, STDLOG: %(line)s' % locals()) -# -# for f in (f_init_err, f_worker_err): -# with open(f.name,'r') as g: -# for line in g: -# if line.strip(): -# print( 'checked_mode: ', checked_mode, ' UNEXPECTED STDERR IN ' + g.name + ':', line) -# -# -# #=4.0') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") +# Original python code for this test: +# ----------------------------------- +# +# import os +# import sys +# import subprocess +# from subprocess import Popen +# from fdb import services +# import time +# +# os.environ["ISC_USER"] = user_name +# os.environ["ISC_PASSWORD"] = user_password +# +# db_conn.close() +# +# #-------------------------------------------- +# +# def flush_and_close( file_handle ): +# # https://docs.python.org/2/library/os.html#os.fsync +# # If you're starting with a Python file object f, +# # first do f.flush(), and +# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. +# global os +# +# file_handle.flush() +# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: +# # otherwise: "OSError: [Errno 9] Bad file descriptor"! 
+# os.fsync(file_handle.fileno()) +# file_handle.close() +# +# #-------------------------------------------- +# +# def cleanup( f_names_list ): +# global os +# for f in f_names_list: +# if type(f) == file: +# del_name = f.name +# elif type(f) == str: +# del_name = f +# else: +# print('Unrecognized type of element:', f, ' - can not be treated as file.') +# del_name = None +# +# if del_name and os.path.isfile( del_name ): +# os.remove( del_name ) +# +# #-------------------------------------------- +# +# sql_init_ddl = os.path.join(context['files_location'],'read-consist-sttm-restart-DDL.sql') +# +# for checked_mode in('table', 'view'): +# +# target_obj = 'test' if checked_mode == 'table' else 'v_test' +# +# f_init_log=open( os.path.join(context['temp_directory'],'read-consist-sttm-restart-DDL.log'), 'w') +# f_init_err=open( ''.join( ( os.path.splitext(f_init_log.name)[0], '.err') ), 'w') +# +# subprocess.call( [context['isql_path'], dsn, '-q', '-i', sql_init_ddl], stdout=f_init_log, stderr=f_init_err ) +# flush_and_close(f_init_log) +# flush_and_close(f_init_err) +# +# # add rows with ID = 1,2,3,4,5: +# sql_addi=''' +# set term ^; +# execute block as +# begin +# rdb$set_context('USER_SESSION', 'WHO', 'INIT_DATA'); +# end +# ^ +# set term ;^ +# insert into %(target_obj)s(id, x) +# select row_number()over(),row_number()over() +# from rdb$types rows 5; +# commit; +# ''' % locals() +# runProgram('isql', [ dsn, '-q' ], sql_addi) +# +# con_lock_1 = fdb.connect( dsn = dsn ) +# con_lock_2 = fdb.connect( dsn = dsn ) +# con_lock_1.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #1'); end" ) +# con_lock_2.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #2'); end" ) +# +# ######################### +# ### L O C K E R - 1 ### +# ######################### +# +# con_lock_1.execute_immediate( 'update %(target_obj)s set id=id where id = 5' % locals() ) +# +# sql_text=''' +# connect '%(dsn)s'; +# set list on; +# set autoddl off; +# set term ^; +# execute block returns (whoami varchar(30)) as +# begin +# whoami = 'WORKER'; -- , ATT#' || current_connection; +# rdb$set_context('USER_SESSION','WHO', whoami); +# -- suspend; +# end +# ^ +# set term ;^ +# commit; +# --set echo on; +# SET KEEP_TRAN_PARAMS ON; +# set transaction read committed read consistency; +# --select current_connection, current_transaction from rdb$database; +# set list off; +# set wng off; +# --set plan on; +# set count on; +# +# delete from %(target_obj)s where id < 0 or id >= 3 order by id; -- THIS MUST BE LOCKED +# +# -- check results: +# -- ############### +# +# select id from %(target_obj)s order by id; -- this will produce output only after all lockers do their commit/rollback +# +# select v.old_id, v.op, v.snap_no_rank +# from v_worker_log v +# where v.op = 'del'; +# +# set width who 10; +# -- DO NOT check this! Values can differ here from one run to another! 
+# -- select id, trn, who, old_id, new_id, op, rec_vers, global_cn, snap_no from tlog_done order by id; +# +# rollback; +# +# ''' % dict(globals(), **locals()) +# +# f_worker_sql=open( os.path.join(context['temp_directory'],'tmp_sttm_restart_on_delete_02.sql'), 'w') +# f_worker_sql.write(sql_text) +# flush_and_close(f_worker_sql) +# +# +# f_worker_log=open( ''.join( ( os.path.splitext(f_worker_sql.name)[0], '.log') ), 'w') +# f_worker_err=open( ''.join( ( os.path.splitext(f_worker_log.name)[0], '.err') ), 'w') +# +# ############################################################################ +# ### L A U N C H W O R K E R U S I N G I S Q L, A S Y N C. ### +# ############################################################################ +# +# p_worker = Popen( [ context['isql_path'], '-pag', '999999', '-q', '-i', f_worker_sql.name ],stdout=f_worker_log, stderr=f_worker_err) +# time.sleep(1) +# +# +# ######################### +# ### L O C K E R - 2 ### +# ######################### +# # Insert ID value that is less than previous min(id). +# # Session-worker is executing its statement using PLAN ORDER, +# # and it should see this new value and restart its statement: +# con_lock_2.execute_immediate( 'insert into %(target_obj)s(id) values(-1)' % locals() ) +# con_lock_2.commit() +# con_lock_2.execute_immediate( 'update %(target_obj)s set id=id where id = -1' % locals() ) +# +# ######################### +# ### L O C K E R - 1 ### +# ######################### +# con_lock_1.commit() +# con_lock_1.execute_immediate( 'insert into %(target_obj)s(id) values(-2)' % locals() ) +# con_lock_1.commit() +# con_lock_1.execute_immediate( 'update %(target_obj)s set id=id where id = -2' % locals() ) +# +# +# ######################### +# ### L O C K E R - 2 ### +# ######################### +# # Insert ID value that is less than previous min(id). +# # Session-worker is executing its statement using PLAN ORDER, +# # and it should see this new value and restart its statement: +# con_lock_2.commit() +# con_lock_2.execute_immediate( 'insert into %(target_obj)s(id) values(-3)' % locals() ) +# con_lock_2.commit() +# con_lock_2.execute_immediate( 'update %(target_obj)s set id=id where id = -3' % locals() ) +# +# con_lock_1.commit() +# con_lock_2.commit() +# +# # Here we wait for ISQL complete its mission: +# p_worker.wait() +# +# flush_and_close(f_worker_log) +# flush_and_close(f_worker_err) +# +# # Close lockers: +# ################ +# for c in (con_lock_1, con_lock_2): +# c.close() +# +# +# # CHECK RESULTS +# ############### +# with open(f_worker_log.name,'r') as f: +# for line in f: +# if line.strip(): +# print('checked_mode: %(checked_mode)s, STDLOG: %(line)s' % locals()) +# +# for f in (f_init_err, f_worker_err): +# with open(f.name,'r') as g: +# for line in g: +# if line.strip(): +# print( 'checked_mode: ', checked_mode, ' UNEXPECTED STDERR IN ' + g.name + ':', line) +# +# +# # then engine <...> creates new statement-level snapshot and restart execution...") -# -# ::: NB ::: -# This test uses script %FBT_REPO% -# iles -# ead-consist-sttm-restart-DDL.sql which contains common DDL for all other such tests. -# Particularly, it contains two TRIGGERS (TLOG_WANT and TLOG_DONE) which are used for logging of planned actions and actual -# results against table TEST. These triggers use AUTONOMOUS transactions in order to have ability to see results in any -# outcome of test. 
-# -# ############### -# Following scenario if executed here (see also: "doc\\README.read_consistency.md"; hereafer is marked as "DOC"): -# -# * add new table that is child to test: TDETL (with FK that references TEST and 'on delete cascade' clause) -# * three rows are inserted into the table TEST, with IDs: 2, 3 and 5. -# -# * session 'locker-1' ("BLOCKER" in Tom Kyte's article ): -# update set id=id where id = 5; -# -# * session 'worker' ("LONG" in TK article) has mission: -# delete from test where id >= 3 order by id; // using TIL = read committed read consistency -# -# // Execution will have PLAN ORDER . -# // It will delete (first avaliable for cursor) row with ID = 3 but can not change row with ID = 5 because of locker-1. -# // Update conflict appears here and, because of this, worker temporary changes its TIL to RC no record_version (RC NRV). -# // [DOC]: "a) transaction isolation mode temporarily switched to the READ COMMITTED *NO RECORD VERSION MODE*" -# // This (new) TIL allows worker further to see all committed versions, regardless of its own snapshot. -# -# * session 'locker-2' ("FIRSTLAST" in TK article): replaces ID = 2 with new value = 4, then commits -# and locks this record again: -# (1) update test set id = 4 where id = 2; -# (2) commit; -# (3) update test set id=id where id = 4; -# // session-'worker' remains waiting at this point because row with ID = 5 is still occupied by by locker-1 -# // but worker must further see record with (new) id = 4 because its TIL was changed to RC NO RECORD_VERSION. -# -# * session 'locker-1': -# (1) commit; -# (2) insert into test(id) values(6); -# (3) insert into detl(id, pid) values(6001, 6); -# (4) commit; -# (5) update test set id=id where id=6; -# // first of these statements: '(1) commit' - will release record with ID = 5. -# // Worker sees this record (because of TIL = RC NRV) and put write-lock on it. -# // [DOC]: "b) engine put write lock on conflicted record" -# // Also, because worker TIL = RC NRV, it will see two new rows with ID = 4 and 6, and they meet worker cursor condition ("id>=3"). -# // Worker resumes search for any rows with ID >=3, and it does this with taking in account "ORDER BY ID ASC". -# // [DOC]: "c) engine continue to evaluate remaining records of update\\delete cursor and put write locks on it too" -# // Worker starts to search records which must be involved in its DML and *found* sucn rows (with ID = 4 and 6). -# // NB. These rows currently can NOT be deleted by worker because of locker-2 and locker-1 have uncommitted updates. -# // BECAUSE OF FACT THAT AT LEAST ONE ROW *WAS FOUND* - STATEMENT-LEVEL RESTART *NOT* YET OCCURS HERE. -# // :::!! NB, AGAIN !! ::: restart NOT occurs here because at least one records found, see: -# // [DOC]: "d) when there is *no more* records to fetch, engine start to undo all actions performed since -# // top-level statement execution starts and preserve already taken write locks -# // e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*, -# // creates new statement-level snapshot and restart execution of top-level statement." -# -# * session 'locker-2': -# commit; -# // This will release record with ID = 4 (but row with ID = 6 is still inaccessible because of locker-1). -# // Worker sees record (because of TIL = RC NRV) with ID = 4 and put write-lock on it. -# // Then worker resumes search for any (new) rows with ID >= 3, and it does this with taking in account required order -# // of its DML (i.e. ORDER BY ID ASC). 
-# // [DOC]: "c) engine continue to evaluate remaining records of update\\delete cursor and put write locks on it too" -# // But there are no such rows in the tableL earlier worker already encountered all possible rows (with ID=4 and 6) -# // and *did* put write-locks on them. So at this point NO new rows can be found for putting new lock on it. -# // BECAUSE OF FACT THAT NO RECORDS FOUND, WORKER DOES UNDO BUT KEEP LOCKS AND THEN MAKES FIRST STATEMENT-LEVEL RESTART. -# // [DOC]: "d) when there is no more records to fetch, engine start to undo all actions ... and preserve already taken write locks -# // e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*, -# // creates new statement-level snapshot and restart execution of top-level statement." -# -# * session 'locker-1': -# commit; -# // This will release record with ID = 6 - and this is the last row which meet cursor condition of session-worker. -# // Worker sees record (because of TIL = RC NRV) with ID = 6 and put write-lock on it. -# // Then worker resumes search for any (new) rows with ID >= 3, and it does this with taking in account required order -# // of its DML (i.e. ORDER BY ID ASC). NO new rows (with ID >= 3) can be found for putting new lock on it. -# // BECAUSE OF FACT THAT NO RECORDS FOUND, WORKER DOES UNDO BUT KEEP LOCKS AND THEN MAKES SECOND STATEMENT-LEVEL RESTART. -# -# Expected result: -# * session-'worker' must *successfully* complete deletion of all rows which it could see at the starting point (ID=3 and 5) -# PLUS rows with ID = 4 (ex. ID=2) and 6 (this ID is new, it did not exist at the statement start). -# As result, all rows must be deleted. -# -# * three unique values must be in the column TLOG_DONE.SNAP_NO for session-'worker' when it performed DELETE statement: first of them -# was created by initial statement start and all others reflect two restarts (this column has values which are evaluated using -# rdb$get_context('SYSTEM', 'SNAPSHOT_NUMBER') -- see trigger TEST_AIUD). -# It is enough to count these values using COUNT(*) or enumarate them by DENSE_RANK() function. -# -# NOTE: concrete values of fields TRN, GLOBAL_CN and SNAP_NO in the TLOG_DONE can differ from one to another run! -# This is because of concurrent nature of connections that work against database. We must not assume that these values will be constant. -# ################ -# -# -# Checked on 4.0.0.2144 SS/CS -# -# tracker_id: -# min_versions: ['4.0'] -# versions: 4.0 -# qmid: + +""" +ID: transactions.read-consist-sttm-restart-on-delete-03 +TITLE: READ CONSISTENCY. Check creation of new statement-level snapshot and restarting changed caused by DELETE. Test-03. +DESCRIPTION: + Initial article for reading: + https://asktom.oracle.com/pls/asktom/f?p=100:11:::::P11_QUESTION_ID:11504247549852 + Note on terms which are used there: "BLOCKER", "LONG" and "FIRSTLAST" - their names are slightly changed here + to: LOCKER-1, WORKER and LOCKER-2 respectively. + See also: doc/README.read_consistency.md + + ********************************************** + + This test verifies that statement-level snapshot and restart will be performed when "main" session ("worker") + performs DELETE statement and is involved in update conflicts. + ("When update conflict is detected <...> then engine <...> creates new statement-level snapshot and restart execution...") + + ::: NB ::: + This test uses script %FBT_REPO%/files/read-consist-sttm-restart-DDL.sql which contains common DDL for all other such tests. 
+    Particularly, it contains two TRIGGERS (TLOG_WANT and TLOG_DONE) which are used for logging of planned actions and actual
+    results against table TEST. These triggers use AUTONOMOUS transactions in order to have the ability to see results in any
+    outcome of the test.
+
+    ###############
+    The following scenario is executed here (see also: "doc/README.read_consistency.md"; hereafter marked as "DOC"):
+
+    * add a new table that is a child of TEST: TDETL (with an FK that references TEST and an 'on delete cascade' clause)
+    * three rows are inserted into the table TEST, with IDs: 2, 3 and 5.
+
+    * session 'locker-1' ("BLOCKER" in Tom Kyte's article):
+          update test set id=id where id = 5;
+
+    * session 'worker' ("LONG" in TK article) has the mission:
+          delete from test where id >= 3 order by id; // using TIL = read committed read consistency
+
+      // Execution will have PLAN ORDER.
+      // It will delete the (first available for cursor) row with ID = 3 but cannot change the row with ID = 5 because of locker-1.
+      // An update conflict appears here and, because of this, worker temporarily changes its TIL to RC no record_version (RC NRV).
+      // [DOC]: "a) transaction isolation mode temporarily switched to the READ COMMITTED *NO RECORD VERSION MODE*"
+      // This (new) TIL further allows worker to see all committed versions, regardless of its own snapshot.
+
+    * session 'locker-2' ("FIRSTLAST" in TK article): replaces ID = 2 with the new value 4, then commits
+      and locks this record again:
+          (1) update test set id = 4 where id = 2;
+          (2) commit;
+          (3) update test set id=id where id = 4;
+          // session-'worker' remains waiting at this point because the row with ID = 5 is still occupied by locker-1,
+          // but worker must further see the record with (new) id = 4 because its TIL was changed to RC NO RECORD_VERSION.
+
+    * session 'locker-1':
+          (1) commit;
+          (2) insert into test(id) values(6);
+          (3) insert into detl(id, pid) values(6001, 6);
+          (4) commit;
+          (5) update test set id=id where id=6;
+          // The first of these statements: '(1) commit' - will release the record with ID = 5.
+          // Worker sees this record (because of TIL = RC NRV) and puts a write-lock on it.
+          // [DOC]: "b) engine put write lock on conflicted record"
+          // Also, because worker TIL = RC NRV, it will see two new rows with ID = 4 and 6, and they meet the worker cursor condition ("id >= 3").
+          // Worker resumes its search for any rows with ID >= 3, and it does this taking into account "ORDER BY ID ASC".
+          // [DOC]: "c) engine continue to evaluate remaining records of update/delete cursor and put write locks on it too"
+          // Worker starts to search for records which must be involved in its DML and *finds* such rows (with ID = 4 and 6).
+          // NB. These rows currently can NOT be deleted by worker because locker-2 and locker-1 have uncommitted updates.
+          // BECAUSE AT LEAST ONE ROW *WAS FOUND*, THE STATEMENT-LEVEL RESTART DOES *NOT* YET OCCUR HERE.
+          // :::!! NB, AGAIN !! ::: the restart does NOT occur here because at least one record was found, see:
+          // [DOC]: "d) when there is *no more* records to fetch, engine start to undo all actions performed since
+          //          top-level statement execution starts and preserve already taken write locks
+          //      e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*,
+          //         creates new statement-level snapshot and restart execution of top-level statement."
+
+    * session 'locker-2':
+          commit;
+          // This will release the record with ID = 4 (but the row with ID = 6 is still inaccessible because of locker-1).
+          // Worker sees the record with ID = 4 (because of TIL = RC NRV) and puts a write-lock on it.
+          // Then worker resumes its search for any (new) rows with ID >= 3, and it does this taking into account the required order
+          // of its DML (i.e. ORDER BY ID ASC).
+          // [DOC]: "c) engine continue to evaluate remaining records of update/delete cursor and put write locks on it too"
+          // But there are no such rows in the table: earlier, worker already encountered all possible rows (with ID = 4 and 6)
+          // and *did* put write-locks on them. So at this point NO new rows can be found to put a new lock on.
+          // BECAUSE NO MORE RECORDS WERE FOUND, WORKER DOES UNDO BUT KEEPS THE LOCKS AND THEN MAKES THE FIRST STATEMENT-LEVEL RESTART.
+          // [DOC]: "d) when there is no more records to fetch, engine start to undo all actions ... and preserve already taken write locks
+          //      e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*,
+          //         creates new statement-level snapshot and restart execution of top-level statement."
+
+    * session 'locker-1':
+          commit;
+          // This will release the record with ID = 6 - and this is the last row which meets the cursor condition of session-worker.
+          // Worker sees the record with ID = 6 (because of TIL = RC NRV) and puts a write-lock on it.
+          // Then worker resumes its search for any (new) rows with ID >= 3, and it does this taking into account the required order
+          // of its DML (i.e. ORDER BY ID ASC). NO new rows (with ID >= 3) can be found to put a new lock on.
+          // BECAUSE NO MORE RECORDS WERE FOUND, WORKER DOES UNDO BUT KEEPS THE LOCKS AND THEN MAKES THE SECOND STATEMENT-LEVEL RESTART.
+
+    Expected result:
+    * session-'worker' must *successfully* complete the deletion of all rows which it could see at the starting point (ID = 3 and 5)
+      PLUS the rows with ID = 4 (ex. ID = 2) and 6 (this ID is new, it did not exist at the statement start).
+      As a result, all rows must be deleted.
+
+    * Three unique values must be in the column TLOG_DONE.SNAP_NO for session-'worker' when it performed the DELETE statement: the first of them
+      was created by the initial statement start and the others reflect the two restarts (this column has values which are evaluated using
+      rdb$get_context('SYSTEM', 'SNAPSHOT_NUMBER') -- see trigger TEST_AIUD).
+      It is enough to count these values using COUNT(*) or enumerate them using the DENSE_RANK() function.
+
+    NOTE: concrete values of the fields TRN, GLOBAL_CN and SNAP_NO in TLOG_DONE can differ from one run to another!
+    This is because of the concurrent nature of the connections that work against the database. We must not assume that these values will be constant.
+ ################ + + + Checked on 4.0.0.2144 SS/CS +FBTEST: functional.transactions.read_consist_sttm_restart_on_delete_03 +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() -substitutions_1 = [('=', ''), ('[ \t]+', ' ')] +act = python_act('db', substitutions=[('=', ''), ('[ \t]+', ' ')]) -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -# test_script_1 -#--- -# -# import os -# import sys -# import subprocess -# from subprocess import Popen -# import re -# import difflib -# from fdb import services -# import time -# -# os.environ["ISC_USER"] = user_name -# os.environ["ISC_PASSWORD"] = user_password -# -# db_conn.close() -# -# #-------------------------------------------- -# -# def flush_and_close( file_handle ): -# # https://docs.python.org/2/library/os.html#os.fsync -# # If you're starting with a Python file object f, -# # first do f.flush(), and -# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. -# global os -# -# file_handle.flush() -# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: -# # otherwise: "OSError: [Errno 9] Bad file descriptor"! -# os.fsync(file_handle.fileno()) -# file_handle.close() -# -# #-------------------------------------------- -# -# def cleanup( f_names_list ): -# global os -# for f in f_names_list: -# if type(f) == file: -# del_name = f.name -# elif type(f) == str: -# del_name = f -# else: -# print('Unrecognized type of element:', f, ' - can not be treated as file.') -# del_name = None -# -# if del_name and os.path.isfile( del_name ): -# os.remove( del_name ) -# -# #-------------------------------------------- -# -# sql_init_ddl = os.path.join(context['files_location'],'read-consist-sttm-restart-DDL.sql') -# -# for checked_mode in('table', 'view'): -# -# target_obj = 'test' if checked_mode == 'table' else 'v_test' -# -# # drop dependencies: -# runProgram('isql', [ dsn, '-q' ], 'recreate table detl(id int);') -# -# f_init_log=open( os.path.join(context['temp_directory'],'read-consist-sttm-restart-DDL.log'), 'w') -# f_init_err=open( ''.join( ( os.path.splitext(f_init_log.name)[0], '.err') ), 'w') -# subprocess.call( [context['isql_path'], dsn, '-q', '-i', sql_init_ddl], stdout=f_init_log, stderr=f_init_err ) -# flush_and_close(f_init_log) -# flush_and_close(f_init_err) -# -# sql_addi=''' -# recreate table detl(id int, PID int references test on delete cascade on update cascade); -# commit; -# -# delete from test; -# insert into test(id, x) values(2,2); -# insert into test(id, x) values(3,3); -# insert into test(id, x) values(5,5); -# insert into detl(id, pid) values(2000, 2); -# insert into detl(id, pid) values(2001, 2); -# insert into detl(id, pid) values(2002, 2); -# insert into detl(id, pid) values(3001, 3); -# insert into detl(id, pid) values(5001, 5); -# insert into detl(id, pid) values(5001, 5); -# commit; -# ''' -# runProgram('isql', [ dsn, '-q' ], sql_addi) -# -# locker_tpb = fdb.TPB() -# locker_tpb.lock_timeout = 3; # LOCKER_LOCK_TIMEOUT -# locker_tpb.lock_resolution = fdb.isc_tpb_wait -# -# con_lock_1 = fdb.connect( dsn = dsn, isolation_level=locker_tpb ) -# con_lock_2 = fdb.connect( dsn = dsn, isolation_level=locker_tpb ) -# -# -# ######################### -# ### L O C K E R - 1 ### -# ######################### -# -# con_lock_1.execute_immediate( 'update %(target_obj)s set id=id where id=5' % locals() ) -# -# sql_text=''' -# connect 
'%(dsn)s'; -# set list on; -# set autoddl off; -# set term ^; -# execute block returns (whoami varchar(30)) as -# begin -# whoami = 'WORKER'; -- , ATT#' || current_connection; -# rdb$set_context('USER_SESSION','WHO', whoami); -# -- suspend; -# end -# ^ -# set term ;^ -# commit; -# SET KEEP_TRAN_PARAMS ON; -# set transaction read committed read consistency; -# --select current_connection, current_transaction from rdb$database; -# set list off; -# set wng off; -# --set plan on; -# set count on; -# -# delete from %(target_obj)s where id >= 3 order by id; -- THIS MUST BE LOCKED -# -# -- check results: -# -- ############### -# -# select id from %(target_obj)s order by id; -- this will produce output only after all lockers do their commit/rollback -# -# select v.old_id, v.op, v.snap_no_rank -# from v_worker_log v -# where v.op = 'del'; -# -# rollback; -# -# ''' % dict(globals(), **locals()) -# -# f_worker_sql=open( os.path.join(context['temp_directory'],'tmp_sttm_restart_on_delete_03.sql'), 'w') -# f_worker_sql.write(sql_text) -# flush_and_close(f_worker_sql) -# -# -# f_worker_log=open( ''.join( ( os.path.splitext(f_worker_sql.name)[0], '.log') ), 'w') -# f_worker_err=open( ''.join( ( os.path.splitext(f_worker_log.name)[0], '.err') ), 'w') -# -# ############################################################################ -# ### L A U N C H W O R K E R U S I N G I S Q L, A S Y N C. ### -# ############################################################################ -# -# p_worker = Popen( [ context['isql_path'], '-q', '-i', f_worker_sql.name ],stdout=f_worker_log, stderr=f_worker_err) -# time.sleep(1) -# -# -# ######################### -# ### L O C K E R - 2 ### -# ######################### -# con_lock_2.execute_immediate( 'update %(target_obj)s set id=4 where id=2;' % locals() ) -# con_lock_2.commit() -# con_lock_2.execute_immediate( 'update %(target_obj)s set id=id where id=4;' % locals() ) -# -# -# con_lock_1.commit() # release record with ID=5 (allow it to be deleted by session-worker) -# -# # Add record which did not exists when session-worker statement started. -# # Add also child record for it, then commit + re-lock just added record: -# con_lock_1.execute_immediate('insert into %(target_obj)s(id,x) values(6,6)' % locals()) -# con_lock_1.execute_immediate('insert into detl(id, pid) values(6001, 6)') -# con_lock_1.commit() -# con_lock_1.execute_immediate('update %(target_obj)s set id=id where id=6' % locals()) -# -# con_lock_2.commit() # release record with ID=4. At this point session-worker will be allowed to delete rows with ID=4 and 5. -# -# con_lock_1.commit() # release record with ID=6. It is the last record which also must be deleted by session-worker. 
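+# Editor's note: an illustrative sketch (not the original code) of how the "two restarts"
+# expectation from DESCRIPTION could be verified once the scenario has been driven: count
+# the distinct snapshot numbers that the WORKER session wrote into TLOG_DONE (the table and
+# its columns SNAP_NO / WHO / OP appear in read-consist-sttm-restart-DDL.sql and in the
+# legacy script kept below; the exact filter values are an assumption).
+#
+#   def count_statement_level_restarts(act: Action) -> int:
+#       with act.db.connect() as con:
+#           cur = con.cursor()
+#           cur.execute("select count(distinct snap_no) from tlog_done "
+#                       "where who containing 'WORKER' and op = 'del'")
+#           distinct_snapshots = cur.fetchone()[0]
+#       # N distinct statement-level snapshots mean N - 1 restarts;
+#       # for this test the expected return value is 2.
+#       return distinct_snapshots - 1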
-# -# # Here we wait for ISQL complete its mission: -# p_worker.wait() -# -# flush_and_close(f_worker_log) -# flush_and_close(f_worker_err) -# -# # Close lockers: -# ################ -# for c in (con_lock_1, con_lock_2): -# c.close() -# -# -# # CHECK RESULTS -# ############### -# with open(f_worker_log.name,'r') as f: -# for line in f: -# if line.strip(): -# print('checked_mode: %(checked_mode)s, STDLOG: %(line)s' % locals()) -# -# for f in (f_init_err, f_worker_err): -# with open(f.name,'r') as g: -# for line in g: -# if line.strip(): -# print( 'checked_mode: ', checked_mode, ' UNEXPECTED STDERR IN ' + g.name + ':', line) -# -# #=4.0') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") +# Original python code for this test: +# ----------------------------------- +# +# import os +# import sys +# import subprocess +# from subprocess import Popen +# import re +# import difflib +# from fdb import services +# import time +# +# os.environ["ISC_USER"] = user_name +# os.environ["ISC_PASSWORD"] = user_password +# +# db_conn.close() +# +# #-------------------------------------------- +# +# def flush_and_close( file_handle ): +# # https://docs.python.org/2/library/os.html#os.fsync +# # If you're starting with a Python file object f, +# # first do f.flush(), and +# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. +# global os +# +# file_handle.flush() +# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: +# # otherwise: "OSError: [Errno 9] Bad file descriptor"! +# os.fsync(file_handle.fileno()) +# file_handle.close() +# +# #-------------------------------------------- +# +# def cleanup( f_names_list ): +# global os +# for f in f_names_list: +# if type(f) == file: +# del_name = f.name +# elif type(f) == str: +# del_name = f +# else: +# print('Unrecognized type of element:', f, ' - can not be treated as file.') +# del_name = None +# +# if del_name and os.path.isfile( del_name ): +# os.remove( del_name ) +# +# #-------------------------------------------- +# +# sql_init_ddl = os.path.join(context['files_location'],'read-consist-sttm-restart-DDL.sql') +# +# for checked_mode in('table', 'view'): +# +# target_obj = 'test' if checked_mode == 'table' else 'v_test' +# +# # drop dependencies: +# runProgram('isql', [ dsn, '-q' ], 'recreate table detl(id int);') +# +# f_init_log=open( os.path.join(context['temp_directory'],'read-consist-sttm-restart-DDL.log'), 'w') +# f_init_err=open( ''.join( ( os.path.splitext(f_init_log.name)[0], '.err') ), 'w') +# subprocess.call( [context['isql_path'], dsn, '-q', '-i', sql_init_ddl], stdout=f_init_log, stderr=f_init_err ) +# flush_and_close(f_init_log) +# flush_and_close(f_init_err) +# +# sql_addi=''' +# recreate table detl(id int, PID int references test on delete cascade on update cascade); +# commit; +# +# delete from test; +# insert into test(id, x) values(2,2); +# insert into test(id, x) values(3,3); +# insert into test(id, x) values(5,5); +# insert into detl(id, pid) values(2000, 2); +# insert into detl(id, pid) values(2001, 2); +# insert into detl(id, pid) values(2002, 2); +# insert into detl(id, pid) values(3001, 3); +# insert into detl(id, pid) values(5001, 5); +# insert into detl(id, pid) values(5001, 5); +# commit; +# ''' +# runProgram('isql', [ dsn, '-q' ], sql_addi) +# +# locker_tpb = fdb.TPB() +# locker_tpb.lock_timeout = 3; # LOCKER_LOCK_TIMEOUT +# locker_tpb.lock_resolution = 
fdb.isc_tpb_wait +# +# con_lock_1 = fdb.connect( dsn = dsn, isolation_level=locker_tpb ) +# con_lock_2 = fdb.connect( dsn = dsn, isolation_level=locker_tpb ) +# +# +# ######################### +# ### L O C K E R - 1 ### +# ######################### +# +# con_lock_1.execute_immediate( 'update %(target_obj)s set id=id where id=5' % locals() ) +# +# sql_text=''' +# connect '%(dsn)s'; +# set list on; +# set autoddl off; +# set term ^; +# execute block returns (whoami varchar(30)) as +# begin +# whoami = 'WORKER'; -- , ATT#' || current_connection; +# rdb$set_context('USER_SESSION','WHO', whoami); +# -- suspend; +# end +# ^ +# set term ;^ +# commit; +# SET KEEP_TRAN_PARAMS ON; +# set transaction read committed read consistency; +# --select current_connection, current_transaction from rdb$database; +# set list off; +# set wng off; +# --set plan on; +# set count on; +# +# delete from %(target_obj)s where id >= 3 order by id; -- THIS MUST BE LOCKED +# +# -- check results: +# -- ############### +# +# select id from %(target_obj)s order by id; -- this will produce output only after all lockers do their commit/rollback +# +# select v.old_id, v.op, v.snap_no_rank +# from v_worker_log v +# where v.op = 'del'; +# +# rollback; +# +# ''' % dict(globals(), **locals()) +# +# f_worker_sql=open( os.path.join(context['temp_directory'],'tmp_sttm_restart_on_delete_03.sql'), 'w') +# f_worker_sql.write(sql_text) +# flush_and_close(f_worker_sql) +# +# +# f_worker_log=open( ''.join( ( os.path.splitext(f_worker_sql.name)[0], '.log') ), 'w') +# f_worker_err=open( ''.join( ( os.path.splitext(f_worker_log.name)[0], '.err') ), 'w') +# +# ############################################################################ +# ### L A U N C H W O R K E R U S I N G I S Q L, A S Y N C. ### +# ############################################################################ +# +# p_worker = Popen( [ context['isql_path'], '-q', '-i', f_worker_sql.name ],stdout=f_worker_log, stderr=f_worker_err) +# time.sleep(1) +# +# +# ######################### +# ### L O C K E R - 2 ### +# ######################### +# con_lock_2.execute_immediate( 'update %(target_obj)s set id=4 where id=2;' % locals() ) +# con_lock_2.commit() +# con_lock_2.execute_immediate( 'update %(target_obj)s set id=id where id=4;' % locals() ) +# +# +# con_lock_1.commit() # release record with ID=5 (allow it to be deleted by session-worker) +# +# # Add record which did not exists when session-worker statement started. +# # Add also child record for it, then commit + re-lock just added record: +# con_lock_1.execute_immediate('insert into %(target_obj)s(id,x) values(6,6)' % locals()) +# con_lock_1.execute_immediate('insert into detl(id, pid) values(6001, 6)') +# con_lock_1.commit() +# con_lock_1.execute_immediate('update %(target_obj)s set id=id where id=6' % locals()) +# +# con_lock_2.commit() # release record with ID=4. At this point session-worker will be allowed to delete rows with ID=4 and 5. +# +# con_lock_1.commit() # release record with ID=6. It is the last record which also must be deleted by session-worker. 
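# --- Editor's illustration only, not part of the original test code: ------------------
# If this "Not IMPLEMENTED" stub is later ported to the new firebird.qa API, the final
# check that is currently done by parsing the worker's isql log could be done in-process.
# The sketch below is an assumption: it presumes `act.db.connect()` from the python_act
# fixture and the `test` / `v_worker_log` objects created by read-consist-sttm-restart-DDL.sql.
#
#     with act.db.connect() as con:
#         cur = con.cursor()
#         cur.execute('select id from test order by id')
#         remaining_ids = [r[0] for r in cur.fetchall()]
#         cur.execute("select v.old_id, v.op, v.snap_no_rank from v_worker_log v where v.op = 'del'")
#         worker_log = cur.fetchall()
# ---------------------------------------------------------------------------------------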
+# +# # Here we wait for ISQL complete its mission: +# p_worker.wait() +# +# flush_and_close(f_worker_log) +# flush_and_close(f_worker_err) +# +# # Close lockers: +# ################ +# for c in (con_lock_1, con_lock_2): +# c.close() +# +# +# # CHECK RESULTS +# ############### +# with open(f_worker_log.name,'r') as f: +# for line in f: +# if line.strip(): +# print('checked_mode: %(checked_mode)s, STDLOG: %(line)s' % locals()) +# +# for f in (f_init_err, f_worker_err): +# with open(f.name,'r') as g: +# for line in g: +# if line.strip(): +# print( 'checked_mode: ', checked_mode, ' UNEXPECTED STDERR IN ' + g.name + ':', line) +# +# # then engine <...> creates new statement-level snapshot and restart execution...") -# -# ::: NB ::: -# This test uses script %FBT_REPO% -# iles -# ead-consist-sttm-restart-DDL.sql which contains common DDL for all other such tests. -# Particularly, it contains two TRIGGERS (TLOG_WANT and TLOG_DONE) which are used for logging of planned actions and actual -# results against table TEST. These triggers use AUTONOMOUS transactions in order to have ability to see results in any -# outcome of test. -# -# ############### -# Following scenario if executed here (see also: "doc\\README.read_consistency.md"; hereafer is marked as "DOC"): -# * five rows are inserted into the table TEST, with IDs: 1...5. -# -# * session 'locker-1' ("BLOCKER" in Tom Kyte's article ): -# update test set id = id where id = 1 -# -# * session 'worker' ("LONG" in TK article) has mission: -# delete from test where id<=2 order by id DESC rows 4; // using TIL = read committed read consistency -# -# // Execution will have PLAN ORDER . -# // It will delete rows starting with ID = 2 but can not change row with ID = 1 because of locker-1. -# // Update conflict appears here and, because of this, worker temporary changes its TIL to RC no record_version (RC NRV). -# // [DOC]: "a) transaction isolation mode temporarily switched to the READ COMMITTED *NO RECORD VERSION MODE*" -# // This (new) TIL allows worker further to see all committed versions, regardless of its own snapshot. -# -# * session 'locker-2' ("FIRSTLAST" in TK article): replaces ID = 5 with new value = -5, then commits -# and locks this record again: -# (1) commit; -# (2) update test set id = -5 where abs(id)=5; -# (3) commit; -# (4) update test set id = id where abs(id)=5; -# // session-'worker' remains waiting at this point because row with ID = 1 is still occupied by by locker-1. -# // but worker must further see record with (new) id = -5 because its TIL was changed to RC NO RECORD_VERSION. -# -# -# * session 'locker-1': replaces ID = 4 with new value = -4, then commits and locks this record again: -# (1) commit; -# (2) update test set id = -4 where abs(id)=4; -# (3) commit; -# (4) update test set id = id where abs(id)=4; -# -# // This: '(1) commit' - will release record with ID = 1. Worker sees this record and put write-lock on it. -# // [DOC]: "b) engine put write lock on conflicted record" -# // But it is only 2nd row of total 4 that worker must delete. -# // Because of TIL = RC NRV session-'worker' must see all committed records regardless on its own snapshot. -# // Worker resumes search for any rows with ID < 2, and it does this with taking in account required order -# // of its DML (i.e. 
'ORDER BY ID DESC ...') -# // [DOC]: "c) engine continue to evaluate remaining records of update\\delete cursor and put write locks on it too" -# // Worker starts to search records which must be involved in its DML and *found* first sucn row with ID = -5. -# // NB. This row currently can NOT be deleted by worker because locker-2 has uncommitted update of it. -# // BECAUSE OF FACT THAT AT LEAST ONE ROW *WAS FOUND* - STATEMENT-LEVEL RESTART *NOT* YET OCCURS HERE. -# // :::!! NB, AGAIN !! ::: restart NOT occurs here because at least one records found, see: -# // [DOC]: "d) when there is *no more* records to fetch, engine start to undo all actions performed since -# // top-level statement execution starts and preserve already taken write locks -# // e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*, -# // creates new statement-level snapshot and restart execution of top-level statement." -# -# * session 'locker-2': replaces ID = 3 with new value = -3, then commits and locks this record again: -# (1) commit; -# (2) update test set id = -3 where abs(id)=3; -# (3) commit; -# (4) update test set id = id where abs(id)=3; -# -# // This: '(1) commit' - will release record with ID = -5. Worker sees this record and put write-lock on it. -# // But this is only 3rd row of total 4 that worker must delete. -# // Because of worker TIL = RC NRV, he must see all committed records regardless on its own snapshot. -# // Worker resumes search for any rows with ID < -5, and it does this with taking in account required order -# // of its DML (i.e. 'ORDER BY ID DESC ...') -# // [DOC]: "c) engine continue to evaluate remaining records of update\\delete cursor and put write locks on it too" -# // There are no such rows in the table. -# // BECAUSE OF FACT THAT NO RECORDS FOUND, WORKER DOES UNDO BUT KEEP LOCKS AND THEN MAKES FIRST STATEMENT-LEVEL RESTART. -# // [DOC]: "d) when there is no more records to fetch, engine start to undo all actions ... and preserve already taken write locks -# // e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*, -# // creates new statement-level snapshot and restart execution of top-level statement." -# -# * session 'locker-1': -# commit; -# // This will release record with ID=-4. Worker sees this record and put write-lock on it. -# // At this point worker has proceeded all required number of rows for DML: 2, 1, -4 and -5. -# // BECAUSE OF FACT THAT ALL ROWS WERE PROCEEDED, WORKER DOES UNDO BUT KEEP LOCKS AND THEN MAKES SECOND STATEMENT-LEVEL RESTART. -# // [DOC]: "d) when there is no more records to fetch, engine start to undo all actions ... and preserve already taken write locks -# // e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*, -# // creates new statement-level snapshot and restart execution of top-level statement." -# // After this restart worker will waiting for row with ID = -3 (it sees this because of TIL = RC NRV). -# -# * session 'locker-2': -# commit. -# // This releases row with ID=-3. Worker sees this record and put write-lock on it. -# // Records with ID = 2, 1, -4 and -5 already have been locked, but worker must delete only FOUR rows (see its DML statement). -# // Thus only rows with ID = 2, 1, -3 and -4 will be deleted. Record with ID = -5 must *remain* in the table. -# // At this point worker has proceeded all required rows that meet condition for DML: 2, 1, -3 and -4. 
-# // BECAUSE OF FACT THAT ALL ROWS WERE PROCEEDED, WORKER DOES UNDO BUT KEEP LOCKS AND THEN MAKES THIRD STATEMENT-LEVEL RESTART. -# // [DOC]: "d) when there is no more records to fetch, engine start to undo all actions ... and preserve already taken write locks -# // e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*, -# // creates new statement-level snapshot and restart execution of top-level statement." -# -# Expected result: -# * session-'worker' must *successfully* complete deletion of 4 rows (but only two of them did exist at the starting point). -# Record with ID = -5 must remain in the table. -# -# * four unique values must be in the column TLOG_DONE.SNAP_NO for session-'worker' when it performed DELETE statement: first of them -# was created by initial statement start and all others reflect three restarts (this column has values which are evaluated using -# rdb$get_context('SYSTEM', 'SNAPSHOT_NUMBER') -- see trigger TEST_AIUD). -# It is enough to count these values using COUNT(*) or enumarate them by DENSE_RANK() function. -# -# NOTE: concrete values of fields TRN, GLOBAL_CN and SNAP_NO in the TLOG_DONE can differ from one to another run! -# This is because of concurrent nature of connections that work against database. We must not assume that these values will be constant. -# -# ################ -# -# Checked on 4.0.0.2144 SS/CS -# -# tracker_id: -# min_versions: ['4.0'] -# versions: 4.0 -# qmid: + +""" +ID: transactions.read-consist-sttm-restart-on-delete-04 +TITLE: READ CONSISTENCY. Check creation of new statement-level snapshot and restarting changed caused by DELETE. Test-04. +DESCRIPTION: + Initial article for reading: + https://asktom.oracle.com/pls/asktom/f?p=100:11:::::P11_QUESTION_ID:11504247549852 + Note on terms which are used there: "BLOCKER", "LONG" and "FIRSTLAST" - their names are slightly changed here + to: LOCKER-1, WORKER and LOCKER-2 respectively. + See also: doc/README.read_consistency.md + + ********************************************** + + This test verifies that statement-level snapshot and restart will be performed when "main" session ("worker") + performs DELETE statement and is involved in update conflicts. + ("When update conflict is detected <...> then engine <...> creates new statement-level snapshot and restart execution...") + + ::: NB ::: + This test uses script %FBT_REPO%/files/read-consist-sttm-restart-DDL.sql which contains common DDL for all other such tests. + Particularly, it contains two TRIGGERS (TLOG_WANT and TLOG_DONE) which are used for logging of planned actions and actual + results against table TEST. These triggers use AUTONOMOUS transactions in order to have ability to see results in any + outcome of test. + + ############### + Following scenario if executed here (see also: "doc/README.read_consistency.md"; hereafer is marked as "DOC"): + * five rows are inserted into the table TEST, with IDs: 1...5. + + * session 'locker-1' ("BLOCKER" in Tom Kyte's article ): + update test set id = id where id = 1 + + * session 'worker' ("LONG" in TK article) has mission: + delete from test where id<=2 order by id DESC rows 4; // using TIL = read committed read consistency + + // Execution will have PLAN ORDER . + // It will delete rows starting with ID = 2 but can not change row with ID = 1 because of locker-1. + // Update conflict appears here and, because of this, worker temporary changes its TIL to RC no record_version (RC NRV). 
+ // [DOC]: "a) transaction isolation mode temporarily switched to the READ COMMITTED *NO RECORD VERSION MODE*" + // This (new) TIL allows worker further to see all committed versions, regardless of its own snapshot. + + * session 'locker-2' ("FIRSTLAST" in TK article): replaces ID = 5 with new value = -5, then commits + and locks this record again: + (1) commit; + (2) update test set id = -5 where abs(id)=5; + (3) commit; + (4) update test set id = id where abs(id)=5; + // session-'worker' remains waiting at this point because row with ID = 1 is still occupied by by locker-1. + // but worker must further see record with (new) id = -5 because its TIL was changed to RC NO RECORD_VERSION. + + + * session 'locker-1': replaces ID = 4 with new value = -4, then commits and locks this record again: + (1) commit; + (2) update test set id = -4 where abs(id)=4; + (3) commit; + (4) update test set id = id where abs(id)=4; + + // This: '(1) commit' - will release record with ID = 1. Worker sees this record and put write-lock on it. + // [DOC]: "b) engine put write lock on conflicted record" + // But it is only 2nd row of total 4 that worker must delete. + // Because of TIL = RC NRV session-'worker' must see all committed records regardless on its own snapshot. + // Worker resumes search for any rows with ID < 2, and it does this with taking in account required order + // of its DML (i.e. 'ORDER BY ID DESC ...') + // [DOC]: "c) engine continue to evaluate remaining records of update/delete cursor and put write locks on it too" + // Worker starts to search records which must be involved in its DML and *found* first sucn row with ID = -5. + // NB. This row currently can NOT be deleted by worker because locker-2 has uncommitted update of it. + // BECAUSE OF FACT THAT AT LEAST ONE ROW *WAS FOUND* - STATEMENT-LEVEL RESTART *NOT* YET OCCURS HERE. + // :::!! NB, AGAIN !! ::: restart NOT occurs here because at least one records found, see: + // [DOC]: "d) when there is *no more* records to fetch, engine start to undo all actions performed since + // top-level statement execution starts and preserve already taken write locks + // e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*, + // creates new statement-level snapshot and restart execution of top-level statement." + + * session 'locker-2': replaces ID = 3 with new value = -3, then commits and locks this record again: + (1) commit; + (2) update test set id = -3 where abs(id)=3; + (3) commit; + (4) update test set id = id where abs(id)=3; + + // This: '(1) commit' - will release record with ID = -5. Worker sees this record and put write-lock on it. + // But this is only 3rd row of total 4 that worker must delete. + // Because of worker TIL = RC NRV, he must see all committed records regardless on its own snapshot. + // Worker resumes search for any rows with ID < -5, and it does this with taking in account required order + // of its DML (i.e. 'ORDER BY ID DESC ...') + // [DOC]: "c) engine continue to evaluate remaining records of update/delete cursor and put write locks on it too" + // There are no such rows in the table. + // BECAUSE OF FACT THAT NO RECORDS FOUND, WORKER DOES UNDO BUT KEEP LOCKS AND THEN MAKES FIRST STATEMENT-LEVEL RESTART. + // [DOC]: "d) when there is no more records to fetch, engine start to undo all actions ... 
and preserve already taken write locks + // e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*, + // creates new statement-level snapshot and restart execution of top-level statement." + + * session 'locker-1': + commit; + // This will release record with ID=-4. Worker sees this record and puts a write-lock on it. + // At this point the worker has processed all the rows required for its DML: 2, 1, -4 and -5. + // BECAUSE OF FACT THAT ALL ROWS WERE PROCESSED, WORKER DOES UNDO BUT KEEPS LOCKS AND THEN MAKES SECOND STATEMENT-LEVEL RESTART. + // [DOC]: "d) when there is no more records to fetch, engine start to undo all actions ... and preserve already taken write locks + // e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*, + // creates new statement-level snapshot and restart execution of top-level statement." + // After this restart the worker will be waiting for the row with ID = -3 (it sees this row because its TIL is RC NRV). + + * session 'locker-2': + commit. + // This releases the row with ID=-3. Worker sees this record and puts a write-lock on it. + // Records with ID = 2, 1, -4 and -5 have already been locked, but the worker must delete only FOUR rows (see its DML statement). + // Thus only rows with ID = 2, 1, -3 and -4 will be deleted. Record with ID = -5 must *remain* in the table. + // At this point the worker has processed all required rows that meet the condition of its DML: 2, 1, -3 and -4. + // BECAUSE OF FACT THAT ALL ROWS WERE PROCESSED, WORKER DOES UNDO BUT KEEPS LOCKS AND THEN MAKES THIRD STATEMENT-LEVEL RESTART. + // [DOC]: "d) when there is no more records to fetch, engine start to undo all actions ... and preserve already taken write locks + // e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*, + // creates new statement-level snapshot and restart execution of top-level statement." + + Expected result: + * session-'worker' must *successfully* complete deletion of 4 rows (but only two of them existed at the starting point). + Record with ID = -5 must remain in the table. + + * four unique values must be present in column TLOG_DONE.SNAP_NO for session-'worker' when it performed the DELETE statement: the first of them + was created by the initial statement start and all others reflect the three restarts (this column holds values which are evaluated using + rdb$get_context('SYSTEM', 'SNAPSHOT_NUMBER') -- see trigger TEST_AIUD). + It is enough to count these values using COUNT(*) or to enumerate them with the DENSE_RANK() function. + + NOTE: concrete values of fields TRN, GLOBAL_CN and SNAP_NO in TLOG_DONE can differ from one run to another! + This is because of the concurrent nature of the connections that work against the database. We must not assume that these values will be constant.
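    As an illustration only (editor's sketch; the actual logging objects come from
    read-consist-sttm-restart-DDL.sql, which is not part of this diff), the "four unique
    snapshots" expectation could be verified with a single query embedded in the test:

        # Hypothetical check; assumes TLOG_DONE really exposes WHO / OP / SNAP_NO as described above.
        snap_check_sql = """
            select count(distinct snap_no)
            from tlog_done
            where who = 'WORKER' and op = 'del'
        """
        # expected value: 4  (the initial execution of the DELETE plus three restarts)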
+ + ################ + + Checked on 4.0.0.2144 SS/CS +FBTEST: functional.transactions.read_consist_sttm_restart_on_delete_04 +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() -substitutions_1 = [('=', ''), ('[ \t]+', ' ')] +act = python_act('db', substitutions=[('=', ''), ('[ \t]+', ' ')]) -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -# test_script_1 -#--- -# -# import os -# import sys -# import subprocess -# from subprocess import Popen -# import shutil -# from fdb import services -# import time -# -# os.environ["ISC_USER"] = user_name -# os.environ["ISC_PASSWORD"] = user_password -# -# # How long can we wait for session-worker completition, seconds -# # (ISQL often can not complete its job for several seconds!): -# MAX_TIME_FOR_WAITING_WORKER_FINISH = 60 -# -# ############################## -# # Temply, for debug obly: -# this_fdb=db_conn.database_name -# this_dbg=os.path.splitext(this_fdb)[0] + '.4debug.fdb' -# ############################## -# -# db_conn.close() -# -# #-------------------------------------------- -# -# def flush_and_close( file_handle ): -# # https://docs.python.org/2/library/os.html#os.fsync -# # If you're starting with a Python file object f, -# # first do f.flush(), and -# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. -# global os -# -# file_handle.flush() -# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: -# # otherwise: "OSError: [Errno 9] Bad file descriptor"! -# os.fsync(file_handle.fileno()) -# file_handle.close() -# -# #-------------------------------------------- -# -# def cleanup( f_names_list ): -# global os -# for f in f_names_list: -# if type(f) == file: -# del_name = f.name -# elif type(f) == str: -# del_name = f -# else: -# print('Unrecognized type of element:', f, ' - can not be treated as file.') -# del_name = None -# -# if del_name and os.path.isfile( del_name ): -# os.remove( del_name ) -# -# #-------------------------------------------- -# -# -# sql_init_ddl = os.path.join(context['files_location'],'read-consist-sttm-restart-DDL.sql') -# -# for checked_mode in('table', 'view'): -# -# target_obj = 'test' if checked_mode == 'table' else 'v_test' -# -# f_init_log=open( os.path.join(context['temp_directory'],'read-consist-sttm-restart-DDL.log'), 'w') -# f_init_err=open( ''.join( ( os.path.splitext(f_init_log.name)[0], '.err') ), 'w') -# -# subprocess.call( [context['isql_path'], dsn, '-q', '-i', sql_init_ddl], stdout=f_init_log, stderr=f_init_err ) -# flush_and_close(f_init_log) -# flush_and_close(f_init_err) -# -# sql_addi=''' -# set term ^; -# execute block as -# begin -# rdb$set_context('USER_SESSION', 'WHO', 'INIT_DATA'); -# end -# ^ -# set term ;^ -# insert into %(target_obj)s(id, x) -# select row_number()over(),row_number()over() -# from rdb$types rows 5; -# commit; -# ''' % locals() -# runProgram('isql', [ dsn, '-q' ], sql_addi) -# -# locker_tpb = fdb.TPB() -# locker_tpb.lock_timeout = MAX_TIME_FOR_WAITING_WORKER_FINISH -# locker_tpb.lock_resolution = fdb.isc_tpb_wait -# -# con_lock_1 = fdb.connect( dsn = dsn, isolation_level=locker_tpb ) -# con_lock_2 = fdb.connect( dsn = dsn, isolation_level=locker_tpb ) -# -# con_lock_1.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #1'); end" ) -# con_lock_2.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 
'LOCKER #2'); end" ) -# -# ######################### -# ### L O C K E R - 1 ### -# ######################### -# -# con_lock_1.execute_immediate( 'update %(target_obj)s set id=id where id=1' % locals() ) -# -# sql_text=''' -# connect '%(dsn)s'; -# set list on; -# set autoddl off; -# set term ^; -# execute block returns (whoami varchar(30)) as -# begin -# whoami = 'WORKER'; -- , ATT#' || current_connection; -# rdb$set_context('USER_SESSION','WHO', whoami); -# -- suspend; -# end -# ^ -# set term ;^ -# commit; -# SET KEEP_TRAN_PARAMS ON; -# set transaction read committed read consistency; -# --select current_connection, current_transaction from rdb$database; -# set list off; -# set wng off; -# -# --set plan on; -# set count on; -# delete from %(target_obj)s where id <= 2 order by id DESC rows 4; -- THIS MUST HANG BECAUSE OF LOCKERs -# -# -- check results: -# -- ############### -# -# select id from %(target_obj)s order by id; -- one record must remain, with ID = -5 -# -# select v.old_id, v.op, v.snap_no_rank -- snap_no_rank must have four unique values: 1,2,3 and 4. -# from v_worker_log v -# where v.op = 'del'; -# -# --set width who 10; -# -- DO NOT check this! Values can differ here from one run to another! -# -- select id, trn, who, old_id, new_id, op, rec_vers, global_cn, snap_no from tlog_done order by id; -# rollback; -# -# ''' % dict(globals(), **locals()) -# -# f_worker_sql=open( os.path.join(context['temp_directory'],'tmp_sttm_restart_on_delete_04.sql'), 'w') -# f_worker_sql.write(sql_text) -# flush_and_close(f_worker_sql) -# -# -# f_worker_log=open( ''.join( ( os.path.splitext(f_worker_sql.name)[0], '.log') ), 'w') -# f_worker_err=open( ''.join( ( os.path.splitext(f_worker_log.name)[0], '.err') ), 'w') -# -# ############################################################################ -# ### L A U N C H W O R K E R U S I N G I S Q L, A S Y N C. ### -# ############################################################################ -# -# p_worker = Popen( [ context['isql_path'], '-pag', '9999999', '-q', '-i', f_worker_sql.name ],stdout=f_worker_log, stderr=f_worker_err) -# time.sleep(1) -# -# -# ######################### -# ### L O C K E R - 2 ### -# ######################### -# -# # Change ID so that it **will* be included in the set of rows that must be affected by session-worker: -# con_lock_2.execute_immediate( 'update %(target_obj)s set id = -5 where abs(id) = 5;' % locals() ) -# con_lock_2.commit() -# con_lock_2.execute_immediate( 'update %(target_obj)s set id = id where abs(id) = 5;' % locals() ) -# -# -# con_lock_1.commit() # releases record with ID=1 (allow it to be deleted by session-worker) -# -# # Change ID so that it **will* be included in the set of rows that must be affected by session-worker: -# con_lock_1.execute_immediate( 'update %(target_obj)s set id = -4 where abs(id) = 4;' % locals() ) -# con_lock_1.commit() -# con_lock_1.execute_immediate( 'update %(target_obj)s set id = id where abs(id) = 4;' % locals() ) -# -# -# con_lock_2.commit() # releases record with ID = -5, but session-worker is waiting for record with ID = -4 (that was changed by locker-1). -# con_lock_2.execute_immediate( 'update %(target_obj)s set id = -3 where abs(id) = 3;' % locals() ) -# con_lock_2.commit() -# con_lock_2.execute_immediate( 'update %(target_obj)s set id = id where abs(id) = 3;' % locals() ) -# -# con_lock_1.commit() # This releases row with ID=-4 but session-worker is waiting for ID = - 3 (changed by locker-2). -# con_lock_2.commit() # This releases row with ID=-3. 
No more locked rows so session-worker can finish its mission. -# -# # Here we wait for ISQL complete its mission: -# p_worker.wait() -# -# flush_and_close(f_worker_log) -# flush_and_close(f_worker_err) -# -# # Close lockers: -# ################ -# for c in (con_lock_1, con_lock_2): -# c.close() -# -# -# # CHECK RESULTS -# ############### -# with open(f_worker_log.name,'r') as f: -# for line in f: -# if line.strip(): -# print('checked_mode: %(checked_mode)s, STDLOG: %(line)s' % locals()) -# -# for f in (f_init_err, f_worker_err): -# with open(f.name,'r') as g: -# for line in g: -# if line.strip(): -# print( 'checked_mode: ', checked_mode, ' UNEXPECTED STDERR IN ' + g.name + ':', line) -# -# #=4.0') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") +# Original python code for this test: +# ----------------------------------- +# +# import os +# import sys +# import subprocess +# from subprocess import Popen +# import shutil +# from fdb import services +# import time +# +# os.environ["ISC_USER"] = user_name +# os.environ["ISC_PASSWORD"] = user_password +# +# # How long can we wait for session-worker completition, seconds +# # (ISQL often can not complete its job for several seconds!): +# MAX_TIME_FOR_WAITING_WORKER_FINISH = 60 +# +# ############################## +# # Temply, for debug obly: +# this_fdb=db_conn.database_name +# this_dbg=os.path.splitext(this_fdb)[0] + '.4debug.fdb' +# ############################## +# +# db_conn.close() +# +# #-------------------------------------------- +# +# def flush_and_close( file_handle ): +# # https://docs.python.org/2/library/os.html#os.fsync +# # If you're starting with a Python file object f, +# # first do f.flush(), and +# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. +# global os +# +# file_handle.flush() +# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: +# # otherwise: "OSError: [Errno 9] Bad file descriptor"! 
+# os.fsync(file_handle.fileno()) +# file_handle.close() +# +# #-------------------------------------------- +# +# def cleanup( f_names_list ): +# global os +# for f in f_names_list: +# if type(f) == file: +# del_name = f.name +# elif type(f) == str: +# del_name = f +# else: +# print('Unrecognized type of element:', f, ' - can not be treated as file.') +# del_name = None +# +# if del_name and os.path.isfile( del_name ): +# os.remove( del_name ) +# +# #-------------------------------------------- +# +# +# sql_init_ddl = os.path.join(context['files_location'],'read-consist-sttm-restart-DDL.sql') +# +# for checked_mode in('table', 'view'): +# +# target_obj = 'test' if checked_mode == 'table' else 'v_test' +# +# f_init_log=open( os.path.join(context['temp_directory'],'read-consist-sttm-restart-DDL.log'), 'w') +# f_init_err=open( ''.join( ( os.path.splitext(f_init_log.name)[0], '.err') ), 'w') +# +# subprocess.call( [context['isql_path'], dsn, '-q', '-i', sql_init_ddl], stdout=f_init_log, stderr=f_init_err ) +# flush_and_close(f_init_log) +# flush_and_close(f_init_err) +# +# sql_addi=''' +# set term ^; +# execute block as +# begin +# rdb$set_context('USER_SESSION', 'WHO', 'INIT_DATA'); +# end +# ^ +# set term ;^ +# insert into %(target_obj)s(id, x) +# select row_number()over(),row_number()over() +# from rdb$types rows 5; +# commit; +# ''' % locals() +# runProgram('isql', [ dsn, '-q' ], sql_addi) +# +# locker_tpb = fdb.TPB() +# locker_tpb.lock_timeout = MAX_TIME_FOR_WAITING_WORKER_FINISH +# locker_tpb.lock_resolution = fdb.isc_tpb_wait +# +# con_lock_1 = fdb.connect( dsn = dsn, isolation_level=locker_tpb ) +# con_lock_2 = fdb.connect( dsn = dsn, isolation_level=locker_tpb ) +# +# con_lock_1.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #1'); end" ) +# con_lock_2.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #2'); end" ) +# +# ######################### +# ### L O C K E R - 1 ### +# ######################### +# +# con_lock_1.execute_immediate( 'update %(target_obj)s set id=id where id=1' % locals() ) +# +# sql_text=''' +# connect '%(dsn)s'; +# set list on; +# set autoddl off; +# set term ^; +# execute block returns (whoami varchar(30)) as +# begin +# whoami = 'WORKER'; -- , ATT#' || current_connection; +# rdb$set_context('USER_SESSION','WHO', whoami); +# -- suspend; +# end +# ^ +# set term ;^ +# commit; +# SET KEEP_TRAN_PARAMS ON; +# set transaction read committed read consistency; +# --select current_connection, current_transaction from rdb$database; +# set list off; +# set wng off; +# +# --set plan on; +# set count on; +# delete from %(target_obj)s where id <= 2 order by id DESC rows 4; -- THIS MUST HANG BECAUSE OF LOCKERs +# +# -- check results: +# -- ############### +# +# select id from %(target_obj)s order by id; -- one record must remain, with ID = -5 +# +# select v.old_id, v.op, v.snap_no_rank -- snap_no_rank must have four unique values: 1,2,3 and 4. +# from v_worker_log v +# where v.op = 'del'; +# +# --set width who 10; +# -- DO NOT check this! Values can differ here from one run to another! 
+# -- select id, trn, who, old_id, new_id, op, rec_vers, global_cn, snap_no from tlog_done order by id; +# rollback; +# +# ''' % dict(globals(), **locals()) +# +# f_worker_sql=open( os.path.join(context['temp_directory'],'tmp_sttm_restart_on_delete_04.sql'), 'w') +# f_worker_sql.write(sql_text) +# flush_and_close(f_worker_sql) +# +# +# f_worker_log=open( ''.join( ( os.path.splitext(f_worker_sql.name)[0], '.log') ), 'w') +# f_worker_err=open( ''.join( ( os.path.splitext(f_worker_log.name)[0], '.err') ), 'w') +# +# ############################################################################ +# ### L A U N C H W O R K E R U S I N G I S Q L, A S Y N C. ### +# ############################################################################ +# +# p_worker = Popen( [ context['isql_path'], '-pag', '9999999', '-q', '-i', f_worker_sql.name ],stdout=f_worker_log, stderr=f_worker_err) +# time.sleep(1) +# +# +# ######################### +# ### L O C K E R - 2 ### +# ######################### +# +# # Change ID so that it **will* be included in the set of rows that must be affected by session-worker: +# con_lock_2.execute_immediate( 'update %(target_obj)s set id = -5 where abs(id) = 5;' % locals() ) +# con_lock_2.commit() +# con_lock_2.execute_immediate( 'update %(target_obj)s set id = id where abs(id) = 5;' % locals() ) +# +# +# con_lock_1.commit() # releases record with ID=1 (allow it to be deleted by session-worker) +# +# # Change ID so that it **will* be included in the set of rows that must be affected by session-worker: +# con_lock_1.execute_immediate( 'update %(target_obj)s set id = -4 where abs(id) = 4;' % locals() ) +# con_lock_1.commit() +# con_lock_1.execute_immediate( 'update %(target_obj)s set id = id where abs(id) = 4;' % locals() ) +# +# +# con_lock_2.commit() # releases record with ID = -5, but session-worker is waiting for record with ID = -4 (that was changed by locker-1). +# con_lock_2.execute_immediate( 'update %(target_obj)s set id = -3 where abs(id) = 3;' % locals() ) +# con_lock_2.commit() +# con_lock_2.execute_immediate( 'update %(target_obj)s set id = id where abs(id) = 3;' % locals() ) +# +# con_lock_1.commit() # This releases row with ID=-4 but session-worker is waiting for ID = - 3 (changed by locker-2). +# con_lock_2.commit() # This releases row with ID=-3. No more locked rows so session-worker can finish its mission. +# +# # Here we wait for ISQL complete its mission: +# p_worker.wait() +# +# flush_and_close(f_worker_log) +# flush_and_close(f_worker_err) +# +# # Close lockers: +# ################ +# for c in (con_lock_1, con_lock_2): +# c.close() +# +# +# # CHECK RESULTS +# ############### +# with open(f_worker_log.name,'r') as f: +# for line in f: +# if line.strip(): +# print('checked_mode: %(checked_mode)s, STDLOG: %(line)s' % locals()) +# +# for f in (f_init_err, f_worker_err): +# with open(f.name,'r') as g: +# for line in g: +# if line.strip(): +# print( 'checked_mode: ', checked_mode, ' UNEXPECTED STDERR IN ' + g.name + ':', line) +# +# # then engine <...> creates new statement-level snapshot and restart execution...") -# -# ::: NB ::: -# This test uses script %FBT_REPO% -# iles -# ead-consist-sttm-restart-DDL.sql which contains common DDL for all other such tests. -# Particularly, it contains two TRIGGERS (TLOG_WANT and TLOG_DONE) which are used for logging of planned actions and actual -# results against table TEST. These triggers use AUTONOMOUS transactions in order to have ability to see results in any -# outcome of test. 
-# -# ############### -# Following scenario if executed here (see also: "doc\\README.read_consistency.md"; hereafer is marked as "DOC"): -# -# * three rows are inserted into the table TEST, with ID = 1,2,3 (and X=1,2,3) -# * session 'locker-1' ("BLOCKER" in Tom Kyte's article ): -# update test set id = id where id = 3; -# -# * session 'worker' ("LONG" in TK article) has mission: -# merge into test t -- THIS MUST BE LOCKED -# using (select * from test order by id) s on s.id=t.id -# when matched then -# update set t.id = -t.id, t.x = -s.x -# when not matched then -# insert(id,x) values(s.id, -s.x - 500); -# -- and it does this within read committed read consistency. -# -# -# // Execution will have PLAN ORDER . -# // Worker starts with updating rows with ID = 1, 2 but can not change row with ID = 3 because of locker-1. -# // Because of detecting update conflist, worker changes here its TIL to RC NO RECORD_VERSION. -# -# * session 'locker-2' ("FIRSTLAST" in TK article): -# (1) insert into test(id) values(-13); -# (2) commit; -# (3) update test set id=id where id = -13; -# // session-'worker' remains waiting at this point because row with ID = 3 is still occupied by by locker-1. -# // Record with (new) id = -13 will be seen further because worker's TIL was changed to RC NO RECORD_VERSION. -# -# * session 'locker-1': -# (1) commit; -# (2) insert into test(id) values(-12); -# (3) commit; -# (4) update test set id=id where id = -12; -# -# // This: '(1) commit' - will release record with ID = 3. Worker sees this record and put write-lock on it. -# // [DOC]: "b) engine put write lock on conflicted record" -# // Because of TIL = RC NRV session-'worker' must see all committed records regardless on its own snapshot. -# // Worker resumes search for rows that must be updated with taking in account required order of its DML (i.e. 'ORDER BY ID'). -# // [DOC]: "c) engine continue to evaluate remaining records of update\\delete cursor and put write locks on it too" -# // New record which is involved in DML (and did not exist before) *will be found*, its ID = -13. -# // Worker stops on this record (with ID = -13) because id is occupied by locker-2. -# // BECAUSE OF FACT THAT AT LEAST ONE ROW *WAS FOUND* - STATEMENT-LEVEL RESTART *NOT* YET OCCURS HERE. -# // [DOC]: "d) when there is *no more* records to fetch, engine start to undo all actions performed since -# // top-level statement execution starts and preserve already taken write locks -# // e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*, -# // creates new statement-level snapshot and restart execution of top-level statement." -# -# * session 'locker-2': -# commit; -# -# // This: 'commit' - will release record with ID = -13. Worker sees this record and put write-lock on it. -# // [DOC]: "b) engine put write lock on conflicted record" -# // Because of TIL = RC NRV session-'worker' must see all committed records regardless on its own snapshot. -# // Worker resumes search for rows that must be updated with taking in account required order of its DML (i.e. 'ORDER BY ID'). -# // [DOC]: "c) engine continue to evaluate remaining records of update\\delete cursor and put write locks on it too" -# // New record which is involved in DML (and did not exist before) *will be found*, its ID = -12. -# // Worker stops on this record (with ID = -12) because id is occupied by locker-1. -# // BECAUSE OF FACT THAT AT LEAST ONE ROW *WAS FOUND* - STATEMENT-LEVEL RESTART *NOT* YET OCCURS HERE. 
-# // [DOC]: "d) when there is *no more* records to fetch, engine start to undo all actions performed since -# // top-level statement execution starts and preserve already taken write locks -# // e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*, -# // creates new statement-level snapshot and restart execution of top-level statement." -# -# -# * session 'locker-1': -# commit; -# // This commit will release record with ID = -12. Worker sees this record and put write-lock on it. -# // At this point there are no more records to be locked (by worker) that meet cursor condition: worker did put -# // write locks on all rows that meet its cursor conditions. -# // BECAUSE OF FACT THAT NO MORE RECORDS FOUND TO BE LOCKED, WORKER DOES UNDO BUT KEEP LOCKS AND THEN -# // MAKES FIRST STATEMENT-LEVEL RESTART. This restart is also the last in this test. -# -# -# Expected result: -# * session-'worker' must update of all rows with reverting signs of their IDs. Records which were inserted must have positive IDs. -# -# * Two unique values must be in the column TLOG_DONE.SNAP_NO for session-'worker' when it performed MERGE statement: first of them -# was created by initial statement start and second reflects SINGLE restart (this column has values which are evaluated using -# rdb$get_context('SYSTEM', 'SNAPSHOT_NUMBER') -- see trigger TEST_AIUD). -# It is enough to count these values using COUNT(*) or enumarate them by DENSE_RANK() function. -# -# NOTE: concrete values of fields TRN, GLOBAL_CN and SNAP_NO in the TLOG_DONE can differ from one to another run! -# This is because of concurrent nature of connections that work against database. We must not assume that these values will be constant. -# -# ################ -# -# Checked on 4.0.0.2204 -# NOTE test contains for-loop in order to check different target objects: TABLE ('test') and VIEW ('v_test'), see 'checked_mode'. -# -# tracker_id: -# min_versions: ['4.0'] -# versions: 4.0 -# qmid: + +""" +ID: transactions.read-consist-sttm-restart-on-merge-01 +TITLE: READ CONSISTENCY. Check creation of new statement-level snapshot and restarting changed caused by MERGE. Test-01. +DESCRIPTION: + Initial article for reading: + https://asktom.oracle.com/pls/asktom/f?p=100:11:::::P11_QUESTION_ID:11504247549852 + Note on terms which are used there: "BLOCKER", "LONG" and "FIRSTLAST" - their names are slightly changed here + to: LOCKER-1, WORKER and LOCKER-2 respectively. + + ********************************************** + + This test verifies that statement-level snapshot and restart will be performed when "main" session ("worker") + performs MERGE statement and is involved in update conflicts. + ("When update conflict is detected <...> then engine <...> creates new statement-level snapshot and restart execution...") + + ::: NB ::: + This test uses script %FBT_REPO%/files/read-consist-sttm-restart-DDL.sql which contains common DDL for all other such tests. + Particularly, it contains two TRIGGERS (TLOG_WANT and TLOG_DONE) which are used for logging of planned actions and actual + results against table TEST. These triggers use AUTONOMOUS transactions in order to have ability to see results in any + outcome of test. 
+ + ############### + Following scenario if executed here (see also: "doc/README.read_consistency.md"; hereafer is marked as "DOC"): + + * three rows are inserted into the table TEST, with ID = 1,2,3 (and X=1,2,3) + * session 'locker-1' ("BLOCKER" in Tom Kyte's article ): + update test set id = id where id = 3; + + * session 'worker' ("LONG" in TK article) has mission: + merge into test t -- THIS MUST BE LOCKED + using (select * from test order by id) s on s.id=t.id + when matched then + update set t.id = -t.id, t.x = -s.x + when not matched then + insert(id,x) values(s.id, -s.x - 500); + -- and it does this within read committed read consistency. + + + // Execution will have PLAN ORDER . + // Worker starts with updating rows with ID = 1, 2 but can not change row with ID = 3 because of locker-1. + // Because of detecting update conflist, worker changes here its TIL to RC NO RECORD_VERSION. + + * session 'locker-2' ("FIRSTLAST" in TK article): + (1) insert into test(id) values(-13); + (2) commit; + (3) update test set id=id where id = -13; + // session-'worker' remains waiting at this point because row with ID = 3 is still occupied by by locker-1. + // Record with (new) id = -13 will be seen further because worker's TIL was changed to RC NO RECORD_VERSION. + + * session 'locker-1': + (1) commit; + (2) insert into test(id) values(-12); + (3) commit; + (4) update test set id=id where id = -12; + + // This: '(1) commit' - will release record with ID = 3. Worker sees this record and put write-lock on it. + // [DOC]: "b) engine put write lock on conflicted record" + // Because of TIL = RC NRV session-'worker' must see all committed records regardless on its own snapshot. + // Worker resumes search for rows that must be updated with taking in account required order of its DML (i.e. 'ORDER BY ID'). + // [DOC]: "c) engine continue to evaluate remaining records of update/delete cursor and put write locks on it too" + // New record which is involved in DML (and did not exist before) *will be found*, its ID = -13. + // Worker stops on this record (with ID = -13) because id is occupied by locker-2. + // BECAUSE OF FACT THAT AT LEAST ONE ROW *WAS FOUND* - STATEMENT-LEVEL RESTART *NOT* YET OCCURS HERE. + // [DOC]: "d) when there is *no more* records to fetch, engine start to undo all actions performed since + // top-level statement execution starts and preserve already taken write locks + // e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*, + // creates new statement-level snapshot and restart execution of top-level statement." + + * session 'locker-2': + commit; + + // This: 'commit' - will release record with ID = -13. Worker sees this record and put write-lock on it. + // [DOC]: "b) engine put write lock on conflicted record" + // Because of TIL = RC NRV session-'worker' must see all committed records regardless on its own snapshot. + // Worker resumes search for rows that must be updated with taking in account required order of its DML (i.e. 'ORDER BY ID'). + // [DOC]: "c) engine continue to evaluate remaining records of update/delete cursor and put write locks on it too" + // New record which is involved in DML (and did not exist before) *will be found*, its ID = -12. + // Worker stops on this record (with ID = -12) because id is occupied by locker-1. + // BECAUSE OF FACT THAT AT LEAST ONE ROW *WAS FOUND* - STATEMENT-LEVEL RESTART *NOT* YET OCCURS HERE. 
+ // [DOC]: "d) when there is *no more* records to fetch, engine start to undo all actions performed since + // top-level statement execution starts and preserve already taken write locks + // e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*, + // creates new statement-level snapshot and restart execution of top-level statement." + + + * session 'locker-1': + commit; + // This commit will release record with ID = -12. Worker sees this record and put write-lock on it. + // At this point there are no more records to be locked (by worker) that meet cursor condition: worker did put + // write locks on all rows that meet its cursor conditions. + // BECAUSE OF FACT THAT NO MORE RECORDS FOUND TO BE LOCKED, WORKER DOES UNDO BUT KEEP LOCKS AND THEN + // MAKES FIRST STATEMENT-LEVEL RESTART. This restart is also the last in this test. + + + Expected result: + * session-'worker' must update of all rows with reverting signs of their IDs. Records which were inserted must have positive IDs. + + * Two unique values must be in the column TLOG_DONE.SNAP_NO for session-'worker' when it performed MERGE statement: first of them + was created by initial statement start and second reflects SINGLE restart (this column has values which are evaluated using + rdb$get_context('SYSTEM', 'SNAPSHOT_NUMBER') -- see trigger TEST_AIUD). + It is enough to count these values using COUNT(*) or enumarate them by DENSE_RANK() function. + + NOTE: concrete values of fields TRN, GLOBAL_CN and SNAP_NO in the TLOG_DONE can differ from one to another run! + This is because of concurrent nature of connections that work against database. We must not assume that these values will be constant. + + ################ + + Checked on 4.0.0.2204 + NOTE test contains for-loop in order to check different target objects: TABLE ('test') and VIEW ('v_test'), see 'checked_mode'. +FBTEST: functional.transactions.read_consist_sttm_restart_on_merge_01 +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() -substitutions_1 = [('=', ''), ('[ \t]+', ' ')] +act = python_act('db', substitutions=[('=', ''), ('[ \t]+', ' ')]) -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -# test_script_1 -#--- -# -# import os -# import sys -# import subprocess -# from subprocess import Popen -# from fdb import services -# import time -# -# os.environ["ISC_USER"] = user_name -# os.environ["ISC_PASSWORD"] = user_password -# -# db_conn.close() -# -# #-------------------------------------------- -# -# def flush_and_close( file_handle ): -# # https://docs.python.org/2/library/os.html#os.fsync -# # If you're starting with a Python file object f, -# # first do f.flush(), and -# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. -# global os -# -# file_handle.flush() -# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: -# # otherwise: "OSError: [Errno 9] Bad file descriptor"! 
-# os.fsync(file_handle.fileno()) -# file_handle.close() -# -# #-------------------------------------------- -# -# def cleanup( f_names_list ): -# global os -# for f in f_names_list: -# if type(f) == file: -# del_name = f.name -# elif type(f) == str: -# del_name = f -# else: -# print('Unrecognized type of element:', f, ' - can not be treated as file.') -# del_name = None -# -# if del_name and os.path.isfile( del_name ): -# os.remove( del_name ) -# -# #-------------------------------------------- -# -# sql_init_ddl = os.path.join(context['files_location'],'read-consist-sttm-restart-DDL.sql') -# -# for checked_mode in('table', 'view'): -# -# target_obj = 'test' if checked_mode == 'table' else 'v_test' -# -# f_init_log=open( os.path.join(context['temp_directory'],'read-consist-sttm-restart-DDL.log'), 'w') -# f_init_err=open( ''.join( ( os.path.splitext(f_init_log.name)[0], '.err') ), 'w') -# -# # RECREATION OF ALL DB OBJECTS: -# # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -# subprocess.call( [context['isql_path'], dsn, '-q', '-i', sql_init_ddl], stdout=f_init_log, stderr=f_init_err ) -# flush_and_close(f_init_log) -# flush_and_close(f_init_err) -# -# # add rows with ID = 1, 2, 3: -# sql_addi=''' -# set term ^; -# execute block as -# begin -# rdb$set_context('USER_SESSION', 'WHO', 'INIT_DATA'); -# end -# ^ -# set term ;^ -# insert into %(target_obj)s(id, x) -# select row_number()over(),row_number()over() -# from rdb$types rows 3; -# commit; -# ''' % locals() -# runProgram('isql', [ dsn, '-q' ], sql_addi) -# -# con_lock_1 = fdb.connect( dsn = dsn ) -# con_lock_2 = fdb.connect( dsn = dsn ) -# con_lock_1.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #1'); end" ) -# con_lock_2.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #2'); end" ) -# -# -# ######################### -# ### L O C K E R - 1 ### -# ######################### -# -# con_lock_1.execute_immediate( 'update %(target_obj)s set id=id where id = 3' % locals()) -# -# sql_text=''' -# connect '%(dsn)s'; -# set list on; -# set autoddl off; -# set term ^; -# execute block returns (whoami varchar(30)) as -# begin -# whoami = 'WORKER'; -- , ATT#' || current_connection; -# rdb$set_context('USER_SESSION','WHO', whoami); -# -- suspend; -# end -# ^ -# set term ;^ -# commit; -# --set echo on; -# SET KEEP_TRAN_PARAMS ON; -# set transaction read committed read consistency; -# -# set list off; -# set wng off; -# set count on; -# -# merge into %(target_obj)s t -- THIS MUST BE LOCKED -# using (select * from %(target_obj)s order by id) s on s.id=t.id -# when matched then -# update set t.id = -t.id, t.x = -s.x -# when not matched then -# insert(id,x) values(s.id, -s.x - 500); -# -# -- check results: -# -- ############### -# -# select id,x from %(target_obj)s order by id; -- this will produce output only after all lockers do their commit/rollback -# -# select v.old_id, v.op, v.snap_no_rank -# from v_worker_log v -# where v.op = 'upd'; -# -# set width who 10; -# -- DO NOT check this! Values can differ here from one run to another! 
-# -- select id, trn, who, old_id, new_id, op, rec_vers, global_cn, snap_no from tlog_done order by id; -# -# rollback; -# -# ''' % dict(globals(), **locals()) -# -# f_worker_sql=open( os.path.join(context['temp_directory'],'tmp_sttm_restart_on_merge_01.sql'), 'w') -# f_worker_sql.write(sql_text) -# flush_and_close(f_worker_sql) -# -# -# f_worker_log=open( ''.join( ( os.path.splitext(f_worker_sql.name)[0], '.log') ), 'w') -# f_worker_err=open( ''.join( ( os.path.splitext(f_worker_log.name)[0], '.err') ), 'w') -# -# ############################################################################ -# ### L A U N C H W O R K E R U S I N G I S Q L, A S Y N C. ### -# ############################################################################ -# -# p_worker = Popen( [ context['isql_path'], '-pag', '999999', '-q', '-i', f_worker_sql.name ],stdout=f_worker_log, stderr=f_worker_err) -# time.sleep(1) -# -# ######################### -# ### L O C K E R - 2 ### -# ######################### -# con_lock_2.execute_immediate( 'insert into %(target_obj)s(id,x) values(-13,-13)' % locals()) -# con_lock_2.commit() -# con_lock_2.execute_immediate( 'update %(target_obj)s set id=id where id = -13' % locals()) -# -# ######################### -# ### L O C K E R - 1 ### -# ######################### -# con_lock_1.commit() -# con_lock_1.execute_immediate( 'insert into %(target_obj)s(id,x) values(-12,-12)' % locals() ) -# con_lock_1.commit() -# con_lock_1.execute_immediate( 'update %(target_obj)s set id=id where id = -12' % locals() ) -# -# ######################### -# ### L O C K E R - 2 ### -# ######################### -# con_lock_2.commit() -# -# ######################### -# ### L O C K E R - 1 ### -# ######################### -# con_lock_1.commit() # WORKER will complete his job after this -# -# # Here we wait for ISQL complete its mission: -# p_worker.wait() -# -# flush_and_close(f_worker_log) -# flush_and_close(f_worker_err) -# -# # Close lockers: -# ################ -# for c in (con_lock_1, con_lock_2): -# c.close() -# -# -# # CHECK RESULTS -# ############### -# with open(f_worker_log.name,'r') as f: -# for line in f: -# if line.strip(): -# print('checked_mode: %(checked_mode)s, STDLOG: %(line)s' % locals()) -# -# for f in (f_init_err, f_worker_err): -# with open(f.name,'r') as g: -# for line in g: -# if line.strip(): -# print( 'checked_mode: ', checked_mode, ' UNEXPECTED STDERR IN ' + g.name + ':', line) -# -# #=4.0') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") +# Original python code for this test: +# ----------------------------------- +# +# import os +# import sys +# import subprocess +# from subprocess import Popen +# from fdb import services +# import time +# +# os.environ["ISC_USER"] = user_name +# os.environ["ISC_PASSWORD"] = user_password +# +# db_conn.close() +# +# #-------------------------------------------- +# +# def flush_and_close( file_handle ): +# # https://docs.python.org/2/library/os.html#os.fsync +# # If you're starting with a Python file object f, +# # first do f.flush(), and +# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. +# global os +# +# file_handle.flush() +# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: +# # otherwise: "OSError: [Errno 9] Bad file descriptor"! 
+# os.fsync(file_handle.fileno()) +# file_handle.close() +# +# #-------------------------------------------- +# +# def cleanup( f_names_list ): +# global os +# for f in f_names_list: +# if type(f) == file: +# del_name = f.name +# elif type(f) == str: +# del_name = f +# else: +# print('Unrecognized type of element:', f, ' - can not be treated as file.') +# del_name = None +# +# if del_name and os.path.isfile( del_name ): +# os.remove( del_name ) +# +# #-------------------------------------------- +# +# sql_init_ddl = os.path.join(context['files_location'],'read-consist-sttm-restart-DDL.sql') +# +# for checked_mode in('table', 'view'): +# +# target_obj = 'test' if checked_mode == 'table' else 'v_test' +# +# f_init_log=open( os.path.join(context['temp_directory'],'read-consist-sttm-restart-DDL.log'), 'w') +# f_init_err=open( ''.join( ( os.path.splitext(f_init_log.name)[0], '.err') ), 'w') +# +# # RECREATION OF ALL DB OBJECTS: +# # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# subprocess.call( [context['isql_path'], dsn, '-q', '-i', sql_init_ddl], stdout=f_init_log, stderr=f_init_err ) +# flush_and_close(f_init_log) +# flush_and_close(f_init_err) +# +# # add rows with ID = 1, 2, 3: +# sql_addi=''' +# set term ^; +# execute block as +# begin +# rdb$set_context('USER_SESSION', 'WHO', 'INIT_DATA'); +# end +# ^ +# set term ;^ +# insert into %(target_obj)s(id, x) +# select row_number()over(),row_number()over() +# from rdb$types rows 3; +# commit; +# ''' % locals() +# runProgram('isql', [ dsn, '-q' ], sql_addi) +# +# con_lock_1 = fdb.connect( dsn = dsn ) +# con_lock_2 = fdb.connect( dsn = dsn ) +# con_lock_1.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #1'); end" ) +# con_lock_2.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #2'); end" ) +# +# +# ######################### +# ### L O C K E R - 1 ### +# ######################### +# +# con_lock_1.execute_immediate( 'update %(target_obj)s set id=id where id = 3' % locals()) +# +# sql_text=''' +# connect '%(dsn)s'; +# set list on; +# set autoddl off; +# set term ^; +# execute block returns (whoami varchar(30)) as +# begin +# whoami = 'WORKER'; -- , ATT#' || current_connection; +# rdb$set_context('USER_SESSION','WHO', whoami); +# -- suspend; +# end +# ^ +# set term ;^ +# commit; +# --set echo on; +# SET KEEP_TRAN_PARAMS ON; +# set transaction read committed read consistency; +# +# set list off; +# set wng off; +# set count on; +# +# merge into %(target_obj)s t -- THIS MUST BE LOCKED +# using (select * from %(target_obj)s order by id) s on s.id=t.id +# when matched then +# update set t.id = -t.id, t.x = -s.x +# when not matched then +# insert(id,x) values(s.id, -s.x - 500); +# +# -- check results: +# -- ############### +# +# select id,x from %(target_obj)s order by id; -- this will produce output only after all lockers do their commit/rollback +# +# select v.old_id, v.op, v.snap_no_rank +# from v_worker_log v +# where v.op = 'upd'; +# +# set width who 10; +# -- DO NOT check this! Values can differ here from one run to another! 
+# -- select id, trn, who, old_id, new_id, op, rec_vers, global_cn, snap_no from tlog_done order by id; +# +# rollback; +# +# ''' % dict(globals(), **locals()) +# +# f_worker_sql=open( os.path.join(context['temp_directory'],'tmp_sttm_restart_on_merge_01.sql'), 'w') +# f_worker_sql.write(sql_text) +# flush_and_close(f_worker_sql) +# +# +# f_worker_log=open( ''.join( ( os.path.splitext(f_worker_sql.name)[0], '.log') ), 'w') +# f_worker_err=open( ''.join( ( os.path.splitext(f_worker_log.name)[0], '.err') ), 'w') +# +# ############################################################################ +# ### L A U N C H W O R K E R U S I N G I S Q L, A S Y N C. ### +# ############################################################################ +# +# p_worker = Popen( [ context['isql_path'], '-pag', '999999', '-q', '-i', f_worker_sql.name ],stdout=f_worker_log, stderr=f_worker_err) +# time.sleep(1) +# +# ######################### +# ### L O C K E R - 2 ### +# ######################### +# con_lock_2.execute_immediate( 'insert into %(target_obj)s(id,x) values(-13,-13)' % locals()) +# con_lock_2.commit() +# con_lock_2.execute_immediate( 'update %(target_obj)s set id=id where id = -13' % locals()) +# +# ######################### +# ### L O C K E R - 1 ### +# ######################### +# con_lock_1.commit() +# con_lock_1.execute_immediate( 'insert into %(target_obj)s(id,x) values(-12,-12)' % locals() ) +# con_lock_1.commit() +# con_lock_1.execute_immediate( 'update %(target_obj)s set id=id where id = -12' % locals() ) +# +# ######################### +# ### L O C K E R - 2 ### +# ######################### +# con_lock_2.commit() +# +# ######################### +# ### L O C K E R - 1 ### +# ######################### +# con_lock_1.commit() # WORKER will complete his job after this +# +# # Here we wait for ISQL complete its mission: +# p_worker.wait() +# +# flush_and_close(f_worker_log) +# flush_and_close(f_worker_err) +# +# # Close lockers: +# ################ +# for c in (con_lock_1, con_lock_2): +# c.close() +# +# +# # CHECK RESULTS +# ############### +# with open(f_worker_log.name,'r') as f: +# for line in f: +# if line.strip(): +# print('checked_mode: %(checked_mode)s, STDLOG: %(line)s' % locals()) +# +# for f in (f_init_err, f_worker_err): +# with open(f.name,'r') as g: +# for line in g: +# if line.strip(): +# print( 'checked_mode: ', checked_mode, ' UNEXPECTED STDERR IN ' + g.name + ':', line) +# +# # then engine <...> creates new statement-level snapshot and restart execution...") -# -# ::: NB ::: -# This test uses script %FBT_REPO% -# iles -# ead-consist-sttm-restart-DDL.sql which contains common DDL for all other such tests. -# Particularly, it contains two TRIGGERS (TLOG_WANT and TLOG_DONE) which are used for logging of planned actions and actual -# results against table TEST. These triggers use AUTONOMOUS transactions in order to have ability to see results in any -# outcome of test. -# -# -# ############### -# Following scenario if executed here (see also: "doc\\README.read_consistency.md"; hereafer is marked as "DOC"): -# -# * five rows are inserted into the table TEST, with IDs: 1...5 -# * session 'locker-1' ("BLOCKER" in Tom Kyte's article ); -# update set id=id where id = 5; -# -# * session 'worker' ("LONG" in TK article) has mission: -# merge into test t using(select * from test where id < 0 or id >= 3 order by id) s on t.id = s.id when matched then delete; -# // using TIL = read committed read consistency -# -# // Execution will have PLAN ORDER . 
-# // It will delete rows with ID = 3 and 4 but hang on row with ID = 5 because of locker-1; -# // Update conflict appears here and, because of this, worker temporary changes its TIL to RC no record_version (RC NRV). -# // [DOC]: "a) transaction isolation mode temporarily switched to the READ COMMITTED *NO RECORD VERSION MODE*" -# // This (new) TIL allows worker further to see all committed versions, regardless of its own snapshot. -# -# * session 'locker-2' ("FIRSTLAST" in TK article): -# (1) insert into test(id) values(-1); // i.e. LESS than min(id)=1 that existed at the start of session-worker statement -# (2) commit; -# (3) update test set id=id where id = -1; -# // Session-worker must still hang because row with ID = 5 is occupied by locker-1. -# // But worker must further see record with (new) id = -1 because its TIL was changed to RC NO RECORD_VERSION. -# -# * session 'locker-1': -# (1) commit; -# (2) insert into test(id) values(-2); // i.e. LESS than min(id)=-1 that existed before this -# (3) commit; -# (4) update test set id=id where id = -2; -# // This: '(1) commit' - will release record with ID = 5. Worker sees this record and put write-lock on it. -# // [DOC]: "b) engine put write lock on conflicted record" -# // Because of TIL = RC NRV session-'worker' must see all committed records regardless on its own snapshot. -# // Worker resumes search for any rows which meet condition: "id < 0 or id >= 3", and it does this with taking in account -# // required order of its DML (i.e. 'ORDER BY ID') -# // [DOC]: "c) engine continue to evaluate remaining records of update\\delete cursor and put write locks on it too" -# // Worker starts to search records which must be involved in its DML and *found* first sucn row: it has ID = -1. -# // NB. This row currently can NOT be deleted by worker because locker-2 has uncommitted update of it. -# // BECAUSE OF FACT THAT AT LEAST ONE ROW *WAS FOUND* - STATEMENT-LEVEL RESTART *NOT* YET OCCURS HERE. -# // :::!! NB, AGAIN !! ::: restart NOT occurs here because at least one records found, see: -# // [DOC]: "d) when there is *no more* records to fetch, engine start to undo all actions performed since -# // top-level statement execution starts and preserve already taken write locks -# // e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*, -# // creates new statement-level snapshot and restart execution of top-level statement." -# -# -# * session 'locker-2': -# (1) commit; -# (2) insert into test(id) values(-3); // i.e. LESS than min(id)=-1 that existed before this -# (3) commit; -# (4) update test set id=id where id = -3; -# -# // This: '(1) commit' - will release record with ID = -1. Worker sees this record and put write-lock on it. -# // [DOC]: "b) engine put write lock on conflicted record" -# // Because of TIL = RC NRV session-'worker' must see all committed records regardless on its own snapshot. -# // Worker resumes search for any rows which meet condition: "id < 0 or id >= 3", and it does this with taking in account -# // required order of its DML (i.e. 'ORDER BY ID') -# // [DOC]: "c) engine continue to evaluate remaining records of update\\delete cursor and put write locks on it too" -# // Worker starts to search records which must be involved in its DML and *found* first sucn row: it has ID = -2. -# // NB. This row currently can NOT be deleted by worker because locker-1 has uncommitted update of it. -# // BECAUSE OF FACT THAT AT LEAST ONE ROW *WAS FOUND* - STATEMENT-LEVEL RESTART *NOT* YET OCCURS HERE. 
-#
-# * session 'locker-1':
-# commit;
-# // This: '(1) commit' - will release record with ID = -2. Worker sees this record and put write-lock on it.
-# // Because of worker TIL = RC NRV, he must see all committed records regardless on its own snapshot.
-# // Worker resumes search for any rows with ID < 0, and it does this with taking in account required order
-# // of its DML (i.e. 'ORDER BY ID')
-# // Worker starts to search records which must be involved in its DML and *found* first sucn row with ID = -3.
-# // NB. This row currently can NOT be deleted by worker because locker-2 has uncommitted update of it.
-# // BECAUSE OF FACT THAT AT LEAST ONE ROW *WAS FOUND* - STATEMENT-LEVEL RESTART *NOT* YET OCCURS HERE.
-#
-# * session 'locker-2':
-# commit;
-# // This will release record with ID=-3. Worker sees this record and put write-lock on it.
-# // Because of worker TIL = RC NRV, he must see all committed records regardless on its own snapshot.
-# // Worker resumes search for any rows with ID < 0, and it does this with taking in account required order
-# // of its DML (i.e. 'ORDER BY ID').
-# // At this point there are no more records to be locked (by worker) that meet cursor condition: worker did put
-# // write locks on all rows that meet its cursor conditions (ID < 0 or ID>= 3).
-# // BECAUSE OF FACT THAT NO MORE RECORDS FOUND TO BE LOCKED, WORKER DOES UNDO BUT KEEP LOCKS AND THEN
-# // MAKES FIRST STATEMENT-LEVEL RESTART. This restart is also the last in this test.
-#
-# Expected result:
-# * session-'worker' must *successfully* complete deletion of all rows with ID < 0 or ID >= 3. Rows with ID = 1 and 2 must remain.
-#
-# * Two unique values must be in the column TLOG_DONE.SNAP_NO for session-'worker' when it performed DELETE statement: first of them
-# was created by initial statement start and second reflect SINGLE restart (this column has values which are evaluated using
-# rdb$get_context('SYSTEM', 'SNAPSHOT_NUMBER') -- see trigger TEST_AIUD).
-# It is enough to count these values using COUNT(*) or enumarate them by DENSE_RANK() function.
-#
-# NOTE: concrete values of fields TRN, GLOBAL_CN and SNAP_NO in the TLOG_DONE can differ from one to another run!
-# This is because of concurrent nature of connections that work against database. We must not assume that these values will be constant.
-#
-# ################
-#
-# Checked on 4.0.0.2204 SS/CS
-# NOTE: added for-loop in order to check different target objects: TABLE ('test') and VIEW ('v_test'), see 'checked_mode'.
-#
-# tracker_id:
-# min_versions: ['4.0']
-# versions: 4.0
-# qmid:
+
+"""
+ID: transactions.read-consist-sttm-restart-on-merge-02
+TITLE: READ CONSISTENCY. Check creation of new statement-level snapshot and restarting changes caused by MERGE. Test-02.
+DESCRIPTION:
+    Initial article for reading:
+    https://asktom.oracle.com/pls/asktom/f?p=100:11:::::P11_QUESTION_ID:11504247549852
+    Note on terms which are used there: "BLOCKER", "LONG" and "FIRSTLAST" - their names are slightly changed here
+    to: LOCKER-1, WORKER and LOCKER-2 respectively.
+    See also: doc/README.read_consistency.md
+
+    **********************************************
+
+    This test verifies that statement-level snapshot and restart will be performed when "main" session ("worker")
+    performs a MERGE statement (which deletes matched rows) and is involved in update conflicts.
+    ("When update conflict is detected <...> then engine <...> creates new statement-level snapshot and restart execution...")
+
+    ::: NB :::
+    This test uses script %FBT_REPO%/files/read-consist-sttm-restart-DDL.sql which contains common DDL for all other such tests.
+    Particularly, it contains two TRIGGERS (TLOG_WANT and TLOG_DONE) which are used for logging of planned actions and actual
+    results against table TEST. These triggers use AUTONOMOUS transactions in order to have the ability to see results in any
+    outcome of the test.
+
+
+    ###############
+    Following scenario is executed here (see also: "doc/README.read_consistency.md"; hereafter it is marked as "DOC"):
+
+    * five rows are inserted into the table TEST, with IDs: 1...5
+    * session 'locker-1' ("BLOCKER" in Tom Kyte's article):
+        update test set id=id where id = 5;
+
+    * session 'worker' ("LONG" in TK article) has mission:
+        merge into test t using(select * from test where id < 0 or id >= 3 order by id) s on t.id = s.id when matched then delete;
+        // using TIL = read committed read consistency
+
+        // Execution will have PLAN ORDER.
+        // It will delete rows with ID = 3 and 4 but hang on row with ID = 5 because of locker-1;
+        // Update conflict appears here and, because of this, worker temporarily changes its TIL to RC no record_version (RC NRV).
+        // [DOC]: "a) transaction isolation mode temporarily switched to the READ COMMITTED *NO RECORD VERSION MODE*"
+        // This (new) TIL allows worker further to see all committed versions, regardless of its own snapshot.
+
+    * session 'locker-2' ("FIRSTLAST" in TK article):
+        (1) insert into test(id) values(-1); // i.e. LESS than min(id)=1 that existed at the start of session-worker statement
+        (2) commit;
+        (3) update test set id=id where id = -1;
+        // Session-worker must still hang because row with ID = 5 is occupied by locker-1.
+        // But worker must further see record with (new) id = -1 because its TIL was changed to RC NO RECORD_VERSION.
+
+    * session 'locker-1':
+        (1) commit;
+        (2) insert into test(id) values(-2); // i.e. LESS than min(id)=-1 that existed before this
+        (3) commit;
+        (4) update test set id=id where id = -2;
+        // This: '(1) commit' - will release record with ID = 5. Worker sees this record and puts a write-lock on it.
+        // [DOC]: "b) engine put write lock on conflicted record"
+        // Because of TIL = RC NRV, session-'worker' must see all committed records regardless of its own snapshot.
+        // Worker resumes search for any rows which meet condition: "id < 0 or id >= 3", taking into account the
+        // required order of its DML (i.e. 'ORDER BY ID').
+        // [DOC]: "c) engine continue to evaluate remaining records of update/delete cursor and put write locks on it too"
+        // Worker starts to search records which must be involved in its DML and *found* the first such row: it has ID = -1.
+        // NB. This row currently can NOT be deleted by worker because locker-2 has an uncommitted update of it.
+        // BECAUSE OF FACT THAT AT LEAST ONE ROW *WAS FOUND* - STATEMENT-LEVEL RESTART *NOT* YET OCCURS HERE.
+        // :::!! NB, AGAIN !! ::: restart does NOT occur here because at least one record was found, see:
+        // [DOC]: "d) when there is *no more* records to fetch, engine start to undo all actions performed since
+        //          top-level statement execution starts and preserve already taken write locks
+        //       e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*,
+        //          creates new statement-level snapshot and restart execution of top-level statement."
+
+
+    * session 'locker-2':
+        (1) commit;
+        (2) insert into test(id) values(-3); // i.e. LESS than min(id)=-1 that existed before this
+        (3) commit;
+        (4) update test set id=id where id = -3;
+
+        // This: '(1) commit' - will release record with ID = -1. Worker sees this record and puts a write-lock on it.
+        // [DOC]: "b) engine put write lock on conflicted record"
+        // Because of TIL = RC NRV, session-'worker' must see all committed records regardless of its own snapshot.
+        // Worker resumes search for any rows which meet condition: "id < 0 or id >= 3", taking into account the
+        // required order of its DML (i.e. 'ORDER BY ID').
+        // [DOC]: "c) engine continue to evaluate remaining records of update/delete cursor and put write locks on it too"
+        // Worker starts to search records which must be involved in its DML and *found* the first such row: it has ID = -2.
+        // NB. This row currently can NOT be deleted by worker because locker-1 has an uncommitted update of it.
+        // BECAUSE OF FACT THAT AT LEAST ONE ROW *WAS FOUND* - STATEMENT-LEVEL RESTART *NOT* YET OCCURS HERE.
+
+    * session 'locker-1':
+        commit;
+        // This: '(1) commit' - will release record with ID = -2. Worker sees this record and puts a write-lock on it.
+        // Because of worker TIL = RC NRV, it must see all committed records regardless of its own snapshot.
+        // Worker resumes search for any rows with ID < 0, taking into account the required order
+        // of its DML (i.e. 'ORDER BY ID').
+        // Worker starts to search records which must be involved in its DML and *found* the first such row with ID = -3.
+        // NB. This row currently can NOT be deleted by worker because locker-2 has an uncommitted update of it.
+        // BECAUSE OF FACT THAT AT LEAST ONE ROW *WAS FOUND* - STATEMENT-LEVEL RESTART *NOT* YET OCCURS HERE.
+
+    * session 'locker-2':
+        commit;
+        // This will release record with ID=-3. Worker sees this record and puts a write-lock on it.
+        // Because of worker TIL = RC NRV, it must see all committed records regardless of its own snapshot.
+        // Worker resumes search for any rows with ID < 0, taking into account the required order
+        // of its DML (i.e. 'ORDER BY ID').
+        // At this point there are no more records to be locked (by worker) that meet the cursor condition: worker did put
+        // write locks on all rows that meet its cursor conditions (ID < 0 or ID >= 3).
+        // BECAUSE OF FACT THAT NO MORE RECORDS FOUND TO BE LOCKED, WORKER DOES UNDO BUT KEEPS LOCKS AND THEN
+        // MAKES FIRST STATEMENT-LEVEL RESTART. This restart is also the last one in this test.
+
+    Expected result:
+    * session-'worker' must *successfully* complete deletion of all rows with ID < 0 or ID >= 3. Rows with ID = 1 and 2 must remain.
+
+    * Two unique values must be in the column TLOG_DONE.SNAP_NO for session-'worker' when it performed the DELETE statement: the first of them
+      was created by the initial statement start and the second reflects the SINGLE restart (this column has values which are evaluated using
+      rdb$get_context('SYSTEM', 'SNAPSHOT_NUMBER') -- see trigger TEST_AIUD).
+      It is enough to count these values using COUNT(*) or enumerate them with the DENSE_RANK() function.
+
+    NOTE: concrete values of fields TRN, GLOBAL_CN and SNAP_NO in the TLOG_DONE can differ from one run to another!
+    This is because of the concurrent nature of the connections that work against the database. We must not assume that these values will be constant.
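+
+    Below is a minimal sketch (NOT the actual implementation) of how the LOCKER sessions and the asynchronous
+    isql WORKER could be driven with the current QA framework. The helper name run_merge_scenario and the use of
+    act.vars['isql'] and act.db.dsn are assumptions for illustration only; they are not part of this test:
+
+        import subprocess
+        import time
+        from pathlib import Path
+        from firebird.qa import *
+
+        def run_merge_scenario(act: Action, worker_script: Path):
+            # Two 'locker' attachments; execute_immediate()/commit() mirror the old fdb-based calls.
+            # The target object is 'test' here; the real test repeats the scenario for view 'v_test' too.
+            with act.db.connect() as con_lock_1, act.db.connect() as con_lock_2:
+                con_lock_1.execute_immediate('update test set id=id where id = 5')
+                # WORKER runs asynchronously in isql so that its MERGE can hang on the locked row.
+                p_worker = subprocess.Popen([act.vars['isql'], act.db.dsn, '-q', '-i', str(worker_script)],
+                                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+                try:
+                    time.sleep(1)
+                    # LOCKER-2: add ID = -1, commit and re-lock it
+                    con_lock_2.execute_immediate('insert into test(id) values(-1)')
+                    con_lock_2.commit()
+                    con_lock_2.execute_immediate('update test set id=id where id = -1')
+                    # LOCKER-1: release ID = 5, add ID = -2, commit and re-lock it
+                    con_lock_1.commit()
+                    con_lock_1.execute_immediate('insert into test(id) values(-2)')
+                    con_lock_1.commit()
+                    con_lock_1.execute_immediate('update test set id=id where id = -2')
+                    # LOCKER-2: release ID = -1, add ID = -3, commit and re-lock it
+                    con_lock_2.commit()
+                    con_lock_2.execute_immediate('insert into test(id) values(-3)')
+                    con_lock_2.commit()
+                    con_lock_2.execute_immediate('update test set id=id where id = -3')
+                    con_lock_1.commit()   # releases ID = -2
+                    con_lock_2.commit()   # releases ID = -3; no more rows to lock -> statement-level restart
+                finally:
+                    p_worker.wait()
+
+    The WORKER's isql script itself would stay essentially as in the commented original code below.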
+ + ################ + + Checked on 4.0.0.2204 SS/CS + NOTE: added for-loop in order to check different target objects: TABLE ('test') and VIEW ('v_test'), see 'checked_mode'. +FBTEST: functional.transactions.read_consist_sttm_restart_on_merge_02 +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() -substitutions_1 = [('=', ''), ('[ \t]+', ' ')] +act = python_act('db', substitutions=[('=', ''), ('[ \t]+', ' ')]) -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -# test_script_1 -#--- -# -# import os -# import sys -# import subprocess -# from subprocess import Popen -# from fdb import services -# import time -# -# os.environ["ISC_USER"] = user_name -# os.environ["ISC_PASSWORD"] = user_password -# -# db_conn.close() -# -# #-------------------------------------------- -# -# def flush_and_close( file_handle ): -# # https://docs.python.org/2/library/os.html#os.fsync -# # If you're starting with a Python file object f, -# # first do f.flush(), and -# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. -# global os -# -# file_handle.flush() -# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: -# # otherwise: "OSError: [Errno 9] Bad file descriptor"! -# os.fsync(file_handle.fileno()) -# file_handle.close() -# -# #-------------------------------------------- -# -# def cleanup( f_names_list ): -# global os -# for f in f_names_list: -# if type(f) == file: -# del_name = f.name -# elif type(f) == str: -# del_name = f -# else: -# print('Unrecognized type of element:', f, ' - can not be treated as file.') -# del_name = None -# -# if del_name and os.path.isfile( del_name ): -# os.remove( del_name ) -# -# #-------------------------------------------- -# -# sql_init_ddl = os.path.join(context['files_location'],'read-consist-sttm-restart-DDL.sql') -# -# for checked_mode in('table', 'view'): -# -# target_obj = 'test' if checked_mode == 'table' else 'v_test' -# -# f_init_log=open( os.path.join(context['temp_directory'],'read-consist-sttm-restart-DDL.log'), 'w') -# f_init_err=open( ''.join( ( os.path.splitext(f_init_log.name)[0], '.err') ), 'w') -# -# subprocess.call( [context['isql_path'], dsn, '-q', '-i', sql_init_ddl], stdout=f_init_log, stderr=f_init_err ) -# flush_and_close(f_init_log) -# flush_and_close(f_init_err) -# -# # add rows with ID = 1,2,3,4,5: -# sql_addi=''' -# set term ^; -# execute block as -# begin -# rdb$set_context('USER_SESSION', 'WHO', 'INIT_DATA'); -# end -# ^ -# set term ;^ -# insert into %(target_obj)s(id, x) -# select row_number()over(),row_number()over() -# from rdb$types rows 5; -# commit; -# ''' % locals() -# runProgram('isql', [ dsn, '-q' ], sql_addi) -# -# con_lock_1 = fdb.connect( dsn = dsn ) -# con_lock_2 = fdb.connect( dsn = dsn ) -# con_lock_1.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #1'); end" ) -# con_lock_2.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #2'); end" ) -# -# ######################### -# ### L O C K E R - 1 ### -# ######################### -# -# con_lock_1.execute_immediate( 'update %(target_obj)s set id=id where id = 5' % locals() ) -# -# sql_text=''' -# connect '%(dsn)s'; -# set list on; -# set autoddl off; -# set term ^; -# execute block returns (whoami varchar(30)) as -# begin -# whoami = 'WORKER'; -- , ATT#' || current_connection; -# 
rdb$set_context('USER_SESSION','WHO', whoami); -# -- suspend; -# end -# ^ -# set term ;^ -# commit; -# --set echo on; -# SET KEEP_TRAN_PARAMS ON; -# set transaction read committed read consistency; -# --select current_connection, current_transaction from rdb$database; -# set list off; -# set wng off; -# --set plan on; -# set count on; -# -# merge into %(target_obj)s t -# using(select * from %(target_obj)s where id < 0 or id >= 3 order by id) s on t.id = s.id -# when matched then -# DELETE -# ; -# -# -- check results: -# -- ############### -# -# select id from %(target_obj)s order by id; -- this will produce output only after all lockers do their commit/rollback -# -# select v.old_id, v.op, v.snap_no_rank -# from v_worker_log v -# where v.op = 'del'; -# -# set width who 10; -# -- DO NOT check this! Values can differ here from one run to another! -# -- select id, trn, who, old_id, new_id, op, rec_vers, global_cn, snap_no from tlog_done order by id; -# -# rollback; -# -# ''' % dict(globals(), **locals()) -# -# f_worker_sql=open( os.path.join(context['temp_directory'],'tmp_sttm_restart_on_delete_02.sql'), 'w') -# f_worker_sql.write(sql_text) -# flush_and_close(f_worker_sql) -# -# -# f_worker_log=open( ''.join( ( os.path.splitext(f_worker_sql.name)[0], '.log') ), 'w') -# f_worker_err=open( ''.join( ( os.path.splitext(f_worker_log.name)[0], '.err') ), 'w') -# -# ############################################################################ -# ### L A U N C H W O R K E R U S I N G I S Q L, A S Y N C. ### -# ############################################################################ -# -# p_worker = Popen( [ context['isql_path'], '-pag', '999999', '-q', '-i', f_worker_sql.name ],stdout=f_worker_log, stderr=f_worker_err) -# time.sleep(1) -# -# -# ######################### -# ### L O C K E R - 2 ### -# ######################### -# # Insert ID value that is less than previous min(id). -# # Session-worker is executing its statement using PLAN ORDER, -# # and it should see this new value and restart its statement: -# con_lock_2.execute_immediate( 'insert into %(target_obj)s(id) values(-1)' % locals() ) -# con_lock_2.commit() -# con_lock_2.execute_immediate( 'update %(target_obj)s set id=id where id = -1' % locals() ) -# -# ######################### -# ### L O C K E R - 1 ### -# ######################### -# con_lock_1.commit() -# con_lock_1.execute_immediate( 'insert into %(target_obj)s(id) values(-2)' % locals() ) -# con_lock_1.commit() -# con_lock_1.execute_immediate( 'update %(target_obj)s set id=id where id = -2' % locals() ) -# -# -# ######################### -# ### L O C K E R - 2 ### -# ######################### -# # Insert ID value that is less than previous min(id). 
-# # Session-worker is executing its statement using PLAN ORDER, -# # and it should see this new value and restart its statement: -# con_lock_2.commit() -# con_lock_2.execute_immediate( 'insert into %(target_obj)s(id) values(-3)' % locals() ) -# con_lock_2.commit() -# con_lock_2.execute_immediate( 'update %(target_obj)s set id=id where id = -3' % locals() ) -# -# con_lock_1.commit() -# con_lock_2.commit() -# -# # Here we wait for ISQL complete its mission: -# p_worker.wait() -# -# flush_and_close(f_worker_log) -# flush_and_close(f_worker_err) -# -# # Close lockers: -# ################ -# for c in (con_lock_1, con_lock_2): -# c.close() -# -# -# # CHECK RESULTS -# ############### -# with open(f_worker_log.name,'r') as f: -# for line in f: -# if line.strip(): -# print('checked_mode: %(checked_mode)s, STDLOG: %(line)s' % locals()) -# -# for f in (f_init_err, f_worker_err): -# with open(f.name,'r') as g: -# for line in g: -# if line.strip(): -# print( 'checked_mode: ', checked_mode, ' UNEXPECTED STDERR IN ' + g.name + ':', line) -# -# -# #=4.0') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") +# Original python code for this test: +# ----------------------------------- +# +# import os +# import sys +# import subprocess +# from subprocess import Popen +# from fdb import services +# import time +# +# os.environ["ISC_USER"] = user_name +# os.environ["ISC_PASSWORD"] = user_password +# +# db_conn.close() +# +# #-------------------------------------------- +# +# def flush_and_close( file_handle ): +# # https://docs.python.org/2/library/os.html#os.fsync +# # If you're starting with a Python file object f, +# # first do f.flush(), and +# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. +# global os +# +# file_handle.flush() +# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: +# # otherwise: "OSError: [Errno 9] Bad file descriptor"! 
+# os.fsync(file_handle.fileno()) +# file_handle.close() +# +# #-------------------------------------------- +# +# def cleanup( f_names_list ): +# global os +# for f in f_names_list: +# if type(f) == file: +# del_name = f.name +# elif type(f) == str: +# del_name = f +# else: +# print('Unrecognized type of element:', f, ' - can not be treated as file.') +# del_name = None +# +# if del_name and os.path.isfile( del_name ): +# os.remove( del_name ) +# +# #-------------------------------------------- +# +# sql_init_ddl = os.path.join(context['files_location'],'read-consist-sttm-restart-DDL.sql') +# +# for checked_mode in('table', 'view'): +# +# target_obj = 'test' if checked_mode == 'table' else 'v_test' +# +# f_init_log=open( os.path.join(context['temp_directory'],'read-consist-sttm-restart-DDL.log'), 'w') +# f_init_err=open( ''.join( ( os.path.splitext(f_init_log.name)[0], '.err') ), 'w') +# +# subprocess.call( [context['isql_path'], dsn, '-q', '-i', sql_init_ddl], stdout=f_init_log, stderr=f_init_err ) +# flush_and_close(f_init_log) +# flush_and_close(f_init_err) +# +# # add rows with ID = 1,2,3,4,5: +# sql_addi=''' +# set term ^; +# execute block as +# begin +# rdb$set_context('USER_SESSION', 'WHO', 'INIT_DATA'); +# end +# ^ +# set term ;^ +# insert into %(target_obj)s(id, x) +# select row_number()over(),row_number()over() +# from rdb$types rows 5; +# commit; +# ''' % locals() +# runProgram('isql', [ dsn, '-q' ], sql_addi) +# +# con_lock_1 = fdb.connect( dsn = dsn ) +# con_lock_2 = fdb.connect( dsn = dsn ) +# con_lock_1.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #1'); end" ) +# con_lock_2.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #2'); end" ) +# +# ######################### +# ### L O C K E R - 1 ### +# ######################### +# +# con_lock_1.execute_immediate( 'update %(target_obj)s set id=id where id = 5' % locals() ) +# +# sql_text=''' +# connect '%(dsn)s'; +# set list on; +# set autoddl off; +# set term ^; +# execute block returns (whoami varchar(30)) as +# begin +# whoami = 'WORKER'; -- , ATT#' || current_connection; +# rdb$set_context('USER_SESSION','WHO', whoami); +# -- suspend; +# end +# ^ +# set term ;^ +# commit; +# --set echo on; +# SET KEEP_TRAN_PARAMS ON; +# set transaction read committed read consistency; +# --select current_connection, current_transaction from rdb$database; +# set list off; +# set wng off; +# --set plan on; +# set count on; +# +# merge into %(target_obj)s t +# using(select * from %(target_obj)s where id < 0 or id >= 3 order by id) s on t.id = s.id +# when matched then +# DELETE +# ; +# +# -- check results: +# -- ############### +# +# select id from %(target_obj)s order by id; -- this will produce output only after all lockers do their commit/rollback +# +# select v.old_id, v.op, v.snap_no_rank +# from v_worker_log v +# where v.op = 'del'; +# +# set width who 10; +# -- DO NOT check this! Values can differ here from one run to another! 
+# -- select id, trn, who, old_id, new_id, op, rec_vers, global_cn, snap_no from tlog_done order by id; +# +# rollback; +# +# ''' % dict(globals(), **locals()) +# +# f_worker_sql=open( os.path.join(context['temp_directory'],'tmp_sttm_restart_on_delete_02.sql'), 'w') +# f_worker_sql.write(sql_text) +# flush_and_close(f_worker_sql) +# +# +# f_worker_log=open( ''.join( ( os.path.splitext(f_worker_sql.name)[0], '.log') ), 'w') +# f_worker_err=open( ''.join( ( os.path.splitext(f_worker_log.name)[0], '.err') ), 'w') +# +# ############################################################################ +# ### L A U N C H W O R K E R U S I N G I S Q L, A S Y N C. ### +# ############################################################################ +# +# p_worker = Popen( [ context['isql_path'], '-pag', '999999', '-q', '-i', f_worker_sql.name ],stdout=f_worker_log, stderr=f_worker_err) +# time.sleep(1) +# +# +# ######################### +# ### L O C K E R - 2 ### +# ######################### +# # Insert ID value that is less than previous min(id). +# # Session-worker is executing its statement using PLAN ORDER, +# # and it should see this new value and restart its statement: +# con_lock_2.execute_immediate( 'insert into %(target_obj)s(id) values(-1)' % locals() ) +# con_lock_2.commit() +# con_lock_2.execute_immediate( 'update %(target_obj)s set id=id where id = -1' % locals() ) +# +# ######################### +# ### L O C K E R - 1 ### +# ######################### +# con_lock_1.commit() +# con_lock_1.execute_immediate( 'insert into %(target_obj)s(id) values(-2)' % locals() ) +# con_lock_1.commit() +# con_lock_1.execute_immediate( 'update %(target_obj)s set id=id where id = -2' % locals() ) +# +# +# ######################### +# ### L O C K E R - 2 ### +# ######################### +# # Insert ID value that is less than previous min(id). +# # Session-worker is executing its statement using PLAN ORDER, +# # and it should see this new value and restart its statement: +# con_lock_2.commit() +# con_lock_2.execute_immediate( 'insert into %(target_obj)s(id) values(-3)' % locals() ) +# con_lock_2.commit() +# con_lock_2.execute_immediate( 'update %(target_obj)s set id=id where id = -3' % locals() ) +# +# con_lock_1.commit() +# con_lock_2.commit() +# +# # Here we wait for ISQL complete its mission: +# p_worker.wait() +# +# flush_and_close(f_worker_log) +# flush_and_close(f_worker_err) +# +# # Close lockers: +# ################ +# for c in (con_lock_1, con_lock_2): +# c.close() +# +# +# # CHECK RESULTS +# ############### +# with open(f_worker_log.name,'r') as f: +# for line in f: +# if line.strip(): +# print('checked_mode: %(checked_mode)s, STDLOG: %(line)s' % locals()) +# +# for f in (f_init_err, f_worker_err): +# with open(f.name,'r') as g: +# for line in g: +# if line.strip(): +# print( 'checked_mode: ', checked_mode, ' UNEXPECTED STDERR IN ' + g.name + ':', line) +# +# +# # then engine <...> creates new statement-level snapshot and restart execution...") -# -# ::: NB ::: -# This test uses script %FBT_REPO% -# iles -# ead-consist-sttm-restart-DDL.sql which contains common DDL for all other such tests. -# Particularly, it contains two TRIGGERS (TLOG_WANT and TLOG_DONE) which are used for logging of planned actions and actual -# results against table TEST. These triggers use AUTONOMOUS transactions in order to have ability to see results in any -# outcome of test. 
-# -# ############### -# Following scenario if executed here (see also: "doc\\README.read_consistency.md"; hereafer is marked as "DOC"): -# -# * add new table that is child to test: TDETL (with FK that references TEST and 'on delete cascade' clause) -# * three rows are inserted into the table TEST, with IDs: 2, 3 and 5. -# -# * session 'locker-1' ("BLOCKER" in Tom Kyte's article ): -# update set id=id where id = 5; -# -# * session 'worker' ("LONG" in TK article) has mission: -# merge into test t using(select * from test where id >= 3 order by id) s on t.id = s.id when matched then delete; -# // using TIL = read committed read consistency -# -# // Execution will have PLAN ORDER . -# // It will delete (first avaliable for cursor) row with ID = 3 but can not change row with ID = 5 because of locker-1. -# // Update conflict appears here and, because of this, worker temporary changes its TIL to RC no record_version (RC NRV). -# // [DOC]: "a) transaction isolation mode temporarily switched to the READ COMMITTED *NO RECORD VERSION MODE*" -# // This (new) TIL allows worker further to see all committed versions, regardless of its own snapshot. -# -# * session 'locker-2' ("FIRSTLAST" in TK article): replaces ID = 2 with new value = 4, then commits -# and locks this record again: -# (1) update test set id = 4 where id = 2; -# (2) commit; -# (3) update test set id=id where id = 4; -# // session-'worker' remains waiting at this point because row with ID = 5 is still occupied by by locker-1 -# // but worker must further see record with (new) id = 4 because its TIL was changed to RC NO RECORD_VERSION. -# -# * session 'locker-1': -# (1) commit; -# (2) insert into test(id) values(6); -# (3) insert into detl(id, pid) values(6001, 6); -# (4) commit; -# (5) update test set id=id where id=6; -# // first of these statements: '(1) commit' - will release record with ID = 5. -# // Worker sees this record (because of TIL = RC NRV) and put write-lock on it. -# // [DOC]: "b) engine put write lock on conflicted record" -# // Also, because worker TIL = RC NRV, it will see two new rows with ID = 4 and 6, and they meet worker cursor condition ("id>=3"). -# // Worker resumes search for any rows with ID >=3, and it does this with taking in account "ORDER BY ID ASC". -# // [DOC]: "c) engine continue to evaluate remaining records of update\\delete cursor and put write locks on it too" -# // Worker starts to search records which must be involved in its DML and *found* sucn rows (with ID = 4 and 6). -# // NB. These rows currently can NOT be deleted by worker because of locker-2 and locker-1 have uncommitted updates. -# // BECAUSE OF FACT THAT AT LEAST ONE ROW *WAS FOUND* - STATEMENT-LEVEL RESTART *NOT* YET OCCURS HERE. -# // :::!! NB, AGAIN !! ::: restart NOT occurs here because at least one records found, see: -# // [DOC]: "d) when there is *no more* records to fetch, engine start to undo all actions performed since -# // top-level statement execution starts and preserve already taken write locks -# // e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*, -# // creates new statement-level snapshot and restart execution of top-level statement." -# -# * session 'locker-2': -# commit; -# // This will release record with ID = 4 (but row with ID = 6 is still inaccessible because of locker-1). -# // Worker sees record (because of TIL = RC NRV) with ID = 4 and put write-lock on it. 
-# // Then worker resumes search for any (new) rows with ID >= 3, and it does this with taking in account required order
-# // of its DML (i.e. ORDER BY ID ASC).
-# // [DOC]: "c) engine continue to evaluate remaining records of update\\delete cursor and put write locks on it too"
-# // But there are no such rows in the tableL earlier worker already encountered all possible rows (with ID=4 and 6)
-# // and *did* put write-locks on them. So at this point NO new rows can be found for putting new lock on it.
-# // BECAUSE OF FACT THAT NO RECORDS FOUND, WORKER DOES UNDO BUT KEEP LOCKS AND THEN MAKES FIRST STATEMENT-LEVEL RESTART.
-# // [DOC]: "d) when there is no more records to fetch, engine start to undo all actions ... and preserve already taken write locks
-# // e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*,
-# // creates new statement-level snapshot and restart execution of top-level statement."
-#
-# * session 'locker-1':
-# commit;
-# // This will release record with ID = 6 - and this is the last row which meet cursor condition of session-worker.
-# // Worker sees record (because of TIL = RC NRV) with ID = 6 and put write-lock on it.
-# // Then worker resumes search for any (new) rows with ID >= 3, and it does this with taking in account required order
-# // of its DML (i.e. ORDER BY ID ASC). NO new rows (with ID >= 3) can be found for putting new lock on it.
-# // BECAUSE OF FACT THAT NO RECORDS FOUND, WORKER DOES UNDO BUT KEEP LOCKS AND THEN MAKES SECOND STATEMENT-LEVEL RESTART.
-#
-# Expected result:
-# * session-'worker' must *successfully* complete deletion of all rows which it could see at the starting point (ID=3 and 5)
-# PLUS rows with ID = 4 (ex. ID=2) and 6 (this ID is new, it did not exist at the statement start).
-# As result, all rows must be deleted.
-#
-# * three unique values must be in the column TLOG_DONE.SNAP_NO for session-'worker' when it performed DELETE statement: first of them
-# was created by initial statement start and all others reflect two restarts (this column has values which are evaluated using
-# rdb$get_context('SYSTEM', 'SNAPSHOT_NUMBER') -- see trigger TEST_AIUD).
-# It is enough to count these values using COUNT(*) or enumarate them by DENSE_RANK() function.
-#
-# NOTE: concrete values of fields TRN, GLOBAL_CN and SNAP_NO in the TLOG_DONE can differ from one to another run!
-# This is because of concurrent nature of connections that work against database. We must not assume that these values will be constant.
-# ################
-#
-# Checked on 4.0.0.2204 SS/CS
-# NOTE: added for-loop in order to check different target objects: TABLE ('test') and VIEW ('v_test'), see 'checked_mode'.
-#
-# tracker_id:
-# min_versions: ['4.0']
-# versions: 4.0
-# qmid:
+
+"""
+ID: transactions.read-consist-sttm-restart-on-merge-03
+TITLE: READ CONSISTENCY. Check creation of new statement-level snapshot and restarting changes caused by MERGE. Test-03.
+DESCRIPTION:
+    Initial article for reading:
+    https://asktom.oracle.com/pls/asktom/f?p=100:11:::::P11_QUESTION_ID:11504247549852
+    Note on terms which are used there: "BLOCKER", "LONG" and "FIRSTLAST" - their names are slightly changed here
+    to: LOCKER-1, WORKER and LOCKER-2 respectively.
+    See also: doc/README.read_consistency.md
+
+    **********************************************
+
+    This test verifies that statement-level snapshot and restart will be performed when "main" session ("worker")
+    performs a MERGE statement (which deletes matched rows) and is involved in update conflicts.
+    ("When update conflict is detected <...> then engine <...> creates new statement-level snapshot and restart execution...")
+
+    ::: NB :::
+    This test uses script %FBT_REPO%/files/read-consist-sttm-restart-DDL.sql which contains common DDL for all other such tests.
+    Particularly, it contains two TRIGGERS (TLOG_WANT and TLOG_DONE) which are used for logging of planned actions and actual
+    results against table TEST. These triggers use AUTONOMOUS transactions in order to have the ability to see results in any
+    outcome of the test.
+
+    ###############
+    Following scenario is executed here (see also: "doc/README.read_consistency.md"; hereafter it is marked as "DOC"):
+
+    * add new table that is child to test: TDETL (with FK that references TEST and 'on delete cascade' clause)
+    * three rows are inserted into the table TEST, with IDs: 2, 3 and 5.
+
+    * session 'locker-1' ("BLOCKER" in Tom Kyte's article):
+        update test set id=id where id = 5;
+
+    * session 'worker' ("LONG" in TK article) has mission:
+        merge into test t using(select * from test where id >= 3 order by id) s on t.id = s.id when matched then delete;
+        // using TIL = read committed read consistency
+
+        // Execution will have PLAN ORDER.
+        // It will delete the (first available for cursor) row with ID = 3 but cannot change row with ID = 5 because of locker-1.
+        // Update conflict appears here and, because of this, worker temporarily changes its TIL to RC no record_version (RC NRV).
+        // [DOC]: "a) transaction isolation mode temporarily switched to the READ COMMITTED *NO RECORD VERSION MODE*"
+        // This (new) TIL allows worker further to see all committed versions, regardless of its own snapshot.
+
+    * session 'locker-2' ("FIRSTLAST" in TK article): replaces ID = 2 with new value = 4, then commits
+      and locks this record again:
+        (1) update test set id = 4 where id = 2;
+        (2) commit;
+        (3) update test set id=id where id = 4;
+        // session-'worker' remains waiting at this point because row with ID = 5 is still occupied by locker-1,
+        // but worker must further see record with (new) id = 4 because its TIL was changed to RC NO RECORD_VERSION.
+
+    * session 'locker-1':
+        (1) commit;
+        (2) insert into test(id) values(6);
+        (3) insert into detl(id, pid) values(6001, 6);
+        (4) commit;
+        (5) update test set id=id where id=6;
+        // first of these statements: '(1) commit' - will release record with ID = 5.
+        // Worker sees this record (because of TIL = RC NRV) and puts a write-lock on it.
+        // [DOC]: "b) engine put write lock on conflicted record"
+        // Also, because worker TIL = RC NRV, it will see two new rows with ID = 4 and 6, and they meet worker cursor condition ("id>=3").
+        // Worker resumes search for any rows with ID >= 3, taking into account "ORDER BY ID ASC".
+        // [DOC]: "c) engine continue to evaluate remaining records of update/delete cursor and put write locks on it too"
+        // Worker starts to search records which must be involved in its DML and *found* such rows (with ID = 4 and 6).
+        // NB. These rows currently can NOT be deleted by worker because locker-2 and locker-1 have uncommitted updates.
+        // BECAUSE OF FACT THAT AT LEAST ONE ROW *WAS FOUND* - STATEMENT-LEVEL RESTART *NOT* YET OCCURS HERE.
+        // :::!! NB, AGAIN !! ::: restart does NOT occur here because at least one record was found, see:
+        // [DOC]: "d) when there is *no more* records to fetch, engine start to undo all actions performed since
+        //          top-level statement execution starts and preserve already taken write locks
+        //       e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*,
+        //          creates new statement-level snapshot and restart execution of top-level statement."
+
+    * session 'locker-2':
+        commit;
+        // This will release record with ID = 4 (but row with ID = 6 is still inaccessible because of locker-1).
+        // Worker sees record (because of TIL = RC NRV) with ID = 4 and puts a write-lock on it.
+        // Then worker resumes search for any (new) rows with ID >= 3, taking into account the required order
+        // of its DML (i.e. ORDER BY ID ASC).
+        // [DOC]: "c) engine continue to evaluate remaining records of update/delete cursor and put write locks on it too"
+        // But there are no such rows in the table: earlier worker already encountered all possible rows (with ID=4 and 6)
+        // and *did* put write-locks on them. So at this point NO new rows can be found for putting a new lock on.
+        // BECAUSE OF FACT THAT NO RECORDS FOUND, WORKER DOES UNDO BUT KEEPS LOCKS AND THEN MAKES FIRST STATEMENT-LEVEL RESTART.
+        // [DOC]: "d) when there is no more records to fetch, engine start to undo all actions ... and preserve already taken write locks
+        //       e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*,
+        //          creates new statement-level snapshot and restart execution of top-level statement."
+
+    * session 'locker-1':
+        commit;
+        // This will release record with ID = 6 - and this is the last row which meets the cursor condition of session-worker.
+        // Worker sees record (because of TIL = RC NRV) with ID = 6 and puts a write-lock on it.
+        // Then worker resumes search for any (new) rows with ID >= 3, taking into account the required order
+        // of its DML (i.e. ORDER BY ID ASC). NO new rows (with ID >= 3) can be found for putting a new lock on.
+        // BECAUSE OF FACT THAT NO RECORDS FOUND, WORKER DOES UNDO BUT KEEPS LOCKS AND THEN MAKES SECOND STATEMENT-LEVEL RESTART.
+
+    Expected result:
+    * session-'worker' must *successfully* complete deletion of all rows which it could see at the starting point (ID=3 and 5)
+      PLUS rows with ID = 4 (ex. ID=2) and 6 (this ID is new, it did not exist at the statement start).
+      As a result, all rows must be deleted.
+
+    * three unique values must be in the column TLOG_DONE.SNAP_NO for session-'worker' when it performed the DELETE statement: the first of them
+      was created by the initial statement start and the others reflect the two restarts (this column has values which are evaluated using
+      rdb$get_context('SYSTEM', 'SNAPSHOT_NUMBER') -- see trigger TEST_AIUD).
+      It is enough to count these values using COUNT(*) or enumerate them with the DENSE_RANK() function.
+
+    NOTE: concrete values of fields TRN, GLOBAL_CN and SNAP_NO in the TLOG_DONE can differ from one run to another!
+    This is because of the concurrent nature of the connections that work against the database. We must not assume that these values will be constant.
+    ################
+
+    Checked on 4.0.0.2204 SS/CS
+    NOTE: added for-loop in order to check different target objects: TABLE ('test') and VIEW ('v_test'), see 'checked_mode'.
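+
+    A possible check of the restart count (a sketch only; the helper name and the counting query are assumptions
+    based on the TLOG_DONE description above, they are not part of this test):
+
+        from firebird.qa import *
+
+        def count_worker_snapshots(act: Action) -> int:
+            # Each restart produces a new SNAPSHOT_NUMBER value logged by trigger TEST_AIUD, so the initial
+            # run plus two restarts must leave exactly three distinct SNAP_NO values for the WORKER session.
+            with act.db.connect() as con:
+                cur = con.cursor()
+                cur.execute("select count(distinct snap_no) from tlog_done where who = 'WORKER'")
+                return cur.fetchone()[0]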
+FBTEST: functional.transactions.read_consist_sttm_restart_on_merge_03 +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() -substitutions_1 = [('=', ''), ('[ \t]+', ' ')] +act = python_act('db', substitutions=[('=', ''), ('[ \t]+', ' ')]) -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -# test_script_1 -#--- -# -# import os -# import sys -# import subprocess -# from subprocess import Popen -# import re -# import difflib -# from fdb import services -# import time -# -# os.environ["ISC_USER"] = user_name -# os.environ["ISC_PASSWORD"] = user_password -# -# db_conn.close() -# -# #-------------------------------------------- -# -# def flush_and_close( file_handle ): -# # https://docs.python.org/2/library/os.html#os.fsync -# # If you're starting with a Python file object f, -# # first do f.flush(), and -# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. -# global os -# -# file_handle.flush() -# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: -# # otherwise: "OSError: [Errno 9] Bad file descriptor"! -# os.fsync(file_handle.fileno()) -# file_handle.close() -# -# #-------------------------------------------- -# -# def cleanup( f_names_list ): -# global os -# for f in f_names_list: -# if type(f) == file: -# del_name = f.name -# elif type(f) == str: -# del_name = f -# else: -# print('Unrecognized type of element:', f, ' - can not be treated as file.') -# del_name = None -# -# if del_name and os.path.isfile( del_name ): -# os.remove( del_name ) -# -# #-------------------------------------------- -# -# sql_init_ddl = os.path.join(context['files_location'],'read-consist-sttm-restart-DDL.sql') -# -# for checked_mode in('table', 'view'): -# -# target_obj = 'test' if checked_mode == 'table' else 'v_test' -# -# # drop dependencies: -# runProgram('isql', [ dsn, '-q' ], 'recreate table detl(id int);') -# -# f_init_log=open( os.path.join(context['temp_directory'],'read-consist-sttm-restart-DDL.log'), 'w') -# f_init_err=open( ''.join( ( os.path.splitext(f_init_log.name)[0], '.err') ), 'w') -# subprocess.call( [context['isql_path'], dsn, '-q', '-i', sql_init_ddl], stdout=f_init_log, stderr=f_init_err ) -# flush_and_close(f_init_log) -# flush_and_close(f_init_err) -# -# sql_addi=''' -# recreate table detl(id int, PID int references test on delete cascade on update cascade); -# commit; -# -# delete from test; -# insert into test(id, x) values(2,2); -# insert into test(id, x) values(3,3); -# insert into test(id, x) values(5,5); -# insert into detl(id, pid) values(2000, 2); -# insert into detl(id, pid) values(2001, 2); -# insert into detl(id, pid) values(2002, 2); -# insert into detl(id, pid) values(3001, 3); -# insert into detl(id, pid) values(5001, 5); -# insert into detl(id, pid) values(5001, 5); -# commit; -# ''' -# runProgram('isql', [ dsn, '-q' ], sql_addi) -# -# locker_tpb = fdb.TPB() -# locker_tpb.lock_timeout = 3; # LOCKER_LOCK_TIMEOUT -# locker_tpb.lock_resolution = fdb.isc_tpb_wait -# -# con_lock_1 = fdb.connect( dsn = dsn, isolation_level=locker_tpb ) -# con_lock_2 = fdb.connect( dsn = dsn, isolation_level=locker_tpb ) -# -# -# ######################### -# ### L O C K E R - 1 ### -# ######################### -# -# con_lock_1.execute_immediate( 'update %(target_obj)s set id=id where id=5' % locals() ) -# -# sql_text=''' -# connect '%(dsn)s'; -# set list on; -# set autoddl off; -# set 
term ^; -# execute block returns (whoami varchar(30)) as -# begin -# whoami = 'WORKER'; -- , ATT#' || current_connection; -# rdb$set_context('USER_SESSION','WHO', whoami); -# -- suspend; -# end -# ^ -# set term ;^ -# commit; -# SET KEEP_TRAN_PARAMS ON; -# set transaction read committed read consistency; -# --select current_connection, current_transaction from rdb$database; -# set list off; -# set wng off; -# --set plan on; -# set count on; -# -# merge into %(target_obj)s t -- THIS MUST BE LOCKED -# using(select * from %(target_obj)s where id >= 3 order by id) s on t.id = s.id -# when matched then -# DELETE -# ; -# -# -- check results: -# -- ############### -# -# select id from %(target_obj)s order by id; -- this will produce output only after all lockers do their commit/rollback -# -# select v.old_id, v.op, v.snap_no_rank -# from v_worker_log v -# where v.op = 'del'; -# -# rollback; -# -# ''' % dict(globals(), **locals()) -# -# f_worker_sql=open( os.path.join(context['temp_directory'],'tmp_sttm_restart_on_merge_03.sql'), 'w') -# f_worker_sql.write(sql_text) -# flush_and_close(f_worker_sql) -# -# -# f_worker_log=open( ''.join( ( os.path.splitext(f_worker_sql.name)[0], '.log') ), 'w') -# f_worker_err=open( ''.join( ( os.path.splitext(f_worker_log.name)[0], '.err') ), 'w') -# -# ############################################################################ -# ### L A U N C H W O R K E R U S I N G I S Q L, A S Y N C. ### -# ############################################################################ -# -# p_worker = Popen( [ context['isql_path'], '-q', '-i', f_worker_sql.name ],stdout=f_worker_log, stderr=f_worker_err) -# time.sleep(1) -# -# -# ######################### -# ### L O C K E R - 2 ### -# ######################### -# con_lock_2.execute_immediate( 'update %(target_obj)s set id=4 where id=2;' % locals() ) -# con_lock_2.commit() -# con_lock_2.execute_immediate( 'update %(target_obj)s set id=id where id=4;' % locals() ) -# -# -# con_lock_1.commit() # release record with ID=5 (allow it to be deleted by session-worker) -# -# # Add record which did not exists when session-worker statement started. -# # Add also child record for it, then commit + re-lock just added record: -# con_lock_1.execute_immediate('insert into %(target_obj)s(id,x) values(6,6)' % locals()) -# con_lock_1.execute_immediate('insert into detl(id, pid) values(6001, 6)') -# con_lock_1.commit() -# con_lock_1.execute_immediate('update %(target_obj)s set id=id where id=6' % locals()) -# -# con_lock_2.commit() # release record with ID=4. At this point session-worker will be allowed to delete rows with ID=4 and 5. -# -# con_lock_1.commit() # release record with ID=6. It is the last record which also must be deleted by session-worker. 
-# -# # Here we wait for ISQL complete its mission: -# p_worker.wait() -# -# flush_and_close(f_worker_log) -# flush_and_close(f_worker_err) -# -# # Close lockers: -# ################ -# for c in (con_lock_1, con_lock_2): -# c.close() -# -# -# # CHECK RESULTS -# ############### -# with open(f_worker_log.name,'r') as f: -# for line in f: -# if line.strip(): -# print('checked_mode: %(checked_mode)s, STDLOG: %(line)s' % locals()) -# -# for f in (f_init_err, f_worker_err): -# with open(f.name,'r') as g: -# for line in g: -# if line.strip(): -# print( 'checked_mode: ', checked_mode, ' UNEXPECTED STDERR IN ' + g.name + ':', line) -# -# #=4.0') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") +# Original python code for this test: +# ----------------------------------- +# +# import os +# import sys +# import subprocess +# from subprocess import Popen +# import re +# import difflib +# from fdb import services +# import time +# +# os.environ["ISC_USER"] = user_name +# os.environ["ISC_PASSWORD"] = user_password +# +# db_conn.close() +# +# #-------------------------------------------- +# +# def flush_and_close( file_handle ): +# # https://docs.python.org/2/library/os.html#os.fsync +# # If you're starting with a Python file object f, +# # first do f.flush(), and +# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. +# global os +# +# file_handle.flush() +# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: +# # otherwise: "OSError: [Errno 9] Bad file descriptor"! +# os.fsync(file_handle.fileno()) +# file_handle.close() +# +# #-------------------------------------------- +# +# def cleanup( f_names_list ): +# global os +# for f in f_names_list: +# if type(f) == file: +# del_name = f.name +# elif type(f) == str: +# del_name = f +# else: +# print('Unrecognized type of element:', f, ' - can not be treated as file.') +# del_name = None +# +# if del_name and os.path.isfile( del_name ): +# os.remove( del_name ) +# +# #-------------------------------------------- +# +# sql_init_ddl = os.path.join(context['files_location'],'read-consist-sttm-restart-DDL.sql') +# +# for checked_mode in('table', 'view'): +# +# target_obj = 'test' if checked_mode == 'table' else 'v_test' +# +# # drop dependencies: +# runProgram('isql', [ dsn, '-q' ], 'recreate table detl(id int);') +# +# f_init_log=open( os.path.join(context['temp_directory'],'read-consist-sttm-restart-DDL.log'), 'w') +# f_init_err=open( ''.join( ( os.path.splitext(f_init_log.name)[0], '.err') ), 'w') +# subprocess.call( [context['isql_path'], dsn, '-q', '-i', sql_init_ddl], stdout=f_init_log, stderr=f_init_err ) +# flush_and_close(f_init_log) +# flush_and_close(f_init_err) +# +# sql_addi=''' +# recreate table detl(id int, PID int references test on delete cascade on update cascade); +# commit; +# +# delete from test; +# insert into test(id, x) values(2,2); +# insert into test(id, x) values(3,3); +# insert into test(id, x) values(5,5); +# insert into detl(id, pid) values(2000, 2); +# insert into detl(id, pid) values(2001, 2); +# insert into detl(id, pid) values(2002, 2); +# insert into detl(id, pid) values(3001, 3); +# insert into detl(id, pid) values(5001, 5); +# insert into detl(id, pid) values(5001, 5); +# commit; +# ''' +# runProgram('isql', [ dsn, '-q' ], sql_addi) +# +# locker_tpb = fdb.TPB() +# locker_tpb.lock_timeout = 3; # LOCKER_LOCK_TIMEOUT +# locker_tpb.lock_resolution = 
fdb.isc_tpb_wait +# +# con_lock_1 = fdb.connect( dsn = dsn, isolation_level=locker_tpb ) +# con_lock_2 = fdb.connect( dsn = dsn, isolation_level=locker_tpb ) +# +# +# ######################### +# ### L O C K E R - 1 ### +# ######################### +# +# con_lock_1.execute_immediate( 'update %(target_obj)s set id=id where id=5' % locals() ) +# +# sql_text=''' +# connect '%(dsn)s'; +# set list on; +# set autoddl off; +# set term ^; +# execute block returns (whoami varchar(30)) as +# begin +# whoami = 'WORKER'; -- , ATT#' || current_connection; +# rdb$set_context('USER_SESSION','WHO', whoami); +# -- suspend; +# end +# ^ +# set term ;^ +# commit; +# SET KEEP_TRAN_PARAMS ON; +# set transaction read committed read consistency; +# --select current_connection, current_transaction from rdb$database; +# set list off; +# set wng off; +# --set plan on; +# set count on; +# +# merge into %(target_obj)s t -- THIS MUST BE LOCKED +# using(select * from %(target_obj)s where id >= 3 order by id) s on t.id = s.id +# when matched then +# DELETE +# ; +# +# -- check results: +# -- ############### +# +# select id from %(target_obj)s order by id; -- this will produce output only after all lockers do their commit/rollback +# +# select v.old_id, v.op, v.snap_no_rank +# from v_worker_log v +# where v.op = 'del'; +# +# rollback; +# +# ''' % dict(globals(), **locals()) +# +# f_worker_sql=open( os.path.join(context['temp_directory'],'tmp_sttm_restart_on_merge_03.sql'), 'w') +# f_worker_sql.write(sql_text) +# flush_and_close(f_worker_sql) +# +# +# f_worker_log=open( ''.join( ( os.path.splitext(f_worker_sql.name)[0], '.log') ), 'w') +# f_worker_err=open( ''.join( ( os.path.splitext(f_worker_log.name)[0], '.err') ), 'w') +# +# ############################################################################ +# ### L A U N C H W O R K E R U S I N G I S Q L, A S Y N C. ### +# ############################################################################ +# +# p_worker = Popen( [ context['isql_path'], '-q', '-i', f_worker_sql.name ],stdout=f_worker_log, stderr=f_worker_err) +# time.sleep(1) +# +# +# ######################### +# ### L O C K E R - 2 ### +# ######################### +# con_lock_2.execute_immediate( 'update %(target_obj)s set id=4 where id=2;' % locals() ) +# con_lock_2.commit() +# con_lock_2.execute_immediate( 'update %(target_obj)s set id=id where id=4;' % locals() ) +# +# +# con_lock_1.commit() # release record with ID=5 (allow it to be deleted by session-worker) +# +# # Add record which did not exists when session-worker statement started. +# # Add also child record for it, then commit + re-lock just added record: +# con_lock_1.execute_immediate('insert into %(target_obj)s(id,x) values(6,6)' % locals()) +# con_lock_1.execute_immediate('insert into detl(id, pid) values(6001, 6)') +# con_lock_1.commit() +# con_lock_1.execute_immediate('update %(target_obj)s set id=id where id=6' % locals()) +# +# con_lock_2.commit() # release record with ID=4. At this point session-worker will be allowed to delete rows with ID=4 and 5. +# +# con_lock_1.commit() # release record with ID=6. It is the last record which also must be deleted by session-worker. 
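The preserved code above still builds the lockers' WAIT transactions with the legacy fdb API (fdb.TPB(), lock_timeout = 3, isc_tpb_wait). Purely as an editorial sketch, and assuming the firebird.driver tpb()/Isolation helpers that new-style tests use, the same transaction parameters could be expressed as below; how the buffer gets attached to the locker connections is deliberately left open.

    from firebird.driver import tpb, Isolation

    # WAIT mode with a bounded lock timeout: lock_timeout=3 mirrors LOCKER_LOCK_TIMEOUT in the
    # preserved fdb-based code; the isolation member chosen here is an assumption that matches
    # the lockers' default read-committed behaviour in the legacy driver.
    locker_tpb = tpb(isolation=Isolation.READ_COMMITTED_RECORD_VERSION, lock_timeout=3)

    # The resulting buffer would then be used to start the transaction of each locker connection
    # (con_lock_1, con_lock_2) before their execute_immediate() calls; that wiring is omitted here.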
+# +# # Here we wait for ISQL complete its mission: +# p_worker.wait() +# +# flush_and_close(f_worker_log) +# flush_and_close(f_worker_err) +# +# # Close lockers: +# ################ +# for c in (con_lock_1, con_lock_2): +# c.close() +# +# +# # CHECK RESULTS +# ############### +# with open(f_worker_log.name,'r') as f: +# for line in f: +# if line.strip(): +# print('checked_mode: %(checked_mode)s, STDLOG: %(line)s' % locals()) +# +# for f in (f_init_err, f_worker_err): +# with open(f.name,'r') as g: +# for line in g: +# if line.strip(): +# print( 'checked_mode: ', checked_mode, ' UNEXPECTED STDERR IN ' + g.name + ':', line) +# +# # then engine <...> creates new statement-level snapshot and restart execution...") -# -# ::: NB ::: -# This test uses script %FBT_REPO% -# iles -# ead-consist-sttm-restart-DDL.sql which contains common DDL for all other such tests. -# Particularly, it contains two TRIGGERS (TLOG_WANT and TLOG_DONE) which are used for logging of planned actions and actual -# results against table TEST. These triggers use AUTONOMOUS transactions in order to have ability to see results in any -# outcome of test. -# -# ############### -# Following scenario if executed here (see also: "doc\\README.read_consistency.md"; hereafer is marked as "DOC"): -# * five rows are inserted into the table TEST, with ID = 1...5 and x = 1...5. -# -# * session 'locker-1' ("BLOCKER" in Tom Kyte's article ): -# update test set id = id where id = 1 -# -# * session 'worker' ("LONG" in TK article) has mission: -# update test where set id = -id where id <= 2 order by id DESC rows 4; // using TIL = read committed read consistency -# -# merge into test t -# using (select * from test where id <=2 order by id DESC rows 4) s on s.id=t.id -# when matched then -# update set t.id = -t.id -# when not matched then -# insert(id,x) values(1000 + s.id, 1000+ s.x); -# -# // Execution will have PLAN ORDER . -# // It will update rows starting with ID = 2 but can not change row with ID = 1 because of locker-1. -# // Update conflict appears here and, because of this, worker temporary changes its TIL to RC no record_version (RC NRV). -# // [DOC]: "a) transaction isolation mode temporarily switched to the READ COMMITTED *NO RECORD VERSION MODE*" -# // This (new) TIL allows worker further to see all committed versions, regardless of its own snapshot. -# -# * session 'locker-2' ("FIRSTLAST" in TK article): replaces ID = 5 with new value = -5, then commits -# and locks this record again: -# (1) commit; -# (2) update test set id = -5 where abs(id)=5; -# (3) commit; -# (4) update test set id = id where abs(id)=5; -# // session-'worker' remains waiting at this point because row with ID = 1 is still occupied by by locker-1. -# // but worker must further see record with (new) id = -5 because its TIL was changed to RC NO RECORD_VERSION. -# -# -# * session 'locker-1': replaces ID = 4 with new value = -4, then commits and locks this record again: -# (1) commit; -# (2) update test set id = -4 where abs(id)=4; -# (3) commit; -# (4) update test set id = id where abs(id)=4; -# -# // This: '(1) commit' - will release record with ID = 1. Worker sees this record and put write-lock on it. -# // [DOC]: "b) engine put write lock on conflicted record" -# // But it is only 2nd row of total 4 that worker must delete. -# // Because of TIL = RC NRV session-'worker' must see all committed records regardless on its own snapshot. 
-# // Worker resumes search for any rows with ID < 2, and it does this with taking in account required order -# // of its DML (i.e. 'ORDER BY ID DESC ...') -# // [DOC]: "c) engine continue to evaluate remaining records of update\\delete cursor and put write locks on it too" -# // Worker starts to search records which must be involved in its DML and *found* first sucn row with ID = -5. -# // NB. This row currently can NOT be deleted by worker because locker-2 has uncommitted update of it. -# // BECAUSE OF FACT THAT AT LEAST ONE ROW *WAS FOUND* - STATEMENT-LEVEL RESTART *NOT* YET OCCURS HERE. -# // :::!! NB, AGAIN !! ::: restart NOT occurs here because at least one records found, see: -# // [DOC]: "d) when there is *no more* records to fetch, engine start to undo all actions performed since -# // top-level statement execution starts and preserve already taken write locks -# // e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*, -# // creates new statement-level snapshot and restart execution of top-level statement." -# -# * session 'locker-2': replaces ID = 3 with new value = -3, then commits and locks this record again: -# (1) commit; -# (2) update test set id = -3 where abs(id)=3; -# (3) commit; -# (4) update test set id = id where abs(id)=3; -# -# // This: '(1) commit' - will release record with ID = -5. Worker sees this record and put write-lock on it. -# // But this is only 3rd row of total 4 that worker must update. -# // Because of worker TIL = RC NRV, he must see all committed records regardless on its own snapshot. -# // Worker resumes search for any rows with ID < -5, and it does this with taking in account required order -# // of its DML (i.e. 'ORDER BY ID DESC ...') -# // [DOC]: "c) engine continue to evaluate remaining records of update\\delete cursor and put write locks on it too" -# // There are no such rows in the table. -# // BECAUSE OF FACT THAT NO RECORDS FOUND, WORKER DOES UNDO BUT KEEP LOCKS AND THEN MAKES FIRST STATEMENT-LEVEL RESTART. -# // [DOC]: "d) when there is no more records to fetch, engine start to undo all actions ... and preserve already taken write locks -# // e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*, -# // creates new statement-level snapshot and restart execution of top-level statement." -# -# * session 'locker-1': -# commit; -# // This will release record with ID=-4. Worker sees this record and put write-lock on it. -# // At this point worker has proceeded all required number of rows for DML: 2, 1, -4 and -5. -# // BECAUSE OF FACT THAT ALL ROWS WERE PROCEEDED, WORKER DOES UNDO BUT KEEP LOCKS AND THEN MAKES SECOND STATEMENT-LEVEL RESTART. -# // [DOC]: "d) when there is no more records to fetch, engine start to undo all actions ... and preserve already taken write locks -# // e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*, -# // creates new statement-level snapshot and restart execution of top-level statement." -# // After this restart worker will waiting for row with ID = -3 (it sees this because of TIL = RC NRV). -# -# * session 'locker-2': -# commit. -# // This releases row with ID=-3. Worker sees this record and put write-lock on it. -# // Records with ID = 2, 1, -4 and -5 already have been locked, but worker must update only FOUR rows (see its DML statement). -# // Thus only rows with ID = 2, 1, -3 and -4 will be updated. Record with ID = -5 must *remain* in the table. 
-# // At this point worker has proceeded all required rows that meet condition for DML: 2, 1, -3 and -4. -# // BECAUSE OF FACT THAT ALL ROWS WERE PROCEEDED, WORKER DOES UNDO BUT KEEP LOCKS AND THEN MAKES THIRD STATEMENT-LEVEL RESTART. -# // [DOC]: "d) when there is no more records to fetch, engine start to undo all actions ... and preserve already taken write locks -# // e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*, -# // creates new statement-level snapshot and restart execution of top-level statement." -# -# Expected result: -# * session-'worker' must *successfully* complete changes of 4 rows (but only two of them did exist at the starting point). -# Record with ID = -5 must remain in the table. -# -# * four unique values must be in the column TLOG_DONE.SNAP_NO for session-'worker' when it performed UPDATE statement: first of them -# was created by initial statement start and all others reflect three restarts (this column has values which are evaluated using -# rdb$get_context('SYSTEM', 'SNAPSHOT_NUMBER') -- see trigger TEST_AIUD). -# It is enough to count these values using COUNT(*) or enumarate them by DENSE_RANK() function. -# -# NOTE: concrete values of fields TRN, GLOBAL_CN and SNAP_NO in the TLOG_DONE can differ from one to another run! -# This is because of concurrent nature of connections that work against database. We must not assume that these values will be constant. -# -# ################ -# -# Checked on 4.0.0.2204 -# NOTE: added for-loop in order to check different target objects: TABLE ('test') and VIEW ('v_test'), see 'checked_mode'. -# -# tracker_id: -# min_versions: ['4.0'] -# versions: 4.0 -# qmid: + +""" +ID: transactions.read-consist-sttm-restart-on-merge-04 +TITLE: READ CONSISTENCY. Check creation of new statement-level snapshot and restarting changed caused by MERGE. Test-04. +DESCRIPTION: + Initial article for reading: + https://asktom.oracle.com/pls/asktom/f?p=100:11:::::P11_QUESTION_ID:11504247549852 + Note on terms which are used there: "BLOCKER", "LONG" and "FIRSTLAST" - their names are slightly changed here + to: LOCKER-1, WORKER and LOCKER-2 respectively. + See also: doc/README.read_consistency.md + + ********************************************** + + This test verifies that statement-level snapshot and restart will be performed when "main" session ("worker") + performs MERGE statement and is involved in update conflicts. + ("When update conflict is detected <...> then engine <...> creates new statement-level snapshot and restart execution...") + + ::: NB ::: + This test uses script %FBT_REPO%/files/read-consist-sttm-restart-DDL.sql which contains common DDL for all other such tests. + Particularly, it contains two TRIGGERS (TLOG_WANT and TLOG_DONE) which are used for logging of planned actions and actual + results against table TEST. These triggers use AUTONOMOUS transactions in order to have ability to see results in any + outcome of test. + + ############### + Following scenario if executed here (see also: "doc/README.read_consistency.md"; hereafer is marked as "DOC"): + * five rows are inserted into the table TEST, with ID = 1...5 and x = 1...5. 
+
+    * session 'locker-1' ("BLOCKER" in Tom Kyte's article):
+        update test set id = id where id = 1
+
+    * session 'worker' ("LONG" in TK article) has mission:
+        update test set id = -id where id <= 2 order by id DESC rows 4; // using TIL = read committed read consistency
+
+        merge into test t
+            using (select * from test where id <=2 order by id DESC rows 4) s on s.id=t.id
+        when matched then
+            update set t.id = -t.id
+        when not matched then
+            insert(id,x) values(1000 + s.id, 1000 + s.x);
+
+        // Execution will have PLAN ORDER.
+        // It will update rows starting with ID = 2 but can not change the row with ID = 1 because of locker-1.
+        // An update conflict appears here and, because of this, worker temporarily changes its TIL to RC no record_version (RC NRV).
+        // [DOC]: "a) transaction isolation mode temporarily switched to the READ COMMITTED *NO RECORD VERSION MODE*"
+        // This (new) TIL allows worker further to see all committed versions, regardless of its own snapshot.
+
+    * session 'locker-2' ("FIRSTLAST" in TK article): replaces ID = 5 with new value = -5, then commits
+      and locks this record again:
+        (1) commit;
+        (2) update test set id = -5 where abs(id)=5;
+        (3) commit;
+        (4) update test set id = id where abs(id)=5;
+        // session-'worker' remains waiting at this point because the row with ID = 1 is still occupied by locker-1.
+        // But worker must further see the record with (new) id = -5 because its TIL was changed to RC NO RECORD_VERSION.
+
+    * session 'locker-1': replaces ID = 4 with new value = -4, then commits and locks this record again:
+        (1) commit;
+        (2) update test set id = -4 where abs(id)=4;
+        (3) commit;
+        (4) update test set id = id where abs(id)=4;
+
+        // This '(1) commit' will release the record with ID = 1. Worker sees this record and puts a write-lock on it.
+        // [DOC]: "b) engine put write lock on conflicted record"
+        // But this is only the 2nd row of the total 4 that worker must update.
+        // Because of TIL = RC NRV session-'worker' must see all committed records regardless of its own snapshot.
+        // Worker resumes the search for any rows with ID < 2, taking into account the required order
+        // of its DML (i.e. 'ORDER BY ID DESC ...').
+        // [DOC]: "c) engine continue to evaluate remaining records of update/delete cursor and put write locks on it too"
+        // Worker searches for records which must be involved in its DML and *finds* the first such row, with ID = -5.
+        // NB. This row currently can NOT be changed by worker because locker-2 has an uncommitted update of it.
+        // BECAUSE AT LEAST ONE ROW *WAS FOUND*, A STATEMENT-LEVEL RESTART DOES *NOT* YET OCCUR HERE.
+        // :::!! NB, AGAIN !! ::: restart does NOT occur here because at least one record was found, see:
+        // [DOC]: "d) when there is *no more* records to fetch, engine start to undo all actions performed since
+        //     top-level statement execution starts and preserve already taken write locks
+        //     e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*,
+        //     creates new statement-level snapshot and restart execution of top-level statement."
+
+    * session 'locker-2': replaces ID = 3 with new value = -3, then commits and locks this record again:
+        (1) commit;
+        (2) update test set id = -3 where abs(id)=3;
+        (3) commit;
+        (4) update test set id = id where abs(id)=3;
+
+        // This '(1) commit' will release the record with ID = -5. Worker sees this record and puts a write-lock on it.
+        // But this is only the 3rd row of the total 4 that worker must update.
+        // Because of worker TIL = RC NRV, it must see all committed records regardless of its own snapshot.
+        // Worker resumes the search for any rows with ID < -5, taking into account the required order
+        // of its DML (i.e. 'ORDER BY ID DESC ...').
+        // [DOC]: "c) engine continue to evaluate remaining records of update/delete cursor and put write locks on it too"
+        // There are no such rows in the table.
+        // BECAUSE NO RECORDS WERE FOUND, WORKER DOES UNDO BUT KEEPS LOCKS AND THEN MAKES THE FIRST STATEMENT-LEVEL RESTART.
+        // [DOC]: "d) when there is no more records to fetch, engine start to undo all actions ... and preserve already taken write locks
+        //     e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*,
+        //     creates new statement-level snapshot and restart execution of top-level statement."
+
+    * session 'locker-1':
+        commit;
+        // This will release the record with ID = -4. Worker sees this record and puts a write-lock on it.
+        // At this point worker has processed all the required rows for its DML: 2, 1, -4 and -5.
+        // BECAUSE ALL ROWS WERE PROCESSED, WORKER DOES UNDO BUT KEEPS LOCKS AND THEN MAKES THE SECOND STATEMENT-LEVEL RESTART.
+        // [DOC]: "d) when there is no more records to fetch, engine start to undo all actions ... and preserve already taken write locks
+        //     e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*,
+        //     creates new statement-level snapshot and restart execution of top-level statement."
+        // After this restart worker will be waiting for the row with ID = -3 (it sees this row because of TIL = RC NRV).
+
+    * session 'locker-2':
+        commit;
+        // This releases the row with ID = -3. Worker sees this record and puts a write-lock on it.
+        // Records with ID = 2, 1, -4 and -5 have already been locked, but worker must update only FOUR rows (see its DML statement).
+        // Thus only rows with ID = 2, 1, -3 and -4 will be updated. The record with ID = -5 must *remain* in the table.
+        // At this point worker has processed all required rows that meet the condition of its DML: 2, 1, -3 and -4.
+        // BECAUSE ALL ROWS WERE PROCESSED, WORKER DOES UNDO BUT KEEPS LOCKS AND THEN MAKES THE THIRD STATEMENT-LEVEL RESTART.
+        // [DOC]: "d) when there is no more records to fetch, engine start to undo all actions ... and preserve already taken write locks
+        //     e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*,
+        //     creates new statement-level snapshot and restart execution of top-level statement."
+
+    Expected result:
+    * session-'worker' must *successfully* complete its changes of 4 rows (only two of which existed at the starting point).
+      The record with ID = -5 must remain in the table.
+
+    * four unique values must be present in column TLOG_DONE.SNAP_NO for session-'worker' when it performed the UPDATE statement: the first
+      was created by the initial statement start and the others reflect three restarts (this column is evaluated using
+      rdb$get_context('SYSTEM', 'SNAPSHOT_NUMBER') -- see trigger TEST_AIUD).
+      It is enough to count these values using COUNT(*) or enumerate them with the DENSE_RANK() function.
+
+    NOTE: concrete values of fields TRN, GLOBAL_CN and SNAP_NO in TLOG_DONE can differ from one run to another!
+      This is because of the concurrent nature of the connections that work against the database. We must not assume that these values will be constant.
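Editorial aside, not part of the patch: the snap_no_rank value that the worker script reads from v_worker_log is exactly the DENSE_RANK() enumeration mentioned in the expected result above. A conceptual sketch of that ranking follows; the real view is defined in read-consist-sttm-restart-DDL.sql, and the WHO/OP filters below are assumptions based on the preserved code.

    # Conceptual only: how snap_no_rank could be derived from TLOG_DONE.SNAP_NO.
    snap_rank_sql = """
        select t.old_id, t.op,
               dense_rank() over (order by t.snap_no) as snap_no_rank
        from tlog_done t
        where t.who = 'WORKER' and t.op = 'upd'
    """
    # For this scenario the ranking must yield exactly four distinct values:
    # 1 for the initial execution and 2..4 for the three statement-level restarts.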
+ + ################ + + Checked on 4.0.0.2204 + NOTE: added for-loop in order to check different target objects: TABLE ('test') and VIEW ('v_test'), see 'checked_mode'. +FBTEST: functional.transactions.read_consist_sttm_restart_on_merge_04 +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() -substitutions_1 = [('=', ''), ('[ \t]+', ' ')] +act = python_act('db', substitutions=[('=', ''), ('[ \t]+', ' ')]) -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -# test_script_1 -#--- -# -# import os -# import sys -# import subprocess -# from subprocess import Popen -# import shutil -# from fdb import services -# import time -# -# os.environ["ISC_USER"] = user_name -# os.environ["ISC_PASSWORD"] = user_password -# -# # How long can we wait for session-worker completition, seconds -# # (ISQL often can not complete its job for several seconds!): -# MAX_TIME_FOR_WAITING_WORKER_FINISH = 60 -# -# ############################## -# # Temply, for debug obly: -# this_fdb=db_conn.database_name -# this_dbg=os.path.splitext(this_fdb)[0] + '.4debug.fdb' -# ############################## -# -# db_conn.close() -# -# #-------------------------------------------- -# -# def flush_and_close( file_handle ): -# # https://docs.python.org/2/library/os.html#os.fsync -# # If you're starting with a Python file object f, -# # first do f.flush(), and -# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. -# global os -# -# file_handle.flush() -# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: -# # otherwise: "OSError: [Errno 9] Bad file descriptor"! -# os.fsync(file_handle.fileno()) -# file_handle.close() -# -# #-------------------------------------------- -# -# def cleanup( f_names_list ): -# global os -# for f in f_names_list: -# if type(f) == file: -# del_name = f.name -# elif type(f) == str: -# del_name = f -# else: -# print('Unrecognized type of element:', f, ' - can not be treated as file.') -# del_name = None -# -# if del_name and os.path.isfile( del_name ): -# os.remove( del_name ) -# -# #-------------------------------------------- -# -# sql_init_ddl = os.path.join(context['files_location'],'read-consist-sttm-restart-DDL.sql') -# -# for checked_mode in('table', 'view'): -# -# target_obj = 'test' if checked_mode == 'table' else 'v_test' -# -# f_init_log=open( os.path.join(context['temp_directory'],'read-consist-sttm-restart-DDL.log'), 'w') -# f_init_err=open( ''.join( ( os.path.splitext(f_init_log.name)[0], '.err') ), 'w') -# -# subprocess.call( [context['isql_path'], dsn, '-q', '-i', sql_init_ddl], stdout=f_init_log, stderr=f_init_err ) -# flush_and_close(f_init_log) -# flush_and_close(f_init_err) -# -# sql_addi=''' -# set term ^; -# execute block as -# begin -# rdb$set_context('USER_SESSION', 'WHO', 'INIT_DATA'); -# end -# ^ -# set term ;^ -# insert into %(target_obj)s(id, x) -# select row_number()over(),row_number()over() -# from rdb$types rows 5; -# commit; -# ''' % locals() -# runProgram('isql', [ dsn, '-q' ], sql_addi) -# -# locker_tpb = fdb.TPB() -# locker_tpb.lock_timeout = MAX_TIME_FOR_WAITING_WORKER_FINISH -# locker_tpb.lock_resolution = fdb.isc_tpb_wait -# -# con_lock_1 = fdb.connect( dsn = dsn, isolation_level=locker_tpb ) -# con_lock_2 = fdb.connect( dsn = dsn, isolation_level=locker_tpb ) -# -# con_lock_1.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 
'LOCKER #1'); end" ) -# con_lock_2.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #2'); end" ) -# -# ######################### -# ### L O C K E R - 1 ### -# ######################### -# -# con_lock_1.execute_immediate( 'update %(target_obj)s set id=id where id=1' % locals() ) -# -# sql_text=''' -# connect '%(dsn)s'; -# set list on; -# set autoddl off; -# set term ^; -# execute block returns (whoami varchar(30)) as -# begin -# whoami = 'WORKER'; -- , ATT#' || current_connection; -# rdb$set_context('USER_SESSION','WHO', whoami); -# -- suspend; -# end -# ^ -# set term ;^ -# commit; -# SET KEEP_TRAN_PARAMS ON; -# set transaction read committed read consistency; -# --select current_connection, current_transaction from rdb$database; -# set list off; -# set wng off; -# -# --set plan on; -# set count on; -# -# merge into %(target_obj)s t -- THIS MUST HANG BECAUSE OF LOCKERs -# using (select * from %(target_obj)s where id <=2 order by id DESC rows 4) s on s.id=t.id -# when matched then -# update set t.id = -t.id -# when not matched then -# insert(id,x) values(1000 + s.id, 1000 + s.x); -# -# -- check results: -# -- ############### -# -# select id from %(target_obj)s order by id; -- one record must remain, with ID = -5 -# -# select v.old_id, v.op, v.snap_no_rank -- snap_no_rank must have four unique values: 1,2,3 and 4. -# from v_worker_log v -# where v.op = 'upd'; -# -# --set width who 10; -# -- DO NOT check this! Values can differ here from one run to another! -# -- select id, trn, who, old_id, new_id, op, rec_vers, global_cn, snap_no from tlog_done order by id; -# rollback; -# -# ''' % dict(globals(), **locals()) -# -# f_worker_sql=open( os.path.join(context['temp_directory'],'tmp_sttm_restart_on_delete_04.sql'), 'w') -# f_worker_sql.write(sql_text) -# flush_and_close(f_worker_sql) -# -# -# f_worker_log=open( ''.join( ( os.path.splitext(f_worker_sql.name)[0], '.log') ), 'w') -# f_worker_err=open( ''.join( ( os.path.splitext(f_worker_log.name)[0], '.err') ), 'w') -# -# ############################################################################ -# ### L A U N C H W O R K E R U S I N G I S Q L, A S Y N C. ### -# ############################################################################ -# -# p_worker = Popen( [ context['isql_path'], '-pag', '9999999', '-q', '-i', f_worker_sql.name ],stdout=f_worker_log, stderr=f_worker_err) -# time.sleep(1) -# -# -# ######################### -# ### L O C K E R - 2 ### -# ######################### -# -# # Change ID so that it **will* be included in the set of rows that must be affected by session-worker: -# con_lock_2.execute_immediate( 'update %(target_obj)s set id = -5 where abs(id) = 5;' % locals() ) -# con_lock_2.commit() -# con_lock_2.execute_immediate( 'update %(target_obj)s set id = id where abs(id) = 5;' % locals() ) -# -# -# con_lock_1.commit() # releases record with ID=1 (allow it to be deleted by session-worker) -# -# # Change ID so that it **will* be included in the set of rows that must be affected by session-worker: -# con_lock_1.execute_immediate( 'update %(target_obj)s set id = -4 where abs(id) = 4;' % locals() ) -# con_lock_1.commit() -# con_lock_1.execute_immediate( 'update %(target_obj)s set id = id where abs(id) = 4;' % locals() ) -# -# -# con_lock_2.commit() # releases record with ID = -5, but session-worker is waiting for record with ID = -4 (that was changed by locker-1). 
-# con_lock_2.execute_immediate( 'update %(target_obj)s set id = -3 where abs(id) = 3;' % locals() ) -# con_lock_2.commit() -# con_lock_2.execute_immediate( 'update %(target_obj)s set id = id where abs(id) = 3;' % locals() ) -# -# con_lock_1.commit() # This releases row with ID=-4 but session-worker is waiting for ID = - 3 (changed by locker-2). -# con_lock_2.commit() # This releases row with ID=-3. No more locked rows so session-worker can finish its mission. -# -# # Here we wait for ISQL complete its mission: -# p_worker.wait() -# -# flush_and_close(f_worker_log) -# flush_and_close(f_worker_err) -# -# # Close lockers: -# ################ -# for c in (con_lock_1, con_lock_2): -# c.close() -# -# -# # CHECK RESULTS -# ############### -# with open(f_worker_log.name,'r') as f: -# for line in f: -# if line.strip(): -# print('checked_mode: %(checked_mode)s, STDLOG: %(line)s' % locals()) -# -# for f in (f_init_err, f_worker_err): -# with open(f.name,'r') as g: -# for line in g: -# if line.strip(): -# print( 'checked_mode: ', checked_mode, ' UNEXPECTED STDERR IN ' + g.name + ':', line) -# -# #=4.0') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") +# Original python code for this test: +# ----------------------------------- +# +# import os +# import sys +# import subprocess +# from subprocess import Popen +# import shutil +# from fdb import services +# import time +# +# os.environ["ISC_USER"] = user_name +# os.environ["ISC_PASSWORD"] = user_password +# +# # How long can we wait for session-worker completition, seconds +# # (ISQL often can not complete its job for several seconds!): +# MAX_TIME_FOR_WAITING_WORKER_FINISH = 60 +# +# ############################## +# # Temply, for debug obly: +# this_fdb=db_conn.database_name +# this_dbg=os.path.splitext(this_fdb)[0] + '.4debug.fdb' +# ############################## +# +# db_conn.close() +# +# #-------------------------------------------- +# +# def flush_and_close( file_handle ): +# # https://docs.python.org/2/library/os.html#os.fsync +# # If you're starting with a Python file object f, +# # first do f.flush(), and +# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. +# global os +# +# file_handle.flush() +# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: +# # otherwise: "OSError: [Errno 9] Bad file descriptor"! 
+# os.fsync(file_handle.fileno()) +# file_handle.close() +# +# #-------------------------------------------- +# +# def cleanup( f_names_list ): +# global os +# for f in f_names_list: +# if type(f) == file: +# del_name = f.name +# elif type(f) == str: +# del_name = f +# else: +# print('Unrecognized type of element:', f, ' - can not be treated as file.') +# del_name = None +# +# if del_name and os.path.isfile( del_name ): +# os.remove( del_name ) +# +# #-------------------------------------------- +# +# sql_init_ddl = os.path.join(context['files_location'],'read-consist-sttm-restart-DDL.sql') +# +# for checked_mode in('table', 'view'): +# +# target_obj = 'test' if checked_mode == 'table' else 'v_test' +# +# f_init_log=open( os.path.join(context['temp_directory'],'read-consist-sttm-restart-DDL.log'), 'w') +# f_init_err=open( ''.join( ( os.path.splitext(f_init_log.name)[0], '.err') ), 'w') +# +# subprocess.call( [context['isql_path'], dsn, '-q', '-i', sql_init_ddl], stdout=f_init_log, stderr=f_init_err ) +# flush_and_close(f_init_log) +# flush_and_close(f_init_err) +# +# sql_addi=''' +# set term ^; +# execute block as +# begin +# rdb$set_context('USER_SESSION', 'WHO', 'INIT_DATA'); +# end +# ^ +# set term ;^ +# insert into %(target_obj)s(id, x) +# select row_number()over(),row_number()over() +# from rdb$types rows 5; +# commit; +# ''' % locals() +# runProgram('isql', [ dsn, '-q' ], sql_addi) +# +# locker_tpb = fdb.TPB() +# locker_tpb.lock_timeout = MAX_TIME_FOR_WAITING_WORKER_FINISH +# locker_tpb.lock_resolution = fdb.isc_tpb_wait +# +# con_lock_1 = fdb.connect( dsn = dsn, isolation_level=locker_tpb ) +# con_lock_2 = fdb.connect( dsn = dsn, isolation_level=locker_tpb ) +# +# con_lock_1.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #1'); end" ) +# con_lock_2.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #2'); end" ) +# +# ######################### +# ### L O C K E R - 1 ### +# ######################### +# +# con_lock_1.execute_immediate( 'update %(target_obj)s set id=id where id=1' % locals() ) +# +# sql_text=''' +# connect '%(dsn)s'; +# set list on; +# set autoddl off; +# set term ^; +# execute block returns (whoami varchar(30)) as +# begin +# whoami = 'WORKER'; -- , ATT#' || current_connection; +# rdb$set_context('USER_SESSION','WHO', whoami); +# -- suspend; +# end +# ^ +# set term ;^ +# commit; +# SET KEEP_TRAN_PARAMS ON; +# set transaction read committed read consistency; +# --select current_connection, current_transaction from rdb$database; +# set list off; +# set wng off; +# +# --set plan on; +# set count on; +# +# merge into %(target_obj)s t -- THIS MUST HANG BECAUSE OF LOCKERs +# using (select * from %(target_obj)s where id <=2 order by id DESC rows 4) s on s.id=t.id +# when matched then +# update set t.id = -t.id +# when not matched then +# insert(id,x) values(1000 + s.id, 1000 + s.x); +# +# -- check results: +# -- ############### +# +# select id from %(target_obj)s order by id; -- one record must remain, with ID = -5 +# +# select v.old_id, v.op, v.snap_no_rank -- snap_no_rank must have four unique values: 1,2,3 and 4. +# from v_worker_log v +# where v.op = 'upd'; +# +# --set width who 10; +# -- DO NOT check this! Values can differ here from one run to another! 
+# -- select id, trn, who, old_id, new_id, op, rec_vers, global_cn, snap_no from tlog_done order by id; +# rollback; +# +# ''' % dict(globals(), **locals()) +# +# f_worker_sql=open( os.path.join(context['temp_directory'],'tmp_sttm_restart_on_delete_04.sql'), 'w') +# f_worker_sql.write(sql_text) +# flush_and_close(f_worker_sql) +# +# +# f_worker_log=open( ''.join( ( os.path.splitext(f_worker_sql.name)[0], '.log') ), 'w') +# f_worker_err=open( ''.join( ( os.path.splitext(f_worker_log.name)[0], '.err') ), 'w') +# +# ############################################################################ +# ### L A U N C H W O R K E R U S I N G I S Q L, A S Y N C. ### +# ############################################################################ +# +# p_worker = Popen( [ context['isql_path'], '-pag', '9999999', '-q', '-i', f_worker_sql.name ],stdout=f_worker_log, stderr=f_worker_err) +# time.sleep(1) +# +# +# ######################### +# ### L O C K E R - 2 ### +# ######################### +# +# # Change ID so that it **will* be included in the set of rows that must be affected by session-worker: +# con_lock_2.execute_immediate( 'update %(target_obj)s set id = -5 where abs(id) = 5;' % locals() ) +# con_lock_2.commit() +# con_lock_2.execute_immediate( 'update %(target_obj)s set id = id where abs(id) = 5;' % locals() ) +# +# +# con_lock_1.commit() # releases record with ID=1 (allow it to be deleted by session-worker) +# +# # Change ID so that it **will* be included in the set of rows that must be affected by session-worker: +# con_lock_1.execute_immediate( 'update %(target_obj)s set id = -4 where abs(id) = 4;' % locals() ) +# con_lock_1.commit() +# con_lock_1.execute_immediate( 'update %(target_obj)s set id = id where abs(id) = 4;' % locals() ) +# +# +# con_lock_2.commit() # releases record with ID = -5, but session-worker is waiting for record with ID = -4 (that was changed by locker-1). +# con_lock_2.execute_immediate( 'update %(target_obj)s set id = -3 where abs(id) = 3;' % locals() ) +# con_lock_2.commit() +# con_lock_2.execute_immediate( 'update %(target_obj)s set id = id where abs(id) = 3;' % locals() ) +# +# con_lock_1.commit() # This releases row with ID=-4 but session-worker is waiting for ID = - 3 (changed by locker-2). +# con_lock_2.commit() # This releases row with ID=-3. No more locked rows so session-worker can finish its mission. +# +# # Here we wait for ISQL complete its mission: +# p_worker.wait() +# +# flush_and_close(f_worker_log) +# flush_and_close(f_worker_err) +# +# # Close lockers: +# ################ +# for c in (con_lock_1, con_lock_2): +# c.close() +# +# +# # CHECK RESULTS +# ############### +# with open(f_worker_log.name,'r') as f: +# for line in f: +# if line.strip(): +# print('checked_mode: %(checked_mode)s, STDLOG: %(line)s' % locals()) +# +# for f in (f_init_err, f_worker_err): +# with open(f.name,'r') as g: +# for line in g: +# if line.strip(): +# print( 'checked_mode: ', checked_mode, ' UNEXPECTED STDERR IN ' + g.name + ':', line) +# +# # then engine <...> creates new statement-level snapshot and restart execution...") -# -# ::: NB ::: -# This test uses script %FBT_REPO% -# iles -# ead-consist-sttm-restart-DDL.sql which contains common DDL for all other such tests. -# Particularly, it contains two TRIGGERS (TLOG_WANT and TLOG_DONE) which are used for logging of planned actions and actual -# results against table TEST. These triggers use AUTONOMOUS transactions in order to have ability to see results in any -# outcome of test. 
-# -# ############### -# Following scenario if executed here (see also: "doc\\README.read_consistency.md"; hereafer is marked as "DOC"): -# -# * five rows are inserted into the table TEST, with IDs: 1,2,3,4,5 -# * session 'locker-1' ("BLOCKER" in Tom Kyte's article ): -# update test set id = id where id = 5; -# -# * session 'worker' ("LONG" in TK article) has mission: -# update test set id = -id order by id // using TIL = read committed read consistency -# -# // Execution will have PLAN ORDER . -# // Worker starts with updating rows with ID = 1...4 but can not change row with ID = 5 because of locker-1. -# // Because of detecting update conflist, worker changes here its TIL to RC NO RECORD_VERSION. -# -# * session 'locker-2' ("FIRSTLAST" in TK article): -# (1) insert into test(id) values(-11); -# (2) commit; -# (3) update test set id=id where id = -11; -# // session-'worker' remains waiting at this point because row with ID = 5 is still occupied by by locker-1. -# // Record with (new) id = -11 will be seen further because worker's TIL was changed to RC NO RECORD_VERSION. -# -# * session 'locker-1': -# (1) commit; -# (2) insert into test(id) values(-12); -# (3) commit; -# (4) update test set id=id where id = -12; -# -# // This: '(1) commit' - will release record with ID = 5. Worker sees this record and put write-lock on it. -# // [DOC]: "b) engine put write lock on conflicted record" -# // Because of TIL = RC NRV session-'worker' must see all committed records regardless on its own snapshot. -# // Worker resumes search for rows that must be updated with taking in account required order of its DML (i.e. 'ORDER BY ID'). -# // [DOC]: "c) engine continue to evaluate remaining records of update\\delete cursor and put write locks on it too" -# // New record which is involved in DML (and did not exist before) *will be found*, its ID = -11. -# // Worker stops on this record (with ID = -11) because id is occupied by locker-2. -# // BECAUSE OF FACT THAT AT LEAST ONE ROW *WAS FOUND* - STATEMENT-LEVEL RESTART *NOT* YET OCCURS HERE. -# // [DOC]: "d) when there is *no more* records to fetch, engine start to undo all actions performed since -# // top-level statement execution starts and preserve already taken write locks -# // e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*, -# // creates new statement-level snapshot and restart execution of top-level statement." -# -# * session 'locker-2': -# (1) commit; -# (2) insert into test(id) values(-13); -# (3) commit; -# (4) update test set id=id where id = -13; -# -# // This: '(1) commit' - will release record with ID = -11. Worker sees this record and put write-lock on it. -# // [DOC]: "b) engine put write lock on conflicted record" -# // Because of TIL = RC NRV session-'worker' must see all committed records regardless on its own snapshot. -# // Worker resumes search for rows that must be updated with taking in account required order of its DML (i.e. 'ORDER BY ID'). -# // [DOC]: "c) engine continue to evaluate remaining records of update\\delete cursor and put write locks on it too" -# // New record which is involved in DML (and did not exist before) *will be found*, its ID = -12. -# // Worker stops on this record (with ID = -12) because id is occupied by locker-1. -# // BECAUSE OF FACT THAT AT LEAST ONE ROW *WAS FOUND* - STATEMENT-LEVEL RESTART *NOT* YET OCCURS HERE. 
-# // [DOC]: "d) when there is *no more* records to fetch, engine start to undo all actions performed since -# // top-level statement execution starts and preserve already taken write locks -# // e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*, -# // creates new statement-level snapshot and restart execution of top-level statement." -# -# -# * session 'locker-1': -# commit; -# // This commit will release record with ID = -12. Worker sees this record and put write-lock on it. -# // Worker resumes search for rows that must be updated with taking in account required order of its DML (i.e. 'ORDER BY ID'). -# // New record which is involved in DML (and did not exist before) *will be found*, its ID = -13. -# // Worker stops on this record (with ID = -13) because id is occupied by locker-2. -# // BECAUSE OF FACT THAT AT LEAST ONE ROW *WAS FOUND* - STATEMENT-LEVEL RESTART *NOT* YET OCCURS HERE. -# -# * session 'locker-2': -# commit; -# // This releases row with ID = -13. Worker sees this record and put write-lock on it. -# // At this point there are no more records to be locked (by worker) that meet cursor condition: worker did put -# // write locks on all rows that meet its cursor conditions. -# // BECAUSE OF FACT THAT NO MORE RECORDS FOUND TO BE LOCKED, WORKER DOES UNDO BUT KEEP LOCKS AND THEN -# // MAKES FIRST STATEMENT-LEVEL RESTART. This restart is also the last in this test. -# -# -# Expected result: -# * session-'worker' must update of all rows with reverting signs of their IDs. Records which were inserted must have positive IDs. -# -# * Two unique values must be in the column TLOG_DONE.SNAP_NO for session-'worker' when it performed UPDATE statement: first of them -# was created by initial statement start and second reflects SINGLE restart (this column has values which are evaluated using -# rdb$get_context('SYSTEM', 'SNAPSHOT_NUMBER') -- see trigger TEST_AIUD). -# It is enough to count these values using COUNT(*) or enumarate them by DENSE_RANK() function. -# -# NOTE: concrete values of fields TRN, GLOBAL_CN and SNAP_NO in the TLOG_DONE can differ from one to another run! -# This is because of concurrent nature of connections that work against database. We must not assume that these values will be constant. -# -# ################ -# -# Checked on 4.0.0.2195 -# 24.09.2020: added for-loop in order to check different target objects: TABLE ('test') and VIEW ('v_test'), see 'checked_mode'. -# -# tracker_id: -# min_versions: ['4.0'] -# versions: 4.0 -# qmid: + +""" +ID: transactions.read-consist-sttm-restart-on-update-01 +TITLE: READ CONSISTENCY. Check creation of new statement-level snapshot and restarting changed caused by UPDATE. Test-01. +DESCRIPTION: + Initial article for reading: + https://asktom.oracle.com/pls/asktom/f?p=100:11:::::P11_QUESTION_ID:11504247549852 + Note on terms which are used there: "BLOCKER", "LONG" and "FIRSTLAST" - their names are slightly changed here + to: LOCKER-1, WORKER and LOCKER-2 respectively. + + ********************************************** + + This test verifies that statement-level snapshot and restart will be performed when "main" session ("worker") + performs UPDATE statement and is involved in update conflicts. + ("When update conflict is detected <...> then engine <...> creates new statement-level snapshot and restart execution...") + + ::: NB ::: + This test uses script %FBT_REPO%/files/read-consist-sttm-restart-DDL.sql which contains common DDL for all other such tests. 
+    Particularly, it contains two TRIGGERS (TLOG_WANT and TLOG_DONE) which are used for logging of planned actions and actual
+    results against table TEST. These triggers use AUTONOMOUS transactions in order to be able to see results in any
+    outcome of the test.
+
+    ###############
+    The following scenario is executed here (see also: "doc/README.read_consistency.md"; hereafter it is marked as "DOC"):
+
+    * five rows are inserted into the table TEST, with IDs: 1,2,3,4,5
+    * session 'locker-1' ("BLOCKER" in Tom Kyte's article):
+        update test set id = id where id = 5;
+
+    * session 'worker' ("LONG" in TK article) has mission:
+        update test set id = -id order by id // using TIL = read committed read consistency
+
+        // Execution will have PLAN ORDER.
+        // Worker starts with updating rows with ID = 1...4 but can not change the row with ID = 5 because of locker-1.
+        // Because it detects an update conflict, worker changes its TIL here to RC NO RECORD_VERSION.
+
+    * session 'locker-2' ("FIRSTLAST" in TK article):
+        (1) insert into test(id) values(-11);
+        (2) commit;
+        (3) update test set id=id where id = -11;
+        // session-'worker' remains waiting at this point because the row with ID = 5 is still occupied by locker-1.
+        // The record with (new) id = -11 will be seen further because worker's TIL was changed to RC NO RECORD_VERSION.
+
+    * session 'locker-1':
+        (1) commit;
+        (2) insert into test(id) values(-12);
+        (3) commit;
+        (4) update test set id=id where id = -12;
+
+        // This '(1) commit' will release the record with ID = 5. Worker sees this record and puts a write-lock on it.
+        // [DOC]: "b) engine put write lock on conflicted record"
+        // Because of TIL = RC NRV session-'worker' must see all committed records regardless of its own snapshot.
+        // Worker resumes the search for rows that must be updated, taking into account the required order of its DML (i.e. 'ORDER BY ID').
+        // [DOC]: "c) engine continue to evaluate remaining records of update/delete cursor and put write locks on it too"
+        // A new record which is involved in the DML (and did not exist before) *will be found*, its ID = -11.
+        // Worker stops on this record (with ID = -11) because it is occupied by locker-2.
+        // BECAUSE AT LEAST ONE ROW *WAS FOUND*, A STATEMENT-LEVEL RESTART DOES *NOT* YET OCCUR HERE.
+        // [DOC]: "d) when there is *no more* records to fetch, engine start to undo all actions performed since
+        //     top-level statement execution starts and preserve already taken write locks
+        //     e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*,
+        //     creates new statement-level snapshot and restart execution of top-level statement."
+
+    * session 'locker-2':
+        (1) commit;
+        (2) insert into test(id) values(-13);
+        (3) commit;
+        (4) update test set id=id where id = -13;
+
+        // This '(1) commit' will release the record with ID = -11. Worker sees this record and puts a write-lock on it.
+        // [DOC]: "b) engine put write lock on conflicted record"
+        // Because of TIL = RC NRV session-'worker' must see all committed records regardless of its own snapshot.
+        // Worker resumes the search for rows that must be updated, taking into account the required order of its DML (i.e. 'ORDER BY ID').
+        // [DOC]: "c) engine continue to evaluate remaining records of update/delete cursor and put write locks on it too"
+        // A new record which is involved in the DML (and did not exist before) *will be found*, its ID = -12.
+        // Worker stops on this record (with ID = -12) because it is occupied by locker-1.
+        // BECAUSE AT LEAST ONE ROW *WAS FOUND*, A STATEMENT-LEVEL RESTART DOES *NOT* YET OCCUR HERE.
+        // [DOC]: "d) when there is *no more* records to fetch, engine start to undo all actions performed since
+        //     top-level statement execution starts and preserve already taken write locks
+        //     e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*,
+        //     creates new statement-level snapshot and restart execution of top-level statement."
+
+    * session 'locker-1':
+        commit;
+        // This commit will release the record with ID = -12. Worker sees this record and puts a write-lock on it.
+        // Worker resumes the search for rows that must be updated, taking into account the required order of its DML (i.e. 'ORDER BY ID').
+        // A new record which is involved in the DML (and did not exist before) *will be found*, its ID = -13.
+        // Worker stops on this record (with ID = -13) because it is occupied by locker-2.
+        // BECAUSE AT LEAST ONE ROW *WAS FOUND*, A STATEMENT-LEVEL RESTART DOES *NOT* YET OCCUR HERE.
+
+    * session 'locker-2':
+        commit;
+        // This releases the row with ID = -13. Worker sees this record and puts a write-lock on it.
+        // At this point there are no more records to be locked (by worker) that meet the cursor condition: worker has put
+        // write locks on all rows that meet its cursor conditions.
+        // BECAUSE NO MORE RECORDS WERE FOUND TO BE LOCKED, WORKER DOES UNDO BUT KEEPS LOCKS AND THEN
+        // MAKES THE FIRST STATEMENT-LEVEL RESTART. This restart is also the last one in this test.
+
+    Expected result:
+    * session-'worker' must update all rows, reverting the sign of their IDs. Records which were inserted by the lockers must end up with positive IDs.
+
+    * Two unique values must be present in column TLOG_DONE.SNAP_NO for session-'worker' when it performed the UPDATE statement: the first
+      was created by the initial statement start and the second reflects the SINGLE restart (this column is evaluated using
+      rdb$get_context('SYSTEM', 'SNAPSHOT_NUMBER') -- see trigger TEST_AIUD).
+      It is enough to count these values using COUNT(*) or enumerate them with the DENSE_RANK() function.
+
+    NOTE: concrete values of fields TRN, GLOBAL_CN and SNAP_NO in TLOG_DONE can differ from one run to another!
+      This is because of the concurrent nature of the connections that work against the database. We must not assume that these values will be constant.
+
+    ################
+
+    Checked on 4.0.0.2195
+    24.09.2020: added for-loop in order to check different target objects: TABLE ('test') and VIEW ('v_test'), see 'checked_mode'.
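Editorial aside, not part of the patch: once this test is re-implemented, the "two unique SNAP_NO values" expectation above could be asserted directly instead of being diffed as text. A minimal sketch, assuming the firebird-qa act fixture and the TLOG_DONE layout from read-consist-sttm-restart-DDL.sql (the WHO filter and the hard-coded count are assumptions):

    # Illustrative check only; act.db.connect() opens a connection to the test database.
    with act.db.connect() as con:
        cur = con.cursor()
        # TLOG_DONE.SNAP_NO is filled by trigger TEST_AIUD from
        # rdb$get_context('SYSTEM', 'SNAPSHOT_NUMBER'); two distinct values mean
        # one initial execution plus exactly one statement-level restart.
        cur.execute("select count(distinct snap_no) from tlog_done where who = 'WORKER'")
        snapshots_used = cur.fetchone()[0]
        assert snapshots_used == 2, f'Expected 2 statement-level snapshots, got {snapshots_used}'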
+FBTEST: functional.transactions.read_consist_sttm_restart_on_update_01 +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() -substitutions_1 = [('=', ''), ('[ \t]+', ' ')] +act = python_act('db', substitutions=[('=', ''), ('[ \t]+', ' ')]) -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -# test_script_1 -#--- -# -# import os -# import sys -# import subprocess -# from subprocess import Popen -# from fdb import services -# import time -# -# os.environ["ISC_USER"] = user_name -# os.environ["ISC_PASSWORD"] = user_password -# -# db_conn.close() -# -# #-------------------------------------------- -# -# def flush_and_close( file_handle ): -# # https://docs.python.org/2/library/os.html#os.fsync -# # If you're starting with a Python file object f, -# # first do f.flush(), and -# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. -# global os -# -# file_handle.flush() -# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: -# # otherwise: "OSError: [Errno 9] Bad file descriptor"! -# os.fsync(file_handle.fileno()) -# file_handle.close() -# -# #-------------------------------------------- -# -# def cleanup( f_names_list ): -# global os -# for f in f_names_list: -# if type(f) == file: -# del_name = f.name -# elif type(f) == str: -# del_name = f -# else: -# print('Unrecognized type of element:', f, ' - can not be treated as file.') -# del_name = None -# -# if del_name and os.path.isfile( del_name ): -# os.remove( del_name ) -# -# #-------------------------------------------- -# -# sql_init_ddl = os.path.join(context['files_location'],'read-consist-sttm-restart-DDL.sql') -# -# for checked_mode in('table', 'view'): -# -# target_obj = 'test' if checked_mode == 'table' else 'v_test' -# -# f_init_log=open( os.path.join(context['temp_directory'],'read-consist-sttm-restart-DDL.log'), 'w') -# f_init_err=open( ''.join( ( os.path.splitext(f_init_log.name)[0], '.err') ), 'w') -# -# # RECREATION OF ALL DB OBJECTS: -# # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -# subprocess.call( [context['isql_path'], dsn, '-q', '-i', sql_init_ddl], stdout=f_init_log, stderr=f_init_err ) -# flush_and_close(f_init_log) -# flush_and_close(f_init_err) -# -# # add rows with ID = 1, 2, ..., 5: -# sql_addi=''' -# set term ^; -# execute block as -# begin -# rdb$set_context('USER_SESSION', 'WHO', 'INIT_DATA'); -# end -# ^ -# set term ;^ -# insert into %(target_obj)s(id, x) -# select row_number()over(),row_number()over() -# from rdb$types rows 5; -# commit; -# ''' % locals() -# runProgram('isql', [ dsn, '-q' ], sql_addi) -# -# con_lock_1 = fdb.connect( dsn = dsn ) -# con_lock_2 = fdb.connect( dsn = dsn ) -# con_lock_1.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #1'); end" ) -# con_lock_2.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #2'); end" ) -# -# -# ######################### -# ### L O C K E R - 1 ### -# ######################### -# -# con_lock_1.execute_immediate( 'update %(target_obj)s set id=id where id = 5' % locals()) -# -# sql_text=''' -# connect '%(dsn)s'; -# set list on; -# set autoddl off; -# set term ^; -# execute block returns (whoami varchar(30)) as -# begin -# whoami = 'WORKER'; -- , ATT#' || current_connection; -# rdb$set_context('USER_SESSION','WHO', whoami); -# -- suspend; -# end -# ^ -# set term ;^ -# commit; -# --set echo on; -# 
SET KEEP_TRAN_PARAMS ON; -# set transaction read committed read consistency; -# --select current_connection, current_transaction from rdb$database; -# set list off; -# set wng off; -# --set plan on; -# set count on; -# -# update %(target_obj)s set id = -id order by id; -- THIS MUST BE LOCKED -# -# -- check results: -# -- ############### -# -# select id from %(target_obj)s order by id; -- this will produce output only after all lockers do their commit/rollback -# -# select v.old_id, v.op, v.snap_no_rank -# from v_worker_log v -# where v.op = 'upd'; -# -# set width who 10; -# -- DO NOT check this! Values can differ here from one run to another! -# -- select id, trn, who, old_id, new_id, op, rec_vers, global_cn, snap_no from tlog_done order by id; -# -# rollback; -# -# ''' % dict(globals(), **locals()) -# -# f_worker_sql=open( os.path.join(context['temp_directory'],'tmp_sttm_restart_on_update_01.sql'), 'w') -# f_worker_sql.write(sql_text) -# flush_and_close(f_worker_sql) -# -# -# f_worker_log=open( ''.join( ( os.path.splitext(f_worker_sql.name)[0], '.log') ), 'w') -# f_worker_err=open( ''.join( ( os.path.splitext(f_worker_log.name)[0], '.err') ), 'w') -# -# ############################################################################ -# ### L A U N C H W O R K E R U S I N G I S Q L, A S Y N C. ### -# ############################################################################ -# -# p_worker = Popen( [ context['isql_path'], '-pag', '999999', '-q', '-i', f_worker_sql.name ],stdout=f_worker_log, stderr=f_worker_err) -# time.sleep(1) -# -# ######################### -# ### L O C K E R - 2 ### -# ######################### -# con_lock_2.execute_immediate( 'insert into %(target_obj)s(id) values(-11)' % locals()) -# con_lock_2.commit() -# con_lock_2.execute_immediate( 'update %(target_obj)s set id=id where id = -11' % locals()) -# -# ######################### -# ### L O C K E R - 1 ### -# ######################### -# con_lock_1.commit() -# con_lock_1.execute_immediate( 'insert into %(target_obj)s(id) values(-12)' % locals() ) -# con_lock_1.commit() -# con_lock_1.execute_immediate( 'update %(target_obj)s set id=id where id = -12' % locals() ) -# -# -# ######################### -# ### L O C K E R - 2 ### -# ######################### -# con_lock_2.commit() -# con_lock_2.execute_immediate( 'insert into %(target_obj)s(id) values(-13)' % locals() ) -# con_lock_2.commit() -# con_lock_2.execute_immediate( 'update %(target_obj)s set id=id where id = -13' % locals() ) -# -# ######################### -# ### L O C K E R - 1 ### -# ######################### -# con_lock_1.commit() -# -# ######################### -# ### L O C K E R - 2 ### -# ######################### -# con_lock_2.commit() # WORKER will complete his job after this -# -# -# # Here we wait for ISQL complete its mission: -# p_worker.wait() -# -# flush_and_close(f_worker_log) -# flush_and_close(f_worker_err) -# -# # Close lockers: -# ################ -# for c in (con_lock_1, con_lock_2): -# c.close() -# -# -# # CHECK RESULTS -# ############### -# with open(f_worker_log.name,'r') as f: -# for line in f: -# if line.strip(): -# print('checked_mode: %(checked_mode)s, STDLOG: %(line)s' % locals()) -# -# for f in (f_init_err, f_worker_err): -# with open(f.name,'r') as g: -# for line in g: -# if line.strip(): -# print( 'checked_mode: ', checked_mode, ' UNEXPECTED STDERR IN ' + g.name + ':', line) -# -# #=4.0') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") +# 
Original python code for this test: +# ----------------------------------- +# +# import os +# import sys +# import subprocess +# from subprocess import Popen +# from fdb import services +# import time +# +# os.environ["ISC_USER"] = user_name +# os.environ["ISC_PASSWORD"] = user_password +# +# db_conn.close() +# +# #-------------------------------------------- +# +# def flush_and_close( file_handle ): +# # https://docs.python.org/2/library/os.html#os.fsync +# # If you're starting with a Python file object f, +# # first do f.flush(), and +# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. +# global os +# +# file_handle.flush() +# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: +# # otherwise: "OSError: [Errno 9] Bad file descriptor"! +# os.fsync(file_handle.fileno()) +# file_handle.close() +# +# #-------------------------------------------- +# +# def cleanup( f_names_list ): +# global os +# for f in f_names_list: +# if type(f) == file: +# del_name = f.name +# elif type(f) == str: +# del_name = f +# else: +# print('Unrecognized type of element:', f, ' - can not be treated as file.') +# del_name = None +# +# if del_name and os.path.isfile( del_name ): +# os.remove( del_name ) +# +# #-------------------------------------------- +# +# sql_init_ddl = os.path.join(context['files_location'],'read-consist-sttm-restart-DDL.sql') +# +# for checked_mode in('table', 'view'): +# +# target_obj = 'test' if checked_mode == 'table' else 'v_test' +# +# f_init_log=open( os.path.join(context['temp_directory'],'read-consist-sttm-restart-DDL.log'), 'w') +# f_init_err=open( ''.join( ( os.path.splitext(f_init_log.name)[0], '.err') ), 'w') +# +# # RECREATION OF ALL DB OBJECTS: +# # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# subprocess.call( [context['isql_path'], dsn, '-q', '-i', sql_init_ddl], stdout=f_init_log, stderr=f_init_err ) +# flush_and_close(f_init_log) +# flush_and_close(f_init_err) +# +# # add rows with ID = 1, 2, ..., 5: +# sql_addi=''' +# set term ^; +# execute block as +# begin +# rdb$set_context('USER_SESSION', 'WHO', 'INIT_DATA'); +# end +# ^ +# set term ;^ +# insert into %(target_obj)s(id, x) +# select row_number()over(),row_number()over() +# from rdb$types rows 5; +# commit; +# ''' % locals() +# runProgram('isql', [ dsn, '-q' ], sql_addi) +# +# con_lock_1 = fdb.connect( dsn = dsn ) +# con_lock_2 = fdb.connect( dsn = dsn ) +# con_lock_1.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #1'); end" ) +# con_lock_2.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #2'); end" ) +# +# +# ######################### +# ### L O C K E R - 1 ### +# ######################### +# +# con_lock_1.execute_immediate( 'update %(target_obj)s set id=id where id = 5' % locals()) +# +# sql_text=''' +# connect '%(dsn)s'; +# set list on; +# set autoddl off; +# set term ^; +# execute block returns (whoami varchar(30)) as +# begin +# whoami = 'WORKER'; -- , ATT#' || current_connection; +# rdb$set_context('USER_SESSION','WHO', whoami); +# -- suspend; +# end +# ^ +# set term ;^ +# commit; +# --set echo on; +# SET KEEP_TRAN_PARAMS ON; +# set transaction read committed read consistency; +# --select current_connection, current_transaction from rdb$database; +# set list off; +# set wng off; +# --set plan on; +# set count on; +# +# update %(target_obj)s set id = -id order by id; -- THIS MUST BE LOCKED +# +# -- check results: +# -- ############### +# +# select id from 
%(target_obj)s order by id; -- this will produce output only after all lockers do their commit/rollback +# +# select v.old_id, v.op, v.snap_no_rank +# from v_worker_log v +# where v.op = 'upd'; +# +# set width who 10; +# -- DO NOT check this! Values can differ here from one run to another! +# -- select id, trn, who, old_id, new_id, op, rec_vers, global_cn, snap_no from tlog_done order by id; +# +# rollback; +# +# ''' % dict(globals(), **locals()) +# +# f_worker_sql=open( os.path.join(context['temp_directory'],'tmp_sttm_restart_on_update_01.sql'), 'w') +# f_worker_sql.write(sql_text) +# flush_and_close(f_worker_sql) +# +# +# f_worker_log=open( ''.join( ( os.path.splitext(f_worker_sql.name)[0], '.log') ), 'w') +# f_worker_err=open( ''.join( ( os.path.splitext(f_worker_log.name)[0], '.err') ), 'w') +# +# ############################################################################ +# ### L A U N C H W O R K E R U S I N G I S Q L, A S Y N C. ### +# ############################################################################ +# +# p_worker = Popen( [ context['isql_path'], '-pag', '999999', '-q', '-i', f_worker_sql.name ],stdout=f_worker_log, stderr=f_worker_err) +# time.sleep(1) +# +# ######################### +# ### L O C K E R - 2 ### +# ######################### +# con_lock_2.execute_immediate( 'insert into %(target_obj)s(id) values(-11)' % locals()) +# con_lock_2.commit() +# con_lock_2.execute_immediate( 'update %(target_obj)s set id=id where id = -11' % locals()) +# +# ######################### +# ### L O C K E R - 1 ### +# ######################### +# con_lock_1.commit() +# con_lock_1.execute_immediate( 'insert into %(target_obj)s(id) values(-12)' % locals() ) +# con_lock_1.commit() +# con_lock_1.execute_immediate( 'update %(target_obj)s set id=id where id = -12' % locals() ) +# +# +# ######################### +# ### L O C K E R - 2 ### +# ######################### +# con_lock_2.commit() +# con_lock_2.execute_immediate( 'insert into %(target_obj)s(id) values(-13)' % locals() ) +# con_lock_2.commit() +# con_lock_2.execute_immediate( 'update %(target_obj)s set id=id where id = -13' % locals() ) +# +# ######################### +# ### L O C K E R - 1 ### +# ######################### +# con_lock_1.commit() +# +# ######################### +# ### L O C K E R - 2 ### +# ######################### +# con_lock_2.commit() # WORKER will complete his job after this +# +# +# # Here we wait for ISQL complete its mission: +# p_worker.wait() +# +# flush_and_close(f_worker_log) +# flush_and_close(f_worker_err) +# +# # Close lockers: +# ################ +# for c in (con_lock_1, con_lock_2): +# c.close() +# +# +# # CHECK RESULTS +# ############### +# with open(f_worker_log.name,'r') as f: +# for line in f: +# if line.strip(): +# print('checked_mode: %(checked_mode)s, STDLOG: %(line)s' % locals()) +# +# for f in (f_init_err, f_worker_err): +# with open(f.name,'r') as g: +# for line in g: +# if line.strip(): +# print( 'checked_mode: ', checked_mode, ' UNEXPECTED STDERR IN ' + g.name + ':', line) +# +# # then engine <...> creates new statement-level snapshot and restart execution...") -# -# ::: NB ::: -# This test uses script %FBT_REPO% -# iles -# ead-consist-sttm-restart-DDL.sql which contains common DDL for all other such tests. -# Particularly, it contains two TRIGGERS (TLOG_WANT and TLOG_DONE) which are used for logging of planned actions and actual -# results against table TEST. These triggers use AUTONOMOUS transactions in order to have ability to see results in any -# outcome of test. 
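The shared DDL script itself is not part of this diff, so the exact trigger bodies are not visible here. A minimal, purely illustrative sketch (the names tlog_done, who, op and snap_no are taken from the description above; everything else is an assumption, not the real read-consist-sttm-restart-DDL.sql) of the kind of autonomous-transaction logging trigger the script is said to contain, carried in a Python string the way these tests usually embed their SQL:

    tlog_trigger_sketch = """
    set term ^;
    create or alter trigger test_aiud for test
    active after insert or update or delete position 0
    as
    begin
        -- Log inside an autonomous transaction so the row survives even if the worker finally rolls back.
        in autonomous transaction do
            insert into tlog_done(who, op, snap_no)
            values (
                rdb$get_context('USER_SESSION', 'WHO'),
                case when inserting then 'ins' when updating then 'upd' else 'del' end,
                rdb$get_context('SYSTEM', 'SNAPSHOT_NUMBER')  -- statement-level snapshot number, FB 4.0+
            );
    end
    ^
    set term ;^
    """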
-# -# ############### -# Following scenario if executed here (see also: "doc\\README.read_consistency.md"; hereafer is marked as "DOC"): -# -# * five rows are inserted into the table TEST, with IDs: 1,2,3,4,5 -# * session 'locker-1' ("BLOCKER" in Tom Kyte's article ): -# delete from test where id = 5; -# -# * session 'worker' ("LONG" in TK article) has mission: -# update test set id = -id where exists(select * from test where id < 0 or id = 5) order by id; // using TIL = read committed read consistency -# -# // Execution will have PLAN ORDER . -# // Worker starts with updating rows with ID = 1...4 but can not change row with ID = 5 because of locker-1. -# // Because of detecting update conflist, worker changes here its TIL to RC NO RECORD_VERSION. -# -# * session 'locker-2' ("FIRSTLAST" in TK article): -# (1) insert into test(id) values(-11); -# (2) commit; -# (3) delete from test where id = -11; -# -# // session-'worker' remains waiting at this point because row with ID = 5 is still occupied by by locker-1. -# // Record with (new) id = -11 will be seen further because worker's TIL was changed to RC NO RECORD_VERSION. -# -# * session 'locker-1': -# (1) commit; -# (2) insert into test(id) values(-12); -# (3) commit; -# (4) delete from test where id = -12; -# -# // This: '(1) commit' - removes record with id = 5. -# // Because of TIL = RC NRV session-'worker' must see all committed records regardless on its own snapshot. -# // Thus worker sees record with id = -11 (which is locked now by locker-2) and puts write-lock on it. -# // [DOC]: "b) engine put write lock on conflicted record" -# // BECAUSE OF FACT THAT AT LEAST ONE ROW *WAS FOUND* - STATEMENT-LEVEL RESTART *NOT* YET OCCURS HERE. -# // [DOC]: "d) when there is *no more* records to fetch, engine start to undo all actions performed since -# // top-level statement execution starts and preserve already taken write locks -# // e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*, -# // creates new statement-level snapshot and restart execution of top-level statement." -# -# * session 'locker-2': -# (1) commit; -# (2) insert into test(id) values(-13); -# (3) commit; -# (4) update test set id=id where id = -13; -# -# // This: '(1) commit' - removes record with id = -11. -# // Because of TIL = RC NRV session-'worker' must see all committed records regardless on its own snapshot. -# // Thus worker sees record with id = -12 (which is locked now by locker-1) and puts write-lock on it. -# // BECAUSE OF FACT THAT AT LEAST ONE ROW *WAS FOUND* - STATEMENT-LEVEL RESTART *NOT* YET OCCURS HERE. -# -# * session 'locker-1': -# (1) commit; -# (2) insert into test(id) values(-14); -# (3) commit; -# (4) update test set id=id where id = -14; -# -# // This: '(1) commit' - removes record with id = -12. -# // Because of TIL = RC NRV session-'worker' must see all committed records regardless on its own snapshot. -# // Thus worker sees record with id = -13 (which is locked now by locker-2) and puts write-lock on it. -# // BECAUSE OF FACT THAT AT LEAST ONE ROW *WAS FOUND* - STATEMENT-LEVEL RESTART *NOT* YET OCCURS HERE. -# -# * session 'locker-2': -# (1) commit; -# -# // This removes record with id = -13. -# // Worker still waits for record with id = -14 which is occupied by locker-1. -# -# * session 'locker-1': -# (1) commit; -# -# // This removes record with id = -14. 
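The alternating LOCKER-1 / LOCKER-2 steps above are pure client-side choreography, so they map almost one-to-one onto the new driver. A minimal sketch, assuming the python_act fixture ('act') and the firebird-driver connection API (execute_immediate / commit); this is not code from this commit, whose test body is still marked "Not IMPLEMENTED":

    def run_lockers(act, target_obj: str):
        # target_obj is 'test' or 'v_test', as in the checked_mode loop of the legacy version.
        with act.db.connect() as con_lock_1, act.db.connect() as con_lock_2:
            for con, who in ((con_lock_1, 'LOCKER #1'), (con_lock_2, 'LOCKER #2')):
                con.execute_immediate(
                    f"execute block as begin rdb$set_context('USER_SESSION', 'WHO', '{who}'); end")

            # LOCKER-1 occupies the row the worker will conflict on:
            con_lock_1.execute_immediate(f'delete from {target_obj} where id = 5')

            # ... the worker ISQL session is launched asynchronously at this point ...

            # LOCKER-2: add a row, commit it, then lock it again:
            con_lock_2.execute_immediate(f'insert into {target_obj}(id) values(-11)')
            con_lock_2.commit()
            con_lock_2.execute_immediate(f'delete from {target_obj} where id = -11')

            # LOCKER-1: release its own row, then repeat the same insert / commit / lock pattern:
            con_lock_1.commit()
            con_lock_1.execute_immediate(f'insert into {target_obj}(id) values(-12)')
            con_lock_1.commit()
            con_lock_1.execute_immediate(f'delete from {target_obj} where id = -12')
            # ...and so on, until the final commits leave the worker with nothing left to lock,
            # which is exactly when its statement-level restart fires.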
-# // At this point there are no more records to be locked (by worker) that meet cursor condition: worker did put -# // write locks on all rows that meet its cursor conditions. -# // BECAUSE OF FACT THAT NO MORE RECORDS FOUND TO BE LOCKED, WORKER DOES UNDO BUT KEEP LOCKS AND THEN -# // MAKES FIRST STATEMENT-LEVEL RESTART. This restart is also the last in this test. -# -# -# Expected result: -# * session-'worker' must update of only rows with ID = 1...4 (reverting sign of IDs value). -# -# * Two unique values must be in the column TLOG_DONE.SNAP_NO for session-'worker' when it performed UPDATE statement: first of them -# was created by initial statement start and second reflects SINGLE restart (this column has values which are evaluated using -# rdb$get_context('SYSTEM', 'SNAPSHOT_NUMBER') -- see trigger TEST_AIUD). -# It is enough to count these values using COUNT(*) or enumarate them by DENSE_RANK() function. -# -# NOTE: concrete values of fields TRN, GLOBAL_CN and SNAP_NO in the TLOG_DONE can differ from one to another run! -# This is because of concurrent nature of connections that work against database. We must not assume that these values will be constant. -# -# ################ -# -# Checked on 4.0.0.2195 -# 26.09.2020: added for-loop in order to check different target objects: TABLE ('test') and VIEW ('v_test'), see 'checked_mode'. -# -# tracker_id: -# min_versions: ['4.0'] -# versions: 4.0 -# qmid: + +""" +ID: transactions.read-consist-sttm-restart-on-update-02 +TITLE: READ CONSISTENCY. Check creation of new statement-level snapshot and restarting changed caused by UPDATE. Test-02. +DESCRIPTION: + Initial article for reading: + https://asktom.oracle.com/pls/asktom/f?p=100:11:::::P11_QUESTION_ID:11504247549852 + Note on terms which are used there: "BLOCKER", "LONG" and "FIRSTLAST" - their names are slightly changed here + to: LOCKER-1, WORKER and LOCKER-2 respectively. + + ********************************************** + + This test verifies that statement-level snapshot and restart will be performed when "main" session ("worker") + performs UPDATE statement and is involved in update conflicts. + ("When update conflict is detected <...> then engine <...> creates new statement-level snapshot and restart execution...") + + ::: NB ::: + This test uses script %FBT_REPO%/files/read-consist-sttm-restart-DDL.sql which contains common DDL for all other such tests. + Particularly, it contains two TRIGGERS (TLOG_WANT and TLOG_DONE) which are used for logging of planned actions and actual + results against table TEST. These triggers use AUTONOMOUS transactions in order to have ability to see results in any + outcome of test. + + ############### + Following scenario if executed here (see also: "doc/README.read_consistency.md"; hereafer is marked as "DOC"): + + * five rows are inserted into the table TEST, with IDs: 1,2,3,4,5 + * session 'locker-1' ("BLOCKER" in Tom Kyte's article ): + delete from test where id = 5; + + * session 'worker' ("LONG" in TK article) has mission: + update test set id = -id where exists(select * from test where id < 0 or id = 5) order by id; // using TIL = read committed read consistency + + // Execution will have PLAN ORDER . + // Worker starts with updating rows with ID = 1...4 but can not change row with ID = 5 because of locker-1. + // Because of detecting update conflist, worker changes here its TIL to RC NO RECORD_VERSION. 
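The part of the worker script that actually matters for the restart machinery is the transaction setup: KEEP_TRAN_PARAMS must be ON so that isql keeps starting its transactions as READ COMMITTED READ CONSISTENCY after every implicit commit. A condensed sketch of that script as a Python string (the legacy version preserved above writes the same statements to a temporary .sql file and feeds it to isql through Popen):

    target_obj = 'test'  # or 'v_test' when the view is the checked object
    worker_sql = f"""
        set list on;
        set term ^;
        execute block as
        begin
            rdb$set_context('USER_SESSION', 'WHO', 'WORKER');
        end
        ^
        set term ;^
        commit;
        SET KEEP_TRAN_PARAMS ON;
        set transaction read committed read consistency;
        set count on;

        update {target_obj} set id = -id order by id; -- blocks until the lockers release all rows

        select id from {target_obj} order by id;
        select v.old_id, v.op, v.snap_no_rank from v_worker_log v where v.op = 'upd';
        rollback;
    """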
+ + * session 'locker-2' ("FIRSTLAST" in TK article): + (1) insert into test(id) values(-11); + (2) commit; + (3) delete from test where id = -11; + + // session-'worker' remains waiting at this point because row with ID = 5 is still occupied by by locker-1. + // Record with (new) id = -11 will be seen further because worker's TIL was changed to RC NO RECORD_VERSION. + + * session 'locker-1': + (1) commit; + (2) insert into test(id) values(-12); + (3) commit; + (4) delete from test where id = -12; + + // This: '(1) commit' - removes record with id = 5. + // Because of TIL = RC NRV session-'worker' must see all committed records regardless on its own snapshot. + // Thus worker sees record with id = -11 (which is locked now by locker-2) and puts write-lock on it. + // [DOC]: "b) engine put write lock on conflicted record" + // BECAUSE OF FACT THAT AT LEAST ONE ROW *WAS FOUND* - STATEMENT-LEVEL RESTART *NOT* YET OCCURS HERE. + // [DOC]: "d) when there is *no more* records to fetch, engine start to undo all actions performed since + // top-level statement execution starts and preserve already taken write locks + // e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*, + // creates new statement-level snapshot and restart execution of top-level statement." + + * session 'locker-2': + (1) commit; + (2) insert into test(id) values(-13); + (3) commit; + (4) update test set id=id where id = -13; + + // This: '(1) commit' - removes record with id = -11. + // Because of TIL = RC NRV session-'worker' must see all committed records regardless on its own snapshot. + // Thus worker sees record with id = -12 (which is locked now by locker-1) and puts write-lock on it. + // BECAUSE OF FACT THAT AT LEAST ONE ROW *WAS FOUND* - STATEMENT-LEVEL RESTART *NOT* YET OCCURS HERE. + + * session 'locker-1': + (1) commit; + (2) insert into test(id) values(-14); + (3) commit; + (4) update test set id=id where id = -14; + + // This: '(1) commit' - removes record with id = -12. + // Because of TIL = RC NRV session-'worker' must see all committed records regardless on its own snapshot. + // Thus worker sees record with id = -13 (which is locked now by locker-2) and puts write-lock on it. + // BECAUSE OF FACT THAT AT LEAST ONE ROW *WAS FOUND* - STATEMENT-LEVEL RESTART *NOT* YET OCCURS HERE. + + * session 'locker-2': + (1) commit; + + // This removes record with id = -13. + // Worker still waits for record with id = -14 which is occupied by locker-1. + + * session 'locker-1': + (1) commit; + + // This removes record with id = -14. + // At this point there are no more records to be locked (by worker) that meet cursor condition: worker did put + // write locks on all rows that meet its cursor conditions. + // BECAUSE OF FACT THAT NO MORE RECORDS FOUND TO BE LOCKED, WORKER DOES UNDO BUT KEEP LOCKS AND THEN + // MAKES FIRST STATEMENT-LEVEL RESTART. This restart is also the last in this test. + + + Expected result: + * session-'worker' must update of only rows with ID = 1...4 (reverting sign of IDs value). + + * Two unique values must be in the column TLOG_DONE.SNAP_NO for session-'worker' when it performed UPDATE statement: first of them + was created by initial statement start and second reflects SINGLE restart (this column has values which are evaluated using + rdb$get_context('SYSTEM', 'SNAPSHOT_NUMBER') -- see trigger TEST_AIUD). + It is enough to count these values using COUNT(*) or enumarate them by DENSE_RANK() function. 
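The "two unique SNAP_NO values" criterion translates directly into a verification query. A sketch, assuming the column names quoted elsewhere in this hunk (tlog_done.who, tlog_done.snap_no, and v_worker_log.snap_no_rank, described as a DENSE_RANK over snap_no):

    check_restarts_sql = """
        -- Distinct statement-level snapshots used by the worker:
        -- 1 (initial statement start) + number of restarts, i.e. exactly 2 for this scenario.
        select count(distinct g.snap_no) as worker_snapshots
        from tlog_done g
        where g.who = 'WORKER'
    """
    # Equivalently, the maximum v_worker_log.snap_no_rank seen for op = 'upd' must be 2 here.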
+ + NOTE: concrete values of fields TRN, GLOBAL_CN and SNAP_NO in the TLOG_DONE can differ from one to another run! + This is because of concurrent nature of connections that work against database. We must not assume that these values will be constant. + + ################ + + Checked on 4.0.0.2195 + 26.09.2020: added for-loop in order to check different target objects: TABLE ('test') and VIEW ('v_test'), see 'checked_mode'. +FBTEST: functional.transactions.read_consist_sttm_restart_on_update_02 +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() -substitutions_1 = [('=', ''), ('[ \t]+', ' ')] +act = python_act('db', substitutions=[('=', ''), ('[ \t]+', ' ')]) -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -# test_script_1 -#--- -# -# import os -# import sys -# import subprocess -# from subprocess import Popen -# from fdb import services -# import time -# -# os.environ["ISC_USER"] = user_name -# os.environ["ISC_PASSWORD"] = user_password -# -# db_conn.close() -# -# #-------------------------------------------- -# -# def flush_and_close( file_handle ): -# # https://docs.python.org/2/library/os.html#os.fsync -# # If you're starting with a Python file object f, -# # first do f.flush(), and -# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. -# global os -# -# file_handle.flush() -# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: -# # otherwise: "OSError: [Errno 9] Bad file descriptor"! -# os.fsync(file_handle.fileno()) -# file_handle.close() -# -# #-------------------------------------------- -# -# def cleanup( f_names_list ): -# global os -# for f in f_names_list: -# if type(f) == file: -# del_name = f.name -# elif type(f) == str: -# del_name = f -# else: -# print('Unrecognized type of element:', f, ' - can not be treated as file.') -# del_name = None -# -# if del_name and os.path.isfile( del_name ): -# os.remove( del_name ) -# -# #-------------------------------------------- -# -# sql_init_ddl = os.path.join(context['files_location'],'read-consist-sttm-restart-DDL.sql') -# -# for checked_mode in('table', 'view'): -# -# target_obj = 'test' if checked_mode == 'table' else 'v_test' -# -# f_init_log=open( os.path.join(context['temp_directory'],'read-consist-sttm-restart-DDL.log'), 'w') -# f_init_err=open( ''.join( ( os.path.splitext(f_init_log.name)[0], '.err') ), 'w') -# -# subprocess.call( [context['isql_path'], dsn, '-q', '-i', sql_init_ddl], stdout=f_init_log, stderr=f_init_err ) -# flush_and_close(f_init_log) -# flush_and_close(f_init_err) -# -# # add rows with ID = 1, 2, ..., 5: -# sql_addi=''' -# set term ^; -# execute block as -# begin -# rdb$set_context('USER_SESSION', 'WHO', 'INIT_DATA'); -# end -# ^ -# set term ;^ -# insert into %(target_obj)s(id, x) select row_number()over(),row_number()over() from rdb$types rows 5; -# commit; -# ''' % locals() -# runProgram('isql', [ dsn, '-q' ], sql_addi) -# -# con_lock_1 = fdb.connect( dsn = dsn ) -# con_lock_2 = fdb.connect( dsn = dsn ) -# con_lock_1.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #1'); end" ) -# con_lock_2.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #2'); end" ) -# -# -# ######################### -# ### L O C K E R - 1 ### -# ######################### -# -# con_lock_1.execute_immediate( 'delete from %(target_obj)s where 
id = 5' % locals()) -# -# sql_text=''' -# connect '%(dsn)s'; -# set list on; -# set autoddl off; -# set term ^; -# execute block returns (whoami varchar(30)) as -# begin -# whoami = 'WORKER'; -- , ATT#' || current_connection; -# rdb$set_context('USER_SESSION','WHO', whoami); -# -- suspend; -# end -# ^ -# set term ;^ -# commit; -# --set echo on; -# SET KEEP_TRAN_PARAMS ON; -# set transaction read committed read consistency; -# --select current_connection, current_transaction from rdb$database; -# set list off; -# set wng off; -# --set plan on; -# set count on; -# -# update %(target_obj)s set id = -id order by id; -- THIS MUST BE LOCKED -# -# -- check results: -# -- ############### -# -# select id from %(target_obj)s order by id; -- this will produce output only after all lockers do their commit/rollback -# -# select v.old_id, v.op, v.snap_no_rank from v_worker_log v where v.op = 'upd'; -# -# set width who 10; -# -- DO NOT check this! Values can differ here from one run to another! -# -- select id, trn, who, old_id, new_id, op, rec_vers, global_cn, snap_no from tlog_done order by id; -# -# rollback; -# -# ''' % dict(globals(), **locals()) -# -# f_worker_sql=open( os.path.join(context['temp_directory'],'tmp_sttm_restart_on_update_02.sql'), 'w') -# f_worker_sql.write(sql_text) -# flush_and_close(f_worker_sql) -# -# -# f_worker_log=open( ''.join( ( os.path.splitext(f_worker_sql.name)[0], '.log') ), 'w') -# f_worker_err=open( ''.join( ( os.path.splitext(f_worker_log.name)[0], '.err') ), 'w') -# -# ############################################################################ -# ### L A U N C H W O R K E R U S I N G I S Q L, A S Y N C. ### -# ############################################################################ -# -# p_worker = Popen( [ context['isql_path'], '-pag', '999999', '-q', '-i', f_worker_sql.name ],stdout=f_worker_log, stderr=f_worker_err) -# time.sleep(1) -# -# ######################### -# ### L O C K E R - 2 ### -# ######################### -# con_lock_2.execute_immediate( 'insert into %(target_obj)s(id) values(-11)' % locals()) -# con_lock_2.commit() -# con_lock_2.execute_immediate( 'delete from %(target_obj)s where id = -11' % locals()) -# -# ######################### -# ### L O C K E R - 1 ### -# ######################### -# con_lock_1.commit() -# con_lock_1.execute_immediate( 'insert into %(target_obj)s(id) values(-12)' % locals()) -# con_lock_1.commit() -# con_lock_1.execute_immediate( 'delete from %(target_obj)s where id = -12' % locals()) -# -# -# ######################### -# ### L O C K E R - 2 ### -# ######################### -# con_lock_2.commit() -# con_lock_2.execute_immediate( 'insert into %(target_obj)s(id) values(-13)' % locals()) -# con_lock_2.commit() -# con_lock_2.execute_immediate( 'delete from %(target_obj)s where id = -13' % locals()) -# -# ######################### -# ### L O C K E R - 1 ### -# ######################### -# con_lock_1.commit() -# con_lock_1.execute_immediate( 'insert into %(target_obj)s(id) values(-14)' % locals()) -# con_lock_1.commit() -# con_lock_1.execute_immediate( 'delete from %(target_obj)s where id = -14' % locals()) -# -# ######################### -# ### L O C K E R - 2 ### -# ######################### -# con_lock_1.commit() -# -# ######################### -# ### L O C K E R - 1 ### -# ######################### -# con_lock_2.commit() # WORKER will complete his job after this -# -# -# # Here we wait for ISQL complete its mission: -# p_worker.wait() -# -# flush_and_close(f_worker_log) -# flush_and_close(f_worker_err) -# -# # Close 
lockers: -# ################ -# for c in (con_lock_1, con_lock_2): -# c.close() -# -# # CHECK RESULTS -# ############### -# with open(f_worker_log.name,'r') as f: -# for line in f: -# if line.strip(): -# print('checked_mode: %(checked_mode)s, STDLOG: %(line)s' % locals()) -# -# for f in (f_init_err, f_worker_err): -# with open(f.name,'r') as g: -# for line in g: -# if line.strip(): -# print( 'checked_mode: ', checked_mode, ' UNEXPECTED STDERR IN ' + g.name + ':', line) -# -# #=4.0') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") +# Original python code for this test: +# ----------------------------------- +# +# import os +# import sys +# import subprocess +# from subprocess import Popen +# from fdb import services +# import time +# +# os.environ["ISC_USER"] = user_name +# os.environ["ISC_PASSWORD"] = user_password +# +# db_conn.close() +# +# #-------------------------------------------- +# +# def flush_and_close( file_handle ): +# # https://docs.python.org/2/library/os.html#os.fsync +# # If you're starting with a Python file object f, +# # first do f.flush(), and +# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. +# global os +# +# file_handle.flush() +# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: +# # otherwise: "OSError: [Errno 9] Bad file descriptor"! +# os.fsync(file_handle.fileno()) +# file_handle.close() +# +# #-------------------------------------------- +# +# def cleanup( f_names_list ): +# global os +# for f in f_names_list: +# if type(f) == file: +# del_name = f.name +# elif type(f) == str: +# del_name = f +# else: +# print('Unrecognized type of element:', f, ' - can not be treated as file.') +# del_name = None +# +# if del_name and os.path.isfile( del_name ): +# os.remove( del_name ) +# +# #-------------------------------------------- +# +# sql_init_ddl = os.path.join(context['files_location'],'read-consist-sttm-restart-DDL.sql') +# +# for checked_mode in('table', 'view'): +# +# target_obj = 'test' if checked_mode == 'table' else 'v_test' +# +# f_init_log=open( os.path.join(context['temp_directory'],'read-consist-sttm-restart-DDL.log'), 'w') +# f_init_err=open( ''.join( ( os.path.splitext(f_init_log.name)[0], '.err') ), 'w') +# +# subprocess.call( [context['isql_path'], dsn, '-q', '-i', sql_init_ddl], stdout=f_init_log, stderr=f_init_err ) +# flush_and_close(f_init_log) +# flush_and_close(f_init_err) +# +# # add rows with ID = 1, 2, ..., 5: +# sql_addi=''' +# set term ^; +# execute block as +# begin +# rdb$set_context('USER_SESSION', 'WHO', 'INIT_DATA'); +# end +# ^ +# set term ;^ +# insert into %(target_obj)s(id, x) select row_number()over(),row_number()over() from rdb$types rows 5; +# commit; +# ''' % locals() +# runProgram('isql', [ dsn, '-q' ], sql_addi) +# +# con_lock_1 = fdb.connect( dsn = dsn ) +# con_lock_2 = fdb.connect( dsn = dsn ) +# con_lock_1.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #1'); end" ) +# con_lock_2.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #2'); end" ) +# +# +# ######################### +# ### L O C K E R - 1 ### +# ######################### +# +# con_lock_1.execute_immediate( 'delete from %(target_obj)s where id = 5' % locals()) +# +# sql_text=''' +# connect '%(dsn)s'; +# set list on; +# set autoddl off; +# set term ^; +# execute block returns (whoami varchar(30)) as +# 
begin +# whoami = 'WORKER'; -- , ATT#' || current_connection; +# rdb$set_context('USER_SESSION','WHO', whoami); +# -- suspend; +# end +# ^ +# set term ;^ +# commit; +# --set echo on; +# SET KEEP_TRAN_PARAMS ON; +# set transaction read committed read consistency; +# --select current_connection, current_transaction from rdb$database; +# set list off; +# set wng off; +# --set plan on; +# set count on; +# +# update %(target_obj)s set id = -id order by id; -- THIS MUST BE LOCKED +# +# -- check results: +# -- ############### +# +# select id from %(target_obj)s order by id; -- this will produce output only after all lockers do their commit/rollback +# +# select v.old_id, v.op, v.snap_no_rank from v_worker_log v where v.op = 'upd'; +# +# set width who 10; +# -- DO NOT check this! Values can differ here from one run to another! +# -- select id, trn, who, old_id, new_id, op, rec_vers, global_cn, snap_no from tlog_done order by id; +# +# rollback; +# +# ''' % dict(globals(), **locals()) +# +# f_worker_sql=open( os.path.join(context['temp_directory'],'tmp_sttm_restart_on_update_02.sql'), 'w') +# f_worker_sql.write(sql_text) +# flush_and_close(f_worker_sql) +# +# +# f_worker_log=open( ''.join( ( os.path.splitext(f_worker_sql.name)[0], '.log') ), 'w') +# f_worker_err=open( ''.join( ( os.path.splitext(f_worker_log.name)[0], '.err') ), 'w') +# +# ############################################################################ +# ### L A U N C H W O R K E R U S I N G I S Q L, A S Y N C. ### +# ############################################################################ +# +# p_worker = Popen( [ context['isql_path'], '-pag', '999999', '-q', '-i', f_worker_sql.name ],stdout=f_worker_log, stderr=f_worker_err) +# time.sleep(1) +# +# ######################### +# ### L O C K E R - 2 ### +# ######################### +# con_lock_2.execute_immediate( 'insert into %(target_obj)s(id) values(-11)' % locals()) +# con_lock_2.commit() +# con_lock_2.execute_immediate( 'delete from %(target_obj)s where id = -11' % locals()) +# +# ######################### +# ### L O C K E R - 1 ### +# ######################### +# con_lock_1.commit() +# con_lock_1.execute_immediate( 'insert into %(target_obj)s(id) values(-12)' % locals()) +# con_lock_1.commit() +# con_lock_1.execute_immediate( 'delete from %(target_obj)s where id = -12' % locals()) +# +# +# ######################### +# ### L O C K E R - 2 ### +# ######################### +# con_lock_2.commit() +# con_lock_2.execute_immediate( 'insert into %(target_obj)s(id) values(-13)' % locals()) +# con_lock_2.commit() +# con_lock_2.execute_immediate( 'delete from %(target_obj)s where id = -13' % locals()) +# +# ######################### +# ### L O C K E R - 1 ### +# ######################### +# con_lock_1.commit() +# con_lock_1.execute_immediate( 'insert into %(target_obj)s(id) values(-14)' % locals()) +# con_lock_1.commit() +# con_lock_1.execute_immediate( 'delete from %(target_obj)s where id = -14' % locals()) +# +# ######################### +# ### L O C K E R - 2 ### +# ######################### +# con_lock_1.commit() +# +# ######################### +# ### L O C K E R - 1 ### +# ######################### +# con_lock_2.commit() # WORKER will complete his job after this +# +# +# # Here we wait for ISQL complete its mission: +# p_worker.wait() +# +# flush_and_close(f_worker_log) +# flush_and_close(f_worker_err) +# +# # Close lockers: +# ################ +# for c in (con_lock_1, con_lock_2): +# c.close() +# +# # CHECK RESULTS +# ############### +# with open(f_worker_log.name,'r') as f: +# for 
line in f: +# if line.strip(): +# print('checked_mode: %(checked_mode)s, STDLOG: %(line)s' % locals()) +# +# for f in (f_init_err, f_worker_err): +# with open(f.name,'r') as g: +# for line in g: +# if line.strip(): +# print( 'checked_mode: ', checked_mode, ' UNEXPECTED STDERR IN ' + g.name + ':', line) +# +# # then engine <...> creates new statement-level snapshot and restart execution...") -# -# ::: NB ::: -# This test uses script %FBT_REPO% -# iles -# ead-consist-sttm-restart-DDL.sql which contains common DDL for all other such tests. -# Particularly, it contains two TRIGGERS (TLOG_WANT and TLOG_DONE) which are used for logging of planned actions and actual -# results against table TEST. These triggers use AUTONOMOUS transactions in order to have ability to see results in any -# outcome of test. -# -# ############### -# Following scenario if executed here (see also: "doc\\README.read_consistency.md"; hereafer is marked as "DOC"): -# -# * two rows are inserted into the table TEST, with IDs: 1,2 -# * session 'locker-1' ("BLOCKER" in Tom Kyte's article ): -# update test set id=id where id = 2; -# -# * session 'worker' ("LONG" in TK article) has mission: -# update test set id = -id order by id; // using TIL = read committed read consistency -# -# // Execution will have PLAN ORDER . -# // Worker starts with updating rows with ID = 1...2 but can not change row with ID = 2 because of locker-1. -# // Because of detecting update conflist, worker changes here its TIL to RC NO RECORD_VERSION. -# -# * session 'locker-2' ("FIRSTLAST" in TK article): -# (1) insert into test(id) values( 110); -# (2) insert into test(id) values(-11); -# (3) commit; -# (4) update test set id=-d where id = 110; -# (5) update test set id=-d where id = -11; -# -# // session-'worker' remains waiting at this point because row with ID = 2 is still occupied by by locker-1. -# // Records with (new) IDs = -11 and 110 will be seen further because worker's TIL was changed to RC NO RECORD_VERSION. -# -# * session 'locker-1': -# (1) commit; -# (2) insert into test(id) values(120); -# (3) insert into test(id) values(-12); -# (4) commit; -# (5) update test set id=-d where id = 120; -# (6) update test set id=-d where id = -12; -# -# // This: '(1) commit' - releases record with id = 2. -# // Because of TIL = RC NRV session-'worker' must see all committed records regardless on its own snapshot. -# // Thus worker sees records with id = -11 and 110 (which is locked now by locker-2) and puts write-lock on them. -# // [DOC]: "b) engine put write lock on conflicted record" -# // BECAUSE OF FACT THAT AT LEAST ONE ROW *WAS FOUND* - STATEMENT-LEVEL RESTART *NOT* YET OCCURS HERE. -# // [DOC]: "d) when there is *no more* records to fetch, engine start to undo all actions performed since -# // top-level statement execution starts and preserve already taken write locks -# // e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*, -# // creates new statement-level snapshot and restart execution of top-level statement." -# -# * session 'locker-2': -# (1) commit; -# (2) insert into test(id) values(130); -# (3) insert into test(id) values(-13); -# (4) commit; -# (5) update test set id=-d where id = 130; -# (6) update test set id=-d where id = -13; -# -# // This: '(1) commit' - releases records with IDs = -11 and 110. -# // Because of TIL = RC NRV session-'worker' must see all committed records regardless on its own snapshot. 
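For reference, the legacy "CHECK RESULTS" loop preserved in the commented block above maps naturally onto the Action API once these scenarios are ported; a sketch, assuming the act.stdout / clean_stdout comparison pattern used by other already-ported python_act tests:

    def check_results(act, capsys, checked_mode: str, worker_log: str, expected_stdout: str):
        # Print the filtered worker output exactly like the legacy loop did...
        for line in worker_log.splitlines():
            if line.strip():
                print(f'checked_mode: {checked_mode}, STDLOG: {line}')
        # ...then let the Action compare it; the substitutions declared in python_act()
        # normalize '=' separators and runs of whitespace on both sides.
        act.expected_stdout = expected_stdout
        act.stdout = capsys.readouterr().out  # capsys is the standard pytest capture fixture
        assert act.clean_stdout == act.clean_expected_stdout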
-# // Thus worker sees records with id = -12 and 120 (which is locked now by locker-1) and puts write-lock on them. -# // BECAUSE OF FACT THAT AT LEAST ONE ROW *WAS FOUND* - STATEMENT-LEVEL RESTART *NOT* YET OCCURS HERE. -# -# * session 'locker-1': -# (1) commit; -# (2) insert into test(id) values(140); -# (3) insert into test(id) values(-14); -# (4) commit; -# (5) update test set id=-d where id = 140; -# (6) update test set id=-d where id = -14; -# -# // This: '(1) commit' - releases records with IDs = -12 and 120. -# // Because of TIL = RC NRV session-'worker' must see all committed records regardless on its own snapshot. -# // Thus worker sees records with id = -13 and 130 (which is locked now by locker-2) and puts write-lock on them. -# // BECAUSE OF FACT THAT AT LEAST ONE ROW *WAS FOUND* - STATEMENT-LEVEL RESTART *NOT* YET OCCURS HERE. -# -# * session 'locker-2': -# (1) commit; -# -# // This releases records with id = -13 and 130. -# // Worker still waits for records with id = -14 and 140 which are occupied by locker-1. -# -# * session 'locker-1': -# (1) commit; -# -# // This releases records with id = -14 and 140. -# // At this point there are no more records to be locked (by worker) that meet cursor condition: worker did put -# // write locks on all rows that meet its cursor conditions. -# // BECAUSE OF FACT THAT NO MORE RECORDS FOUND TO BE LOCKED, WORKER DOES UNDO BUT KEEP LOCKS AND THEN -# // MAKES FIRST STATEMENT-LEVEL RESTART. This restart is also the last in this test. -# -# -# Expected result: -# * session-'worker' must update all rows. -# -# * Two unique values must be in the column TLOG_DONE.SNAP_NO for session-'worker' when it performed UPDATE statement: first of them -# was created by initial statement start and second reflects SINGLE restart (this column has values which are evaluated using -# rdb$get_context('SYSTEM', 'SNAPSHOT_NUMBER') -- see trigger TEST_AIUD). -# It is enough to count these values using COUNT(*) or enumarate them by DENSE_RANK() function. -# -# NOTE: concrete values of fields TRN, GLOBAL_CN and SNAP_NO in the TLOG_DONE can differ from one to another run! -# This is because of concurrent nature of connections that work against database. We must not assume that these values will be constant. -# -# ################ -# -# Checked on 4.0.0.2195 -# 26.09.2020: added for-loop in order to check different target objects: TABLE ('test') and VIEW ('v_test'), see 'checked_mode'. -# -# tracker_id: -# min_versions: ['4.0'] -# versions: 4.0 -# qmid: + +""" +ID: transactions.read-consist-sttm-restart-on-update-03 +TITLE: READ CONSISTENCY. Check creation of new statement-level snapshot and restarting changed caused by UPDATE. Test-03. +DESCRIPTION: + Initial article for reading: + https://asktom.oracle.com/pls/asktom/f?p=100:11:::::P11_QUESTION_ID:11504247549852 + Note on terms which are used there: "BLOCKER", "LONG" and "FIRSTLAST" - their names are slightly changed here + to: LOCKER-1, WORKER and LOCKER-2 respectively. + + ********************************************** + + This test verifies that statement-level snapshot and restart will be performed when "main" session ("worker") + performs UPDATE statement and is involved in update conflicts. + ("When update conflict is detected <...> then engine <...> creates new statement-level snapshot and restart execution...") + + ::: NB ::: + This test uses script %FBT_REPO%/files/read-consist-sttm-restart-DDL.sql which contains common DDL for all other such tests. 
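Every test in this family replays the same DDL, so the port needs a replacement for the legacy context['files_location'] lookup. A small sketch, assuming the plugin exposes the repository files directory as act.files_dir and a synchronous act.isql() runner (attribute names follow other ported tests and should be treated as assumptions):

    from pathlib import Path

    def init_schema(act):
        ddl_script = Path(act.files_dir) / 'read-consist-sttm-restart-DDL.sql'
        act.isql(switches=['-q'], input=ddl_script.read_text())  # run the shared DDL against the test database
        act.reset()  # drop the captured output before the real scenario starts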
+ Particularly, it contains two TRIGGERS (TLOG_WANT and TLOG_DONE) which are used for logging of planned actions and actual + results against table TEST. These triggers use AUTONOMOUS transactions in order to have ability to see results in any + outcome of test. + + ############### + Following scenario if executed here (see also: "doc/README.read_consistency.md"; hereafer is marked as "DOC"): + + * two rows are inserted into the table TEST, with IDs: 1,2 + * session 'locker-1' ("BLOCKER" in Tom Kyte's article ): + update test set id=id where id = 2; + + * session 'worker' ("LONG" in TK article) has mission: + update test set id = -id order by id; // using TIL = read committed read consistency + + // Execution will have PLAN ORDER . + // Worker starts with updating rows with ID = 1...2 but can not change row with ID = 2 because of locker-1. + // Because of detecting update conflist, worker changes here its TIL to RC NO RECORD_VERSION. + + * session 'locker-2' ("FIRSTLAST" in TK article): + (1) insert into test(id) values( 110); + (2) insert into test(id) values(-11); + (3) commit; + (4) update test set id=-d where id = 110; + (5) update test set id=-d where id = -11; + + // session-'worker' remains waiting at this point because row with ID = 2 is still occupied by by locker-1. + // Records with (new) IDs = -11 and 110 will be seen further because worker's TIL was changed to RC NO RECORD_VERSION. + + * session 'locker-1': + (1) commit; + (2) insert into test(id) values(120); + (3) insert into test(id) values(-12); + (4) commit; + (5) update test set id=-d where id = 120; + (6) update test set id=-d where id = -12; + + // This: '(1) commit' - releases record with id = 2. + // Because of TIL = RC NRV session-'worker' must see all committed records regardless on its own snapshot. + // Thus worker sees records with id = -11 and 110 (which is locked now by locker-2) and puts write-lock on them. + // [DOC]: "b) engine put write lock on conflicted record" + // BECAUSE OF FACT THAT AT LEAST ONE ROW *WAS FOUND* - STATEMENT-LEVEL RESTART *NOT* YET OCCURS HERE. + // [DOC]: "d) when there is *no more* records to fetch, engine start to undo all actions performed since + // top-level statement execution starts and preserve already taken write locks + // e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*, + // creates new statement-level snapshot and restart execution of top-level statement." + + * session 'locker-2': + (1) commit; + (2) insert into test(id) values(130); + (3) insert into test(id) values(-13); + (4) commit; + (5) update test set id=-d where id = 130; + (6) update test set id=-d where id = -13; + + // This: '(1) commit' - releases records with IDs = -11 and 110. + // Because of TIL = RC NRV session-'worker' must see all committed records regardless on its own snapshot. + // Thus worker sees records with id = -12 and 120 (which is locked now by locker-1) and puts write-lock on them. + // BECAUSE OF FACT THAT AT LEAST ONE ROW *WAS FOUND* - STATEMENT-LEVEL RESTART *NOT* YET OCCURS HERE. + + * session 'locker-1': + (1) commit; + (2) insert into test(id) values(140); + (3) insert into test(id) values(-14); + (4) commit; + (5) update test set id=-d where id = 140; + (6) update test set id=-d where id = -14; + + // This: '(1) commit' - releases records with IDs = -12 and 120. + // Because of TIL = RC NRV session-'worker' must see all committed records regardless on its own snapshot. 
+ // Thus worker sees records with id = -13 and 130 (which is locked now by locker-2) and puts write-lock on them. + // BECAUSE OF FACT THAT AT LEAST ONE ROW *WAS FOUND* - STATEMENT-LEVEL RESTART *NOT* YET OCCURS HERE. + + * session 'locker-2': + (1) commit; + + // This releases records with id = -13 and 130. + // Worker still waits for records with id = -14 and 140 which are occupied by locker-1. + + * session 'locker-1': + (1) commit; + + // This releases records with id = -14 and 140. + // At this point there are no more records to be locked (by worker) that meet cursor condition: worker did put + // write locks on all rows that meet its cursor conditions. + // BECAUSE OF FACT THAT NO MORE RECORDS FOUND TO BE LOCKED, WORKER DOES UNDO BUT KEEP LOCKS AND THEN + // MAKES FIRST STATEMENT-LEVEL RESTART. This restart is also the last in this test. + + + Expected result: + * session-'worker' must update all rows. + + * Two unique values must be in the column TLOG_DONE.SNAP_NO for session-'worker' when it performed UPDATE statement: first of them + was created by initial statement start and second reflects SINGLE restart (this column has values which are evaluated using + rdb$get_context('SYSTEM', 'SNAPSHOT_NUMBER') -- see trigger TEST_AIUD). + It is enough to count these values using COUNT(*) or enumarate them by DENSE_RANK() function. + + NOTE: concrete values of fields TRN, GLOBAL_CN and SNAP_NO in the TLOG_DONE can differ from one to another run! + This is because of concurrent nature of connections that work against database. We must not assume that these values will be constant. + + ################ + + Checked on 4.0.0.2195 + 26.09.2020: added for-loop in order to check different target objects: TABLE ('test') and VIEW ('v_test'), see 'checked_mode'. +FBTEST: functional.transactions.read_consist_sttm_restart_on_update_03 +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() -substitutions_1 = [('=', ''), ('[ \t]+', ' ')] +act = python_act('db', substitutions=[('=', ''), ('[ \t]+', ' ')]) -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -# test_script_1 -#--- -# -# import os -# import sys -# import subprocess -# from subprocess import Popen -# from fdb import services -# import time -# -# os.environ["ISC_USER"] = user_name -# os.environ["ISC_PASSWORD"] = user_password -# -# db_conn.close() -# -# #-------------------------------------------- -# -# def flush_and_close( file_handle ): -# # https://docs.python.org/2/library/os.html#os.fsync -# # If you're starting with a Python file object f, -# # first do f.flush(), and -# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. -# global os -# -# file_handle.flush() -# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: -# # otherwise: "OSError: [Errno 9] Bad file descriptor"! 
-# os.fsync(file_handle.fileno()) -# file_handle.close() -# -# #-------------------------------------------- -# -# def cleanup( f_names_list ): -# global os -# for f in f_names_list: -# if type(f) == file: -# del_name = f.name -# elif type(f) == str: -# del_name = f -# else: -# print('Unrecognized type of element:', f, ' - can not be treated as file.') -# del_name = None -# -# if del_name and os.path.isfile( del_name ): -# os.remove( del_name ) -# -# #-------------------------------------------- -# -# sql_init_ddl = os.path.join(context['files_location'],'read-consist-sttm-restart-DDL.sql') -# -# for checked_mode in('table', 'view'): -# -# target_obj = 'test' if checked_mode == 'table' else 'v_test' -# -# f_init_log=open( os.path.join(context['temp_directory'],'read-consist-sttm-restart-DDL.log'), 'w') -# f_init_err=open( ''.join( ( os.path.splitext(f_init_log.name)[0], '.err') ), 'w') -# -# subprocess.call( [context['isql_path'], dsn, '-q', '-i', sql_init_ddl], stdout=f_init_log, stderr=f_init_err ) -# flush_and_close(f_init_log) -# flush_and_close(f_init_err) -# -# # add rows with ID = 1, 2: -# sql_addi=''' -# set term ^; -# execute block as -# begin -# rdb$set_context('USER_SESSION', 'WHO', 'INIT_DATA'); -# end -# ^ -# set term ;^ -# insert into %(target_obj)s(id, x) select row_number()over(),row_number()over() from rdb$types rows 2; -# commit; -# ''' % locals() -# runProgram('isql', [ dsn, '-q' ], sql_addi) -# -# con_lock_1 = fdb.connect( dsn = dsn ) -# con_lock_2 = fdb.connect( dsn = dsn ) -# con_lock_1.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #1'); end" ) -# con_lock_2.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #2'); end" ) -# -# -# ######################### -# ### L O C K E R - 1 ### -# ######################### -# -# con_lock_1.execute_immediate( 'update %(target_obj)s set id=id where id = 2' % locals() ) -# -# sql_text=''' -# connect '%(dsn)s'; -# set list on; -# set autoddl off; -# set term ^; -# execute block returns (whoami varchar(30)) as -# begin -# whoami = 'WORKER'; -- , ATT#' || current_connection; -# rdb$set_context('USER_SESSION','WHO', whoami); -# -- suspend; -# end -# ^ -# set term ;^ -# commit; -# --set echo on; -# SET KEEP_TRAN_PARAMS ON; -# set transaction read committed read consistency; -# --select current_connection, current_transaction from rdb$database; -# set list off; -# set wng off; -# --set plan on; -# set count on; -# -# update %(target_obj)s set id = -id order by id; -- THIS MUST BE LOCKED -# -# -- check results: -# -- ############### -# -# select id from %(target_obj)s order by id; -- this will produce output only after all lockers do their commit/rollback -# -# select v.old_id, v.op, v.snap_no_rank from v_worker_log v where v.op = 'upd'; -# -# set width who 10; -# -- DO NOT check this! Values can differ here from one run to another! 
-# -- select id, trn, who, old_id, new_id, op, rec_vers, global_cn, snap_no from tlog_done order by id; -# -# rollback; -# -# ''' % dict(globals(), **locals()) -# -# f_worker_sql=open( os.path.join(context['temp_directory'],'tmp_sttm_restart_on_update_03.sql'), 'w') -# f_worker_sql.write(sql_text) -# flush_and_close(f_worker_sql) -# -# -# f_worker_log=open( ''.join( ( os.path.splitext(f_worker_sql.name)[0], '.log') ), 'w') -# f_worker_err=open( ''.join( ( os.path.splitext(f_worker_log.name)[0], '.err') ), 'w') -# -# ############################################################################ -# ### L A U N C H W O R K E R U S I N G I S Q L, A S Y N C. ### -# ############################################################################ -# -# p_worker = Popen( [ context['isql_path'], '-pag', '999999', '-q', '-i', f_worker_sql.name ],stdout=f_worker_log, stderr=f_worker_err) -# time.sleep(1) -# -# ######################### -# ### L O C K E R - 2 ### -# ######################### -# con_lock_2.execute_immediate( 'insert into %(target_obj)s(id) values(110)' % locals() ) -# con_lock_2.execute_immediate( 'insert into %(target_obj)s(id) values(-11)' % locals() ) -# con_lock_2.commit() -# con_lock_2.execute_immediate( 'update %(target_obj)s set id=id where id = 110' % locals() ) -# con_lock_2.execute_immediate( 'update %(target_obj)s set id=id where id = -11' % locals() ) -# -# ######################### -# ### L O C K E R - 1 ### -# ######################### -# con_lock_1.commit() -# con_lock_1.execute_immediate( 'insert into %(target_obj)s(id) values(120)' % locals() ) -# con_lock_1.execute_immediate( 'insert into %(target_obj)s(id) values(-12)' % locals() ) -# con_lock_1.commit() -# con_lock_1.execute_immediate( 'update %(target_obj)s set id=id where id = 120' % locals() ) -# con_lock_1.execute_immediate( 'update %(target_obj)s set id=id where id = -12' % locals() ) -# -# -# ######################### -# ### L O C K E R - 2 ### -# ######################### -# con_lock_2.commit() -# con_lock_2.execute_immediate( 'insert into %(target_obj)s(id) values(130)' % locals() ) -# con_lock_2.execute_immediate( 'insert into %(target_obj)s(id) values(-13)' % locals() ) -# con_lock_2.commit() -# con_lock_2.execute_immediate( 'update %(target_obj)s set id=id where id = 130' % locals() ) -# con_lock_2.execute_immediate( 'update %(target_obj)s set id=id where id = -13' % locals() ) -# -# ######################### -# ### L O C K E R - 1 ### -# ######################### -# con_lock_1.commit() -# con_lock_1.execute_immediate( 'insert into %(target_obj)s(id) values(140)' % locals() ) -# con_lock_1.execute_immediate( 'insert into %(target_obj)s(id) values(-14)' % locals() ) -# con_lock_1.commit() -# con_lock_1.execute_immediate( 'update %(target_obj)s set id=id where id = 140' % locals() ) -# con_lock_1.execute_immediate( 'update %(target_obj)s set id=id where id = -14' % locals() ) -# -# ######################### -# ### L O C K E R - 2 ### -# ######################### -# con_lock_1.commit() -# -# ######################### -# ### L O C K E R - 1 ### -# ######################### -# con_lock_2.commit() # WORKER will complete his job after this -# -# -# # Here we wait for ISQL complete its mission: -# p_worker.wait() -# -# flush_and_close(f_worker_log) -# flush_and_close(f_worker_err) -# -# # Close lockers: -# ################ -# for c in (con_lock_1, con_lock_2): -# c.close() -# -# # CHECK RESULTS -# ############### -# with open(f_worker_log.name,'r') as f: -# for line in f: -# if line.strip(): -# print('checked_mode: 
%(checked_mode)s, STDLOG: %(line)s' % locals()) -# -# for f in (f_init_err, f_worker_err): -# with open(f.name,'r') as g: -# for line in g: -# if line.strip(): -# print( 'checked_mode: ', checked_mode, ' UNEXPECTED STDERR IN ' + g.name + ':', line) -# -# #=4.0') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") +# Original python code for this test: +# ----------------------------------- +# +# import os +# import sys +# import subprocess +# from subprocess import Popen +# from fdb import services +# import time +# +# os.environ["ISC_USER"] = user_name +# os.environ["ISC_PASSWORD"] = user_password +# +# db_conn.close() +# +# #-------------------------------------------- +# +# def flush_and_close( file_handle ): +# # https://docs.python.org/2/library/os.html#os.fsync +# # If you're starting with a Python file object f, +# # first do f.flush(), and +# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. +# global os +# +# file_handle.flush() +# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: +# # otherwise: "OSError: [Errno 9] Bad file descriptor"! +# os.fsync(file_handle.fileno()) +# file_handle.close() +# +# #-------------------------------------------- +# +# def cleanup( f_names_list ): +# global os +# for f in f_names_list: +# if type(f) == file: +# del_name = f.name +# elif type(f) == str: +# del_name = f +# else: +# print('Unrecognized type of element:', f, ' - can not be treated as file.') +# del_name = None +# +# if del_name and os.path.isfile( del_name ): +# os.remove( del_name ) +# +# #-------------------------------------------- +# +# sql_init_ddl = os.path.join(context['files_location'],'read-consist-sttm-restart-DDL.sql') +# +# for checked_mode in('table', 'view'): +# +# target_obj = 'test' if checked_mode == 'table' else 'v_test' +# +# f_init_log=open( os.path.join(context['temp_directory'],'read-consist-sttm-restart-DDL.log'), 'w') +# f_init_err=open( ''.join( ( os.path.splitext(f_init_log.name)[0], '.err') ), 'w') +# +# subprocess.call( [context['isql_path'], dsn, '-q', '-i', sql_init_ddl], stdout=f_init_log, stderr=f_init_err ) +# flush_and_close(f_init_log) +# flush_and_close(f_init_err) +# +# # add rows with ID = 1, 2: +# sql_addi=''' +# set term ^; +# execute block as +# begin +# rdb$set_context('USER_SESSION', 'WHO', 'INIT_DATA'); +# end +# ^ +# set term ;^ +# insert into %(target_obj)s(id, x) select row_number()over(),row_number()over() from rdb$types rows 2; +# commit; +# ''' % locals() +# runProgram('isql', [ dsn, '-q' ], sql_addi) +# +# con_lock_1 = fdb.connect( dsn = dsn ) +# con_lock_2 = fdb.connect( dsn = dsn ) +# con_lock_1.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #1'); end" ) +# con_lock_2.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #2'); end" ) +# +# +# ######################### +# ### L O C K E R - 1 ### +# ######################### +# +# con_lock_1.execute_immediate( 'update %(target_obj)s set id=id where id = 2' % locals() ) +# +# sql_text=''' +# connect '%(dsn)s'; +# set list on; +# set autoddl off; +# set term ^; +# execute block returns (whoami varchar(30)) as +# begin +# whoami = 'WORKER'; -- , ATT#' || current_connection; +# rdb$set_context('USER_SESSION','WHO', whoami); +# -- suspend; +# end +# ^ +# set term ;^ +# commit; +# --set echo on; +# SET KEEP_TRAN_PARAMS ON; +# set transaction 
read committed read consistency; +# --select current_connection, current_transaction from rdb$database; +# set list off; +# set wng off; +# --set plan on; +# set count on; +# +# update %(target_obj)s set id = -id order by id; -- THIS MUST BE LOCKED +# +# -- check results: +# -- ############### +# +# select id from %(target_obj)s order by id; -- this will produce output only after all lockers do their commit/rollback +# +# select v.old_id, v.op, v.snap_no_rank from v_worker_log v where v.op = 'upd'; +# +# set width who 10; +# -- DO NOT check this! Values can differ here from one run to another! +# -- select id, trn, who, old_id, new_id, op, rec_vers, global_cn, snap_no from tlog_done order by id; +# +# rollback; +# +# ''' % dict(globals(), **locals()) +# +# f_worker_sql=open( os.path.join(context['temp_directory'],'tmp_sttm_restart_on_update_03.sql'), 'w') +# f_worker_sql.write(sql_text) +# flush_and_close(f_worker_sql) +# +# +# f_worker_log=open( ''.join( ( os.path.splitext(f_worker_sql.name)[0], '.log') ), 'w') +# f_worker_err=open( ''.join( ( os.path.splitext(f_worker_log.name)[0], '.err') ), 'w') +# +# ############################################################################ +# ### L A U N C H W O R K E R U S I N G I S Q L, A S Y N C. ### +# ############################################################################ +# +# p_worker = Popen( [ context['isql_path'], '-pag', '999999', '-q', '-i', f_worker_sql.name ],stdout=f_worker_log, stderr=f_worker_err) +# time.sleep(1) +# +# ######################### +# ### L O C K E R - 2 ### +# ######################### +# con_lock_2.execute_immediate( 'insert into %(target_obj)s(id) values(110)' % locals() ) +# con_lock_2.execute_immediate( 'insert into %(target_obj)s(id) values(-11)' % locals() ) +# con_lock_2.commit() +# con_lock_2.execute_immediate( 'update %(target_obj)s set id=id where id = 110' % locals() ) +# con_lock_2.execute_immediate( 'update %(target_obj)s set id=id where id = -11' % locals() ) +# +# ######################### +# ### L O C K E R - 1 ### +# ######################### +# con_lock_1.commit() +# con_lock_1.execute_immediate( 'insert into %(target_obj)s(id) values(120)' % locals() ) +# con_lock_1.execute_immediate( 'insert into %(target_obj)s(id) values(-12)' % locals() ) +# con_lock_1.commit() +# con_lock_1.execute_immediate( 'update %(target_obj)s set id=id where id = 120' % locals() ) +# con_lock_1.execute_immediate( 'update %(target_obj)s set id=id where id = -12' % locals() ) +# +# +# ######################### +# ### L O C K E R - 2 ### +# ######################### +# con_lock_2.commit() +# con_lock_2.execute_immediate( 'insert into %(target_obj)s(id) values(130)' % locals() ) +# con_lock_2.execute_immediate( 'insert into %(target_obj)s(id) values(-13)' % locals() ) +# con_lock_2.commit() +# con_lock_2.execute_immediate( 'update %(target_obj)s set id=id where id = 130' % locals() ) +# con_lock_2.execute_immediate( 'update %(target_obj)s set id=id where id = -13' % locals() ) +# +# ######################### +# ### L O C K E R - 1 ### +# ######################### +# con_lock_1.commit() +# con_lock_1.execute_immediate( 'insert into %(target_obj)s(id) values(140)' % locals() ) +# con_lock_1.execute_immediate( 'insert into %(target_obj)s(id) values(-14)' % locals() ) +# con_lock_1.commit() +# con_lock_1.execute_immediate( 'update %(target_obj)s set id=id where id = 140' % locals() ) +# con_lock_1.execute_immediate( 'update %(target_obj)s set id=id where id = -14' % locals() ) +# +# ######################### +# ### L O C K E 
R - 2 ### +# ######################### +# con_lock_1.commit() +# +# ######################### +# ### L O C K E R - 1 ### +# ######################### +# con_lock_2.commit() # WORKER will complete his job after this +# +# +# # Here we wait for ISQL complete its mission: +# p_worker.wait() +# +# flush_and_close(f_worker_log) +# flush_and_close(f_worker_err) +# +# # Close lockers: +# ################ +# for c in (con_lock_1, con_lock_2): +# c.close() +# +# # CHECK RESULTS +# ############### +# with open(f_worker_log.name,'r') as f: +# for line in f: +# if line.strip(): +# print('checked_mode: %(checked_mode)s, STDLOG: %(line)s' % locals()) +# +# for f in (f_init_err, f_worker_err): +# with open(f.name,'r') as g: +# for line in g: +# if line.strip(): +# print( 'checked_mode: ', checked_mode, ' UNEXPECTED STDERR IN ' + g.name + ':', line) +# +# # then engine <...> creates new statement-level snapshot and restart execution...") -# -# ::: NB ::: -# This test uses script %FBT_REPO% -# iles -# ead-consist-sttm-restart-DDL.sql which contains common DDL for all other such tests. -# Particularly, it contains two TRIGGERS (TLOG_WANT and TLOG_DONE) which are used for logging of planned actions and actual -# results against table TEST. These triggers use AUTONOMOUS transactions in order to have ability to see results in any -# outcome of test. -# -# ############### -# Following scenario if executed here (see also: "doc\\README.read_consistency.md"; hereafer is marked as "DOC"): -# * five rows are inserted into the table TEST, with ID = 1...5 and x = 1...5. -# -# * session 'locker-1' ("BLOCKER" in Tom Kyte's article ): -# update test set id = id where id = 1 -# -# * session 'worker' ("LONG" in TK article) has mission: -# update test where set id = -id where id <= 2 order by id DESC rows 4; // using TIL = read committed read consistency -# -# // Execution will have PLAN ORDER . -# // It will update rows starting with ID = 2 but can not change row with ID = 1 because of locker-1. -# // Update conflict appears here and, because of this, worker temporary changes its TIL to RC no record_version (RC NRV). -# // [DOC]: "a) transaction isolation mode temporarily switched to the READ COMMITTED *NO RECORD VERSION MODE*" -# // This (new) TIL allows worker further to see all committed versions, regardless of its own snapshot. -# -# * session 'locker-2' ("FIRSTLAST" in TK article): replaces ID = 5 with new value = -5, then commits -# and locks this record again: -# (1) commit; -# (2) update test set id = -5 where abs(id)=5; -# (3) commit; -# (4) update test set id = id where abs(id)=5; -# // session-'worker' remains waiting at this point because row with ID = 1 is still occupied by by locker-1. -# // but worker must further see record with (new) id = -5 because its TIL was changed to RC NO RECORD_VERSION. -# -# -# * session 'locker-1': replaces ID = 4 with new value = -4, then commits and locks this record again: -# (1) commit; -# (2) update test set id = -4 where abs(id)=4; -# (3) commit; -# (4) update test set id = id where abs(id)=4; -# -# // This: '(1) commit' - will release record with ID = 1. Worker sees this record and put write-lock on it. -# // [DOC]: "b) engine put write lock on conflicted record" -# // But it is only 2nd row of total 4 that worker must delete. -# // Because of TIL = RC NRV session-'worker' must see all committed records regardless on its own snapshot. -# // Worker resumes search for any rows with ID < 2, and it does this with taking in account required order -# // of its DML (i.e. 
'ORDER BY ID DESC ...') -# // [DOC]: "c) engine continue to evaluate remaining records of update\\delete cursor and put write locks on it too" -# // Worker starts to search records which must be involved in its DML and *found* first sucn row with ID = -5. -# // NB. This row currently can NOT be deleted by worker because locker-2 has uncommitted update of it. -# // BECAUSE OF FACT THAT AT LEAST ONE ROW *WAS FOUND* - STATEMENT-LEVEL RESTART *NOT* YET OCCURS HERE. -# // :::!! NB, AGAIN !! ::: restart NOT occurs here because at least one records found, see: -# // [DOC]: "d) when there is *no more* records to fetch, engine start to undo all actions performed since -# // top-level statement execution starts and preserve already taken write locks -# // e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*, -# // creates new statement-level snapshot and restart execution of top-level statement." -# -# * session 'locker-2': replaces ID = 3 with new value = -3, then commits and locks this record again: -# (1) commit; -# (2) update test set id = -3 where abs(id)=3; -# (3) commit; -# (4) update test set id = id where abs(id)=3; -# -# // This: '(1) commit' - will release record with ID = -5. Worker sees this record and put write-lock on it. -# // But this is only 3rd row of total 4 that worker must update. -# // Because of worker TIL = RC NRV, he must see all committed records regardless on its own snapshot. -# // Worker resumes search for any rows with ID < -5, and it does this with taking in account required order -# // of its DML (i.e. 'ORDER BY ID DESC ...') -# // [DOC]: "c) engine continue to evaluate remaining records of update\\delete cursor and put write locks on it too" -# // There are no such rows in the table. -# // BECAUSE OF FACT THAT NO RECORDS FOUND, WORKER DOES UNDO BUT KEEP LOCKS AND THEN MAKES FIRST STATEMENT-LEVEL RESTART. -# // [DOC]: "d) when there is no more records to fetch, engine start to undo all actions ... and preserve already taken write locks -# // e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*, -# // creates new statement-level snapshot and restart execution of top-level statement." -# -# * session 'locker-1': -# commit; -# // This will release record with ID=-4. Worker sees this record and put write-lock on it. -# // At this point worker has proceeded all required number of rows for DML: 2, 1, -4 and -5. -# // BECAUSE OF FACT THAT ALL ROWS WERE PROCEEDED, WORKER DOES UNDO BUT KEEP LOCKS AND THEN MAKES SECOND STATEMENT-LEVEL RESTART. -# // [DOC]: "d) when there is no more records to fetch, engine start to undo all actions ... and preserve already taken write locks -# // e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*, -# // creates new statement-level snapshot and restart execution of top-level statement." -# // After this restart worker will waiting for row with ID = -3 (it sees this because of TIL = RC NRV). -# -# * session 'locker-2': -# commit. -# // This releases row with ID=-3. Worker sees this record and put write-lock on it. -# // Records with ID = 2, 1, -4 and -5 already have been locked, but worker must update only FOUR rows (see its DML statement). -# // Thus only rows with ID = 2, 1, -3 and -4 will be updated. Record with ID = -5 must *remain* in the table. -# // At this point worker has proceeded all required rows that meet condition for DML: 2, 1, -3 and -4. 
-# // BECAUSE OF FACT THAT ALL ROWS WERE PROCEEDED, WORKER DOES UNDO BUT KEEP LOCKS AND THEN MAKES THIRD STATEMENT-LEVEL RESTART.
-# // [DOC]: "d) when there is no more records to fetch, engine start to undo all actions ... and preserve already taken write locks
-# // e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*,
-# // creates new statement-level snapshot and restart execution of top-level statement."
-#
-# Expected result:
-# * session-'worker' must *successfully* complete deletion of 4 rows (but only two of them did exist at the starting point).
-# Record with ID = -5 must remain in the table.
-#
-# * four unique values must be in the column TLOG_DONE.SNAP_NO for session-'worker' when it performed UPDATE statement: first of them
-# was created by initial statement start and all others reflect three restarts (this column has values which are evaluated using
-# rdb$get_context('SYSTEM', 'SNAPSHOT_NUMBER') -- see trigger TEST_AIUD).
-# It is enough to count these values using COUNT(*) or enumarate them by DENSE_RANK() function.
-#
-# NOTE: concrete values of fields TRN, GLOBAL_CN and SNAP_NO in the TLOG_DONE can differ from one to another run!
-# This is because of concurrent nature of connections that work against database. We must not assume that these values will be constant.
-#
-# ################
-#
-# Checked on 4.0.0.2195 SS/CS
-# 26.09.2020: added for-loop in order to check different target objects: TABLE ('test') and VIEW ('v_test'), see 'checked_mode'.
-#
-# tracker_id:
-# min_versions: ['4.0']
-# versions: 4.0
-# qmid:
+
+"""
+ID: transactions.read-consist-sttm-restart-on-update-04
+TITLE: READ CONSISTENCY. Check creation of new statement-level snapshot and restart of changes caused by UPDATE. Test-04.
+DESCRIPTION:
+ Initial article for reading:
+ https://asktom.oracle.com/pls/asktom/f?p=100:11:::::P11_QUESTION_ID:11504247549852
+ Note on terms which are used there: "BLOCKER", "LONG" and "FIRSTLAST" - their names are slightly changed here
+ to: LOCKER-1, WORKER and LOCKER-2 respectively.
+ See also: doc/README.read_consistency.md
+
+ **********************************************
+
+ This test verifies that statement-level snapshot and restart will be performed when "main" session ("worker")
+ performs UPDATE statement and is involved in update conflicts.
+ ("When update conflict is detected <...> then engine <...> creates new statement-level snapshot and restart execution...")
+
+ ::: NB :::
+ This test uses script %FBT_REPO%/files/read-consist-sttm-restart-DDL.sql which contains common DDL for all other such tests.
+ Particularly, it contains two TRIGGERS (TLOG_WANT and TLOG_DONE) which are used for logging of planned actions and actual
+ results against table TEST. These triggers use AUTONOMOUS transactions in order to have the ability to see results in any
+ outcome of the test.
+
+ ###############
+ The following scenario is executed here (see also: "doc/README.read_consistency.md"; hereafter marked as "DOC"):
+ * five rows are inserted into the table TEST, with ID = 1...5 and x = 1...5.
+
+ * session 'locker-1' ("BLOCKER" in Tom Kyte's article ):
+ update test set id = id where id = 1
+
+ * session 'worker' ("LONG" in TK article) has mission:
+ update test set id = -id where id <= 2 order by id DESC rows 4; // using TIL = read committed read consistency
+
+ // Execution will use PLAN ORDER.
+ // It will update rows starting with ID = 2 but cannot change row with ID = 1 because of locker-1.
+ // Update conflict appears here and, because of this, worker temporarily changes its TIL to RC no record_version (RC NRV).
+ // [DOC]: "a) transaction isolation mode temporarily switched to the READ COMMITTED *NO RECORD VERSION MODE*"
+ // This (new) TIL allows worker further to see all committed versions, regardless of its own snapshot.
+
+ * session 'locker-2' ("FIRSTLAST" in TK article): replaces ID = 5 with new value = -5, then commits
+ and locks this record again:
+ (1) commit;
+ (2) update test set id = -5 where abs(id)=5;
+ (3) commit;
+ (4) update test set id = id where abs(id)=5;
+ // session-'worker' remains waiting at this point because row with ID = 1 is still occupied by locker-1.
+ // But worker must further see the record with (new) id = -5 because its TIL was changed to RC NO RECORD_VERSION.
+
+
+ * session 'locker-1': replaces ID = 4 with new value = -4, then commits and locks this record again:
+ (1) commit;
+ (2) update test set id = -4 where abs(id)=4;
+ (3) commit;
+ (4) update test set id = id where abs(id)=4;
+
+ // This: '(1) commit' - will release record with ID = 1. Worker sees this record and puts a write-lock on it.
+ // [DOC]: "b) engine put write lock on conflicted record"
+ // But it is only the 2nd row of the total 4 that worker must update.
+ // Because of TIL = RC NRV session-'worker' must see all committed records regardless of its own snapshot.
+ // Worker resumes search for any rows with ID < 2, and it does this taking into account the required order
+ // of its DML (i.e. 'ORDER BY ID DESC ...')
+ // [DOC]: "c) engine continue to evaluate remaining records of update/delete cursor and put write locks on it too"
+ // Worker starts to search records which must be involved in its DML and *finds* the first such row with ID = -5.
+ // NB. This row currently can NOT be updated by worker because locker-2 has an uncommitted update of it.
+ // BECAUSE AT LEAST ONE ROW *WAS FOUND*, STATEMENT-LEVEL RESTART DOES *NOT* YET OCCUR HERE.
+ // :::!! NB, AGAIN !! ::: restart does NOT occur here because at least one record was found, see:
+ // [DOC]: "d) when there is *no more* records to fetch, engine start to undo all actions performed since
+ // top-level statement execution starts and preserve already taken write locks
+ // e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*,
+ // creates new statement-level snapshot and restart execution of top-level statement."
+
+ * session 'locker-2': replaces ID = 3 with new value = -3, then commits and locks this record again:
+ (1) commit;
+ (2) update test set id = -3 where abs(id)=3;
+ (3) commit;
+ (4) update test set id = id where abs(id)=3;
+
+ // This: '(1) commit' - will release record with ID = -5. Worker sees this record and puts a write-lock on it.
+ // But this is only the 3rd row of the total 4 that worker must update.
+ // Because of worker TIL = RC NRV, it must see all committed records regardless of its own snapshot.
+ // Worker resumes search for any rows with ID < -5, and it does this taking into account the required order
+ // of its DML (i.e. 'ORDER BY ID DESC ...')
+ // [DOC]: "c) engine continue to evaluate remaining records of update/delete cursor and put write locks on it too"
+ // There are no such rows in the table.
+ // BECAUSE NO RECORDS WERE FOUND, WORKER DOES UNDO BUT KEEPS LOCKS AND THEN MAKES THE FIRST STATEMENT-LEVEL RESTART.
+ // [DOC]: "d) when there is no more records to fetch, engine start to undo all actions ... and preserve already taken write locks
+ // e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*,
+ // creates new statement-level snapshot and restart execution of top-level statement."
+
+ * session 'locker-1':
+ commit;
+ // This will release record with ID=-4. Worker sees this record and puts a write-lock on it.
+ // At this point worker has processed the required number of rows for its DML: 2, 1, -4 and -5.
+ // BECAUSE ALL ROWS WERE PROCESSED, WORKER DOES UNDO BUT KEEPS LOCKS AND THEN MAKES THE SECOND STATEMENT-LEVEL RESTART.
+ // [DOC]: "d) when there is no more records to fetch, engine start to undo all actions ... and preserve already taken write locks
+ // e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*,
+ // creates new statement-level snapshot and restart execution of top-level statement."
+ // After this restart worker will be waiting for row with ID = -3 (it sees this because of TIL = RC NRV).
+
+ * session 'locker-2':
+ commit.
+ // This releases row with ID=-3. Worker sees this record and puts a write-lock on it.
+ // Records with ID = 2, 1, -4 and -5 have already been locked, but worker must update only FOUR rows (see its DML statement).
+ // Thus only rows with ID = 2, 1, -3 and -4 will be updated. Record with ID = -5 must *remain* in the table.
+ // At this point worker has processed all required rows that meet the condition for its DML: 2, 1, -3 and -4.
+ // BECAUSE ALL ROWS WERE PROCESSED, WORKER DOES UNDO BUT KEEPS LOCKS AND THEN MAKES THE THIRD STATEMENT-LEVEL RESTART.
+ // [DOC]: "d) when there is no more records to fetch, engine start to undo all actions ... and preserve already taken write locks
+ // e) then engine restores transaction isolation mode as READ COMMITTED *READ CONSISTENCY*,
+ // creates new statement-level snapshot and restart execution of top-level statement."
+
+ Expected result:
+ * session-'worker' must *successfully* complete the update of 4 rows (but only two of them did exist at the starting point).
+ Record with ID = -5 must remain in the table.
+
+ * four unique values must be in the column TLOG_DONE.SNAP_NO for session-'worker' when it performed the UPDATE statement: the first of them
+ was created by the initial statement start and all others reflect the three restarts (this column has values which are evaluated using
+ rdb$get_context('SYSTEM', 'SNAPSHOT_NUMBER') -- see trigger TEST_AIUD).
+ It is enough to count these values using COUNT(*) or enumerate them with the DENSE_RANK() function.
+
+ NOTE: concrete values of fields TRN, GLOBAL_CN and SNAP_NO in the TLOG_DONE can differ from one run to another!
+ This is because of the concurrent nature of connections that work against the database. We must not assume that these values will be constant.
+
+ ################
+
+ Checked on 4.0.0.2195 SS/CS
+ 26.09.2020: added for-loop in order to check different target objects: TABLE ('test') and VIEW ('v_test'), see 'checked_mode'.
+FBTEST: functional.transactions.read_consist_sttm_restart_on_update_04 +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None +db = db_factory() -substitutions_1 = [('=', ''), ('[ \t]+', ' ')] +act = python_act('db', substitutions=[('=', ''), ('[ \t]+', ' ')]) -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -# test_script_1 -#--- -# -# import os -# import sys -# import subprocess -# from subprocess import Popen -# import shutil -# from fdb import services -# import time -# -# os.environ["ISC_USER"] = user_name -# os.environ["ISC_PASSWORD"] = user_password -# -# # How long can we wait for session-worker completition, seconds -# # (ISQL often can not complete its job for several seconds!): -# MAX_TIME_FOR_WAITING_WORKER_FINISH = 60 -# -# ############################## -# # Temply, for debug obly: -# this_fdb=db_conn.database_name -# this_dbg=os.path.splitext(this_fdb)[0] + '.4debug.fdb' -# ############################## -# -# db_conn.close() -# -# #-------------------------------------------- -# -# def flush_and_close( file_handle ): -# # https://docs.python.org/2/library/os.html#os.fsync -# # If you're starting with a Python file object f, -# # first do f.flush(), and -# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. -# global os -# -# file_handle.flush() -# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: -# # otherwise: "OSError: [Errno 9] Bad file descriptor"! -# os.fsync(file_handle.fileno()) -# file_handle.close() -# -# #-------------------------------------------- -# -# def cleanup( f_names_list ): -# global os -# for f in f_names_list: -# if type(f) == file: -# del_name = f.name -# elif type(f) == str: -# del_name = f -# else: -# print('Unrecognized type of element:', f, ' - can not be treated as file.') -# del_name = None -# -# if del_name and os.path.isfile( del_name ): -# os.remove( del_name ) -# -# #-------------------------------------------- -# -# sql_init_ddl = os.path.join(context['files_location'],'read-consist-sttm-restart-DDL.sql') -# -# for checked_mode in('table', 'view'): -# -# target_obj = 'test' if checked_mode == 'table' else 'v_test' -# -# f_init_log=open( os.path.join(context['temp_directory'],'read-consist-sttm-restart-DDL.log'), 'w') -# f_init_err=open( ''.join( ( os.path.splitext(f_init_log.name)[0], '.err') ), 'w') -# -# subprocess.call( [context['isql_path'], dsn, '-q', '-i', sql_init_ddl], stdout=f_init_log, stderr=f_init_err ) -# flush_and_close(f_init_log) -# flush_and_close(f_init_err) -# -# sql_addi=''' -# set term ^; -# execute block as -# begin -# rdb$set_context('USER_SESSION', 'WHO', 'INIT_DATA'); -# end -# ^ -# set term ;^ -# insert into %(target_obj)s(id, x) -# select row_number()over(),row_number()over() -# from rdb$types rows 5; -# commit; -# ''' % locals() -# runProgram('isql', [ dsn, '-q' ], sql_addi) -# -# locker_tpb = fdb.TPB() -# locker_tpb.lock_timeout = MAX_TIME_FOR_WAITING_WORKER_FINISH -# locker_tpb.lock_resolution = fdb.isc_tpb_wait -# -# con_lock_1 = fdb.connect( dsn = dsn, isolation_level=locker_tpb ) -# con_lock_2 = fdb.connect( dsn = dsn, isolation_level=locker_tpb ) -# -# con_lock_1.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #1'); end" ) -# con_lock_2.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #2'); end" ) -# -# ######################### -# 
### L O C K E R - 1 ### -# ######################### -# -# con_lock_1.execute_immediate( 'update %(target_obj)s set id=id where id=1' % locals() ) -# -# sql_text=''' -# connect '%(dsn)s'; -# set list on; -# set autoddl off; -# set term ^; -# execute block returns (whoami varchar(30)) as -# begin -# whoami = 'WORKER'; -- , ATT#' || current_connection; -# rdb$set_context('USER_SESSION','WHO', whoami); -# -- suspend; -# end -# ^ -# set term ;^ -# commit; -# SET KEEP_TRAN_PARAMS ON; -# set transaction read committed read consistency; -# --select current_connection, current_transaction from rdb$database; -# set list off; -# set wng off; -# -# --set plan on; -# set count on; -# update %(target_obj)s set id = -id where id <= 2 order by id DESC rows 4; -- THIS MUST HANG BECAUSE OF LOCKERs -# -# -- check results: -# -- ############### -# -# select id from %(target_obj)s order by id; -- one record must remain, with ID = -5 -# -# select v.old_id, v.op, v.snap_no_rank -- snap_no_rank must have four unique values: 1,2,3 and 4. -# from v_worker_log v -# where v.op = 'upd'; -# -# --set width who 10; -# -- DO NOT check this! Values can differ here from one run to another! -# -- select id, trn, who, old_id, new_id, op, rec_vers, global_cn, snap_no from tlog_done order by id; -# rollback; -# -# ''' % dict(globals(), **locals()) -# -# f_worker_sql=open( os.path.join(context['temp_directory'],'tmp_sttm_restart_on_delete_04.sql'), 'w') -# f_worker_sql.write(sql_text) -# flush_and_close(f_worker_sql) -# -# -# f_worker_log=open( ''.join( ( os.path.splitext(f_worker_sql.name)[0], '.log') ), 'w') -# f_worker_err=open( ''.join( ( os.path.splitext(f_worker_log.name)[0], '.err') ), 'w') -# -# ############################################################################ -# ### L A U N C H W O R K E R U S I N G I S Q L, A S Y N C. ### -# ############################################################################ -# -# p_worker = Popen( [ context['isql_path'], '-pag', '9999999', '-q', '-i', f_worker_sql.name ],stdout=f_worker_log, stderr=f_worker_err) -# time.sleep(1) -# -# -# ######################### -# ### L O C K E R - 2 ### -# ######################### -# -# # Change ID so that it **will* be included in the set of rows that must be affected by session-worker: -# con_lock_2.execute_immediate( 'update %(target_obj)s set id = -5 where abs(id) = 5;' % locals() ) -# con_lock_2.commit() -# con_lock_2.execute_immediate( 'update %(target_obj)s set id = id where abs(id) = 5;' % locals() ) -# -# -# con_lock_1.commit() # releases record with ID=1 (allow it to be deleted by session-worker) -# -# # Change ID so that it **will* be included in the set of rows that must be affected by session-worker: -# con_lock_1.execute_immediate( 'update %(target_obj)s set id = -4 where abs(id) = 4;' % locals() ) -# con_lock_1.commit() -# con_lock_1.execute_immediate( 'update %(target_obj)s set id = id where abs(id) = 4;' % locals() ) -# -# -# con_lock_2.commit() # releases record with ID = -5, but session-worker is waiting for record with ID = -4 (that was changed by locker-1). -# con_lock_2.execute_immediate( 'update %(target_obj)s set id = -3 where abs(id) = 3;' % locals() ) -# con_lock_2.commit() -# con_lock_2.execute_immediate( 'update %(target_obj)s set id = id where abs(id) = 3;' % locals() ) -# -# con_lock_1.commit() # This releases row with ID=-4 but session-worker is waiting for ID = - 3 (changed by locker-2). -# con_lock_2.commit() # This releases row with ID=-3. No more locked rows so session-worker can finish its mission. 
-# -# # Here we wait for ISQL complete its mission: -# p_worker.wait() -# -# flush_and_close(f_worker_log) -# flush_and_close(f_worker_err) -# -# # Close lockers: -# ################ -# for c in (con_lock_1, con_lock_2): -# c.close() -# -# -# # CHECK RESULTS -# ############### -# with open(f_worker_log.name,'r') as f: -# for line in f: -# if line.strip(): -# print('checked_mode: %(checked_mode)s, STDLOG: %(line)s' % locals()) -# -# for f in (f_init_err, f_worker_err): -# with open(f.name,'r') as g: -# for line in g: -# if line.strip(): -# print( 'checked_mode: ', checked_mode, ' UNEXPECTED STDERR IN ' + g.name + ':', line) -# -# #=4.0') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") +# Original python code for this test: +# ----------------------------------- +# +# import os +# import sys +# import subprocess +# from subprocess import Popen +# import shutil +# from fdb import services +# import time +# +# os.environ["ISC_USER"] = user_name +# os.environ["ISC_PASSWORD"] = user_password +# +# # How long can we wait for session-worker completition, seconds +# # (ISQL often can not complete its job for several seconds!): +# MAX_TIME_FOR_WAITING_WORKER_FINISH = 60 +# +# ############################## +# # Temply, for debug obly: +# this_fdb=db_conn.database_name +# this_dbg=os.path.splitext(this_fdb)[0] + '.4debug.fdb' +# ############################## +# +# db_conn.close() +# +# #-------------------------------------------- +# +# def flush_and_close( file_handle ): +# # https://docs.python.org/2/library/os.html#os.fsync +# # If you're starting with a Python file object f, +# # first do f.flush(), and +# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. +# global os +# +# file_handle.flush() +# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: +# # otherwise: "OSError: [Errno 9] Bad file descriptor"! 
+# os.fsync(file_handle.fileno()) +# file_handle.close() +# +# #-------------------------------------------- +# +# def cleanup( f_names_list ): +# global os +# for f in f_names_list: +# if type(f) == file: +# del_name = f.name +# elif type(f) == str: +# del_name = f +# else: +# print('Unrecognized type of element:', f, ' - can not be treated as file.') +# del_name = None +# +# if del_name and os.path.isfile( del_name ): +# os.remove( del_name ) +# +# #-------------------------------------------- +# +# sql_init_ddl = os.path.join(context['files_location'],'read-consist-sttm-restart-DDL.sql') +# +# for checked_mode in('table', 'view'): +# +# target_obj = 'test' if checked_mode == 'table' else 'v_test' +# +# f_init_log=open( os.path.join(context['temp_directory'],'read-consist-sttm-restart-DDL.log'), 'w') +# f_init_err=open( ''.join( ( os.path.splitext(f_init_log.name)[0], '.err') ), 'w') +# +# subprocess.call( [context['isql_path'], dsn, '-q', '-i', sql_init_ddl], stdout=f_init_log, stderr=f_init_err ) +# flush_and_close(f_init_log) +# flush_and_close(f_init_err) +# +# sql_addi=''' +# set term ^; +# execute block as +# begin +# rdb$set_context('USER_SESSION', 'WHO', 'INIT_DATA'); +# end +# ^ +# set term ;^ +# insert into %(target_obj)s(id, x) +# select row_number()over(),row_number()over() +# from rdb$types rows 5; +# commit; +# ''' % locals() +# runProgram('isql', [ dsn, '-q' ], sql_addi) +# +# locker_tpb = fdb.TPB() +# locker_tpb.lock_timeout = MAX_TIME_FOR_WAITING_WORKER_FINISH +# locker_tpb.lock_resolution = fdb.isc_tpb_wait +# +# con_lock_1 = fdb.connect( dsn = dsn, isolation_level=locker_tpb ) +# con_lock_2 = fdb.connect( dsn = dsn, isolation_level=locker_tpb ) +# +# con_lock_1.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #1'); end" ) +# con_lock_2.execute_immediate( "execute block as begin rdb$set_context('USER_SESSION', 'WHO', 'LOCKER #2'); end" ) +# +# ######################### +# ### L O C K E R - 1 ### +# ######################### +# +# con_lock_1.execute_immediate( 'update %(target_obj)s set id=id where id=1' % locals() ) +# +# sql_text=''' +# connect '%(dsn)s'; +# set list on; +# set autoddl off; +# set term ^; +# execute block returns (whoami varchar(30)) as +# begin +# whoami = 'WORKER'; -- , ATT#' || current_connection; +# rdb$set_context('USER_SESSION','WHO', whoami); +# -- suspend; +# end +# ^ +# set term ;^ +# commit; +# SET KEEP_TRAN_PARAMS ON; +# set transaction read committed read consistency; +# --select current_connection, current_transaction from rdb$database; +# set list off; +# set wng off; +# +# --set plan on; +# set count on; +# update %(target_obj)s set id = -id where id <= 2 order by id DESC rows 4; -- THIS MUST HANG BECAUSE OF LOCKERs +# +# -- check results: +# -- ############### +# +# select id from %(target_obj)s order by id; -- one record must remain, with ID = -5 +# +# select v.old_id, v.op, v.snap_no_rank -- snap_no_rank must have four unique values: 1,2,3 and 4. +# from v_worker_log v +# where v.op = 'upd'; +# +# --set width who 10; +# -- DO NOT check this! Values can differ here from one run to another! 
+# -- select id, trn, who, old_id, new_id, op, rec_vers, global_cn, snap_no from tlog_done order by id; +# rollback; +# +# ''' % dict(globals(), **locals()) +# +# f_worker_sql=open( os.path.join(context['temp_directory'],'tmp_sttm_restart_on_delete_04.sql'), 'w') +# f_worker_sql.write(sql_text) +# flush_and_close(f_worker_sql) +# +# +# f_worker_log=open( ''.join( ( os.path.splitext(f_worker_sql.name)[0], '.log') ), 'w') +# f_worker_err=open( ''.join( ( os.path.splitext(f_worker_log.name)[0], '.err') ), 'w') +# +# ############################################################################ +# ### L A U N C H W O R K E R U S I N G I S Q L, A S Y N C. ### +# ############################################################################ +# +# p_worker = Popen( [ context['isql_path'], '-pag', '9999999', '-q', '-i', f_worker_sql.name ],stdout=f_worker_log, stderr=f_worker_err) +# time.sleep(1) +# +# +# ######################### +# ### L O C K E R - 2 ### +# ######################### +# +# # Change ID so that it **will* be included in the set of rows that must be affected by session-worker: +# con_lock_2.execute_immediate( 'update %(target_obj)s set id = -5 where abs(id) = 5;' % locals() ) +# con_lock_2.commit() +# con_lock_2.execute_immediate( 'update %(target_obj)s set id = id where abs(id) = 5;' % locals() ) +# +# +# con_lock_1.commit() # releases record with ID=1 (allow it to be deleted by session-worker) +# +# # Change ID so that it **will* be included in the set of rows that must be affected by session-worker: +# con_lock_1.execute_immediate( 'update %(target_obj)s set id = -4 where abs(id) = 4;' % locals() ) +# con_lock_1.commit() +# con_lock_1.execute_immediate( 'update %(target_obj)s set id = id where abs(id) = 4;' % locals() ) +# +# +# con_lock_2.commit() # releases record with ID = -5, but session-worker is waiting for record with ID = -4 (that was changed by locker-1). +# con_lock_2.execute_immediate( 'update %(target_obj)s set id = -3 where abs(id) = 3;' % locals() ) +# con_lock_2.commit() +# con_lock_2.execute_immediate( 'update %(target_obj)s set id = id where abs(id) = 3;' % locals() ) +# +# con_lock_1.commit() # This releases row with ID=-4 but session-worker is waiting for ID = - 3 (changed by locker-2). +# con_lock_2.commit() # This releases row with ID=-3. No more locked rows so session-worker can finish its mission. 
+# +# # Here we wait for ISQL complete its mission: +# p_worker.wait() +# +# flush_and_close(f_worker_log) +# flush_and_close(f_worker_err) +# +# # Close lockers: +# ################ +# for c in (con_lock_1, con_lock_2): +# c.close() +# +# +# # CHECK RESULTS +# ############### +# with open(f_worker_log.name,'r') as f: +# for line in f: +# if line.strip(): +# print('checked_mode: %(checked_mode)s, STDLOG: %(line)s' % locals()) +# +# for f in (f_init_err, f_worker_err): +# with open(f.name,'r') as g: +# for line in g: +# if line.strip(): +# print( 'checked_mode: ', checked_mode, ' UNEXPECTED STDERR IN ' + g.name + ':', line) +# +# #=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +""" +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/trigger/alter/test_02.py b/tests/functional/trigger/alter/test_02.py index 347391fd..92b3a46f 100644 --- a/tests/functional/trigger/alter/test_02.py +++ b/tests/functional/trigger/alter/test_02.py @@ -1,28 +1,16 @@ #coding:utf-8 -# -# id: functional.trigger.alter.02 -# title: ALTER TRIGGER - INACTIVE -# decription: ALTER TRIGGER - INACTIVE -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# CREATE TRIGGER -# SHOW TRIGGER -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.trigger.alter.alter_trigger_02 + +""" +ID: trigger.alter-02 +TITLE: ALTER TRIGGER - INACTIVE +DESCRIPTION: +FBTEST: functional.trigger.alter.02 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None - -substitutions_1 = [('\\+.*', ''), ('\\=.*', ''), ('Trigger text.*', '')] - -init_script_1 = """CREATE TABLE test( id INTEGER NOT NULL CONSTRAINT unq UNIQUE, +init_script = """CREATE TABLE test( id INTEGER NOT NULL CONSTRAINT unq UNIQUE, text VARCHAR(32)); commit; SET TERM ^; @@ -32,26 +20,28 @@ BEGIN new.id=1; END ^ SET TERM ;^ -commit;""" +commit; +""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """ALTER TRIGGER tg INACTIVE; -SHOW TRIGGER tg;""" +test_script = """ALTER TRIGGER tg INACTIVE; +SHOW TRIGGER tg; +""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('\\+.*', ''), ('\\=.*', ''), ('Trigger text.*', '')]) -expected_stdout_1 = """Triggers on Table TEST: +expected_stdout = """Triggers on Table TEST: TG, Sequence: 0, Type: BEFORE INSERT, Inactive AS BEGIN new.id=1; END -+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +""" +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/trigger/alter/test_03.py b/tests/functional/trigger/alter/test_03.py index 08be550a..6d40ffa4 100644 --- a/tests/functional/trigger/alter/test_03.py +++ b/tests/functional/trigger/alter/test_03.py @@ -1,28 +1,16 @@ 
#coding:utf-8 -# -# id: functional.trigger.alter.03 -# title: ALTER TRIGGER - BEFORE DELETE -# decription: ALTER TRIGGER - BEFORE DELETE -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# CREATE TRIGGER -# SHOW TRIGGER -# tracker_id: -# min_versions: [] -# versions: 2.0 -# qmid: functional.trigger.alter.alter_trigger_03 + +""" +ID: trigger.alter-03 +TITLE: ALTER TRIGGER - BEFORE DELETE +DESCRIPTION: +FBTEST: functional.trigger.alter.03 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.0 -# resources: None - -substitutions_1 = [('\\+.*', ''), ('\\=.*', ''), ('Trigger text.*', '')] - -init_script_1 = """CREATE TABLE test( id INTEGER NOT NULL CONSTRAINT unq UNIQUE, +init_script = """CREATE TABLE test( id INTEGER NOT NULL CONSTRAINT unq UNIQUE, text VARCHAR(32)); SET TERM ^; CREATE TRIGGER tg FOR test AFTER UPDATE @@ -30,25 +18,27 @@ AS BEGIN END ^ SET TERM ;^ -commit;""" +commit; +""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """ALTER TRIGGER tg BEFORE DELETE; -SHOW TRIGGER tg;""" +test_script = """ALTER TRIGGER tg BEFORE DELETE; +SHOW TRIGGER tg; +""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('\\+.*', ''), ('\\=.*', ''), ('Trigger text.*', '')]) -expected_stdout_1 = """Triggers on Table TEST: +expected_stdout = """Triggers on Table TEST: TG, Sequence: 0, Type: BEFORE DELETE, Active AS BEGIN END -+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++""" - -@pytest.mark.version('>=2.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +""" +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/trigger/alter/test_04.py b/tests/functional/trigger/alter/test_04.py index ac501b77..61cc56a2 100644 --- a/tests/functional/trigger/alter/test_04.py +++ b/tests/functional/trigger/alter/test_04.py @@ -1,28 +1,16 @@ #coding:utf-8 -# -# id: functional.trigger.alter.04 -# title: ALTER TRIGGER - BEFORE INSERT -# decription: ALTER TRIGGER - BEFORE INSERT -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# CREATE TRIGGER -# SHOW TRIGGER -# tracker_id: -# min_versions: [] -# versions: 2.0 -# qmid: functional.trigger.alter.alter_trigger_04 + +""" +ID: trigger.alter-04 +TITLE: ALTER TRIGGER - BEFORE INSERT +DESCRIPTION: +FBTEST: functional.trigger.alter.04 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.0 -# resources: None - -substitutions_1 = [('\\+.*', ''), ('\\=.*', ''), ('Trigger text.*', '')] - -init_script_1 = """CREATE TABLE test( id INTEGER NOT NULL CONSTRAINT unq UNIQUE, +init_script = """CREATE TABLE test( id INTEGER NOT NULL CONSTRAINT unq UNIQUE, text VARCHAR(32)); SET TERM ^; CREATE TRIGGER tg FOR test AFTER UPDATE @@ -30,25 +18,27 @@ AS BEGIN END ^ SET TERM ;^ -commit;""" +commit; +""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """ALTER TRIGGER tg BEFORE INSERT; -SHOW TRIGGER tg;""" +test_script = """ALTER TRIGGER tg BEFORE INSERT; +SHOW TRIGGER tg; +""" -act_1 = isql_act('db_1', test_script_1, 
substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('\\+.*', ''), ('\\=.*', ''), ('Trigger text.*', '')]) -expected_stdout_1 = """Triggers on Table TEST: +expected_stdout = """Triggers on Table TEST: TG, Sequence: 0, Type: BEFORE INSERT, Active AS BEGIN END -+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++""" - -@pytest.mark.version('>=2.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +""" +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/trigger/alter/test_05.py b/tests/functional/trigger/alter/test_05.py index dba23872..392fe948 100644 --- a/tests/functional/trigger/alter/test_05.py +++ b/tests/functional/trigger/alter/test_05.py @@ -1,28 +1,16 @@ #coding:utf-8 -# -# id: functional.trigger.alter.05 -# title: ALTER TRIGGER - BEFORE UPDATE -# decription: ALTER TRIGGER - BEFORE UPDATE -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# CREATE TRIGGER -# SHOW TRIGGER -# tracker_id: -# min_versions: [] -# versions: 2.0 -# qmid: functional.trigger.alter.alter_trigger_05 + +""" +ID: trigger.alter-05 +TITLE: ALTER TRIGGER - BEFORE UPDATE +DESCRIPTION: +FBTEST: functional.trigger.alter.05 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.0 -# resources: None - -substitutions_1 = [('\\+.*', ''), ('\\=.*', ''), ('Trigger text.*', '')] - -init_script_1 = """CREATE TABLE test( id INTEGER NOT NULL CONSTRAINT unq UNIQUE, +init_script = """CREATE TABLE test( id INTEGER NOT NULL CONSTRAINT unq UNIQUE, text VARCHAR(32)); SET TERM ^; CREATE TRIGGER tg FOR test AFTER INSERT @@ -30,25 +18,27 @@ AS BEGIN END ^ SET TERM ;^ -commit;""" +commit; +""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """ALTER TRIGGER tg BEFORE UPDATE; -SHOW TRIGGER tg;""" +test_script = """ALTER TRIGGER tg BEFORE UPDATE; +SHOW TRIGGER tg; +""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('\\+.*', ''), ('\\=.*', ''), ('Trigger text.*', '')]) -expected_stdout_1 = """Triggers on Table TEST: +expected_stdout = """Triggers on Table TEST: TG, Sequence: 0, Type: BEFORE UPDATE, Active AS BEGIN END -+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++""" - -@pytest.mark.version('>=2.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +""" +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/trigger/alter/test_06.py b/tests/functional/trigger/alter/test_06.py index 84943c81..a13c1b65 100644 --- a/tests/functional/trigger/alter/test_06.py +++ b/tests/functional/trigger/alter/test_06.py @@ -1,28 +1,16 @@ #coding:utf-8 -# -# id: functional.trigger.alter.06 -# title: ALTER TRIGGER - AFTER DELETE -# decription: ALTER TRIGGER - AFTER DELETE -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# CREATE TRIGGER -# 
SHOW TRIGGER -# tracker_id: -# min_versions: [] -# versions: 2.0 -# qmid: functional.trigger.alter.alter_trigger_06 + +""" +ID: trigger.alter-06 +TITLE: ALTER TRIGGER - AFTER DELETE +DESCRIPTION: +FBTEST: functional.trigger.alter.06 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.0 -# resources: None - -substitutions_1 = [('\\+.*', ''), ('\\=.*', ''), ('Trigger text.*', '')] - -init_script_1 = """ +init_script = """ CREATE TABLE test( id INTEGER NOT NULL CONSTRAINT unq UNIQUE, text VARCHAR(32)); SET TERM ^; @@ -32,18 +20,19 @@ init_script_1 = """ END ^ SET TERM ;^ commit; - """ -db_1 = db_factory(sql_dialect=3, init=init_script_1) +""" -test_script_1 = """ +db = db_factory(init=init_script) + +test_script = """ ALTER TRIGGER tg AFTER DELETE; SHOW TRIGGER tg; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('\\+.*', ''), ('\\=.*', ''), ('Trigger text.*', '')]) -expected_stdout_1 = """ +expected_stdout = """ Triggers on Table TEST: TG, Sequence: 0, Type: AFTER DELETE, Active AS @@ -51,9 +40,8 @@ expected_stdout_1 = """ END """ -@pytest.mark.version('>=2.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/trigger/alter/test_07.py b/tests/functional/trigger/alter/test_07.py index 23b73e6d..b43aea8a 100644 --- a/tests/functional/trigger/alter/test_07.py +++ b/tests/functional/trigger/alter/test_07.py @@ -1,36 +1,17 @@ #coding:utf-8 -# -# id: functional.trigger.alter.07 -# title: ALTER TRIGGER - AFTER INSERT -# decription: -# ALTER TRIGGER - AFTER INSERT -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# CREATE TRIGGER -# SHOW TRIGGER -# -# Checked on: -# 2.5.9.27115: OK, 0.484s. -# 3.0.4.33021: OK, 1.000s. -# 4.0.0.1143: OK, 2.203s. 
-# NB: phrase 'attempted update of read-only column' contains name of table and column ('TEST.ID') on 4.0.x -# -# tracker_id: -# min_versions: [] -# versions: 2.5.0, 4.0.0 -# qmid: functional.trigger.alter.alter_trigger_07 + +""" +ID: trigger.alter-07 +TITLE: ALTER TRIGGER - AFTER INSERT +DESCRIPTION: + NB: phrase 'attempted update of read-only column' contains name of table and column ('TEST.ID') on 4.0.x +FBTEST: functional.trigger.alter.07 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5.0 -# resources: None - -substitutions_1 = [('\\+.*', ''), ('\\=.*', ''), ('Trigger text.*', '')] - -init_script_1 = """ +init_script = """ CREATE TABLE test( id INTEGER NOT NULL CONSTRAINT unq UNIQUE, text VARCHAR(32)); SET TERM ^; CREATE TRIGGER tg FOR test BEFORE UPDATE @@ -40,16 +21,19 @@ init_script_1 = """ END ^ SET TERM ;^ commit; - """ -db_1 = db_factory(sql_dialect=3, init=init_script_1) +""" -test_script_1 = """ +db = db_factory(init=init_script) + +test_script = """ ALTER TRIGGER tg AFTER INSERT; SHOW TRIGGER tg; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('\\+.*', ''), ('\\=.*', ''), ('Trigger text.*', '')]) + +# version: 2.5.0 expected_stdout_1 = """ Triggers on Table TEST: @@ -60,45 +44,21 @@ expected_stdout_1 = """ END +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ """ + expected_stderr_1 = """ Statement failed, SQLSTATE = 42000 attempted update of read-only column """ -@pytest.mark.version('>=2.5.0,<4.0.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - - assert act_1.clean_stdout == act_1.clean_expected_stdout +@pytest.mark.version('>=3.0,<4.0.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout_1 + act.expected_stderr = expected_stderr_1 + act.execute() + assert (act.clean_stdout == act.clean_expected_stdout and + act.clean_stderr == act.clean_expected_stderr) # version: 4.0.0 -# resources: None - -substitutions_2 = [('\\+.*', ''), ('\\=.*', ''), ('Trigger text.*', '')] - -init_script_2 = """ - CREATE TABLE test( id INTEGER NOT NULL CONSTRAINT unq UNIQUE, text VARCHAR(32)); - SET TERM ^; - CREATE TRIGGER tg FOR test BEFORE UPDATE - AS - BEGIN - new.id=1; - END ^ - SET TERM ;^ - commit; - """ - -db_2 = db_factory(sql_dialect=3, init=init_script_2) - -test_script_2 = """ - ALTER TRIGGER tg AFTER INSERT; - SHOW TRIGGER tg; -""" - -act_2 = isql_act('db_2', test_script_2, substitutions=substitutions_2) expected_stdout_2 = """ Triggers on Table TEST: @@ -109,17 +69,16 @@ expected_stdout_2 = """ END +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ """ + expected_stderr_2 = """ Statement failed, SQLSTATE = 42000 attempted update of read-only column TEST.ID """ @pytest.mark.version('>=4.0.0') -def test_2(act_2: Action): - act_2.expected_stdout = expected_stdout_2 - act_2.expected_stderr = expected_stderr_2 - act_2.execute() - assert act_2.clean_stderr == act_2.clean_expected_stderr - - assert act_2.clean_stdout == act_2.clean_expected_stdout - +def test_2(act: Action): + act.expected_stdout = expected_stdout_2 + act.expected_stderr = expected_stderr_2 + act.execute() + assert (act.clean_stdout == act.clean_expected_stdout and + act.clean_stderr == act.clean_expected_stderr) diff --git a/tests/functional/trigger/alter/test_08.py 
b/tests/functional/trigger/alter/test_08.py index 4a3fa7f9..27ceadfd 100644 --- a/tests/functional/trigger/alter/test_08.py +++ b/tests/functional/trigger/alter/test_08.py @@ -1,28 +1,16 @@ #coding:utf-8 -# -# id: functional.trigger.alter.08 -# title: ALTER TRIGGER - POSITION -# decription: ALTER TRIGGER - POSITION -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# CREATE TRIGGER -# SHOW TRIGGER -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.trigger.alter.alter_trigger_08 + +""" +ID: trigger.alter-08 +TITLE: ALTER TRIGGER - POSITION +DESCRIPTION: +FBTEST: functional.trigger.alter.08 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None - -substitutions_1 = [('\\+.*', ''), ('\\=.*', ''), ('Trigger text.*', '')] - -init_script_1 = """ +init_script = """ CREATE TABLE test( id INTEGER NOT NULL CONSTRAINT unq UNIQUE, text VARCHAR(32)); SET TERM ^; CREATE TRIGGER tg FOR test BEFORE UPDATE @@ -32,18 +20,19 @@ init_script_1 = """ END ^ SET TERM ;^ commit; - """ -db_1 = db_factory(sql_dialect=3, init=init_script_1) +""" -test_script_1 = """ +db = db_factory(init=init_script) + +test_script = """ ALTER TRIGGER tg POSITION 20; SHOW TRIGGER tg; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('\\+.*', ''), ('\\=.*', ''), ('Trigger text.*', '')]) -expected_stdout_1 = """ +expected_stdout = """ Triggers on Table TEST: TG, Sequence: 20, Type: BEFORE UPDATE, Active AS @@ -52,9 +41,8 @@ expected_stdout_1 = """ END """ -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/trigger/alter/test_09.py b/tests/functional/trigger/alter/test_09.py index 67e5c18a..edf63563 100644 --- a/tests/functional/trigger/alter/test_09.py +++ b/tests/functional/trigger/alter/test_09.py @@ -1,30 +1,17 @@ #coding:utf-8 -# -# id: functional.trigger.alter.09 -# title: ALTER TRIGGER - POSITION -# decription: ALTER TRIGGER - POSITION -# Test by checking trigger seqeunce -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# CREATE TRIGGER -# INSERT -# Basic SELECT -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.trigger.alter.alter_trigger_09 + +""" +ID: trigger.alter-09 +TITLE: ALTER TRIGGER - POSITION +DESCRIPTION: + Test by checking trigger seqeunce +FBTEST: functional.trigger.alter.09 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE test( id INTEGER NOT NULL CONSTRAINT unq UNIQUE, +init_script = """CREATE TABLE test( id INTEGER NOT NULL CONSTRAINT unq UNIQUE, text VARCHAR(32)); SET TERM ^; CREATE TRIGGER tg1 FOR test BEFORE INSERT POSITION 1 @@ -39,25 +26,27 @@ BEGIN new.text=new.text||'tg2 '; END ^ SET TERM ;^ -commit;""" +commit; +""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """ALTER TRIGGER tg2 POSITION 0; +test_script = """ALTER TRIGGER tg2 POSITION 0; INSERT INTO test VALUES(0,''); COMMIT; -SELECT text FROM test;""" +SELECT text FROM test; +""" -act_1 = isql_act('db_1', test_script_1, 
substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """TEXT +expected_stdout = """TEXT ================================ -tg2 tg1""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +tg2 tg1 +""" +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/trigger/alter/test_10.py b/tests/functional/trigger/alter/test_10.py index 916248f5..1bea93ce 100644 --- a/tests/functional/trigger/alter/test_10.py +++ b/tests/functional/trigger/alter/test_10.py @@ -1,28 +1,16 @@ #coding:utf-8 -# -# id: functional.trigger.alter.10 -# title: ALTER TRIGGER - AS -# decription: ALTER TRIGGER - AS -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# CREATE TRIGGER -# SHOW TRIGGER -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.trigger.alter.alter_trigger_10 + +""" +ID: trigger.alter-10 +TITLE: ALTER TRIGGER - AS +DESCRIPTION: +FBTEST: functional.trigger.alter.10 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None - -substitutions_1 = [('\\+.*', ''), ('\\=.*', ''), ('Trigger text.*', '')] - -init_script_1 = """CREATE TABLE test( id INTEGER NOT NULL CONSTRAINT unq UNIQUE, +init_script = """CREATE TABLE test( id INTEGER NOT NULL CONSTRAINT unq UNIQUE, text VARCHAR(32)); SET TERM ^; CREATE TRIGGER tg FOR test BEFORE INSERT POSITION 1 @@ -31,32 +19,34 @@ BEGIN new.text=new.text||'tg1 '; END ^ SET TERM ;^ -commit;""" +commit; +""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """SET TERM ^; +test_script = """SET TERM ^; ALTER TRIGGER tg AS BEGIN new.text='altered trigger'; END ^ SET TERM ;^ -SHOW TRIGGER tg;""" +SHOW TRIGGER tg; +""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('\\+.*', ''), ('\\=.*', ''), ('Trigger text.*', '')]) -expected_stdout_1 = """Triggers on Table TEST: +expected_stdout = """Triggers on Table TEST: TG, Sequence: 1, Type: BEFORE INSERT, Active AS BEGIN new.text='altered trigger'; END -+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +""" +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/trigger/alter/test_11.py b/tests/functional/trigger/alter/test_11.py index 34d9b351..b0751390 100644 --- a/tests/functional/trigger/alter/test_11.py +++ b/tests/functional/trigger/alter/test_11.py @@ -1,29 +1,16 @@ #coding:utf-8 -# -# id: functional.trigger.alter.11 -# title: ALTER TRIGGER - AS -# decription: ALTER TRIGGER - AS -# Test by insert data -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# CREATE TRIGGER -# SHOW TRIGGER -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.trigger.alter.alter_trigger_11 + +""" +ID: trigger.alter-11 +TITLE: ALTER TRIGGER - AS +DESCRIPTION: +FBTEST: 
functional.trigger.alter.11 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE test( id INTEGER NOT NULL CONSTRAINT unq UNIQUE, +init_script = """CREATE TABLE test( id INTEGER NOT NULL CONSTRAINT unq UNIQUE, text VARCHAR(32)); SET TERM ^; CREATE TRIGGER tg FOR test BEFORE INSERT POSITION 1 @@ -32,11 +19,12 @@ BEGIN new.text=new.text||'tg1 '; END ^ SET TERM ;^ -commit;""" +commit; +""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """SET TERM ^; +test_script = """SET TERM ^; ALTER TRIGGER tg AS BEGIN new.text='altered trigger'; @@ -44,18 +32,19 @@ END ^ SET TERM ;^ INSERT INTO test VALUES(0,null); -SELECT text FROM test;""" +SELECT text FROM test; +""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """TEXT +expected_stdout = """TEXT ================================ -altered trigger""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +altered trigger +""" +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/trigger/create/test_01.py b/tests/functional/trigger/create/test_01.py index 32ea3940..93b9ec26 100644 --- a/tests/functional/trigger/create/test_01.py +++ b/tests/functional/trigger/create/test_01.py @@ -1,52 +1,44 @@ #coding:utf-8 -# -# id: functional.trigger.create.01 -# title: CREATE TRIGGER -# decription: CREATE TRIGGER -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.trigger.create.create_trigger_01 + +""" +ID: trigger.create-01 +TITLE: CREATE TRIGGER +DESCRIPTION: +FBTEST: functional.trigger.create.01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +init_script = """CREATE TABLE tb(id INT); +commit; +""" -substitutions_1 = [('\\+.*', ''), ('\\=.*', ''), ('Trigger text.*', '')] +db = db_factory(init=init_script) -init_script_1 = """CREATE TABLE tb(id INT); -commit;""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """SET TERM ^; +test_script = """SET TERM ^; /* Tested command: */ CREATE TRIGGER test FOR tb BEFORE INSERT AS BEGIN new.id=1; END^ SET TERM ;^ -SHOW TRIGGER test;""" +SHOW TRIGGER test; +""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('\\+.*', ''), ('\\=.*', ''), ('Trigger text.*', '')]) -expected_stdout_1 = """Triggers on Table TB: +expected_stdout = """Triggers on Table TB: TEST, Sequence: 0, Type: BEFORE INSERT, Active AS BEGIN new.id=1; END -+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +""" +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout 
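For orientation: every trigger-test conversion in this series reduces to the same new-style firebird-qa skeleton, sketched below. This sketch is not part of the patch; the ID/FBTEST values and the trigger body are illustrative placeholders rather than the content of any single file in this diff.

#coding:utf-8

"""
ID: trigger.example
TITLE: CREATE TRIGGER (pattern sketch)
DESCRIPTION:
FBTEST: functional.trigger.example
"""

import pytest
from firebird.qa import *

# A fresh database is created per test from this init script.
init_script = """CREATE TABLE tb(id INT);
commit;
"""

db = db_factory(init=init_script)

# The script under test: create a trigger, then show it.
test_script = """SET TERM ^;
CREATE TRIGGER test FOR tb BEFORE INSERT AS
BEGIN
  new.id=1;
END^
SET TERM ;^
SHOW TRIGGER test;
"""

act = isql_act('db', test_script,
               substitutions=[('\\+.*', ''), ('\\=.*', ''), ('Trigger text.*', '')])

expected_stdout = """Triggers on Table TB:
TEST, Sequence: 0, Type: BEFORE INSERT, Active
AS
BEGIN
  new.id=1;
END
"""

@pytest.mark.version('>=3.0')
def test_1(act: Action):
    act.expected_stdout = expected_stdout
    act.execute()
    assert act.clean_stdout == act.clean_expected_stdout

The substitutions make the comparison ignore the '+'/'=' separator lines and the 'Trigger text' header that SHOW TRIGGER prints, so only the stable part of the output is checked.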
diff --git a/tests/functional/trigger/create/test_02.py b/tests/functional/trigger/create/test_02.py index dc775321..d8ce6e94 100644 --- a/tests/functional/trigger/create/test_02.py +++ b/tests/functional/trigger/create/test_02.py @@ -1,49 +1,41 @@ #coding:utf-8 -# -# id: functional.trigger.create.02 -# title: CREATE TRIGGER AFTER INSERT -# decription: CREATE TRIGGER AFTER INSERT -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# tracker_id: -# min_versions: [] -# versions: 2.0 -# qmid: functional.trigger.create.create_trigger_02 + +""" +ID: trigger.create-02 +TITLE: CREATE TRIGGER AFTER INSERT +DESCRIPTION: +FBTEST: functional.trigger.create.02 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.0 -# resources: None +init_script = """CREATE TABLE tb(id INT); +commit; +""" -substitutions_1 = [('\\+.*', ''), ('\\=.*', ''), ('Trigger text.*', '')] +db = db_factory(init=init_script) -init_script_1 = """CREATE TABLE tb(id INT); -commit;""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """SET TERM ^; +test_script = """SET TERM ^; CREATE TRIGGER test FOR tb AFTER INSERT AS BEGIN END^ SET TERM ;^ -SHOW TRIGGER test;""" +SHOW TRIGGER test; +""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('\\+.*', ''), ('\\=.*', ''), ('Trigger text.*', '')]) -expected_stdout_1 = """Triggers on Table TB: +expected_stdout = """Triggers on Table TB: TEST, Sequence: 0, Type: AFTER INSERT, Active AS BEGIN END -+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++""" - -@pytest.mark.version('>=2.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +""" +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/trigger/create/test_03.py b/tests/functional/trigger/create/test_03.py index 8f4a5c8f..afde6fe0 100644 --- a/tests/functional/trigger/create/test_03.py +++ b/tests/functional/trigger/create/test_03.py @@ -1,50 +1,42 @@ #coding:utf-8 -# -# id: functional.trigger.create.03 -# title: CREATE TRIGGER BEFORE UPDATE -# decription: CREATE TRIGGER BEFORE UPDATE -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.trigger.create.create_trigger_03 + +""" +ID: trigger.create-03 +TITLE: CREATE TRIGGER BEFORE UPDATE +DESCRIPTION: +FBTEST: functional.trigger.create.03 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +init_script = """CREATE TABLE tb(id INT); +""" -substitutions_1 = [('\\+.*', ''), ('\\=.*', ''), ('Trigger text.*', '')] +db = db_factory(init=init_script) -init_script_1 = """CREATE TABLE tb(id INT);""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """SET TERM ^; +test_script = """SET TERM ^; CREATE TRIGGER test FOR tb BEFORE UPDATE AS BEGIN new.id=1; END^ SET TERM ;^ -SHOW TRIGGER test;""" +SHOW TRIGGER test; +""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('\\+.*', ''), ('\\=.*', ''), ('Trigger text.*', '')]) -expected_stdout_1 = """Triggers 
on Table TB: +expected_stdout = """Triggers on Table TB: TEST, Sequence: 0, Type: BEFORE UPDATE, Active AS BEGIN new.id=1; END -+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +""" +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/trigger/create/test_04.py b/tests/functional/trigger/create/test_04.py index 9624e7d6..ecf3dfcc 100644 --- a/tests/functional/trigger/create/test_04.py +++ b/tests/functional/trigger/create/test_04.py @@ -1,48 +1,40 @@ #coding:utf-8 -# -# id: functional.trigger.create.04 -# title: CREATE TRIGGER AFTER UPDATE -# decription: CREATE TRIGGER AFTER UPDATE -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# tracker_id: -# min_versions: [] -# versions: 2.0 -# qmid: functional.trigger.create.create_trigger_04 + +""" +ID: trigger.create-04 +TITLE: CREATE TRIGGER AFTER UPDATE +DESCRIPTION: +FBTEST: functional.trigger.create.04 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.0 -# resources: None +init_script = """CREATE TABLE tb(id INT); +""" -substitutions_1 = [('\\+.*', ''), ('\\=.*', ''), ('Trigger text.*', '')] +db = db_factory(init=init_script) -init_script_1 = """CREATE TABLE tb(id INT);""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """SET TERM ^; +test_script = """SET TERM ^; CREATE TRIGGER test FOR tb AFTER UPDATE AS BEGIN END^ SET TERM ;^ -SHOW TRIGGER test;""" +SHOW TRIGGER test; +""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('\\+.*', ''), ('\\=.*', ''), ('Trigger text.*', '')]) -expected_stdout_1 = """Triggers on Table TB: +expected_stdout = """Triggers on Table TB: TEST, Sequence: 0, Type: AFTER UPDATE, Active AS BEGIN END -+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++""" - -@pytest.mark.version('>=2.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +""" +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/trigger/create/test_05.py b/tests/functional/trigger/create/test_05.py index 8eac494f..b4d18ff1 100644 --- a/tests/functional/trigger/create/test_05.py +++ b/tests/functional/trigger/create/test_05.py @@ -1,49 +1,41 @@ #coding:utf-8 -# -# id: functional.trigger.create.05 -# title: CREATE TRIGGER BEFORE DELETE -# decription: CREATE TRIGGER BEFORE DELETE -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# tracker_id: -# min_versions: [] -# versions: 2.0 -# qmid: functional.trigger.create.create_trigger_05 + +""" +ID: trigger.create-05 +TITLE: CREATE TRIGGER BEFORE DELETE +DESCRIPTION: +FBTEST: functional.trigger.create.05 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.0 -# resources: None +init_script = """CREATE TABLE 
tb(id INT); +commit; +""" -substitutions_1 = [('\\+.*', ''), ('\\=.*', ''), ('Trigger text.*', '')] +db = db_factory(init=init_script) -init_script_1 = """CREATE TABLE tb(id INT); -commit;""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """SET TERM ^; +test_script = """SET TERM ^; CREATE TRIGGER test FOR tb BEFORE DELETE AS BEGIN END^ SET TERM ;^ -SHOW TRIGGER test;""" +SHOW TRIGGER test; +""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('\\+.*', ''), ('\\=.*', ''), ('Trigger text.*', '')]) -expected_stdout_1 = """Triggers on Table TB: +expected_stdout = """Triggers on Table TB: TEST, Sequence: 0, Type: BEFORE DELETE, Active AS BEGIN END -+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++""" - -@pytest.mark.version('>=2.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +""" +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/trigger/create/test_06.py b/tests/functional/trigger/create/test_06.py index dd4edf72..09db8ea2 100644 --- a/tests/functional/trigger/create/test_06.py +++ b/tests/functional/trigger/create/test_06.py @@ -1,48 +1,40 @@ #coding:utf-8 -# -# id: functional.trigger.create.06 -# title: CREATE TRIGGER AFTER DELETE -# decription: CREATE TRIGGER AFTER DELETE -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# tracker_id: -# min_versions: [] -# versions: 2.0 -# qmid: functional.trigger.create.create_trigger_06 + +""" +ID: trigger.create-06 +TITLE: CREATE TRIGGER AFTER DELETE +DESCRIPTION: +FBTEST: functional.trigger.create.06 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.0 -# resources: None +init_script = """CREATE TABLE tb(id INT); +""" -substitutions_1 = [('\\+.*', ''), ('\\=.*', ''), ('Trigger text.*', '')] +db = db_factory(init=init_script) -init_script_1 = """CREATE TABLE tb(id INT);""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """SET TERM ^; +test_script = """SET TERM ^; CREATE TRIGGER test FOR tb AFTER DELETE AS BEGIN END^ SET TERM ;^ -SHOW TRIGGER test;""" +SHOW TRIGGER test; +""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('\\+.*', ''), ('\\=.*', ''), ('Trigger text.*', '')]) -expected_stdout_1 = """Triggers on Table TB: +expected_stdout = """Triggers on Table TB: TEST, Sequence: 0, Type: AFTER DELETE, Active AS BEGIN END -+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++""" - -@pytest.mark.version('>=2.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +""" +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/trigger/create/test_07.py b/tests/functional/trigger/create/test_07.py index 761c7985..7c2a80a9 100644 --- a/tests/functional/trigger/create/test_07.py +++ 
b/tests/functional/trigger/create/test_07.py @@ -1,52 +1,43 @@ #coding:utf-8 -# -# id: functional.trigger.create.07 -# title: CREATE TRIGGER INACTIVE AFTER DELETE -# decription: CREATE TRIGGER INACTIVE AFTER DELETE -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# tracker_id: -# min_versions: [] -# versions: 2.0 -# qmid: functional.trigger.create.create_trigger_07 + +""" +ID: trigger.create-07 +TITLE: CREATE TRIGGER INACTIVE AFTER DELETE +DESCRIPTION: +FBTEST: functional.trigger.create.07 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.0 -# resources: None - -substitutions_1 = [('\\+.*', ''), ('\\=.*', ''), ('Trigger text.*', '')] - -init_script_1 = """ +init_script = """ CREATE TABLE tb(id INT); commit; - """ -db_1 = db_factory(sql_dialect=3, init=init_script_1) +""" -test_script_1 = """ +db = db_factory(init=init_script) + +test_script = """ SET TERM ^; CREATE TRIGGER test FOR tb INACTIVE AFTER DELETE AS BEGIN END^ SET TERM ;^ -SHOW TRIGGER test;""" +SHOW TRIGGER test; +""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('\\+.*', ''), ('\\=.*', ''), ('Trigger text.*', '')]) -expected_stdout_1 = """Triggers on Table TB: +expected_stdout = """Triggers on Table TB: TEST, Sequence: 0, Type: AFTER DELETE, Inactive AS BEGIN END """ -@pytest.mark.version('>=2.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/trigger/create/test_08.py b/tests/functional/trigger/create/test_08.py index 57aca36d..2b82a280 100644 --- a/tests/functional/trigger/create/test_08.py +++ b/tests/functional/trigger/create/test_08.py @@ -1,48 +1,40 @@ #coding:utf-8 -# -# id: functional.trigger.create.08 -# title: CREATE TRIGGER AFTER DELETE POSITION 12 -# decription: CREATE TRIGGER AFTER DELETE POSITION 12 -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# tracker_id: -# min_versions: [] -# versions: 2.0 -# qmid: functional.trigger.create.create_trigger_08 + +""" +ID: trigger.create-08 +TITLE: CREATE TRIGGER AFTER DELETE POSITION 12 +DESCRIPTION: +FBTEST: functional.trigger.create.08 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.0 -# resources: None +init_script = """CREATE TABLE tb(id INT); +""" -substitutions_1 = [('\\+.*', ''), ('\\=.*', ''), ('Trigger text.*', '')] +db = db_factory(init=init_script) -init_script_1 = """CREATE TABLE tb(id INT);""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """SET TERM ^; +test_script = """SET TERM ^; CREATE TRIGGER test FOR tb AFTER DELETE POSITION 12 AS BEGIN END^ SET TERM ;^ -SHOW TRIGGER test;""" +SHOW TRIGGER test; +""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('\\+.*', ''), ('\\=.*', ''), ('Trigger text.*', '')]) -expected_stdout_1 = """Triggers on Table TB: +expected_stdout = """Triggers on Table TB: TEST, Sequence: 12, Type: AFTER DELETE, Active AS BEGIN END -+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++""" - -@pytest.mark.version('>=2.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - 
act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +""" +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/trigger/create/test_09.py b/tests/functional/trigger/create/test_09.py index 0ccfed4f..87c2a6dc 100644 --- a/tests/functional/trigger/create/test_09.py +++ b/tests/functional/trigger/create/test_09.py @@ -1,30 +1,21 @@ #coding:utf-8 -# -# id: functional.trigger.create.09 -# title: CREATE TRIGGER BEFORE INSERT DECLARE VARIABLE -# decription: CREATE TRIGGER BEFORE INSERT DECLARE VARIABLE -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.trigger.create.create_trigger_09 + +""" +ID: trigger.create-09 +TITLE: CREATE TRIGGER BEFORE INSERT DECLARE VARIABLE +DESCRIPTION: +FBTEST: functional.trigger.create.09 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +init_script = """CREATE TABLE tb(id INT); +""" -substitutions_1 = [('\\+.*', ''), ('\\=.*', ''), ('Trigger text.*', '')] +db = db_factory(init=init_script) -init_script_1 = """CREATE TABLE tb(id INT);""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """SET TERM ^; +test_script = """SET TERM ^; CREATE TRIGGER test FOR tb BEFORE INSERT AS DECLARE VARIABLE v1 SMALLINT; DECLARE VARIABLE v2 INTEGER; @@ -51,11 +42,12 @@ BEGIN new.id=1; END^ SET TERM ;^ -SHOW TRIGGER test;""" +SHOW TRIGGER test; +""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('\\+.*', ''), ('\\=.*', ''), ('Trigger text.*', '')]) -expected_stdout_1 = """Triggers on Table TB: +expected_stdout = """Triggers on Table TB: TEST, Sequence: 0, Type: BEFORE INSERT, Active AS DECLARE VARIABLE v1 SMALLINT; @@ -82,11 +74,11 @@ DECLARE VARIABLE v21 NATIONAL CHAR VARYING(30); BEGIN new.id=1; END -+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +""" +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/trigger/create/test_10.py b/tests/functional/trigger/create/test_10.py index d17ee67f..2fa12b3a 100644 --- a/tests/functional/trigger/create/test_10.py +++ b/tests/functional/trigger/create/test_10.py @@ -1,36 +1,25 @@ #coding:utf-8 -# -# id: functional.trigger.create.10 -# title: CREATE TRIGGER BEFORE INSERT DECLARE VARIABLE, block stataments -# decription: CREATE TRIGGER BEFORE INSERT DECLARE VARIABLE, block stataments -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# CREATE EXCEPTION -# CREATE PROCEDURE -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.trigger.create.create_trigger_10 + +""" +ID: trigger.create-10 +TITLE: CREATE TRIGGER BEFORE INSERT DECLARE VARIABLE, block stataments +DESCRIPTION: +FBTEST: functional.trigger.create.10 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from 
firebird.qa import * -# version: 1.0 -# resources: None - -substitutions_1 = [('\\+.*', ''), ('\\=.*', ''), ('Trigger text.*', '')] - -init_script_1 = """ +init_script = """ CREATE TABLE tb(id INT); CREATE EXCEPTION test 'test exception'; commit; - """ -db_1 = db_factory(sql_dialect=3, init=init_script_1) +""" -test_script_1 = """ +db = db_factory(init=init_script) + +test_script = """ SET TERM ^; CREATE PROCEDURE test (id INT) RETURNS(d FLOAT)AS BEGIN @@ -85,9 +74,9 @@ test_script_1 = """ SHOW TRIGGER test; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('\\+.*', ''), ('\\=.*', ''), ('Trigger text.*', '')]) -expected_stdout_1 = """ +expected_stdout = """ Triggers on Table TB: TEST, Sequence: 0, Type: BEFORE INSERT, Active AS @@ -136,9 +125,8 @@ expected_stdout_1 = """ END """ -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/trigger/create/test_17.py b/tests/functional/trigger/create/test_17.py index df894b73..f6eacfb1 100644 --- a/tests/functional/trigger/create/test_17.py +++ b/tests/functional/trigger/create/test_17.py @@ -1,29 +1,24 @@ #coding:utf-8 -# -# id: functional.trigger.create.17 -# title: CREATE TRIGGER SQL2003 -# decription: CREATE TRIGGER SQL2003 -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.trigger.create.create_trigger_17 + +""" +ID: trigger.create-11 +TITLE: CREATE TRIGGER SQL2003 +DESCRIPTION: +FBTEST: functional.trigger.create.17 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None - -substitutions_1 = [('\\+.*', ''), ('\\=.*', ''), ('Trigger text.*', '')] - -init_script_1 = """ +init_script = """ CREATE TABLE tb(id INT); commit; - """ -db_1 = db_factory(sql_dialect=3, init=init_script_1) +""" -test_script_1 = """ +db = db_factory(init=init_script) + +test_script = """ SET TERM ^; /* Tested command: */ CREATE TRIGGER test BEFORE INSERT @@ -35,9 +30,9 @@ test_script_1 = """ SHOW TRIGGER test; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('\\+.*', ''), ('\\=.*', ''), ('Trigger text.*', '')]) -expected_stdout_1 = """ +expected_stdout = """ Triggers on Table TB: TEST, Sequence: 0, Type: BEFORE INSERT, Active AS @@ -46,9 +41,8 @@ expected_stdout_1 = """ END """ -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/trigger/database/test_connect_01.py b/tests/functional/trigger/database/test_connect_01.py index 5f8cd5f0..2150c5fa 100644 --- a/tests/functional/trigger/database/test_connect_01.py +++ b/tests/functional/trigger/database/test_connect_01.py @@ -1,22 +1,17 @@ #coding:utf-8 -# -# id: functional.trigger.database.connect_01 -# title: Trigger on database connect. See also CORE-745 -# decription: This tests normal operation of database CONNECT trigger. 
-# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.trigger.database.connect_01 + +""" +ID: trigger.database.connect-01 +TITLE: Trigger on database connect. See also CORE-745 +DESCRIPTION: + This tests normal operation of database CONNECT trigger. +FBTEST: functional.trigger.database.connect_01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None - -substitutions_1 = [('[ \t]+', ' ')] - -init_script_1 = """ +init_script = """ create table LOG (ID integer, MSG varchar(100)); create generator LOGID; set term ^; @@ -36,25 +31,25 @@ init_script_1 = """ ^ set term ;^ commit; - """ + +""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """ +test_script = """ set list on; select * from LOG; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ ID 1 MSG Connect """ -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/trigger/database/test_connect_02.py b/tests/functional/trigger/database/test_connect_02.py index 3ae21332..9967795f 100644 --- a/tests/functional/trigger/database/test_connect_02.py +++ b/tests/functional/trigger/database/test_connect_02.py @@ -1,22 +1,18 @@ #coding:utf-8 -# -# id: functional.trigger.database.connect_02 -# title: Error handling in trigger on database connect -# decription: This test verifies the proper error handling. Uncaught exceptions in trigger roll back the transaction, disconnect the attachment and are returned to the client. -# tracker_id: CORE-745 -# min_versions: [] -# versions: 2.1 -# qmid: functional.trigger.database.connect_02 + +""" +ID: trigger.database.connect-02 +TITLE: Error handling in trigger on database connect +DESCRIPTION: + This test verifies the proper error handling. Uncaught exceptions in trigger roll back + the transaction, disconnect the attachment and are returned to the client. 
+FBTEST: functional.trigger.database.connect_02 +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None - -substitutions_1 = [('line:.*', '')] - -init_script_1 = """create table LOG (ID integer, MSG varchar(100)); +init_script = """create table LOG (ID integer, MSG varchar(100)); create generator LOGID; create exception CONNECTERROR 'Exception in ON CONNECT trigger'; create role TEST; @@ -42,29 +38,29 @@ set term ;^ commit; """ -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -# test_script_1 -#--- -# try: -# con = kdb.connect(dsn=dsn.encode(),user=user_name.encode(),password=user_password.encode(),role='TEST') -# except Exception,e: -# for msg in e: print (msg) -#--- -act_1 = python_act('db_1', substitutions=substitutions_1) +act = python_act('db', substitutions=[('line:.*', '')]) -expected_stdout_1 = """Error while connecting to database: +expected_stdout = """Error while connecting to database: - SQLCODE: -836 - exception 1 - CONNECTERROR - Exception in ON CONNECT trigger - At trigger 'ONCONNECT' line: 5, col: 29 -836 -335544517""" - -@pytest.mark.version('>=2.1') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") +335544517 +""" +@pytest.mark.skip('FIXME: Not IMPLEMENTED') +@pytest.mark.version('>=3.0') +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") +# Original python code for this test: +# ----------------------------------- +# try: +# con = kdb.connect(dsn=dsn.encode(),user=user_name.encode(),password=user_password.encode(),role='TEST') +# except Exception,e: +# for msg in e: print (msg) +# ----------------------------------- diff --git a/tests/functional/trigger/database/test_connect_03.py b/tests/functional/trigger/database/test_connect_03.py index 56ce3ea6..7a5c354c 100644 --- a/tests/functional/trigger/database/test_connect_03.py +++ b/tests/functional/trigger/database/test_connect_03.py @@ -1,22 +1,19 @@ #coding:utf-8 -# -# id: functional.trigger.database.connect_03 -# title: Multiple triggers on database connect. See also CORE-745 -# decription: This tests normal operation of database CONNECT triggers when there are more of them. -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: functional.trigger.database.connect_03 + +""" +ID: trigger.database.connect-03 +ISSUE: 1120 +TITLE: Multiple triggers on database connect +DESCRIPTION: + This tests normal operation of database CONNECT triggers when there are more of them. 
+FBTEST: functional.trigger.database.connect_03 +JIRA: CORE-745 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None - -substitutions_1 = [('[ \t]+', ' ')] - -init_script_1 = """ +init_script = """ create table LOG (ID integer, MSG varchar(100)); create generator LOGID; set term ^; @@ -42,27 +39,27 @@ init_script_1 = """ set term ;^ commit; - """ -db_1 = db_factory(sql_dialect=3, init=init_script_1) +""" -test_script_1 = """ +db = db_factory(init=init_script) + +test_script = """ set list on; select * from LOG; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ ID 1 MSG Connect T1 ID 2 - MSG Connect T2 + MSG Connect T2 """ -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/trigger/database/test_connect_04.py b/tests/functional/trigger/database/test_connect_04.py index 17e0b179..84c3c045 100644 --- a/tests/functional/trigger/database/test_connect_04.py +++ b/tests/functional/trigger/database/test_connect_04.py @@ -1,22 +1,21 @@ #coding:utf-8 -# -# id: functional.trigger.database.connect_04 -# title: Error handling in trigger on database connect - multiple triggers. -# decription: This test verifies the proper error handling. Uncaught exceptions in trigger roll back the transaction, disconnect the attachment and are returned to the client. Because this test is implemented in Python, our test support class creates a database connection for our test code (db_conn) that attach to the database without role specification. We verify that this connection was properly logged for convenience. -# tracker_id: CORE-745 -# min_versions: [] -# versions: 2.1 -# qmid: functional.trigger.database.connect_04 + +""" +ID: trigger.database.connect-04 +TITLE: Error handling in trigger on database connect - multiple triggers. +DESCRIPTION: + This test verifies the proper error handling. Uncaught exceptions in trigger roll back + the transaction, disconnect the attachment and are returned to the client. Because this + test is implemented in Python, our test support class creates a database connection for + our test code (db_conn) that attach to the database without role specification. We verify + that this connection was properly logged for convenience. 
+FBTEST: functional.trigger.database.connect_04 +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None - -substitutions_1 = [('line:.*', '')] - -init_script_1 = """create table LOG (ID integer, MSG varchar(100)); +init_script = """create table LOG (ID integer, MSG varchar(100)); create generator LOGID; create exception CONNECTERROR 'Exception in ON CONNECT trigger'; create role TEST; @@ -54,22 +53,11 @@ set term ;^ commit; """ -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -# test_script_1 -#--- -# try: -# con = kdb.connect(dsn=dsn.encode(),user=user_name.encode(),password=user_password.encode(),role='TEST') -# except Exception,e: -# for msg in e: print (msg) -# -# c = db_conn.cursor() -# c.execute('select * from LOG') -# printData(c) -#--- -act_1 = python_act('db_1', substitutions=substitutions_1) +act = python_act('db', substitutions=[('line:.*', '')]) -expected_stdout_1 = """Error while connecting to database: +expected_stdout = """Error while connecting to database: - SQLCODE: -836 - exception 1 - CONNECTERROR @@ -81,11 +69,22 @@ ID MSG ----------- ---------------------------------------------------------------------------------------------------- 1 Connect T1 as NONE 2 Connect T2 as NONE -3 Connect T3 as NONE""" - -@pytest.mark.version('>=2.1') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") +3 Connect T3 as NONE +""" +@pytest.mark.skip('FIXME: Not IMPLEMENTED') +@pytest.mark.version('>=3.0') +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") +# Original python code for this test: +# ----------------------------------- +# try: +# con = kdb.connect(dsn=dsn.encode(),user=user_name.encode(),password=user_password.encode(),role='TEST') +# except Exception,e: +# for msg in e: print (msg) +# +# c = db_conn.cursor() +# c.execute('select * from LOG') +# printData(c) +# ----------------------------------- diff --git a/tests/functional/trigger/database/test_disconnect_01.py b/tests/functional/trigger/database/test_disconnect_01.py index daee0e79..8aabf557 100644 --- a/tests/functional/trigger/database/test_disconnect_01.py +++ b/tests/functional/trigger/database/test_disconnect_01.py @@ -1,31 +1,22 @@ #coding:utf-8 -# -# id: functional.trigger.database.disconnect_01 -# title: Trigger on database disconnect: check that exception that raised when trigger fires is written to firebird.log -# decription: -# Discussed with Alex, 16.12.2020 functionality that was not specified in the documentation: -# exception that raises in a trigger on DISCONNECT reflects in the firebird.log. -# -# Test creates trigger on disconnect and put in its body statement which always will fail: 1/0. -# Then we get content of firebird.log before disconnect and after. -# Finally we compare these logs and search in the difference lines about error message. -# -# Checked on 4.0.0.2303 SS/CS. -# -# tracker_id: -# min_versions: [] -# versions: 4.0 -# qmid: + +""" +ID: trigger.database.disconnect +TITLE: Trigger on database disconnect: check that exception that raised when trigger fires is written to firebird.log +DESCRIPTION: + Discussed with Alex, 16.12.2020 functionality that was not specified in the documentation: + exception that raises in a trigger on DISCONNECT reflects in the firebird.log. + + Test creates trigger on disconnect and put in its body statement which always will fail: 1/0. + Then we get content of firebird.log before disconnect and after. 
+ Finally we compare these logs and search in the difference lines about error message. +FBTEST: functional.trigger.database.disconnect_01 +""" import pytest -from firebird.qa import db_factory, python_act, Action +from firebird.qa import * -# version: 4.0 -# resources: None - -substitutions_1 = [('[ \t]+', ' ')] - -init_script_1 = """ +init_script = """ set term ^; create trigger trg_disconnect on disconnect as declare n int; @@ -35,118 +26,118 @@ init_script_1 = """ ^ set term ;^ commit; - """ -db_1 = db_factory(sql_dialect=3, init=init_script_1) +""" -# test_script_1 -#--- -# -# import os -# import subprocess -# import difflib -# import re -# import time -# -# #-------------------------------------------- -# -# def flush_and_close( file_handle ): -# # https://docs.python.org/2/library/os.html#os.fsync -# # If you're starting with a Python file object f, -# # first do f.flush(), and -# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. -# global os -# -# file_handle.flush() -# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: -# # otherwise: "OSError: [Errno 9] Bad file descriptor"! -# os.fsync(file_handle.fileno()) -# file_handle.close() -# -# #-------------------------------------------- -# -# def cleanup( f_names_list ): -# global os -# for f in f_names_list: -# if type(f) == file: -# del_name = f.name -# elif type(f) == str: -# del_name = f -# else: -# print('Unrecognized type of element:', f, ' - can not be treated as file.') -# del_name = None -# -# if del_name and os.path.isfile( del_name ): -# os.remove( del_name ) -# -# #-------------------------------------------- -# -# def svc_get_fb_log( fb_home, f_fb_log ): -# -# global subprocess -# subprocess.call( [ context['fbsvcmgr_path'], -# "localhost:service_mgr", -# "action_get_fb_log" -# ], -# stdout=f_fb_log, stderr=subprocess.STDOUT -# ) -# return -# -# #-------------------------------------------- -# -# os.environ["ISC_USER"] = user_name -# os.environ["ISC_PASSWORD"] = user_password -# -# f_init_log = open( os.path.join(context['temp_directory'],'tmp_fb_old.log'), 'w') -# subprocess.call( [ context['fbsvcmgr_path'],"localhost:service_mgr", "action_get_fb_log" ], stdout = f_init_log, stderr = subprocess.STDOUT) -# flush_and_close( f_init_log ) -# -# db_conn.close() # this leads to zero divide error in trg_disconnect which must be reflected in the firebird.log -# -# time.sleep(1) -# -# f_curr_log = open( os.path.join(context['temp_directory'],'tmp_fb_new.log'), 'w') -# subprocess.call( [ context['fbsvcmgr_path'],"localhost:service_mgr", "action_get_fb_log" ], stdout = f_curr_log, stderr = subprocess.STDOUT) -# flush_and_close( f_curr_log ) -# -# f_init_log=open(f_init_log.name, 'r') -# f_curr_log=open(f_curr_log.name, 'r') -# difftext = ''.join(difflib.unified_diff( -# f_init_log.readlines(), -# f_curr_log.readlines() -# )) -# flush_and_close( f_init_log ) -# flush_and_close( f_curr_log ) -# -# f_diff_txt=open( os.path.join(context['temp_directory'],'tmp_fb_diff.txt'), 'w') -# f_diff_txt.write(difftext) -# flush_and_close( f_diff_txt ) -# -# p = re.compile('\\+\\s?((Error at disconnect)|(arithmetic exception)|(Integer divide by zero)|(At trigger))') -# with open( f_diff_txt.name,'r') as f: -# for line in f: -# if line.startswith('+') and line.strip() != '+++' and p.search(line): -# print( line ) -# # print( 'DIFF in firebird.log: %(line)s' % locals() ) -# -# ############################### -# # Cleanup. 
-# time.sleep(1) -# cleanup( (f_init_log,f_curr_log,f_diff_txt) ) -# -#--- -act_1 = python_act('db_1', substitutions=substitutions_1) +db = db_factory(init=init_script) -expected_stdout_1 = """ +act = python_act('db', substitutions=[('[ \t]+', ' ')]) + +expected_stdout = """ + Error at disconnect: + arithmetic exception, numeric overflow, or string truncation + Integer divide by zero. The code attempted to divide an integer value by an integer divisor of zero. + At trigger 'TRG_DISCONNECT' line: 4, col: 9 """ +@pytest.mark.skip('FIXME: Not IMPLEMENTED') @pytest.mark.version('>=4.0') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") +# Original python code for this test: +# ----------------------------------- +# +# import os +# import subprocess +# import difflib +# import re +# import time +# +# #-------------------------------------------- +# +# def flush_and_close( file_handle ): +# # https://docs.python.org/2/library/os.html#os.fsync +# # If you're starting with a Python file object f, +# # first do f.flush(), and +# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. +# global os +# +# file_handle.flush() +# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: +# # otherwise: "OSError: [Errno 9] Bad file descriptor"! +# os.fsync(file_handle.fileno()) +# file_handle.close() +# +# #-------------------------------------------- +# +# def cleanup( f_names_list ): +# global os +# for f in f_names_list: +# if type(f) == file: +# del_name = f.name +# elif type(f) == str: +# del_name = f +# else: +# print('Unrecognized type of element:', f, ' - can not be treated as file.') +# del_name = None +# +# if del_name and os.path.isfile( del_name ): +# os.remove( del_name ) +# +# #-------------------------------------------- +# +# def svc_get_fb_log( fb_home, f_fb_log ): +# +# global subprocess +# subprocess.call( [ context['fbsvcmgr_path'], +# "localhost:service_mgr", +# "action_get_fb_log" +# ], +# stdout=f_fb_log, stderr=subprocess.STDOUT +# ) +# return +# +# #-------------------------------------------- +# +# os.environ["ISC_USER"] = user_name +# os.environ["ISC_PASSWORD"] = user_password +# +# f_init_log = open( os.path.join(context['temp_directory'],'tmp_fb_old.log'), 'w') +# subprocess.call( [ context['fbsvcmgr_path'],"localhost:service_mgr", "action_get_fb_log" ], stdout = f_init_log, stderr = subprocess.STDOUT) +# flush_and_close( f_init_log ) +# +# db_conn.close() # this leads to zero divide error in trg_disconnect which must be reflected in the firebird.log +# +# time.sleep(1) +# +# f_curr_log = open( os.path.join(context['temp_directory'],'tmp_fb_new.log'), 'w') +# subprocess.call( [ context['fbsvcmgr_path'],"localhost:service_mgr", "action_get_fb_log" ], stdout = f_curr_log, stderr = subprocess.STDOUT) +# flush_and_close( f_curr_log ) +# +# f_init_log=open(f_init_log.name, 'r') +# f_curr_log=open(f_curr_log.name, 'r') +# difftext = ''.join(difflib.unified_diff( +# f_init_log.readlines(), +# f_curr_log.readlines() +# )) +# flush_and_close( f_init_log ) +# flush_and_close( f_curr_log ) +# +# f_diff_txt=open( os.path.join(context['temp_directory'],'tmp_fb_diff.txt'), 'w') +# f_diff_txt.write(difftext) +# flush_and_close( f_diff_txt ) +# +# p = re.compile('\\+\\s?((Error at disconnect)|(arithmetic exception)|(Integer divide by zero)|(At trigger))') +# with open( f_diff_txt.name,'r') as f: +# for line in f: +# if 
line.startswith('+') and line.strip() != '+++' and p.search(line): +# print( line ) +# # print( 'DIFF in firebird.log: %(line)s' % locals() ) +# +# ############################### +# # Cleanup. +# time.sleep(1) +# cleanup( (f_init_log,f_curr_log,f_diff_txt) ) +# +# ----------------------------------- diff --git a/tests/functional/trigger/database/test_transaction_start_multiple_with_normal_finish.py b/tests/functional/trigger/database/test_transaction_start_multiple_with_normal_finish.py index 6647c1ad..3a385cb6 100644 --- a/tests/functional/trigger/database/test_transaction_start_multiple_with_normal_finish.py +++ b/tests/functional/trigger/database/test_transaction_start_multiple_with_normal_finish.py @@ -1,43 +1,23 @@ #coding:utf-8 -# -# id: functional.trigger.database.transaction_start_multiple_with_normal_finish -# title: Multiple triggers on start transaction -# decription: -# This tests normal operation of database TRANSACTION START trigger when: -# 1) more than one such triggers are defined -# 2) NO exception raise within any trigger during its work, i.e. all of them shoudl finish Ok -# -# Triggers must work within the same Tx as "parent" statement (which launched this Tx). -# -# Results (23-05-2017): -# FB25Cs, build 2.5.8.27062: OK, 1.688ss. -# FB25SC, build 2.5.8.27062: OK, 0.469ss. -# fb25sS, build 2.5.8.27062: OK, 1.265ss. -# fb30Cs, build 3.0.3.32726: OK, 2.938ss. -# fb30SC, build 3.0.3.32726: OK, 2.250ss. -# FB30SS, build 3.0.3.32726: OK, 1.937ss. -# FB40CS, build 4.0.0.649: OK, 3.718ss. -# FB40SC, build 4.0.0.649: OK, 1.875ss. -# FB40SS, build 4.0.0.649: OK, 1.984ss. -# -# tracker_id: CORE-745 -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: + +""" +ID: trigger.database.transaction-start-multiple-with-normal-finish +TITLE: Multiple triggers on start transaction +DESCRIPTION: + This tests normal operation of database TRANSACTION START trigger when: + 1) more than one such triggers are defined + 2) NO exception raise within any trigger during its work, i.e. all of them shoudl finish Ok + + Triggers must work within the same Tx as "parent" statement (which launched this Tx). +FBTEST: functional.trigger.database.transaction_start_multiple_with_normal_finish +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set bail on; set list on; @@ -53,7 +33,7 @@ test_script_1 = """ declare i int = 1; declare ddl varchar(255); begin - + rdb$set_context('USER_SESSION','TRIGGERS_TO_CREATE', 5); -- <<< ---------------- HOW MANY TRIGGERS ARE DEFINED n = cast( rdb$get_context('USER_SESSION','TRIGGERS_TO_CREATE') as int); @@ -75,16 +55,16 @@ test_script_1 = """ commit; --select current_timestamp from rdb$database; - -- Following 'select' sttm leads to implicit Tx start ==> all db-level triggers 'on transaction start' will be fired + -- Following 'select' sttm leads to implicit Tx start ==> all db-level triggers 'on transaction start' will be fired -- before engine begin to execute this statement. 
- -- Table trg_log should contain rdb$get_context('USER_SESSION','TRIGGERS_TO_CREATE') records and they all had to be + -- Table trg_log should contain rdb$get_context('USER_SESSION','TRIGGERS_TO_CREATE') records and they all had to be -- created within current Tx: select iif( count(distinct t.id) = max(t.expected_count), - 'EXPECTED.', + 'EXPECTED.', 'WRONG: ' || count(distinct id) || ' instead of ' || max(t.expected_count) ) as triggers_fired_count - from + from ( select t.id, t.trg_tx, cast( rdb$get_context('USER_SESSION','TRIGGERS_TO_CREATE') as int) as expected_count from trg_log t @@ -103,11 +83,11 @@ test_script_1 = """ commit; -- Previous commit should save in trg_log two records that were inserted there by triggers: - select iif( count(distinct t.id) = max(expected_count), - 'EXPECTED.', + select iif( count(distinct t.id) = max(expected_count), + 'EXPECTED.', 'WRONG: ' || count(distinct id) || ' instead of ' || max(t.expected_count) ) as rows_after_commit - from + from ( select t.id, t.trg_tx, cast( rdb$get_context('USER_SESSION','TRIGGERS_TO_CREATE') as int) as expected_count from trg_log t @@ -115,9 +95,9 @@ test_script_1 = """ where trg_tx is distinct from current_transaction; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ TRIGGERS_FIRED_COUNT EXPECTED. ID 1 @@ -135,9 +115,8 @@ expected_stdout_1 = """ ROWS_AFTER_COMMIT EXPECTED. """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/trigger/database/test_transactioncommit_01.py b/tests/functional/trigger/database/test_transactioncommit_01.py index e532390f..0d1e81b1 100644 --- a/tests/functional/trigger/database/test_transactioncommit_01.py +++ b/tests/functional/trigger/database/test_transactioncommit_01.py @@ -1,22 +1,17 @@ #coding:utf-8 -# -# id: functional.trigger.database.transactioncommit_01 -# title: Trigger on commit transaction. See also CORE-645 -# decription: Test trigger on commit transaction -# tracker_id: -# min_versions: [] -# versions: 2.1 -# qmid: + +""" +ID: trigger.database.transaction-commit +TITLE: Trigger on commit transaction. 
See also CORE-645 +DESCRIPTION: + Test trigger on commit transaction +FBTEST: functional.trigger.database.transactioncommit_01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None - -substitutions_1 = [('[ \t]+', ' ')] - -init_script_1 = """ +init_script = """ SET AUTODDL OFF; CREATE TABLE T1 ( @@ -39,14 +34,15 @@ init_script_1 = """ SET TERM ; ^ COMMIT; - """ -db_1 = db_factory(sql_dialect=3, init=init_script_1) +""" + +db = db_factory(init=init_script) + +test_script = """SET AUTODDL OFF; -test_script_1 = """SET AUTODDL OFF; - SET LIST ON; - + INSERT INTO T1 VALUES (1,'val1'); SELECT RDB$GET_CONTEXT('USER_SESSION', 'Trn_ID') AS CTX_VAR FROM RDB$DATABASE; COMMIT; @@ -59,18 +55,17 @@ test_script_1 = """SET AUTODDL OFF; SELECT RDB$GET_CONTEXT('USER_SESSION', 'Trn_ID') AS CTX_VAR FROM RDB$DATABASE; """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('[ \t]+', ' ')]) -expected_stdout_1 = """ +expected_stdout = """ CTX_VAR CTX_VAR 3 CTX_VAR 3 - CTX_VAR 4 + CTX_VAR 4 """ -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/trigger/database/test_transactionrollback_01.py b/tests/functional/trigger/database/test_transactionrollback_01.py index 4bf19ff8..fbb396bb 100644 --- a/tests/functional/trigger/database/test_transactionrollback_01.py +++ b/tests/functional/trigger/database/test_transactionrollback_01.py @@ -1,45 +1,26 @@ #coding:utf-8 -# -# id: functional.trigger.database.transactionrollback_01 -# title: Trigger on rollback transaction -# decription: -# Test trigger on rollback transaction -# Checked 17.05.2017 on: -# FB25Cs, build 2.5.8.27056: OK, 1.047ss. -# FB25SC, build 2.5.8.27061: OK, 0.516ss. -# fb25sS, build 2.5.7.27038: OK, 0.719ss. -# fb30Cs, build 3.0.3.32721: OK, 2.516ss. -# fb30SC, build 3.0.3.32721: OK, 1.297ss. -# FB30SS, build 3.0.3.32721: OK, 1.578ss. -# FB40CS, build 4.0.0.639: OK, 2.656ss. -# FB40SC, build 4.0.0.639: OK, 1.844ss. -# FB40SS, build 4.0.0.639: OK, 1.735ss. 
-# -# tracker_id: CORE-645 -# min_versions: ['2.5.0'] -# versions: 2.5 -# qmid: functional.trigger.database.transactionrollback_01 + +""" +ID: trigger.database.transaction-rollback +TITLE: Trigger on rollback transaction +DESCRIPTION: + Test trigger on rollback transaction +FBTEST: functional.trigger.database.transactionrollback_01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +db = db_factory() -substitutions_1 = [] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set list on; create sequence g; commit; - create view v_check as - select rdb$get_context('USER_SESSION', 'TRG_TX') as ctx + create view v_check as + select rdb$get_context('USER_SESSION', 'TRG_TX') as ctx from rdb$database; commit; @@ -47,7 +28,7 @@ test_script_1 = """ create or alter trigger trg_tx_rbak inactive on transaction rollback position 0 as begin --- nop --- - end + end ^ set term ;^ commit; @@ -62,7 +43,7 @@ test_script_1 = """ create or alter trigger trg_tx_rbak inactive on transaction rollback position 0 as begin rdb$set_context('USER_SESSION', 'TRG_TX', gen_id(g,1)); - end + end ^ set term ;^ commit; @@ -73,24 +54,24 @@ test_script_1 = """ select gen_id(g,0) as curr_g, v.* from v_check v; -- 0, - rollback; -- this should increase value of sequence 'g' by 1 and assign new value to context var. 'TRG_TX' + rollback; -- this should increase value of sequence 'g' by 1 and assign new value to context var. 'TRG_TX' set transaction no wait; select gen_id(g,0) as curr_g, v.* from v_check v; -- 1, 1 (the same! becase Tx START should not be watched by trg_tx_rbak) - commit; + commit; select gen_id(g,0) as curr_g, v.* from v_check v; -- 1, 1 (the same! because COMMIT should not be watched by trg_tx_rbak) set term ^; create or alter trigger trg_tx_rbak active on transaction rollback position 0 as begin rdb$set_context('USER_SESSION', 'TRG_TX', gen_id(g,1234)/0); - end + end ^ set term ;^ commit; - -- this should increase value of sequence 'g' by 1234 but context var. 'TRG_TX' + -- this should increase value of sequence 'g' by 1234 but context var. 'TRG_TX' -- will store old value because of zero-divide exception rollback; @@ -99,9 +80,9 @@ test_script_1 = """ select gen_id(g,0) as curr_g, v.* from v_check v; -- 1235, 1 """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ +expected_stdout = """ CURR_G 0 CTX @@ -115,9 +96,8 @@ expected_stdout_1 = """ CTX 1 """ -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout - +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/trigger/database/test_transactionstart_01.py b/tests/functional/trigger/database/test_transactionstart_01.py index 5ce4c819..9eb72f36 100644 --- a/tests/functional/trigger/database/test_transactionstart_01.py +++ b/tests/functional/trigger/database/test_transactionstart_01.py @@ -1,46 +1,25 @@ #coding:utf-8 -# -# id: functional.trigger.database.transactionstart_01 -# title: Trigger on start tansaction -# decription: -# This tests normal operation of database TRANSACTION START trigger. -# -# Checked 17.05.2017 on: -# FB25Cs, build 2.5.8.27056: OK, 1.375ss. 
-# FB25SC, build 2.5.8.27061: OK, 0.407ss. -# fb25sS, build 2.5.7.27038: OK, 0.953ss. -# fb30Cs, build 3.0.3.32721: OK, 2.937ss. -# fb30SC, build 3.0.3.32721: OK, 1.906ss. -# FB30SS, build 3.0.3.32721: OK, 1.125ss. -# FB40CS, build 4.0.0.639: OK, 3.422ss. -# FB40SC, build 4.0.0.639: OK, 1.859ss. -# FB40SS, build 4.0.0.639: OK, 1.266ss. -# -# -# tracker_id: CORE-745 -# min_versions: ['2.5.0'] -# versions: 3.0 -# qmid: functional.trigger.database.transactionstart_01 + +""" +ID: trigger.database.transaction-start +TITLE: Trigger on start tansaction +DESCRIPTION: + This tests normal operation of database TRANSACTION START trigger. +FBTEST: functional.trigger.database.transactionstart_01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +db = db_factory() -substitutions_1 = [('line: \\d+, col: \\d+', '')] - -init_script_1 = """""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """ +test_script = """ set autoddl off; create table trg_log(id integer, trg_tx int default current_transaction); create view v_check as select count(*) as cnt_chk_tx, count(iif(trg_tx=current_transaction,1,null)) as cnt_chk_trg - from trg_log + from trg_log where trg_tx = current_transaction; set term ^; @@ -48,7 +27,7 @@ test_script_1 = """ begin insert into trg_log default values; --insert into trg_log(trg_tx) values (current_transaction); - end + end ^ set term ;^ commit; @@ -60,7 +39,7 @@ test_script_1 = """ set autoddl off; select 'Tx to be rolled back' as phase - --, current_transaction + --, current_transaction from rdb$database; --select a.* from trg_log a; select * from v_check; @@ -68,14 +47,14 @@ test_script_1 = """ select 'Tx to be committed' as phase - --, current_transaction + --, current_transaction from rdb$database; --select a.* from trg_log a; select * from v_check; commit; select 'Final select' as phase - --, current_transaction + --, current_transaction from rdb$database; --select a.* from trg_log a order by id desc rows 1; select * from v_check; @@ -84,7 +63,7 @@ test_script_1 = """ alter trigger trg_start_tx inactive position 0 as begin insert into trg_log(trg_tx) values (1/0); - end + end ^ set term ;^ commit; @@ -95,9 +74,9 @@ test_script_1 = """ commit; -- this should raise exception in trg_start_tx and this exception SHOULD PASS to the client. """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('line: \\d+, col: \\d+', '')]) -expected_stdout_1 = """ +expected_stdout = """ PHASE Tx to be rolled back CNT_CHK_TX 1 CNT_CHK_TRG 1 @@ -108,7 +87,8 @@ expected_stdout_1 = """ CNT_CHK_TX 1 CNT_CHK_TRG 1 """ -expected_stderr_1 = """ + +expected_stderr = """ Statement failed, SQLSTATE = 22012 arithmetic exception, numeric overflow, or string truncation -Integer divide by zero. The code attempted to divide an integer value by an integer divisor of zero. 
@@ -116,11 +96,9 @@ expected_stderr_1 = """ """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - - assert act_1.clean_stdout == act_1.clean_expected_stdout - +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.expected_stderr = expected_stderr + act.execute() + assert (act.clean_stdout == act.clean_expected_stdout and + act.clean_stderr == act.clean_expected_stderr) diff --git a/tests/functional/trigger/table/test_alter_12.py b/tests/functional/trigger/table/test_alter_12.py index 91ba7ecf..bfa10b58 100644 --- a/tests/functional/trigger/table/test_alter_12.py +++ b/tests/functional/trigger/table/test_alter_12.py @@ -1,28 +1,17 @@ #coding:utf-8 -# -# id: functional.trigger.table.alter_12 -# title: ALTER TRIGGER - AS -# decription: ALTER TRIGGER - AS -# Try use old prefix in INSERT trigger -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# CREATE TRIGGER -# tracker_id: -# min_versions: [] -# versions: 3.0 -# qmid: functional.trigger.alter.alter_trigger_12 + +""" +ID: trigger.table.alter-01 +TITLE: ALTER TRIGGER - AS +DESCRIPTION: + Try use old prefix in INSERT trigger +FBTEST: functional.trigger.table.alter_12 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None - -substitutions_1 = [('At line.*', '')] - -init_script_1 = """ +init_script = """ create table test(id integer not null constraint unq unique, text varchar(32)); commit; set term ^; @@ -33,11 +22,12 @@ init_script_1 = """ ^ set term ;^ commit; - """ -db_1 = db_factory(sql_dialect=3, init=init_script_1) +""" -test_script_1 = """ +db = db_factory(init=init_script) + +test_script = """ -- Since WI-T3.0.0.31733 content of STDERR has been changed: source position of -- problematic statement is displayed now on seperate line, like this: -- "-At line 4, column 1" @@ -50,9 +40,9 @@ test_script_1 = """ set term ;^ """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('At line.*', '')]) -expected_stderr_1 = """ +expected_stderr = """ Statement failed, SQLSTATE = 42S22 unsuccessful metadata update -ALTER TRIGGER TG failed @@ -64,8 +54,7 @@ expected_stderr_1 = """ """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - +def test_1(act: Action): + act.expected_stderr = expected_stderr + act.execute() + assert act.clean_stderr == act.clean_expected_stderr diff --git a/tests/functional/trigger/table/test_alter_13.py b/tests/functional/trigger/table/test_alter_13.py index 35869528..fd34dff3 100644 --- a/tests/functional/trigger/table/test_alter_13.py +++ b/tests/functional/trigger/table/test_alter_13.py @@ -1,28 +1,17 @@ #coding:utf-8 -# -# id: functional.trigger.table.alter_13 -# title: ALTER TRIGGER - AS -# decription: ALTER TRIGGER - AS -# Try use new prefix in DELETE trigger -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# CREATE TRIGGER -# tracker_id: -# min_versions: [] -# versions: 3.0 -# qmid: functional.trigger.alter.alter_trigger_13 + +""" +ID: trigger.table.alter-02 +TITLE: ALTER TRIGGER - AS +DESCRIPTION: + Try use new prefix in DELETE trigger +FBTEST: functional.trigger.table.alter_13 +""" import pytest -from firebird.qa import db_factory, isql_act, 
Action +from firebird.qa import * -# version: 3.0 -# resources: None - -substitutions_1 = [('At line.*', '')] - -init_script_1 = """ +init_script = """ create table test( id integer not null constraint unq unique, text varchar(32)); commit; set term ^; @@ -31,11 +20,12 @@ init_script_1 = """ end ^ set term ;^ commit; - """ -db_1 = db_factory(sql_dialect=3, init=init_script_1) +""" -test_script_1 = """ +db = db_factory(init=init_script) + +test_script = """ -- Since WI-T3.0.0.31733 content of STDERR has been changed: source position of -- problematic statement is displayed now on seperate line, like this: -- "-At line 4, column 1" @@ -48,9 +38,9 @@ test_script_1 = """ set term ;^ """ -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script, substitutions=[('At line.*', '')]) -expected_stderr_1 = """ +expected_stderr = """ Statement failed, SQLSTATE = 42S22 unsuccessful metadata update -ALTER TRIGGER TG failed @@ -62,8 +52,7 @@ expected_stderr_1 = """ """ @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - +def test_1(act: Action): + act.expected_stderr = expected_stderr + act.execute() + assert act.clean_stderr == act.clean_expected_stderr diff --git a/tests/functional/util/test_gbak_zip.py b/tests/functional/util/test_gbak_zip.py index c76771dc..bd477a26 100644 --- a/tests/functional/util/test_gbak_zip.py +++ b/tests/functional/util/test_gbak_zip.py @@ -1,647 +1,636 @@ #coding:utf-8 -# -# id: functional.util.gbak_zip -# title: gbak utility: check ability to use -ZIP command switch when create backup -# decription: -# We create some metadata, extracte them into .SQL script, make backup with '-ZIP' switch. -# Then we try to restore this DB, validate it and again extract metadata with saving to new .SQL. -# Comparing old and new metadata must show that they are equal. -# All STDERR logs must be empty. Logs of backup, restore and validation must also be empty. -# To make test more complex non-ascii metadata are used. -# -# Checked on: -# 4.0.0.1694 SS: 4.921s. -# 4.0.0.1691 CS: 7.796s. -# -# NOTE. Command key '-zip' does not convert .fbk to .zip format; rather it just produces .fbk -# which content is compressed using LZ-algorothm and sign (flag) that this was done. -# -# Beside of that, database is encrypted before backup using IBSurgeon Demo Encryption package -# ( https://ib-aid.com/download-demo-firebird-encryption-plugin/ ; https://ib-aid.com/download/crypt/CryptTest.zip ) -# License file plugins\\dbcrypt.conf with unlimited expiration was provided by IBSurgeon to Firebird Foundation (FF). -# This file was preliminary stored in FF Test machine. -# Test assumes that this file and all neccessary libraries already were stored into FB_HOME and %FB_HOME%\\plugins. -# -# Anyone who wants to run this test on his own machine must -# 1) download https://ib-aid.com/download/crypt/CryptTest.zip AND -# 2) PURCHASE LICENSE and get from IBSurgeon file plugins\\dbcrypt.conf with apropriate expiration date and other info. -# -# ################################################ ! ! ! N O T E ! ! ! ############################################## -# FF tests storage (aka "fbt-repo") does not (and will not) contain any license file for IBSurgeon Demo Encryption package! 
-# ######################################################################################################################### -# -# tracker_id: -# min_versions: ['4.0'] -# versions: 4.0 -# qmid: None + +""" +ID: util.gbak-zip +TITLE: gbak utility: check ability to use -ZIP command switch when creating backup +DESCRIPTION: + We create some metadata, extract it into a .SQL script and make a backup with the '-ZIP' switch. + Then we try to restore this DB, validate it and extract its metadata again into a new .SQL script. + Comparing old and new metadata must show that they are equal. + All STDERR logs must be empty. Logs of backup, restore and validation must also be empty. + To make the test more complex, non-ascii metadata are used. + + Checked on: + 4.0.0.1694 SS: 4.921s. + 4.0.0.1691 CS: 7.796s. + + NOTE. The command switch '-zip' does not convert the .fbk to .zip format; rather it just produces a .fbk + whose content is compressed using an LZ algorithm, plus a sign (flag) that this was done. + + Besides that, the database is encrypted before backup using the IBSurgeon Demo Encryption package + ( https://ib-aid.com/download-demo-firebird-encryption-plugin/ ; https://ib-aid.com/download/crypt/CryptTest.zip ) + License file plugins/dbcrypt.conf with unlimited expiration was provided by IBSurgeon to Firebird Foundation (FF). + This file was stored beforehand on the FF test machine. + The test assumes that this file and all necessary libraries have already been stored into FB_HOME and %FB_HOME%/plugins. + + Anyone who wants to run this test on their own machine must + 1) download https://ib-aid.com/download/crypt/CryptTest.zip AND + 2) PURCHASE a LICENSE and get from IBSurgeon the file plugins/dbcrypt.conf with the appropriate expiration date and other info. + + ################################################ ! ! ! N O T E ! ! ! ############################################## + FF tests storage (aka "fbt-repo") does not (and will not) contain any license file for IBSurgeon Demo Encryption package! + ######################################################################################################################### +FBTEST: functional.util.gbak_zip +""" import pytest -from firebird.qa import db_factory, python_act, Action -# version: 4.0 -# resources: None - -substitutions_1 = [('[ \t]+', ' ')] - -init_script_1 = """""" - -db_1 = db_factory(charset='UTF8', sql_dialect=3, init=init_script_1) - -# test_script_1 -#--- -# import os -# import time -# import difflib -# import subprocess -# from fdb import services -# -# os.environ["ISC_USER"] = user_name -# os.environ["ISC_PASSWORD"] = user_password -# -# this_db = db_conn.database_name -# tmpfbk='$(DATABASE_LOCATION)'+'tmp_zipped_backup.fbk' -# tmpfdb='$(DATABASE_LOCATION)'+'tmp_zipped_restore.fdb' -# -# # 27.02.2021. -# # Name of encryption plugin depends on OS: -# # * for Windows we (currently) use plugin by IBSurgeon, its name is 'dbcrypt'; -# # later it can be replaced with built-in plugin 'fbSampleDbCrypt' -# # but currently it is included only in FB 4.x builds (not in FB 3.x).
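For orientation, the flow described in the DESCRIPTION above reduces to four command-line steps plus a metadata diff. Below is a minimal stand-alone sketch of that flow, condensed from the commented-out implementation preserved in this patch; it is not part of the patch itself. The DSN and file paths are hypothetical placeholders, the encryption step is omitted, and it assumes that isql, gbak and gfix are on PATH and that ISC_USER/ISC_PASSWORD are set in the environment.

import difflib
import subprocess

# Hypothetical connection string and scratch files -- adjust to the local setup.
SRC_DSN = 'localhost:/tmp/tmp_zipped_source.fdb'
TMP_FBK = '/tmp/tmp_zipped_backup.fbk'
TMP_FDB = '/tmp/tmp_zipped_restore.fdb'

def run(cmd):
    # Run one Firebird command-line tool and capture its output.
    return subprocess.run(cmd, capture_output=True, text=True)

# 1. Extract metadata of the source database.
meta_before = run(['isql', SRC_DSN, '-x', '-ch', 'utf8'])

# 2. Backup with the -zip switch: the result is still an .fbk, only its content is compressed.
backup = run(['gbak', '-b', '-zip', SRC_DSN, TMP_FBK])

# 3. Restore the compressed backup, then validate the restored database.
restore = run(['gbak', '-rep', TMP_FBK, 'localhost:' + TMP_FDB])
validate = run(['gfix', '-v', '-full', 'localhost:' + TMP_FDB])

# 4. Extract metadata of the restored database and compare it with the original.
meta_after = run(['isql', 'localhost:' + TMP_FDB, '-x', '-ch', 'utf8'])
meta_diff = list(difflib.unified_diff(meta_before.stdout.splitlines(),
                                      meta_after.stdout.splitlines()))

# The test passes when every log is empty and the metadata did not change.
for step in (meta_before, backup, restore, validate, meta_after):
    assert not step.stderr.strip(), step.stderr
assert not validate.stdout.strip(), validate.stdout
assert not meta_diff, '\n'.join(meta_diff)

In the migrated test this sequence is still marked as not implemented (the stub further below is skipped with a FIXME), so the sketch only documents what the port will eventually have to cover.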
-# # Discussed tih Dimitr, Alex, Vlad, letters since: 08-feb-2021 00:22 -# # "Windows-distributive FB 3.x: it is desired to see sub-folder 'examples\\prebuild' with files for encryption, like it is in FB 4.x -# # *** DEFERRED *** -# # * for Linux we use: -# # ** 'DbCrypt_example' for FB 3.x -# # ** 'fbSampleDbCrypt' for FB 4.x+ -# # -# PLUGIN_NAME = 'dbcrypt' if os.name == 'nt' else ( '"DbCrypt_example"' if db_conn.engine_version < 4 else '"fbSampleDbCrypt"' ) -# -# ################################################ -# ### e n c r y p t d a t a b a s e ### -# ################################################ -# runProgram('isql', [ dsn ], 'alter database encrypt with %(PLUGIN_NAME)s key Red;' % locals()) -# time.sleep(1) -# db_conn.close() -# -# #-------------------------------------------- -# -# def flush_and_close( file_handle ): -# # https://docs.python.org/2/library/os.html#os.fsync -# # If you're starting with a Python file object f, -# # first do f.flush(), and -# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. -# global os -# -# file_handle.flush() -# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: -# # otherwise: "OSError: [Errno 9] Bad file descriptor"! -# os.fsync(file_handle.fileno()) -# file_handle.close() -# -# #-------------------------------------------- -# -# def cleanup( f_names_list ): -# global os -# for f in f_names_list: -# if type(f) == file: -# del_name = f.name -# elif type(f) == str: -# del_name = f -# else: -# print('Unrecognized type of element:', f, ' - can not be treated as file.') -# del_name = None -# -# if del_name and os.path.isfile( del_name ): -# os.remove( del_name ) -# -# #------------------------------------------- -# sql_ddl=''' -# set bail on; -# -# -- ###################################################### -# -- ################ C O R E 0 9 8 6 ############# -# -- ###################################################### -# -# create collation "Циферки" for utf8 from unicode case insensitive 'NUMERIC-SORT=1'; -# create collation "Испания" for iso8859_1 from es_es_ci_ai 'SPECIALS-FIRST=1';; -# commit; -# -# create domain "ИД'шники" int; -# create domain "Группы" varchar(30) check( value in ('Электрика', 'Ходовая', 'Арматурка', 'Кузовщина') ); -# create domain "Артикулы" varchar(12) character set utf8 check( value = upper(value) ) -# collate "Циферки" -- enabled since core-5220 was fixed (30.04.2016) -# ; -# create domain "Комрады" varchar(40) character set iso8859_1 -# collate "Испания" -- enabled since core-5220 was fixed (30.04.2016) -# ; -# create domain "Кол-во" numeric(12,3) not null; -# -# create sequence generilka; -# create sequence "Генерилка"; -# -# --create role "манагер"; -# --create role "начсклд"; -# -# -- TEMPLY COMMENTED UNTIL CORE-5209 IS OPEN: -# -- ISQL -X ignores connection charset for text of EXCEPTION message (restoring it in initial charset when exception was created) -# --recreate exception "Невзлет" 'Запись обломалась, ваши не пляшут. Но не стесняйтесь и обязательно заходите еще, мы всегда рады видеть вас. 
До скорой встречи, товарищ!'; -# commit; -# -# ------------------------------------------------- -# recreate table "склад" ( -# "ИД'шник" "ИД'шники" -# ,"Откудова" "Группы" -# ,"Номенклатура" "Артикулы" -# ,"ИД'родителя" "ИД'шники" -# ,"сколько там" "Кол-во" -# ,constraint "ПК-ИД'шник" primary key ("ИД'шник") using index "склад_ПК" -# ,constraint "ФК-на-родока" foreign key("ИД'родителя") references "склад" ("ИД'шник") using index "склад_ФК" -# ,constraint "остаток >=0" check ("сколько там" >= 0) -# ); -# -# recreate view "Электрика"("ид изделия", "Название", "Запас") as -# select -# "ИД'шник" -# ,"Номенклатура" -# ,"сколько там" -# from "склад" -# where "Откудова" = 'Электрика' -# ; -# -# set term ^; -# create or alter trigger "склад би" for "склад" active before insert as -# begin -# --new."ИД'шник" = coalesce( new."ИД'шник", gen_id(generilka, 1) ); -# -- not avail up to 2.5.6: -# new."ИД'шник" = coalesce( new."ИД'шник", gen_id("Генерилка", 1) ); -# end -# ^ -# -# create or alter procedure "Доб на склад"( -# "Откудова" varchar(30) -# ,"Номенклатура" varchar(30) -# ,"ИД'родителя" int -# ,"сколько там" numeric(12,3) -# ) returns ( -# "код возврата" int -# ) as -# begin -# insert into "склад"( -# "Откудова" -# ,"Номенклатура" -# ,"ИД'родителя" -# ,"сколько там" -# ) values ( -# :"Откудова" -# ,:"Номенклатура" -# ,:"ИД'родителя" -# ,:"сколько там" -# ); -# -# end -# ^ -# create or alter procedure "Удалить" as -# begin -# /* -# Антон Павлович Чехов. Каштанка -# -# 1. Дурное поведение -# -# Молодая рыжая собака - помесь такса с дворняжкой - очень похожая мордой -# на лисицу, бегала взад и вперед по тротуару и беспокойно оглядывалась по -# сторонам. Изредка она останавливалась и, плача, приподнимая то одну озябшую -# лапу, то другую, старалась дать себе отчет: как это могло случиться, что она -# заблудилась? -# */ -# end -# ^ -# set term ;^ -# -# --grant select on "склад" to "манагер"; -# --grant select, insert, update, delete on "склад" to "начсклд"; -# -# comment on sequence "Генерилка" is 'Генератор простых идей'; -# comment on table "склад" is 'Это всё, что мы сейчас имеем в наличии'; -# comment on view "Электрика" is 'Не суй пальцы в розетку, будет бо-бо!'; -# comment on procedure "Доб на склад" is 'Процедурка добавления изделия на склад'; -# comment on parameter "Доб на склад"."Откудова" is 'Группа изделия, которое собираемся добавить'; -# -# comment on parameter "Доб на склад"."ИД'родителя" is ' -# Федор Михайлович Достоевский -# -# Преступление и наказание -# -# Роман в шести частях с эпилогом -# -# -# Часть первая -# -# I -# В начале июля, в чрезвычайно жаркое время, под вечер, один молодой человек вышел из своей каморки, которую нанимал от жильцов в С -- м переулке, на улицу и медленно, как бы в нерешимости, отправился к К -- ну мосту. -# Он благополучно избегнул встречи с своею хозяйкой на лестнице. Каморка его приходилась под самою кровлей высокого пятиэтажного дома и походила более на шкаф, чем на квартиру. Квартирная же хозяйка его, у которой он нанимал эту каморку с обедом и прислугой, помещалась одною лестницей ниже, в отдельной квартире, и каждый раз, при выходе на улицу, ему непременно надо было проходить мимо хозяйкиной кухни, почти всегда настежь отворенной на лестницу. И каждый раз молодой человек, проходя мимо, чувствовал какое-то болезненное и трусливое ощущение, которого стыдился и от которого морщился. Он был должен кругом хозяйке и боялся с нею встретиться. 
-# Не то чтоб он был так труслив и забит, совсем даже напротив; но с некоторого времени он был в раздражительном и напряженном состоянии, похожем на ипохондрию. Он до того углубился в себя и уединился от всех, что боялся даже всякой встречи, не только встречи с хозяйкой. Он был задавлен бедностью; но даже стесненное положение перестало в последнее время тяготить его. Насущными делами своими он совсем перестал и не хотел заниматься. Никакой хозяйки, в сущности, он не боялся, что бы та ни замышляла против него. Но останавливаться на лестнице, слушать всякий вздор про всю эту обыденную дребедень, до которой ему нет никакого дела, все эти приставания о платеже, угрозы, жалобы, и при этом самому изворачиваться, извиняться, лгать, -- нет уж, лучше проскользнуть как-нибудь кошкой по лестнице и улизнуть, чтобы никто не видал. -# Впрочем, на этот раз страх встречи с своею кредиторшей даже его самого поразил по выходе на улицу. -# "На какое дело хочу покуситься и в то же время каких пустяков боюсь! -- подумал он с странною улыбкой. -- Гм... да... всё в руках человека, и всё-то он мимо носу проносит, единственно от одной трусости... это уж аксиома... Любопытно, чего люди больше всего боятся? Нового шага, нового собственного слова они всего больше боятся... А впрочем, я слишком много болтаю. Оттого и ничего не делаю, что болтаю. Пожалуй, впрочем, и так: оттого болтаю, что ничего не делаю. Это я в этот последний месяц выучился болтать, лежа по целым суткам в углу и думая... о царе Горохе. Ну зачем я теперь иду? Разве я способен на это? Разве это серьезно? Совсем не серьезно. Так, ради фантазии сам себя тешу; игрушки! Да, пожалуй что и игрушки!" -# На улице жара стояла страшная, к тому же духота, толкотня, всюду известка, леса, кирпич, пыль и та особенная летняя вонь, столь известная каждому петербуржцу, не имеющему возможности нанять дачу, -- всё это разом неприятно потрясло и без того уже расстроенные нервы юноши. Нестерпимая же вонь из распивочных, которых в этой части города особенное множество, и пьяные, поминутно попадавшиеся, несмотря на буднее время, довершили отвратительный и грустный колорит картины. Чувство глубочайшего омерзения мелькнуло на миг в тонких чертах молодого человека. Кстати, он был замечательно хорош собою, с прекрасными темными глазами, темно-рус, ростом выше среднего, тонок и строен. Но скоро он впал как бы в глубокую задумчивость, даже, вернее сказать, как бы в какое-то забытье, и пошел, уже не замечая окружающего, да и не желая его замечать. Изредка только бормотал он что-то про себя, от своей привычки к монологам, в которой он сейчас сам себе признался. В эту же минуту он и сам сознавал, что мысли его порою мешаются и что он очень слаб: второй день как уж он почти совсем ничего не ел. -# Он был до того худо одет, что иной, даже и привычный человек, посовестился бы днем выходить в таких лохмотьях на улицу. Впрочем, квартал был таков, что костюмом здесь было трудно кого-нибудь удивить. Близость Сенной, обилие известных заведений и, по преимуществу, цеховое и ремесленное население, скученное в этих серединных петербургских улицах и переулках, пестрили иногда общую панораму такими субъектами, что странно было бы и удивляться при встрече с иною фигурой. Но столько злобного презрения уже накопилось в душе молодого человека, что, несмотря на всю свою, иногда очень молодую, щекотливость, он менее всего совестился своих лохмотьев на улице. Другое дело при встрече с иными знакомыми или с прежними товарищами, с которыми вообще он не любил встречаться... 
А между тем, когда один пьяный, которого неизвестно почему и куда провозили в это время по улице в огромной телеге, запряженной огромною ломовою лошадью, крикнул ему вдруг, проезжая: "Эй ты, немецкий шляпник!" -- и заорал во всё горло, указывая на него рукой, -- молодой человек вдруг остановился и судорожно схватился за свою шляпу. Шляпа эта была высокая, круглая, циммермановская, но вся уже изношенная, совсем рыжая, вся в дырах и пятнах, без полей и самым безобразнейшим углом заломившаяся на сторону. Но не стыд, а совсем другое чувство, похожее даже на испуг, охватило его. -# "Я так и знал! -- бормотал он в смущении, -- я так и думал! Это уж всего сквернее! Вот эдакая какая-нибудь глупость, какая-нибудь пошлейшая мелочь, весь замысел может испортить! Да, слишком приметная шляпа... Смешная, потому и приметная... К моим лохмотьям непременно нужна фуражка, хотя бы старый блин какой-нибудь, а не этот урод. Никто таких не носит, за версту заметят, запомнят... главное, потом запомнят, ан и улика. Тут нужно быть как можно неприметнее... Мелочи, мелочи главное!.. Вот эти-то мелочи и губят всегда и всё..." -# '; -# -# commit; -# -# -# -- ###################################################### -# -- ################ C O R E 2 2 3 8 ############# -# -- ###################################################### -# create domain dm_long_utf8 as varchar(8191) character set utf8; -# create table test (long_text dm_long_utf8, long_descr blob sub_type text character set utf8 ); -# commit; -# set count on; -# -- Length of this literal is exact 8191 characters: -# insert into test(long_text) -# values( -# 'Kaikki neuvon-antajat ja etevimmät päälliköt ja mollat ja imamit ja kadit ja kaupungin päähenkilöt olivat suuresti hämmästyksissään. Hänen tunnettu hurskautensa vaati kaikkia äänettömyyteen, sillä välin kuin hän itse lausui pitkän rukouksen, pyytäen Allah''ta ja Profeettaa hämmentämään kaikkia häväiseviä Juutalaisia ja uskottomia ja vuodattamaan totuuden sanoja jumalisten ihmisten suuhun. Ja nyt kunnian-arvoisa sheiki kutsui esiin kaikki todistajat David Alroy''ta vastaan. Heti Kisloch, Kurdilainen, joka oli koroitettu Bagdadin kadiksi, astui esiin, veti sametti-kukkarostansa paperikääryn ja luki semmoisen todistuksen, jossa arvoisa Kisloch vakuutti, että hän ensin tutustui vangin, David Alroy''n kanssa joissakin erämaan raunioissa -- muutamain rosvojen pesässä, joita Alroy johdatti; että hän, Kisloch, oli rehellinen kauppias ja että nämät konnat olivat ryöstäneet hänen karavaninsa ja hän itse joutunut vankeuteen; että hänen vankeutensa toisena yönä Alroy oli ilmestynyt hänen eteensä leijonan muodossa ja kolmantena tuimasilmäisenä härkänä; että hänen oli tapa alinomaa muuttaa itsensä; että hän usein nosti henkiä; että viimein eräänä kauheana yönä Eblis itse tuli suurella juhlasaatolla ja antoi Alroy''lle Salomonin, Davidin pojan valtikan; ja että tämä seuraavana päivänä kohotti lippunsa ja kohta sen jälkeen kukisti Hassan Subah''n ja tämän Seldshukit useitten hirmuisten paholaisten silminnähtävällä avulla. Kalidaan, Indialaisen, Guebriläisen ja Neekerin ja muutamien muitten saman hengen lapsien todistukset vetivät täysin määrin vertoja Kisloch Kurdilaisen uhkealle kertomukselle. Hebrealaisen valloittajan vastustamaton menestys oli kieltämättömällä tavalla selitetty, Mahomettilaisten aseitten kunnia ja Moslemin uskon puhtaus olivat asetetut jälleen entiseen loistoonsa ja saastuttamattomaan maineesensa. Todeksi saatiin, että David Alroy oli Ebliin lapsi, noitamies, taikakalujen ja myrkkyjen käyttäjä. 
Kansa kuunteli kauhulla ja harmilla. He olisivat tunkeneet vartiaväen läpitse ja repineet hänet kappaleiksi, jolleivät olisi pelänneet Karamanialaisten sotatapparoita. Niin he lohduttivat mieltänsä sillä, että he ennen pitkää saisivat nähdä hänen kidutuksensa. Bagdadin kadi kumarsi Karamanian kuningasta ja kuiskasi soveliaan matkan päästä jotakin kuninkaalliseen korvaan. Torvet kaikkuivat, kuuluttajat vaativat äänettömyyttä ja kuninkaan huulet liikkuivat taas. "Kuule, oi kansa, ja ole viisas. Pääkadi aikoo nyt lukea kuninkaallisen prinsessan Schirenen, noiturin etevimmän uhrin todistuksen." Ja todistus luettiin, joka vakuutti, että David Alroy omisti ja kantoi lähinnä sydäntänsä erästä talismania, jonka Eblis oli antanut hänelle ja jonka voima oli niin suuri, että, jos sitä kerta painettiin naisen rintaa vastaan, tämä ei enää voinut hallita tahtoansa. Tämmöinen kova onni oli kohdannut oikeauskoisten hallitsian tytärtä. "Onko siinä niin kirjoitettu?" vanki kysyi. "On", kadi vastasi, "ja sen alla on vielä prinsessan kuninkaallinen allekirjoitus." "Se on väärennetty." Karamanian kuningas kavahti valta-istuimeltansa ja oli vihoissansa astumallaan sen portaita alas. Hänen kasvonsa olivat veripunaiset, hänen partansa kuin tulen liekki. Joku lempiministeri rohkeni vienosti pidättää häntä hänen kuninkaallisesta vaipastansa. "Tapa paikalla pois se koira", Karamanian kuningas mutisi. "Prinsessa on itse täällä", lausui kadi, "todistamassa niitä noitakeinoja, joitten alaisena hän oli, vaan joitten vaikutuksesta hän nyt Allah''n ja Profeetan voiman kautta on pääsnyt." Alroy''ta vävähti! "Astu esiin, kuninkaallinen prinsessa", kadi sanoi, "ja jos se todistus, jonka kuulit, on perustettu, nosta ylös se kuninkaallinen käsi, joka koristi sen allekirjoituksellaan." Lähellä valta-istuinta oleva eunukkien joukko teki tilaa; naishaamu, joka oli verhottu hunnulla jalkoihin saakka, astui esiin. Hän nosti ylös kätensä; koko kerääntynyt kansa tuskin hengitti mielenliikutuksesta; eunukkien rivit ummistuivat jälleen; huuto kuului ja hunnustettu haamu katosi. "Minä odotan kidutuskoneitasi, kuningas", Alroy lausui raskaan surun äänellä. Hänen lujuutensa näytti luopuneen hänestä. Hänen silmänsä olivat luodut maahan. Hän oli nähtävästi vaipunut syvään miettimiseen taikka heittäynyt epätoivoon. "Valmistakaat seipäät", käski Alp Arslan. Koko kansan joukkoa värisytti vasten mieltäkin. Yksi orja lähestyi ja tarjosi paperikääryä Alroy''lle. Hän tunsi Nubialaisen, joka oli Honainin palveluksessa. Hänen entinen ministerinsä ilmoitti hänelle, että hän oli saapuvilla; että ne ehdot, joita hän vankihuoneessa tarjosi, vielä myönnettäisiin; että jos Alroy, jota asiaa hän ei epäillyt ja jota hän rukoili, suostuisi niitä vastaan-ottamaan, hänen tuli pistää paperikäärö poveensa, mutta, jos hän yhä oli taipumaton, jos hänen yhä oli mieletön päätös kuolla hirveä ja häväisevä kuolema, hänen tuli repiä se rikki ja heittää se tanterelle. Silmänräpäyksellä Alroy otti paperikääryn ja repi sen kiivaasti tuhansiin palasiin. Tuulen puuska levitti kappaleet laajalle yliympäri. Alhaiso riiteli näistä David Alroy''n viimeisistä muistoista; ja tämä vähäinen tapaus tuotti paljon hämminkiä. Tällä välin Neekerit varustivat kidutuksen ja kuoleman koneita. "Tuon juutalaisen koiran itsepintaisuus tekee minun hulluksi", lausui Karamanian kuningas hovimiehillensä. "Minua haluttaa puhutella häntä vähän, ennenkuin hän kuolee." 
Lempiministeri pyysi hallitsiaansa olemaan levollisena; mutta kuninkaallinen parta kävi niin punaiseksi, ja kuninkaalliset silmät iskivät niin kauheata tulta, että lempiministerikin lopulta myöntyi. Torvi kaikkui, kuuluttajat vaativat vaiti-oloa, ja Alp Arslanin ääni eroitettiin jälleen. "Senkin koira, näetkö sinä, mikä on tarjonasi? Tiedätkö sinä, mikä vartoo sinua sinun herrasi Ebliin asunnoissa? Voiko väärä ylpeys viehättää Juutalaistakin? Eikö elämä ole suloista? Eikö olisi parempi olla minun varvaskenkieni kantaja kuin tulla seivästetyksi?" "Jalomielinen Alp Arslan", vastasi Alroy ilmeisen ylenkatseen äänellä; "luuletko, että mikään kidutus rasittaa niin, kuin se muisto, että sinä olet voittanut minun?" "Partani kautta, hän ivaa minua!" Karamanialaisten hallitsia huudahti; "hän tekee kiusaa minulle! Älkäät koskeko vaippaani. Minä tahdon puhua hänen kanssaan. Te ette näe kauemmaksi kuin hunnustettu haukka, te sokean äidin lapset. Se on noita; hänellä on vielä jälellä joku päätaika; hän pelastaa vielä henkensä. Hän lentää ilmaan taikka vaipuu maan sisään. Hän nauraa meidän kidutuksiamme." Karamanian kuningas astui tuota pikaa valta-istuimensa portaita alaspäin; häntä seurasivat hänen lempiministerinsä ja hänen neuvon-antajansa ja hänen etevimmät päällikkönsä ja kadit ja mollat ja imamit ja kaupungin päähenkilöt. "Sinä noita!" Alp Arslan huudahti, "hävytön noita! halvan äidin halpa poika! koirien koira! niskotteletko sinä meitä vastaan? Kuiskaako herrasi Eblis toivoa sinun korviisi? Nauratko meidän rangaistuksiamme? Aiotko lentää ylös ilmaan? vai painua alas maahan? Niinkö, niinkö?" Hengästyneenä ja vihastansa uupuneena hallitsia vaikeni. Hän repi partaansa ja polki maata rajussa vimmassaan. "Sinä olet viisaampi kuin neuvon-antajasi, kuningas Arslan; minä en nöyrry sinun edessäsi. Minun Herrani, vaikka hän ei ole Eblis, ei ole hylännyt minua. Minä nauran sinun rangaistuksiasi. Sinun kidutuksiasi minä ylenkatson. Minä sekä vaivun maan sisään että kohoan ilmaan. Tyydytkö nyt vastaukseeni?" "Partani kautta", huudahti tulistunut Arslan, "minä tyydyn vastaukseesi. Pelastakoon Eblis sinut, jos hän voi;" ja Karamanian kuningas, Aasian mainioin miekan piteliä veti säilänsä, ikäänkuin salaman, tupesta ja silpaisi yhdellä säväyksellä Alroy''lta pään. Se kaatui, vaan, kun se kaatui, riemuitsevan pilkan hymy näytti vivahtelevan sankarin kylmenevillä kasvoilla ja kysyvän hänen vihollisiltansa: "missä kaikki teidän kidutuksenne nyt ovat?" Do Dzieci Gołąbki i Dziewczynka Dziecię i Koza Wróbel i Jaskółka Osieł i Chłopczyk Nieposłuszny Zajączek Kotek Brytan i Pudelek Egzamin Małego "Misia" Wilk i Owce Lis i Gąski Chłopczyk i Źrebię Gęsia Kapela Lew i Piesek Niedźwiedź i Pszczółka Śniadanie Artysta Z Zimowych Rozrywek Leniwy Chłopczyk Przygoda z Indykiem O hämmästyksissään. Leniwy ЙЦУКЕН' -# ); -# insert into test(long_text) -# values( -# 'Kaikki neuvon-antajat ja etevimmät päälliköt ja mollat ja imamit ja kadit ja kaupungin päähenkilöt olivat suuresti hämmästyksissään. Hänen tunnettu hurskautensa vaati kaikkia äänettömyyteen, sillä välin kuin hän itse lausui pitkän rukouksen, pyytäen Allah''ta ja Profeettaa hämmentämään kaikkia häväiseviä Juutalaisia ja uskottomia ja vuodattamaan totuuden sanoja jumalisten ihmisten suuhun. Ja nyt kunnian-arvoisa sheiki kutsui esiin kaikki todistajat David Alroy''ta vastaan. 
Heti Kisloch, Kurdilainen, joka oli koroitettu Bagdadin kadiksi, astui esiin, veti sametti-kukkarostansa paperikääryn ja luki semmoisen todistuksen, jossa arvoisa Kisloch vakuutti, että hän ensin tutustui vangin, David Alroy''n kanssa joissakin erämaan raunioissa -- muutamain rosvojen pesässä, joita Alroy johdatti; että hän, Kisloch, oli rehellinen kauppias ja että nämät konnat olivat ryöstäneet hänen karavaninsa ja hän itse joutunut vankeuteen; että hänen vankeutensa toisena yönä Alroy oli ilmestynyt hänen eteensä leijonan muodossa ja kolmantena tuimasilmäisenä härkänä; että hänen oli tapa alinomaa muuttaa itsensä; että hän usein nosti henkiä; että viimein eräänä kauheana yönä Eblis itse tuli suurella juhlasaatolla ja antoi Alroy''lle Salomonin, Davidin pojan valtikan; ja että tämä seuraavana päivänä kohotti lippunsa ja kohta sen jälkeen kukisti Hassan Subah''n ja tämän Seldshukit useitten hirmuisten paholaisten silminnähtävällä avulla. Kalidaan, Indialaisen, Guebriläisen ja Neekerin ja muutamien muitten saman hengen lapsien todistukset vetivät täysin määrin vertoja Kisloch Kurdilaisen uhkealle kertomukselle. Hebrealaisen valloittajan vastustamaton menestys oli kieltämättömällä tavalla selitetty, Mahomettilaisten aseitten kunnia ja Moslemin uskon puhtaus olivat asetetut jälleen entiseen loistoonsa ja saastuttamattomaan maineesensa. Todeksi saatiin, että David Alroy oli Ebliin lapsi, noitamies, taikakalujen ja myrkkyjen käyttäjä. Kansa kuunteli kauhulla ja harmilla. He olisivat tunkeneet vartiaväen läpitse ja repineet hänet kappaleiksi, jolleivät olisi pelänneet Karamanialaisten sotatapparoita. Niin he lohduttivat mieltänsä sillä, että he ennen pitkää saisivat nähdä hänen kidutuksensa. Bagdadin kadi kumarsi Karamanian kuningasta ja kuiskasi soveliaan matkan päästä jotakin kuninkaalliseen korvaan. Torvet kaikkuivat, kuuluttajat vaativat äänettömyyttä ja kuninkaan huulet liikkuivat taas. "Kuule, oi kansa, ja ole viisas. Pääkadi aikoo nyt lukea kuninkaallisen prinsessan Schirenen, noiturin etevimmän uhrin todistuksen." Ja todistus luettiin, joka vakuutti, että David Alroy omisti ja kantoi lähinnä sydäntänsä erästä talismania, jonka Eblis oli antanut hänelle ja jonka voima oli niin suuri, että, jos sitä kerta painettiin naisen rintaa vastaan, tämä ei enää voinut hallita tahtoansa. Tämmöinen kova onni oli kohdannut oikeauskoisten hallitsian tytärtä. "Onko siinä niin kirjoitettu?" vanki kysyi. "On", kadi vastasi, "ja sen alla on vielä prinsessan kuninkaallinen allekirjoitus." "Se on väärennetty." Karamanian kuningas kavahti valta-istuimeltansa ja oli vihoissansa astumallaan sen portaita alas. Hänen kasvonsa olivat veripunaiset, hänen partansa kuin tulen liekki. Joku lempiministeri rohkeni vienosti pidättää häntä hänen kuninkaallisesta vaipastansa. "Tapa paikalla pois se koira", Karamanian kuningas mutisi. "Prinsessa on itse täällä", lausui kadi, "todistamassa niitä noitakeinoja, joitten alaisena hän oli, vaan joitten vaikutuksesta hän nyt Allah''n ja Profeetan voiman kautta on pääsnyt." Alroy''ta vävähti! "Astu esiin, kuninkaallinen prinsessa", kadi sanoi, "ja jos se todistus, jonka kuulit, on perustettu, nosta ylös se kuninkaallinen käsi, joka koristi sen allekirjoituksellaan." Lähellä valta-istuinta oleva eunukkien joukko teki tilaa; naishaamu, joka oli verhottu hunnulla jalkoihin saakka, astui esiin. Hän nosti ylös kätensä; koko kerääntynyt kansa tuskin hengitti mielenliikutuksesta; eunukkien rivit ummistuivat jälleen; huuto kuului ja hunnustettu haamu katosi. 
"Minä odotan kidutuskoneitasi, kuningas", Alroy lausui raskaan surun äänellä. Hänen lujuutensa näytti luopuneen hänestä. Hänen silmänsä olivat luodut maahan. Hän oli nähtävästi vaipunut syvään miettimiseen taikka heittäynyt epätoivoon. "Valmistakaat seipäät", käski Alp Arslan. Koko kansan joukkoa värisytti vasten mieltäkin. Yksi orja lähestyi ja tarjosi paperikääryä Alroy''lle. Hän tunsi Nubialaisen, joka oli Honainin palveluksessa. Hänen entinen ministerinsä ilmoitti hänelle, että hän oli saapuvilla; että ne ehdot, joita hän vankihuoneessa tarjosi, vielä myönnettäisiin; että jos Alroy, jota asiaa hän ei epäillyt ja jota hän rukoili, suostuisi niitä vastaan-ottamaan, hänen tuli pistää paperikäärö poveensa, mutta, jos hän yhä oli taipumaton, jos hänen yhä oli mieletön päätös kuolla hirveä ja häväisevä kuolema, hänen tuli repiä se rikki ja heittää se tanterelle. Silmänräpäyksellä Alroy otti paperikääryn ja repi sen kiivaasti tuhansiin palasiin. Tuulen puuska levitti kappaleet laajalle yliympäri. Alhaiso riiteli näistä David Alroy''n viimeisistä muistoista; ja tämä vähäinen tapaus tuotti paljon hämminkiä. Tällä välin Neekerit varustivat kidutuksen ja kuoleman koneita. "Tuon juutalaisen koiran itsepintaisuus tekee minun hulluksi", lausui Karamanian kuningas hovimiehillensä. "Minua haluttaa puhutella häntä vähän, ennenkuin hän kuolee." Lempiministeri pyysi hallitsiaansa olemaan levollisena; mutta kuninkaallinen parta kävi niin punaiseksi, ja kuninkaalliset silmät iskivät niin kauheata tulta, että lempiministerikin lopulta myöntyi. Torvi kaikkui, kuuluttajat vaativat vaiti-oloa, ja Alp Arslanin ääni eroitettiin jälleen. "Senkin koira, näetkö sinä, mikä on tarjonasi? Tiedätkö sinä, mikä vartoo sinua sinun herrasi Ebliin asunnoissa? Voiko väärä ylpeys viehättää Juutalaistakin? Eikö elämä ole suloista? Eikö olisi parempi olla minun varvaskenkieni kantaja kuin tulla seivästetyksi?" "Jalomielinen Alp Arslan", vastasi Alroy ilmeisen ylenkatseen äänellä; "luuletko, että mikään kidutus rasittaa niin, kuin se muisto, että sinä olet voittanut minun?" "Partani kautta, hän ivaa minua!" Karamanialaisten hallitsia huudahti; "hän tekee kiusaa minulle! Älkäät koskeko vaippaani. Minä tahdon puhua hänen kanssaan. Te ette näe kauemmaksi kuin hunnustettu haukka, te sokean äidin lapset. Se on noita; hänellä on vielä jälellä joku päätaika; hän pelastaa vielä henkensä. Hän lentää ilmaan taikka vaipuu maan sisään. Hän nauraa meidän kidutuksiamme." Karamanian kuningas astui tuota pikaa valta-istuimensa portaita alaspäin; häntä seurasivat hänen lempiministerinsä ja hänen neuvon-antajansa ja hänen etevimmät päällikkönsä ja kadit ja mollat ja imamit ja kaupungin päähenkilöt. "Sinä noita!" Alp Arslan huudahti, "hävytön noita! halvan äidin halpa poika! koirien koira! niskotteletko sinä meitä vastaan? Kuiskaako herrasi Eblis toivoa sinun korviisi? Nauratko meidän rangaistuksiamme? Aiotko lentää ylös ilmaan? vai painua alas maahan? Niinkö, niinkö?" Hengästyneenä ja vihastansa uupuneena hallitsia vaikeni. Hän repi partaansa ja polki maata rajussa vimmassaan. "Sinä olet viisaampi kuin neuvon-antajasi, kuningas Arslan; minä en nöyrry sinun edessäsi. Minun Herrani, vaikka hän ei ole Eblis, ei ole hylännyt minua. Minä nauran sinun rangaistuksiasi. Sinun kidutuksiasi minä ylenkatson. Minä sekä vaivun maan sisään että kohoan ilmaan. Tyydytkö nyt vastaukseeni?" "Partani kautta", huudahti tulistunut Arslan, "minä tyydyn vastaukseesi. 
Pelastakoon Eblis sinut, jos hän voi;" ja Karamanian kuningas, Aasian mainioin miekan piteliä veti säilänsä, ikäänkuin salaman, tupesta ja silpaisi yhdellä säväyksellä Alroy''lta pään. Se kaatui, vaan, kun se kaatui, riemuitsevan pilkan hymy näytti vivahtelevan sankarin kylmenevillä kasvoilla ja kysyvän hänen vihollisiltansa: "missä kaikki teidän kidutuksenne nyt ovat?" Do Dzieci Gołąbki i Dziewczynka Dziecię i Koza Wróbel i Jaskółka Osieł i Chłopczyk Nieposłuszny Zajączek Kotek Brytan i Pudelek Egzamin Małego "Misia" Wilk i Owce Lis i Gąski Chłopczyk i Źrebię Gęsia Kapela Lew i Piesek Niedźwiedź i Pszczółka Śniadanie Artysta Z Zimowych Rozrywek Leniwy Chłopczyk Przygoda z Indykiem O hämmästyksissään. Leniwy НЕКУЦЙ' -# ); -# -# update test set long_descr = long_text; -# commit; -# -# -# -# -- ################################################### -# -- ############## C O R E 3 4 4 6 ############## -# -- ################################################### -# recreate table test( s varchar(8187) character set utf8 collate unicode_ci_ai, b blob sub_type 1 character set utf8 collate unicode_ci_ai); -# commit; -# -# insert into test (s, b ) -# values( -# 'Sur le boulevard Montmorency, au n° 53, s''élève une maison portant, -# encastré dans son balcon, un profil lauré de Louis XV, en bronze -# doré, qui a tout l''air d''être le médaillon, dont était décorée la -# tribune de musique de la salle à manger de Luciennes, représenté dans -# l''aquarelle de Moreau que l''on voit au Louvre. Cette tête, que quelques -# promeneurs regardent d''un œil farouche, n''est point,--ai-je besoin de -# le dire?--une affiche des opinions politiques du propriétaire, elle est -# tout bonnement l''enseigne d''un des nids les plus pleins de choses du -# XVIIIe siècle qui existent à Paris. -# -# La porte noire, que surmonte un élégant dessus de grille de chapelle -# jésuite en fer forgé, la porte ouverte, du bas de l''escalier, de -# l''entrée du vestibule, du seuil de la maison, le visiteur est accueilli -# par des terres cuites, des bronzes, des dessins, des porcelaines du -# siècle aimable par excellence, mêlés à des objets de l''Extrême-Orient, -# qui se trouvaient faire si bon ménage dans les collections de Madame de -# Pompadour et de tous les _curieux_ et les _curiolets_ du temps. -# -# La vie d''aujourd''hui est une vie de combattivité; elle demande dans -# toutes les carrières une concentration, un effort, un travail, qui, en -# son foyer enferment l''homme, dont l''existence n''est plus extérieure -# comme au XVIIIe siècle, n''est plus papillonnante parmi la société -# depuis ses dix-sept ans jusqu''à sa mort. De notre temps on va bien -# encore dans le monde, mais toute la vie ne s''y dépense plus, et le -# _chez-soi_ a cessé d''être l''hôtel garni où l''on ne faisait que coucher. -# Dans cette vie assise au coin du feu, renfermée, sédentaire, la -# créature humaine, et la première venue, a été poussée à vouloir les -# quatre murs de son _home_ agréables, plaisants, amusants aux yeux; et -# cet entour et ce décor de son intérieur, elle l''a cherché et trouvé -# naturellement dans l''objet d''art pur ou dans l''objet d''art industriel, -# plus accessible au goût de tous. 
Du même coup, ces habitudes moins -# mondaines amenaient un amoindrissement du rôle de la femme dans la -# pensée masculine; elle n''était plus pour nous l''occupation galante de -# toute notre existence, cette occupation qui était autrefois la carrière -# du plus grand nombre, et, à la suite de cette modification dans les -# mœurs, il arrivait ceci: c''est que l''intérêt de l''homme, s''en allant -# de l''être charmant, se reportait en grande partie sur les jolis objets -# inanimés dont la passion revêt un peu de la nature et du caractère -# de l''amour. Au XVIIIe siècle, il n''y a pas de _bibeloteurs_ jeunes: -# c''est là la différence des deux siècles. Pour notre génération, la -# _bricabracomanie_ n''est qu''un bouche-trou de la femme qui ne possède -# plus l''imagination de l''homme, et j''ai fait à mon égard cette remarque, -# que, lorsque par hasard mon cœur s''est trouvé occupé, l''objet d''art ne -# m''était de rien. -# -# Oui, cette passion devenue générale, ce plaisir solitaire, auquel se -# livre presque toute une nation, doit son développement au vide, à -# l''ennui du cœur, et aussi, il faut le reconnaître, à la tristesse -# des jours actuels, à l''incertitude des lendemains, à l''enfantement, -# les pieds devant, de la société nouvelle, à des soucis et à des -# préoccupations qui poussent, comme à la veille d''un déluge, les désirs -# et les envies à se donner la jouissance immédiate de tout ce qui les -# charme, les séduit, les tente: l''oubli du moment dans l''assouvissement -# artistique. -# -# Ce sont ces causes, et incontestablement l''éducation de l''œil des -# gens du XIXe siècle, et encore un sentiment tout nouveau, la tendresse -# presque humaine pour les _choses_, qui font, à l''heure qu''il est, de -# presque tout le monde, des collectionneurs et de moi en particulier le -# plus passionné de tous les collectionneurs. -# -# Un riant pavé en marbre blanc et en marbre rouge du Languedoc, avec, -# pour revêtement aux murs et au plafond, un cuir moderne peuplé de -# perroquets fantastiques dorés et peints sur un fond vert d''eau. -# -# Sur ce cuir, dans un désordre cherché, dans un pittoresque -# d''antichambre et d''atelier, toutes sortes de choses voyantes et -# claquantes, de brillants cuivres découpés, des poteries dorées, des -# broderies du Japon et encore des objets bizarres, inattendus, étonnant -# par leur originalité, leur exotisme, et vis-à-vis d''un certain nombre -# desquels je me fais un peu l''effet du bon Père Buffier quand il disait: -# «Voilà des choses que je ne sais pas, il faut que je fasse un livre -# dessus.» -# -# Ça, une petite jardinière à suspension, fabriquée d''une coloquinte -# excentrique, dont la tige tournante et recroquevillée est une tige de -# bronze qui a la flexibilité d''une liane; cette grande planchette fruste -# de bois, toute parcourue des tortils d''un feuillage de lierre, exécuté -# en nacre et en écaille: le porte-éventail qui tient dans l''appartement -# l''éventail ouvert contre le mur; cette petite boule de porcelaine -# jaune impérial si délicatement treillagée: la cage au grillon ou à -# la mouche bourdonnante, que le Chinois aime suspendre au chevet de -# son lit; et cette plaque de faïence figurant une branche de pêcher en -# fleur, modelée à jour dans un cadre de bois en forme d''écran, vous -# représente la décoration de l''angle religieux et mystique d''une chambre -# de prostituée de maison de thé, l''espèce de tableau d''autel devant -# lequel elle place une fleur dans un vase. 
-# -# Des broderies du Japon, ai-je dit plus haut, c''est là, dans leurs -# cadres de bambous, la riche, la splendide, l''_éclairante_ décoration -# des murs du vestibule et un peu de toute la maison. Ces carrés de soie -# brodés appelés _fusha_ ou _foukousa_ font la chatoyante couverture -# sous laquelle on a l''habitude, dans l''Empire du Lever du Soleil, -# d''envoyer tout présent quelconque, et le plus minime, fût-il même de -# deux œufs[1]. Les anciens _foukousas_ fabriqués à Kioto[2] sont des -# produits d''un art tout particulier au Japon, et auxquels l''Europe -# ne peut rien opposer: de la peinture, de vrais tableaux composés -# et exécutés en soie par un brodeur, où sur les fonds aux adorables -# nuances, et telles qu''en donne le satin ou le crêpe, un oiseau, un -# poisson, une fleur se détache dans le haut relief d''une broderie. -# Et rien là dedans du travail d''un art mécanique, du dessin bête de -# vieille fille de nos broderies à nous, mais des silhouettes d''êtres -# pleins de vie, avec leurs pattes d''oiseau d''un si grand style, avec -# leurs nageoires de poisson d''un si puissant contournement. Quelquefois -# des parties peintes, peintes à l''encre de Chine, s''associent de la -# manière la plus heureuse à la broderie. Je connais, chez Mme Auguste -# Sichel, une fusée de fleurs brodée dans un vase en sparterie peint ou -# imprimé, qui est bien la plus harmonieuse chose qu''il soit possible -# de voir. M. de Nittis a fait un écran, d''un admirable et singulier -# carré, où deux grues, brodées en noir sur un fond rose saumoné, ont, -# comme accompagnement et adoucissement de la broderie, des demi-teintes -# doucement lavées d''encre de Chine sur l''étoffe enchanteresse. Et dans -# ce vestibule, il y a, sur un fond lilas, des carpes nageant au milieu -# de branchages de presle brodées en or, et dont le ventre apparaît comme -# argenté par un reflet de bourbe: un effet obtenu par une réserve au -# milieu du fond tout teinté et obscuré d''encre de Chine. Il est même un -# certain nombre de foukousas absolument peints. J''ai coloriée, sur un -# crêpe gris, dans l''orbe d''un soleil rouge comme du feu, l''échancrure -# pittoresque d''un passage de sept grues, exécuté avec la science que les -# Japonais possèdent du vol de l''échassier. J''ai encore, jetées sur un -# fond maïs, sans aucun détail de terrain, deux grandes grues blanches, -# à la petite crête rougie de vermillon, au cou, aux pattes, à la queue, -# teintés d''encre de Chine. Et ne vous étonnez pas de rencontrer si -# souvent sur les broderies la grue, cet oiseau qui apparaît dans le -# haut du ciel aux Japonais comme un messager céleste, et qu''ils saluent -# de l''appellation: _O Tsouri Sama_, Sa Seigneurie la Grue. -# -# [1] Il n''est guère besoin de dire que le carré est toujours -# rapporté à son maître par le porteur du présent. -# -# [2] Les foukousas modernes seraient aujourd''hui fabriqués à -# Togané, d''où on les expédierait à Yedo. -# ' -# , ----------- -# 'Sur le boulevard Montmorency, au n° 53, s''élève une maison portant, -# encastré dans son balcon, un profil lauré de Louis XV, en bronze -# doré, qui a tout l''air d''être le médaillon, dont était décorée la -# tribune de musique de la salle à manger de Luciennes, représenté dans -# l''aquarelle de Moreau que l''on voit au Louvre. 
Cette tête, que quelques -# promeneurs regardent d''un œil farouche, n''est point,--ai-je besoin de -# le dire?--une affiche des opinions politiques du propriétaire, elle est -# tout bonnement l''enseigne d''un des nids les plus pleins de choses du -# XVIIIe siècle qui existent à Paris. -# -# La porte noire, que surmonte un élégant dessus de grille de chapelle -# jésuite en fer forgé, la porte ouverte, du bas de l''escalier, de -# l''entrée du vestibule, du seuil de la maison, le visiteur est accueilli -# par des terres cuites, des bronzes, des dessins, des porcelaines du -# siècle aimable par excellence, mêlés à des objets de l''Extrême-Orient, -# qui se trouvaient faire si bon ménage dans les collections de Madame de -# Pompadour et de tous les _curieux_ et les _curiolets_ du temps. -# -# La vie d''aujourd''hui est une vie de combattivité; elle demande dans -# toutes les carrières une concentration, un effort, un travail, qui, en -# son foyer enferment l''homme, dont l''existence n''est plus extérieure -# comme au XVIIIe siècle, n''est plus papillonnante parmi la société -# depuis ses dix-sept ans jusqu''à sa mort. De notre temps on va bien -# encore dans le monde, mais toute la vie ne s''y dépense plus, et le -# _chez-soi_ a cessé d''être l''hôtel garni où l''on ne faisait que coucher. -# Dans cette vie assise au coin du feu, renfermée, sédentaire, la -# créature humaine, et la première venue, a été poussée à vouloir les -# quatre murs de son _home_ agréables, plaisants, amusants aux yeux; et -# cet entour et ce décor de son intérieur, elle l''a cherché et trouvé -# naturellement dans l''objet d''art pur ou dans l''objet d''art industriel, -# plus accessible au goût de tous. Du même coup, ces habitudes moins -# mondaines amenaient un amoindrissement du rôle de la femme dans la -# pensée masculine; elle n''était plus pour nous l''occupation galante de -# toute notre existence, cette occupation qui était autrefois la carrière -# du plus grand nombre, et, à la suite de cette modification dans les -# mœurs, il arrivait ceci: c''est que l''intérêt de l''homme, s''en allant -# de l''être charmant, se reportait en grande partie sur les jolis objets -# inanimés dont la passion revêt un peu de la nature et du caractère -# de l''amour. Au XVIIIe siècle, il n''y a pas de _bibeloteurs_ jeunes: -# c''est là la différence des deux siècles. Pour notre génération, la -# _bricabracomanie_ n''est qu''un bouche-trou de la femme qui ne possède -# plus l''imagination de l''homme, et j''ai fait à mon égard cette remarque, -# que, lorsque par hasard mon cœur s''est trouvé occupé, l''objet d''art ne -# m''était de rien. -# -# Oui, cette passion devenue générale, ce plaisir solitaire, auquel se -# livre presque toute une nation, doit son développement au vide, à -# l''ennui du cœur, et aussi, il faut le reconnaître, à la tristesse -# des jours actuels, à l''incertitude des lendemains, à l''enfantement, -# les pieds devant, de la société nouvelle, à des soucis et à des -# préoccupations qui poussent, comme à la veille d''un déluge, les désirs -# et les envies à se donner la jouissance immédiate de tout ce qui les -# charme, les séduit, les tente: l''oubli du moment dans l''assouvissement -# artistique. 
-# -# Ce sont ces causes, et incontestablement l''éducation de l''œil des -# gens du XIXe siècle, et encore un sentiment tout nouveau, la tendresse -# presque humaine pour les _choses_, qui font, à l''heure qu''il est, de -# presque tout le monde, des collectionneurs et de moi en particulier le -# plus passionné de tous les collectionneurs. -# -# Un riant pavé en marbre blanc et en marbre rouge du Languedoc, avec, -# pour revêtement aux murs et au plafond, un cuir moderne peuplé de -# perroquets fantastiques dorés et peints sur un fond vert d''eau. -# -# Sur ce cuir, dans un désordre cherché, dans un pittoresque -# d''antichambre et d''atelier, toutes sortes de choses voyantes et -# claquantes, de brillants cuivres découpés, des poteries dorées, des -# broderies du Japon et encore des objets bizarres, inattendus, étonnant -# par leur originalité, leur exotisme, et vis-à-vis d''un certain nombre -# desquels je me fais un peu l''effet du bon Père Buffier quand il disait: -# «Voilà des choses que je ne sais pas, il faut que je fasse un livre -# dessus.» -# -# Ça, une petite jardinière à suspension, fabriquée d''une coloquinte -# excentrique, dont la tige tournante et recroquevillée est une tige de -# bronze qui a la flexibilité d''une liane; cette grande planchette fruste -# de bois, toute parcourue des tortils d''un feuillage de lierre, exécuté -# en nacre et en écaille: le porte-éventail qui tient dans l''appartement -# l''éventail ouvert contre le mur; cette petite boule de porcelaine -# jaune impérial si délicatement treillagée: la cage au grillon ou à -# la mouche bourdonnante, que le Chinois aime suspendre au chevet de -# son lit; et cette plaque de faïence figurant une branche de pêcher en -# fleur, modelée à jour dans un cadre de bois en forme d''écran, vous -# représente la décoration de l''angle religieux et mystique d''une chambre -# de prostituée de maison de thé, l''espèce de tableau d''autel devant -# lequel elle place une fleur dans un vase. -# -# Des broderies du Japon, ai-je dit plus haut, c''est là, dans leurs -# cadres de bambous, la riche, la splendide, l''_éclairante_ décoration -# des murs du vestibule et un peu de toute la maison. Ces carrés de soie -# brodés appelés _fusha_ ou _foukousa_ font la chatoyante couverture -# sous laquelle on a l''habitude, dans l''Empire du Lever du Soleil, -# d''envoyer tout présent quelconque, et le plus minime, fût-il même de -# deux œufs[1]. Les anciens _foukousas_ fabriqués à Kioto[2] sont des -# produits d''un art tout particulier au Japon, et auxquels l''Europe -# ne peut rien opposer: de la peinture, de vrais tableaux composés -# et exécutés en soie par un brodeur, où sur les fonds aux adorables -# nuances, et telles qu''en donne le satin ou le crêpe, un oiseau, un -# poisson, une fleur se détache dans le haut relief d''une broderie. -# Et rien là dedans du travail d''un art mécanique, du dessin bête de -# vieille fille de nos broderies à nous, mais des silhouettes d''êtres -# pleins de vie, avec leurs pattes d''oiseau d''un si grand style, avec -# leurs nageoires de poisson d''un si puissant contournement. Quelquefois -# des parties peintes, peintes à l''encre de Chine, s''associent de la -# manière la plus heureuse à la broderie. Je connais, chez Mme Auguste -# Sichel, une fusée de fleurs brodée dans un vase en sparterie peint ou -# imprimé, qui est bien la plus harmonieuse chose qu''il soit possible -# de voir. M. 
de Nittis a fait un écran, d''un admirable et singulier -# carré, où deux grues, brodées en noir sur un fond rose saumoné, ont, -# comme accompagnement et adoucissement de la broderie, des demi-teintes -# doucement lavées d''encre de Chine sur l''étoffe enchanteresse. Et dans -# ce vestibule, il y a, sur un fond lilas, des carpes nageant au milieu -# de branchages de presle brodées en or, et dont le ventre apparaît comme -# argenté par un reflet de bourbe: un effet obtenu par une réserve au -# milieu du fond tout teinté et obscuré d''encre de Chine. Il est même un -# certain nombre de foukousas absolument peints. J''ai coloriée, sur un -# crêpe gris, dans l''orbe d''un soleil rouge comme du feu, l''échancrure -# pittoresque d''un passage de sept grues, exécuté avec la science que les -# Japonais possèdent du vol de l''échassier. J''ai encore, jetées sur un -# fond maïs, sans aucun détail de terrain, deux grandes grues blanches, -# à la petite crête rougie de vermillon, au cou, aux pattes, à la queue, -# teintés d''encre de Chine. Et ne vous étonnez pas de rencontrer si -# souvent sur les broderies la grue, cet oiseau qui apparaît dans le -# haut du ciel aux Japonais comme un messager céleste, et qu''ils saluent -# de l''appellation: _O Tsouri Sama_, Sa Seigneurie la Grue. -# -# [1] Il n''est guère besoin de dire que le carré est toujours -# rapporté à son maître par le porteur du présent. -# -# [2] Les foukousas modernes seraient aujourd''hui fabriqués à -# Togané, d''où on les expédierait à Yedo. -# ' -# ); -# commit; -# set count off; -# set heading off; -# set list off; -# select 'All OK.' from rdb$database; -# ''' -# #------------------------------------------- -# f_init_ddl=open( os.path.join(context['temp_directory'],'tmp_check_ddl.sql'), 'w') -# f_init_ddl.write(sql_ddl) -# flush_and_close( f_init_ddl ) -# -# f_init_log = open( os.path.join(context['temp_directory'],'tmp_check_ddl.log'), 'w') -# f_init_err = open( os.path.join(context['temp_directory'],'tmp_check_ddl.err'), 'w') -# subprocess.call( [ context['isql_path'], dsn, '-ch', 'utf8', '-i', f_init_ddl.name ], stdout = f_init_log,stderr = f_init_err) -# flush_and_close( f_init_log ) -# flush_and_close( f_init_err ) -# -# f_meta_log1 = open( os.path.join(context['temp_directory'],'tmp_initial_meta.sql'), 'w') -# f_meta_err1 = open( os.path.join(context['temp_directory'],'tmp_initial_meta.err'), 'w') -# subprocess.call( [ context['isql_path'], dsn, '-x', '-ch', 'utf8' ], stdout = f_meta_log1, stderr = f_meta_err1) -# flush_and_close( f_meta_log1 ) -# flush_and_close( f_meta_err1 ) -# -# -# # Backup with '-ZIP' command switch -# ######## -# -# f_backup_log = open( os.path.join(context['temp_directory'],'tmp_backup.log'), 'w') -# f_backup_err = open( os.path.join(context['temp_directory'],'tmp_backup.err'), 'w') -# subprocess.call( [ context['gbak_path'], '-b', '-zip', dsn, tmpfbk ], stdout = f_backup_log, stderr = f_backup_err) -# flush_and_close( f_backup_log ) -# flush_and_close( f_backup_err ) -# -# # Restore: -# ########## -# -# f_restore_log = open( os.path.join(context['temp_directory'],'tmp_restore.log'), 'w') -# f_restore_err = open( os.path.join(context['temp_directory'],'tmp_restore.err'), 'w') -# subprocess.call( [ context['gbak_path'], '-rep', tmpfbk, 'localhost:' + tmpfdb ], stdout = f_restore_log, stderr = f_restore_err) -# flush_and_close( f_restore_log ) -# flush_and_close( f_restore_err ) -# -# # Validate restored database: -# ########## -# -# f_validate_log = open( 
os.path.join(context['temp_directory'],'tmp_validate.log'), 'w') -# f_validate_err = open( os.path.join(context['temp_directory'],'tmp_validate.err'), 'w') -# subprocess.call( [ context['gfix_path'], '-v', '-full', 'localhost:' + tmpfdb ], stdout = f_validate_log, stderr = f_validate_err) -# flush_and_close( f_validate_log ) -# flush_and_close( f_validate_err ) -# -# f_meta_log2 = open( os.path.join(context['temp_directory'],'tmp_restored_meta.sql'), 'w') -# f_meta_err2 = open( os.path.join(context['temp_directory'],'tmp_restored_meta.err'), 'w') -# subprocess.call( [ context['isql_path'], dsn, '-x', '-ch', 'utf8' ], stdout = f_meta_log2, stderr = f_meta_err2) -# flush_and_close( f_meta_log2 ) -# flush_and_close( f_meta_err2 ) -# -# -# # Compare extracted metadata: -# ######### -# -# f_meta_log1 = open( f_meta_log1.name,'r' ) -# f_meta_log2 = open( f_meta_log2.name,'r') -# -# f_diff_txt=open( os.path.join(context['temp_directory'],'tmp_meta_diff.txt'), 'w') -# f_diff_txt.write( ''.join( difflib.unified_diff( f_meta_log1.readlines(), f_meta_log2.readlines() ) ) ) -# flush_and_close( f_diff_txt ) -# -# flush_and_close( f_meta_log1 ) -# flush_and_close( f_meta_log2 ) -# -# # Check: all files from following set must be EMPTY: -# ######## -# -# f_list = set( (f_init_err, f_meta_err1, f_meta_err2, f_backup_err, f_restore_err, f_validate_log, f_validate_err, f_diff_txt) ) -# for x in f_list: -# with open(x.name, 'r') as f: -# for line in f: -# if line.split(): -# print('UNEXPECTED CONTENT in '+ x.name + ': '+line) -# -# -# ###################################################################### -# # Cleanup: -# f_list |= set( ( f_init_ddl, f_init_log, f_meta_log1, f_meta_log2, f_backup_log, f_restore_log, tmpfbk, tmpfdb) ) -# cleanup( f_list ) -# -#--- -act_1 = python_act('db_1', substitutions=substitutions_1) +db = db_factory(charset='UTF8') +act = python_act('db', substitutions=[('[ \t]+', ' ')]) +@pytest.mark.skip('FIXME: Not IMPLEMENTED') @pytest.mark.version('>=4.0') -@pytest.mark.xfail -def test_1(act_1: Action): - pytest.fail("Test not IMPLEMENTED") - +def test_1(act: Action): + pytest.fail("Not IMPLEMENTED") +# Original python code for this test: +# ----------------------------------- +# import os +# import time +# import difflib +# import subprocess +# from fdb import services +# +# os.environ["ISC_USER"] = user_name +# os.environ["ISC_PASSWORD"] = user_password +# +# this_db = db_conn.database_name +# tmpfbk='$(DATABASE_LOCATION)'+'tmp_zipped_backup.fbk' +# tmpfdb='$(DATABASE_LOCATION)'+'tmp_zipped_restore.fdb' +# +# # 27.02.2021. +# # Name of encryption plugin depends on OS: +# # * for Windows we (currently) use plugin by IBSurgeon, its name is 'dbcrypt'; +# # later it can be replaced with built-in plugin 'fbSampleDbCrypt' +# # but currently it is included only in FB 4.x builds (not in FB 3.x). 
+# # Discussed tih Dimitr, Alex, Vlad, letters since: 08-feb-2021 00:22 +# # "Windows-distributive FB 3.x: it is desired to see sub-folder 'examples\\prebuild' with files for encryption, like it is in FB 4.x +# # *** DEFERRED *** +# # * for Linux we use: +# # ** 'DbCrypt_example' for FB 3.x +# # ** 'fbSampleDbCrypt' for FB 4.x+ +# # +# PLUGIN_NAME = 'dbcrypt' if os.name == 'nt' else ( '"DbCrypt_example"' if db_conn.engine_version < 4 else '"fbSampleDbCrypt"' ) +# +# ################################################ +# ### e n c r y p t d a t a b a s e ### +# ################################################ +# runProgram('isql', [ dsn ], 'alter database encrypt with %(PLUGIN_NAME)s key Red;' % locals()) +# time.sleep(1) +# db_conn.close() +# +# #-------------------------------------------- +# +# def flush_and_close( file_handle ): +# # https://docs.python.org/2/library/os.html#os.fsync +# # If you're starting with a Python file object f, +# # first do f.flush(), and +# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. +# global os +# +# file_handle.flush() +# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: +# # otherwise: "OSError: [Errno 9] Bad file descriptor"! +# os.fsync(file_handle.fileno()) +# file_handle.close() +# +# #-------------------------------------------- +# +# def cleanup( f_names_list ): +# global os +# for f in f_names_list: +# if type(f) == file: +# del_name = f.name +# elif type(f) == str: +# del_name = f +# else: +# print('Unrecognized type of element:', f, ' - can not be treated as file.') +# del_name = None +# +# if del_name and os.path.isfile( del_name ): +# os.remove( del_name ) +# +# #------------------------------------------- +# sql_ddl=''' +# set bail on; +# +# -- ###################################################### +# -- ################ C O R E 0 9 8 6 ############# +# -- ###################################################### +# +# create collation "Циферки" for utf8 from unicode case insensitive 'NUMERIC-SORT=1'; +# create collation "Испания" for iso8859_1 from es_es_ci_ai 'SPECIALS-FIRST=1';; +# commit; +# +# create domain "ИД'шники" int; +# create domain "Группы" varchar(30) check( value in ('Электрика', 'Ходовая', 'Арматурка', 'Кузовщина') ); +# create domain "Артикулы" varchar(12) character set utf8 check( value = upper(value) ) +# collate "Циферки" -- enabled since core-5220 was fixed (30.04.2016) +# ; +# create domain "Комрады" varchar(40) character set iso8859_1 +# collate "Испания" -- enabled since core-5220 was fixed (30.04.2016) +# ; +# create domain "Кол-во" numeric(12,3) not null; +# +# create sequence generilka; +# create sequence "Генерилка"; +# +# --create role "манагер"; +# --create role "начсклд"; +# +# -- TEMPLY COMMENTED UNTIL CORE-5209 IS OPEN: +# -- ISQL -X ignores connection charset for text of EXCEPTION message (restoring it in initial charset when exception was created) +# --recreate exception "Невзлет" 'Запись обломалась, ваши не пляшут. Но не стесняйтесь и обязательно заходите еще, мы всегда рады видеть вас. 
До скорой встречи, товарищ!'; +# commit; +# +# ------------------------------------------------- +# recreate table "склад" ( +# "ИД'шник" "ИД'шники" +# ,"Откудова" "Группы" +# ,"Номенклатура" "Артикулы" +# ,"ИД'родителя" "ИД'шники" +# ,"сколько там" "Кол-во" +# ,constraint "ПК-ИД'шник" primary key ("ИД'шник") using index "склад_ПК" +# ,constraint "ФК-на-родока" foreign key("ИД'родителя") references "склад" ("ИД'шник") using index "склад_ФК" +# ,constraint "остаток >=0" check ("сколько там" >= 0) +# ); +# +# recreate view "Электрика"("ид изделия", "Название", "Запас") as +# select +# "ИД'шник" +# ,"Номенклатура" +# ,"сколько там" +# from "склад" +# where "Откудова" = 'Электрика' +# ; +# +# set term ^; +# create or alter trigger "склад би" for "склад" active before insert as +# begin +# --new."ИД'шник" = coalesce( new."ИД'шник", gen_id(generilka, 1) ); +# -- not avail up to 2.5.6: +# new."ИД'шник" = coalesce( new."ИД'шник", gen_id("Генерилка", 1) ); +# end +# ^ +# +# create or alter procedure "Доб на склад"( +# "Откудова" varchar(30) +# ,"Номенклатура" varchar(30) +# ,"ИД'родителя" int +# ,"сколько там" numeric(12,3) +# ) returns ( +# "код возврата" int +# ) as +# begin +# insert into "склад"( +# "Откудова" +# ,"Номенклатура" +# ,"ИД'родителя" +# ,"сколько там" +# ) values ( +# :"Откудова" +# ,:"Номенклатура" +# ,:"ИД'родителя" +# ,:"сколько там" +# ); +# +# end +# ^ +# create or alter procedure "Удалить" as +# begin +# /* +# Антон Павлович Чехов. Каштанка +# +# 1. Дурное поведение +# +# Молодая рыжая собака - помесь такса с дворняжкой - очень похожая мордой +# на лисицу, бегала взад и вперед по тротуару и беспокойно оглядывалась по +# сторонам. Изредка она останавливалась и, плача, приподнимая то одну озябшую +# лапу, то другую, старалась дать себе отчет: как это могло случиться, что она +# заблудилась? +# */ +# end +# ^ +# set term ;^ +# +# --grant select on "склад" to "манагер"; +# --grant select, insert, update, delete on "склад" to "начсклд"; +# +# comment on sequence "Генерилка" is 'Генератор простых идей'; +# comment on table "склад" is 'Это всё, что мы сейчас имеем в наличии'; +# comment on view "Электрика" is 'Не суй пальцы в розетку, будет бо-бо!'; +# comment on procedure "Доб на склад" is 'Процедурка добавления изделия на склад'; +# comment on parameter "Доб на склад"."Откудова" is 'Группа изделия, которое собираемся добавить'; +# +# comment on parameter "Доб на склад"."ИД'родителя" is ' +# Федор Михайлович Достоевский +# +# Преступление и наказание +# +# Роман в шести частях с эпилогом +# +# +# Часть первая +# +# I +# В начале июля, в чрезвычайно жаркое время, под вечер, один молодой человек вышел из своей каморки, которую нанимал от жильцов в С -- м переулке, на улицу и медленно, как бы в нерешимости, отправился к К -- ну мосту. +# Он благополучно избегнул встречи с своею хозяйкой на лестнице. Каморка его приходилась под самою кровлей высокого пятиэтажного дома и походила более на шкаф, чем на квартиру. Квартирная же хозяйка его, у которой он нанимал эту каморку с обедом и прислугой, помещалась одною лестницей ниже, в отдельной квартире, и каждый раз, при выходе на улицу, ему непременно надо было проходить мимо хозяйкиной кухни, почти всегда настежь отворенной на лестницу. И каждый раз молодой человек, проходя мимо, чувствовал какое-то болезненное и трусливое ощущение, которого стыдился и от которого морщился. Он был должен кругом хозяйке и боялся с нею встретиться. 
+# Не то чтоб он был так труслив и забит, совсем даже напротив; но с некоторого времени он был в раздражительном и напряженном состоянии, похожем на ипохондрию. Он до того углубился в себя и уединился от всех, что боялся даже всякой встречи, не только встречи с хозяйкой. Он был задавлен бедностью; но даже стесненное положение перестало в последнее время тяготить его. Насущными делами своими он совсем перестал и не хотел заниматься. Никакой хозяйки, в сущности, он не боялся, что бы та ни замышляла против него. Но останавливаться на лестнице, слушать всякий вздор про всю эту обыденную дребедень, до которой ему нет никакого дела, все эти приставания о платеже, угрозы, жалобы, и при этом самому изворачиваться, извиняться, лгать, -- нет уж, лучше проскользнуть как-нибудь кошкой по лестнице и улизнуть, чтобы никто не видал. +# Впрочем, на этот раз страх встречи с своею кредиторшей даже его самого поразил по выходе на улицу. +# "На какое дело хочу покуситься и в то же время каких пустяков боюсь! -- подумал он с странною улыбкой. -- Гм... да... всё в руках человека, и всё-то он мимо носу проносит, единственно от одной трусости... это уж аксиома... Любопытно, чего люди больше всего боятся? Нового шага, нового собственного слова они всего больше боятся... А впрочем, я слишком много болтаю. Оттого и ничего не делаю, что болтаю. Пожалуй, впрочем, и так: оттого болтаю, что ничего не делаю. Это я в этот последний месяц выучился болтать, лежа по целым суткам в углу и думая... о царе Горохе. Ну зачем я теперь иду? Разве я способен на это? Разве это серьезно? Совсем не серьезно. Так, ради фантазии сам себя тешу; игрушки! Да, пожалуй что и игрушки!" +# На улице жара стояла страшная, к тому же духота, толкотня, всюду известка, леса, кирпич, пыль и та особенная летняя вонь, столь известная каждому петербуржцу, не имеющему возможности нанять дачу, -- всё это разом неприятно потрясло и без того уже расстроенные нервы юноши. Нестерпимая же вонь из распивочных, которых в этой части города особенное множество, и пьяные, поминутно попадавшиеся, несмотря на буднее время, довершили отвратительный и грустный колорит картины. Чувство глубочайшего омерзения мелькнуло на миг в тонких чертах молодого человека. Кстати, он был замечательно хорош собою, с прекрасными темными глазами, темно-рус, ростом выше среднего, тонок и строен. Но скоро он впал как бы в глубокую задумчивость, даже, вернее сказать, как бы в какое-то забытье, и пошел, уже не замечая окружающего, да и не желая его замечать. Изредка только бормотал он что-то про себя, от своей привычки к монологам, в которой он сейчас сам себе признался. В эту же минуту он и сам сознавал, что мысли его порою мешаются и что он очень слаб: второй день как уж он почти совсем ничего не ел. +# Он был до того худо одет, что иной, даже и привычный человек, посовестился бы днем выходить в таких лохмотьях на улицу. Впрочем, квартал был таков, что костюмом здесь было трудно кого-нибудь удивить. Близость Сенной, обилие известных заведений и, по преимуществу, цеховое и ремесленное население, скученное в этих серединных петербургских улицах и переулках, пестрили иногда общую панораму такими субъектами, что странно было бы и удивляться при встрече с иною фигурой. Но столько злобного презрения уже накопилось в душе молодого человека, что, несмотря на всю свою, иногда очень молодую, щекотливость, он менее всего совестился своих лохмотьев на улице. Другое дело при встрече с иными знакомыми или с прежними товарищами, с которыми вообще он не любил встречаться... 
А между тем, когда один пьяный, которого неизвестно почему и куда провозили в это время по улице в огромной телеге, запряженной огромною ломовою лошадью, крикнул ему вдруг, проезжая: "Эй ты, немецкий шляпник!" -- и заорал во всё горло, указывая на него рукой, -- молодой человек вдруг остановился и судорожно схватился за свою шляпу. Шляпа эта была высокая, круглая, циммермановская, но вся уже изношенная, совсем рыжая, вся в дырах и пятнах, без полей и самым безобразнейшим углом заломившаяся на сторону. Но не стыд, а совсем другое чувство, похожее даже на испуг, охватило его. +# "Я так и знал! -- бормотал он в смущении, -- я так и думал! Это уж всего сквернее! Вот эдакая какая-нибудь глупость, какая-нибудь пошлейшая мелочь, весь замысел может испортить! Да, слишком приметная шляпа... Смешная, потому и приметная... К моим лохмотьям непременно нужна фуражка, хотя бы старый блин какой-нибудь, а не этот урод. Никто таких не носит, за версту заметят, запомнят... главное, потом запомнят, ан и улика. Тут нужно быть как можно неприметнее... Мелочи, мелочи главное!.. Вот эти-то мелочи и губят всегда и всё..." +# '; +# +# commit; +# +# +# -- ###################################################### +# -- ################ C O R E 2 2 3 8 ############# +# -- ###################################################### +# create domain dm_long_utf8 as varchar(8191) character set utf8; +# create table test (long_text dm_long_utf8, long_descr blob sub_type text character set utf8 ); +# commit; +# set count on; +# -- Length of this literal is exact 8191 characters: +# insert into test(long_text) +# values( +# 'Kaikki neuvon-antajat ja etevimmät päälliköt ja mollat ja imamit ja kadit ja kaupungin päähenkilöt olivat suuresti hämmästyksissään. Hänen tunnettu hurskautensa vaati kaikkia äänettömyyteen, sillä välin kuin hän itse lausui pitkän rukouksen, pyytäen Allah''ta ja Profeettaa hämmentämään kaikkia häväiseviä Juutalaisia ja uskottomia ja vuodattamaan totuuden sanoja jumalisten ihmisten suuhun. Ja nyt kunnian-arvoisa sheiki kutsui esiin kaikki todistajat David Alroy''ta vastaan. Heti Kisloch, Kurdilainen, joka oli koroitettu Bagdadin kadiksi, astui esiin, veti sametti-kukkarostansa paperikääryn ja luki semmoisen todistuksen, jossa arvoisa Kisloch vakuutti, että hän ensin tutustui vangin, David Alroy''n kanssa joissakin erämaan raunioissa -- muutamain rosvojen pesässä, joita Alroy johdatti; että hän, Kisloch, oli rehellinen kauppias ja että nämät konnat olivat ryöstäneet hänen karavaninsa ja hän itse joutunut vankeuteen; että hänen vankeutensa toisena yönä Alroy oli ilmestynyt hänen eteensä leijonan muodossa ja kolmantena tuimasilmäisenä härkänä; että hänen oli tapa alinomaa muuttaa itsensä; että hän usein nosti henkiä; että viimein eräänä kauheana yönä Eblis itse tuli suurella juhlasaatolla ja antoi Alroy''lle Salomonin, Davidin pojan valtikan; ja että tämä seuraavana päivänä kohotti lippunsa ja kohta sen jälkeen kukisti Hassan Subah''n ja tämän Seldshukit useitten hirmuisten paholaisten silminnähtävällä avulla. Kalidaan, Indialaisen, Guebriläisen ja Neekerin ja muutamien muitten saman hengen lapsien todistukset vetivät täysin määrin vertoja Kisloch Kurdilaisen uhkealle kertomukselle. Hebrealaisen valloittajan vastustamaton menestys oli kieltämättömällä tavalla selitetty, Mahomettilaisten aseitten kunnia ja Moslemin uskon puhtaus olivat asetetut jälleen entiseen loistoonsa ja saastuttamattomaan maineesensa. Todeksi saatiin, että David Alroy oli Ebliin lapsi, noitamies, taikakalujen ja myrkkyjen käyttäjä. 
Kansa kuunteli kauhulla ja harmilla. He olisivat tunkeneet vartiaväen läpitse ja repineet hänet kappaleiksi, jolleivät olisi pelänneet Karamanialaisten sotatapparoita. Niin he lohduttivat mieltänsä sillä, että he ennen pitkää saisivat nähdä hänen kidutuksensa. Bagdadin kadi kumarsi Karamanian kuningasta ja kuiskasi soveliaan matkan päästä jotakin kuninkaalliseen korvaan. Torvet kaikkuivat, kuuluttajat vaativat äänettömyyttä ja kuninkaan huulet liikkuivat taas. "Kuule, oi kansa, ja ole viisas. Pääkadi aikoo nyt lukea kuninkaallisen prinsessan Schirenen, noiturin etevimmän uhrin todistuksen." Ja todistus luettiin, joka vakuutti, että David Alroy omisti ja kantoi lähinnä sydäntänsä erästä talismania, jonka Eblis oli antanut hänelle ja jonka voima oli niin suuri, että, jos sitä kerta painettiin naisen rintaa vastaan, tämä ei enää voinut hallita tahtoansa. Tämmöinen kova onni oli kohdannut oikeauskoisten hallitsian tytärtä. "Onko siinä niin kirjoitettu?" vanki kysyi. "On", kadi vastasi, "ja sen alla on vielä prinsessan kuninkaallinen allekirjoitus." "Se on väärennetty." Karamanian kuningas kavahti valta-istuimeltansa ja oli vihoissansa astumallaan sen portaita alas. Hänen kasvonsa olivat veripunaiset, hänen partansa kuin tulen liekki. Joku lempiministeri rohkeni vienosti pidättää häntä hänen kuninkaallisesta vaipastansa. "Tapa paikalla pois se koira", Karamanian kuningas mutisi. "Prinsessa on itse täällä", lausui kadi, "todistamassa niitä noitakeinoja, joitten alaisena hän oli, vaan joitten vaikutuksesta hän nyt Allah''n ja Profeetan voiman kautta on pääsnyt." Alroy''ta vävähti! "Astu esiin, kuninkaallinen prinsessa", kadi sanoi, "ja jos se todistus, jonka kuulit, on perustettu, nosta ylös se kuninkaallinen käsi, joka koristi sen allekirjoituksellaan." Lähellä valta-istuinta oleva eunukkien joukko teki tilaa; naishaamu, joka oli verhottu hunnulla jalkoihin saakka, astui esiin. Hän nosti ylös kätensä; koko kerääntynyt kansa tuskin hengitti mielenliikutuksesta; eunukkien rivit ummistuivat jälleen; huuto kuului ja hunnustettu haamu katosi. "Minä odotan kidutuskoneitasi, kuningas", Alroy lausui raskaan surun äänellä. Hänen lujuutensa näytti luopuneen hänestä. Hänen silmänsä olivat luodut maahan. Hän oli nähtävästi vaipunut syvään miettimiseen taikka heittäynyt epätoivoon. "Valmistakaat seipäät", käski Alp Arslan. Koko kansan joukkoa värisytti vasten mieltäkin. Yksi orja lähestyi ja tarjosi paperikääryä Alroy''lle. Hän tunsi Nubialaisen, joka oli Honainin palveluksessa. Hänen entinen ministerinsä ilmoitti hänelle, että hän oli saapuvilla; että ne ehdot, joita hän vankihuoneessa tarjosi, vielä myönnettäisiin; että jos Alroy, jota asiaa hän ei epäillyt ja jota hän rukoili, suostuisi niitä vastaan-ottamaan, hänen tuli pistää paperikäärö poveensa, mutta, jos hän yhä oli taipumaton, jos hänen yhä oli mieletön päätös kuolla hirveä ja häväisevä kuolema, hänen tuli repiä se rikki ja heittää se tanterelle. Silmänräpäyksellä Alroy otti paperikääryn ja repi sen kiivaasti tuhansiin palasiin. Tuulen puuska levitti kappaleet laajalle yliympäri. Alhaiso riiteli näistä David Alroy''n viimeisistä muistoista; ja tämä vähäinen tapaus tuotti paljon hämminkiä. Tällä välin Neekerit varustivat kidutuksen ja kuoleman koneita. "Tuon juutalaisen koiran itsepintaisuus tekee minun hulluksi", lausui Karamanian kuningas hovimiehillensä. "Minua haluttaa puhutella häntä vähän, ennenkuin hän kuolee." 
Lempiministeri pyysi hallitsiaansa olemaan levollisena; mutta kuninkaallinen parta kävi niin punaiseksi, ja kuninkaalliset silmät iskivät niin kauheata tulta, että lempiministerikin lopulta myöntyi. Torvi kaikkui, kuuluttajat vaativat vaiti-oloa, ja Alp Arslanin ääni eroitettiin jälleen. "Senkin koira, näetkö sinä, mikä on tarjonasi? Tiedätkö sinä, mikä vartoo sinua sinun herrasi Ebliin asunnoissa? Voiko väärä ylpeys viehättää Juutalaistakin? Eikö elämä ole suloista? Eikö olisi parempi olla minun varvaskenkieni kantaja kuin tulla seivästetyksi?" "Jalomielinen Alp Arslan", vastasi Alroy ilmeisen ylenkatseen äänellä; "luuletko, että mikään kidutus rasittaa niin, kuin se muisto, että sinä olet voittanut minun?" "Partani kautta, hän ivaa minua!" Karamanialaisten hallitsia huudahti; "hän tekee kiusaa minulle! Älkäät koskeko vaippaani. Minä tahdon puhua hänen kanssaan. Te ette näe kauemmaksi kuin hunnustettu haukka, te sokean äidin lapset. Se on noita; hänellä on vielä jälellä joku päätaika; hän pelastaa vielä henkensä. Hän lentää ilmaan taikka vaipuu maan sisään. Hän nauraa meidän kidutuksiamme." Karamanian kuningas astui tuota pikaa valta-istuimensa portaita alaspäin; häntä seurasivat hänen lempiministerinsä ja hänen neuvon-antajansa ja hänen etevimmät päällikkönsä ja kadit ja mollat ja imamit ja kaupungin päähenkilöt. "Sinä noita!" Alp Arslan huudahti, "hävytön noita! halvan äidin halpa poika! koirien koira! niskotteletko sinä meitä vastaan? Kuiskaako herrasi Eblis toivoa sinun korviisi? Nauratko meidän rangaistuksiamme? Aiotko lentää ylös ilmaan? vai painua alas maahan? Niinkö, niinkö?" Hengästyneenä ja vihastansa uupuneena hallitsia vaikeni. Hän repi partaansa ja polki maata rajussa vimmassaan. "Sinä olet viisaampi kuin neuvon-antajasi, kuningas Arslan; minä en nöyrry sinun edessäsi. Minun Herrani, vaikka hän ei ole Eblis, ei ole hylännyt minua. Minä nauran sinun rangaistuksiasi. Sinun kidutuksiasi minä ylenkatson. Minä sekä vaivun maan sisään että kohoan ilmaan. Tyydytkö nyt vastaukseeni?" "Partani kautta", huudahti tulistunut Arslan, "minä tyydyn vastaukseesi. Pelastakoon Eblis sinut, jos hän voi;" ja Karamanian kuningas, Aasian mainioin miekan piteliä veti säilänsä, ikäänkuin salaman, tupesta ja silpaisi yhdellä säväyksellä Alroy''lta pään. Se kaatui, vaan, kun se kaatui, riemuitsevan pilkan hymy näytti vivahtelevan sankarin kylmenevillä kasvoilla ja kysyvän hänen vihollisiltansa: "missä kaikki teidän kidutuksenne nyt ovat?" Do Dzieci Gołąbki i Dziewczynka Dziecię i Koza Wróbel i Jaskółka Osieł i Chłopczyk Nieposłuszny Zajączek Kotek Brytan i Pudelek Egzamin Małego "Misia" Wilk i Owce Lis i Gąski Chłopczyk i Źrebię Gęsia Kapela Lew i Piesek Niedźwiedź i Pszczółka Śniadanie Artysta Z Zimowych Rozrywek Leniwy Chłopczyk Przygoda z Indykiem O hämmästyksissään. Leniwy ЙЦУКЕН' +# ); +# insert into test(long_text) +# values( +# 'Kaikki neuvon-antajat ja etevimmät päälliköt ja mollat ja imamit ja kadit ja kaupungin päähenkilöt olivat suuresti hämmästyksissään. Hänen tunnettu hurskautensa vaati kaikkia äänettömyyteen, sillä välin kuin hän itse lausui pitkän rukouksen, pyytäen Allah''ta ja Profeettaa hämmentämään kaikkia häväiseviä Juutalaisia ja uskottomia ja vuodattamaan totuuden sanoja jumalisten ihmisten suuhun. Ja nyt kunnian-arvoisa sheiki kutsui esiin kaikki todistajat David Alroy''ta vastaan. 
Heti Kisloch, Kurdilainen, joka oli koroitettu Bagdadin kadiksi, astui esiin, veti sametti-kukkarostansa paperikääryn ja luki semmoisen todistuksen, jossa arvoisa Kisloch vakuutti, että hän ensin tutustui vangin, David Alroy''n kanssa joissakin erämaan raunioissa -- muutamain rosvojen pesässä, joita Alroy johdatti; että hän, Kisloch, oli rehellinen kauppias ja että nämät konnat olivat ryöstäneet hänen karavaninsa ja hän itse joutunut vankeuteen; että hänen vankeutensa toisena yönä Alroy oli ilmestynyt hänen eteensä leijonan muodossa ja kolmantena tuimasilmäisenä härkänä; että hänen oli tapa alinomaa muuttaa itsensä; että hän usein nosti henkiä; että viimein eräänä kauheana yönä Eblis itse tuli suurella juhlasaatolla ja antoi Alroy''lle Salomonin, Davidin pojan valtikan; ja että tämä seuraavana päivänä kohotti lippunsa ja kohta sen jälkeen kukisti Hassan Subah''n ja tämän Seldshukit useitten hirmuisten paholaisten silminnähtävällä avulla. Kalidaan, Indialaisen, Guebriläisen ja Neekerin ja muutamien muitten saman hengen lapsien todistukset vetivät täysin määrin vertoja Kisloch Kurdilaisen uhkealle kertomukselle. Hebrealaisen valloittajan vastustamaton menestys oli kieltämättömällä tavalla selitetty, Mahomettilaisten aseitten kunnia ja Moslemin uskon puhtaus olivat asetetut jälleen entiseen loistoonsa ja saastuttamattomaan maineesensa. Todeksi saatiin, että David Alroy oli Ebliin lapsi, noitamies, taikakalujen ja myrkkyjen käyttäjä. Kansa kuunteli kauhulla ja harmilla. He olisivat tunkeneet vartiaväen läpitse ja repineet hänet kappaleiksi, jolleivät olisi pelänneet Karamanialaisten sotatapparoita. Niin he lohduttivat mieltänsä sillä, että he ennen pitkää saisivat nähdä hänen kidutuksensa. Bagdadin kadi kumarsi Karamanian kuningasta ja kuiskasi soveliaan matkan päästä jotakin kuninkaalliseen korvaan. Torvet kaikkuivat, kuuluttajat vaativat äänettömyyttä ja kuninkaan huulet liikkuivat taas. "Kuule, oi kansa, ja ole viisas. Pääkadi aikoo nyt lukea kuninkaallisen prinsessan Schirenen, noiturin etevimmän uhrin todistuksen." Ja todistus luettiin, joka vakuutti, että David Alroy omisti ja kantoi lähinnä sydäntänsä erästä talismania, jonka Eblis oli antanut hänelle ja jonka voima oli niin suuri, että, jos sitä kerta painettiin naisen rintaa vastaan, tämä ei enää voinut hallita tahtoansa. Tämmöinen kova onni oli kohdannut oikeauskoisten hallitsian tytärtä. "Onko siinä niin kirjoitettu?" vanki kysyi. "On", kadi vastasi, "ja sen alla on vielä prinsessan kuninkaallinen allekirjoitus." "Se on väärennetty." Karamanian kuningas kavahti valta-istuimeltansa ja oli vihoissansa astumallaan sen portaita alas. Hänen kasvonsa olivat veripunaiset, hänen partansa kuin tulen liekki. Joku lempiministeri rohkeni vienosti pidättää häntä hänen kuninkaallisesta vaipastansa. "Tapa paikalla pois se koira", Karamanian kuningas mutisi. "Prinsessa on itse täällä", lausui kadi, "todistamassa niitä noitakeinoja, joitten alaisena hän oli, vaan joitten vaikutuksesta hän nyt Allah''n ja Profeetan voiman kautta on pääsnyt." Alroy''ta vävähti! "Astu esiin, kuninkaallinen prinsessa", kadi sanoi, "ja jos se todistus, jonka kuulit, on perustettu, nosta ylös se kuninkaallinen käsi, joka koristi sen allekirjoituksellaan." Lähellä valta-istuinta oleva eunukkien joukko teki tilaa; naishaamu, joka oli verhottu hunnulla jalkoihin saakka, astui esiin. Hän nosti ylös kätensä; koko kerääntynyt kansa tuskin hengitti mielenliikutuksesta; eunukkien rivit ummistuivat jälleen; huuto kuului ja hunnustettu haamu katosi. 
"Minä odotan kidutuskoneitasi, kuningas", Alroy lausui raskaan surun äänellä. Hänen lujuutensa näytti luopuneen hänestä. Hänen silmänsä olivat luodut maahan. Hän oli nähtävästi vaipunut syvään miettimiseen taikka heittäynyt epätoivoon. "Valmistakaat seipäät", käski Alp Arslan. Koko kansan joukkoa värisytti vasten mieltäkin. Yksi orja lähestyi ja tarjosi paperikääryä Alroy''lle. Hän tunsi Nubialaisen, joka oli Honainin palveluksessa. Hänen entinen ministerinsä ilmoitti hänelle, että hän oli saapuvilla; että ne ehdot, joita hän vankihuoneessa tarjosi, vielä myönnettäisiin; että jos Alroy, jota asiaa hän ei epäillyt ja jota hän rukoili, suostuisi niitä vastaan-ottamaan, hänen tuli pistää paperikäärö poveensa, mutta, jos hän yhä oli taipumaton, jos hänen yhä oli mieletön päätös kuolla hirveä ja häväisevä kuolema, hänen tuli repiä se rikki ja heittää se tanterelle. Silmänräpäyksellä Alroy otti paperikääryn ja repi sen kiivaasti tuhansiin palasiin. Tuulen puuska levitti kappaleet laajalle yliympäri. Alhaiso riiteli näistä David Alroy''n viimeisistä muistoista; ja tämä vähäinen tapaus tuotti paljon hämminkiä. Tällä välin Neekerit varustivat kidutuksen ja kuoleman koneita. "Tuon juutalaisen koiran itsepintaisuus tekee minun hulluksi", lausui Karamanian kuningas hovimiehillensä. "Minua haluttaa puhutella häntä vähän, ennenkuin hän kuolee." Lempiministeri pyysi hallitsiaansa olemaan levollisena; mutta kuninkaallinen parta kävi niin punaiseksi, ja kuninkaalliset silmät iskivät niin kauheata tulta, että lempiministerikin lopulta myöntyi. Torvi kaikkui, kuuluttajat vaativat vaiti-oloa, ja Alp Arslanin ääni eroitettiin jälleen. "Senkin koira, näetkö sinä, mikä on tarjonasi? Tiedätkö sinä, mikä vartoo sinua sinun herrasi Ebliin asunnoissa? Voiko väärä ylpeys viehättää Juutalaistakin? Eikö elämä ole suloista? Eikö olisi parempi olla minun varvaskenkieni kantaja kuin tulla seivästetyksi?" "Jalomielinen Alp Arslan", vastasi Alroy ilmeisen ylenkatseen äänellä; "luuletko, että mikään kidutus rasittaa niin, kuin se muisto, että sinä olet voittanut minun?" "Partani kautta, hän ivaa minua!" Karamanialaisten hallitsia huudahti; "hän tekee kiusaa minulle! Älkäät koskeko vaippaani. Minä tahdon puhua hänen kanssaan. Te ette näe kauemmaksi kuin hunnustettu haukka, te sokean äidin lapset. Se on noita; hänellä on vielä jälellä joku päätaika; hän pelastaa vielä henkensä. Hän lentää ilmaan taikka vaipuu maan sisään. Hän nauraa meidän kidutuksiamme." Karamanian kuningas astui tuota pikaa valta-istuimensa portaita alaspäin; häntä seurasivat hänen lempiministerinsä ja hänen neuvon-antajansa ja hänen etevimmät päällikkönsä ja kadit ja mollat ja imamit ja kaupungin päähenkilöt. "Sinä noita!" Alp Arslan huudahti, "hävytön noita! halvan äidin halpa poika! koirien koira! niskotteletko sinä meitä vastaan? Kuiskaako herrasi Eblis toivoa sinun korviisi? Nauratko meidän rangaistuksiamme? Aiotko lentää ylös ilmaan? vai painua alas maahan? Niinkö, niinkö?" Hengästyneenä ja vihastansa uupuneena hallitsia vaikeni. Hän repi partaansa ja polki maata rajussa vimmassaan. "Sinä olet viisaampi kuin neuvon-antajasi, kuningas Arslan; minä en nöyrry sinun edessäsi. Minun Herrani, vaikka hän ei ole Eblis, ei ole hylännyt minua. Minä nauran sinun rangaistuksiasi. Sinun kidutuksiasi minä ylenkatson. Minä sekä vaivun maan sisään että kohoan ilmaan. Tyydytkö nyt vastaukseeni?" "Partani kautta", huudahti tulistunut Arslan, "minä tyydyn vastaukseesi. 
Pelastakoon Eblis sinut, jos hän voi;" ja Karamanian kuningas, Aasian mainioin miekan piteliä veti säilänsä, ikäänkuin salaman, tupesta ja silpaisi yhdellä säväyksellä Alroy''lta pään. Se kaatui, vaan, kun se kaatui, riemuitsevan pilkan hymy näytti vivahtelevan sankarin kylmenevillä kasvoilla ja kysyvän hänen vihollisiltansa: "missä kaikki teidän kidutuksenne nyt ovat?" Do Dzieci Gołąbki i Dziewczynka Dziecię i Koza Wróbel i Jaskółka Osieł i Chłopczyk Nieposłuszny Zajączek Kotek Brytan i Pudelek Egzamin Małego "Misia" Wilk i Owce Lis i Gąski Chłopczyk i Źrebię Gęsia Kapela Lew i Piesek Niedźwiedź i Pszczółka Śniadanie Artysta Z Zimowych Rozrywek Leniwy Chłopczyk Przygoda z Indykiem O hämmästyksissään. Leniwy НЕКУЦЙ' +# ); +# +# update test set long_descr = long_text; +# commit; +# +# +# +# -- ################################################### +# -- ############## C O R E 3 4 4 6 ############## +# -- ################################################### +# recreate table test( s varchar(8187) character set utf8 collate unicode_ci_ai, b blob sub_type 1 character set utf8 collate unicode_ci_ai); +# commit; +# +# insert into test (s, b ) +# values( +# 'Sur le boulevard Montmorency, au n° 53, s''élève une maison portant, +# encastré dans son balcon, un profil lauré de Louis XV, en bronze +# doré, qui a tout l''air d''être le médaillon, dont était décorée la +# tribune de musique de la salle à manger de Luciennes, représenté dans +# l''aquarelle de Moreau que l''on voit au Louvre. Cette tête, que quelques +# promeneurs regardent d''un œil farouche, n''est point,--ai-je besoin de +# le dire?--une affiche des opinions politiques du propriétaire, elle est +# tout bonnement l''enseigne d''un des nids les plus pleins de choses du +# XVIIIe siècle qui existent à Paris. +# +# La porte noire, que surmonte un élégant dessus de grille de chapelle +# jésuite en fer forgé, la porte ouverte, du bas de l''escalier, de +# l''entrée du vestibule, du seuil de la maison, le visiteur est accueilli +# par des terres cuites, des bronzes, des dessins, des porcelaines du +# siècle aimable par excellence, mêlés à des objets de l''Extrême-Orient, +# qui se trouvaient faire si bon ménage dans les collections de Madame de +# Pompadour et de tous les _curieux_ et les _curiolets_ du temps. +# +# La vie d''aujourd''hui est une vie de combattivité; elle demande dans +# toutes les carrières une concentration, un effort, un travail, qui, en +# son foyer enferment l''homme, dont l''existence n''est plus extérieure +# comme au XVIIIe siècle, n''est plus papillonnante parmi la société +# depuis ses dix-sept ans jusqu''à sa mort. De notre temps on va bien +# encore dans le monde, mais toute la vie ne s''y dépense plus, et le +# _chez-soi_ a cessé d''être l''hôtel garni où l''on ne faisait que coucher. +# Dans cette vie assise au coin du feu, renfermée, sédentaire, la +# créature humaine, et la première venue, a été poussée à vouloir les +# quatre murs de son _home_ agréables, plaisants, amusants aux yeux; et +# cet entour et ce décor de son intérieur, elle l''a cherché et trouvé +# naturellement dans l''objet d''art pur ou dans l''objet d''art industriel, +# plus accessible au goût de tous. 
Du même coup, ces habitudes moins +# mondaines amenaient un amoindrissement du rôle de la femme dans la +# pensée masculine; elle n''était plus pour nous l''occupation galante de +# toute notre existence, cette occupation qui était autrefois la carrière +# du plus grand nombre, et, à la suite de cette modification dans les +# mœurs, il arrivait ceci: c''est que l''intérêt de l''homme, s''en allant +# de l''être charmant, se reportait en grande partie sur les jolis objets +# inanimés dont la passion revêt un peu de la nature et du caractère +# de l''amour. Au XVIIIe siècle, il n''y a pas de _bibeloteurs_ jeunes: +# c''est là la différence des deux siècles. Pour notre génération, la +# _bricabracomanie_ n''est qu''un bouche-trou de la femme qui ne possède +# plus l''imagination de l''homme, et j''ai fait à mon égard cette remarque, +# que, lorsque par hasard mon cœur s''est trouvé occupé, l''objet d''art ne +# m''était de rien. +# +# Oui, cette passion devenue générale, ce plaisir solitaire, auquel se +# livre presque toute une nation, doit son développement au vide, à +# l''ennui du cœur, et aussi, il faut le reconnaître, à la tristesse +# des jours actuels, à l''incertitude des lendemains, à l''enfantement, +# les pieds devant, de la société nouvelle, à des soucis et à des +# préoccupations qui poussent, comme à la veille d''un déluge, les désirs +# et les envies à se donner la jouissance immédiate de tout ce qui les +# charme, les séduit, les tente: l''oubli du moment dans l''assouvissement +# artistique. +# +# Ce sont ces causes, et incontestablement l''éducation de l''œil des +# gens du XIXe siècle, et encore un sentiment tout nouveau, la tendresse +# presque humaine pour les _choses_, qui font, à l''heure qu''il est, de +# presque tout le monde, des collectionneurs et de moi en particulier le +# plus passionné de tous les collectionneurs. +# +# Un riant pavé en marbre blanc et en marbre rouge du Languedoc, avec, +# pour revêtement aux murs et au plafond, un cuir moderne peuplé de +# perroquets fantastiques dorés et peints sur un fond vert d''eau. +# +# Sur ce cuir, dans un désordre cherché, dans un pittoresque +# d''antichambre et d''atelier, toutes sortes de choses voyantes et +# claquantes, de brillants cuivres découpés, des poteries dorées, des +# broderies du Japon et encore des objets bizarres, inattendus, étonnant +# par leur originalité, leur exotisme, et vis-à-vis d''un certain nombre +# desquels je me fais un peu l''effet du bon Père Buffier quand il disait: +# «Voilà des choses que je ne sais pas, il faut que je fasse un livre +# dessus.» +# +# Ça, une petite jardinière à suspension, fabriquée d''une coloquinte +# excentrique, dont la tige tournante et recroquevillée est une tige de +# bronze qui a la flexibilité d''une liane; cette grande planchette fruste +# de bois, toute parcourue des tortils d''un feuillage de lierre, exécuté +# en nacre et en écaille: le porte-éventail qui tient dans l''appartement +# l''éventail ouvert contre le mur; cette petite boule de porcelaine +# jaune impérial si délicatement treillagée: la cage au grillon ou à +# la mouche bourdonnante, que le Chinois aime suspendre au chevet de +# son lit; et cette plaque de faïence figurant une branche de pêcher en +# fleur, modelée à jour dans un cadre de bois en forme d''écran, vous +# représente la décoration de l''angle religieux et mystique d''une chambre +# de prostituée de maison de thé, l''espèce de tableau d''autel devant +# lequel elle place une fleur dans un vase. 
+# +# Des broderies du Japon, ai-je dit plus haut, c''est là, dans leurs +# cadres de bambous, la riche, la splendide, l''_éclairante_ décoration +# des murs du vestibule et un peu de toute la maison. Ces carrés de soie +# brodés appelés _fusha_ ou _foukousa_ font la chatoyante couverture +# sous laquelle on a l''habitude, dans l''Empire du Lever du Soleil, +# d''envoyer tout présent quelconque, et le plus minime, fût-il même de +# deux œufs[1]. Les anciens _foukousas_ fabriqués à Kioto[2] sont des +# produits d''un art tout particulier au Japon, et auxquels l''Europe +# ne peut rien opposer: de la peinture, de vrais tableaux composés +# et exécutés en soie par un brodeur, où sur les fonds aux adorables +# nuances, et telles qu''en donne le satin ou le crêpe, un oiseau, un +# poisson, une fleur se détache dans le haut relief d''une broderie. +# Et rien là dedans du travail d''un art mécanique, du dessin bête de +# vieille fille de nos broderies à nous, mais des silhouettes d''êtres +# pleins de vie, avec leurs pattes d''oiseau d''un si grand style, avec +# leurs nageoires de poisson d''un si puissant contournement. Quelquefois +# des parties peintes, peintes à l''encre de Chine, s''associent de la +# manière la plus heureuse à la broderie. Je connais, chez Mme Auguste +# Sichel, une fusée de fleurs brodée dans un vase en sparterie peint ou +# imprimé, qui est bien la plus harmonieuse chose qu''il soit possible +# de voir. M. de Nittis a fait un écran, d''un admirable et singulier +# carré, où deux grues, brodées en noir sur un fond rose saumoné, ont, +# comme accompagnement et adoucissement de la broderie, des demi-teintes +# doucement lavées d''encre de Chine sur l''étoffe enchanteresse. Et dans +# ce vestibule, il y a, sur un fond lilas, des carpes nageant au milieu +# de branchages de presle brodées en or, et dont le ventre apparaît comme +# argenté par un reflet de bourbe: un effet obtenu par une réserve au +# milieu du fond tout teinté et obscuré d''encre de Chine. Il est même un +# certain nombre de foukousas absolument peints. J''ai coloriée, sur un +# crêpe gris, dans l''orbe d''un soleil rouge comme du feu, l''échancrure +# pittoresque d''un passage de sept grues, exécuté avec la science que les +# Japonais possèdent du vol de l''échassier. J''ai encore, jetées sur un +# fond maïs, sans aucun détail de terrain, deux grandes grues blanches, +# à la petite crête rougie de vermillon, au cou, aux pattes, à la queue, +# teintés d''encre de Chine. Et ne vous étonnez pas de rencontrer si +# souvent sur les broderies la grue, cet oiseau qui apparaît dans le +# haut du ciel aux Japonais comme un messager céleste, et qu''ils saluent +# de l''appellation: _O Tsouri Sama_, Sa Seigneurie la Grue. +# +# [1] Il n''est guère besoin de dire que le carré est toujours +# rapporté à son maître par le porteur du présent. +# +# [2] Les foukousas modernes seraient aujourd''hui fabriqués à +# Togané, d''où on les expédierait à Yedo. +# ' +# , ----------- +# 'Sur le boulevard Montmorency, au n° 53, s''élève une maison portant, +# encastré dans son balcon, un profil lauré de Louis XV, en bronze +# doré, qui a tout l''air d''être le médaillon, dont était décorée la +# tribune de musique de la salle à manger de Luciennes, représenté dans +# l''aquarelle de Moreau que l''on voit au Louvre. 
Cette tête, que quelques +# promeneurs regardent d''un œil farouche, n''est point,--ai-je besoin de +# le dire?--une affiche des opinions politiques du propriétaire, elle est +# tout bonnement l''enseigne d''un des nids les plus pleins de choses du +# XVIIIe siècle qui existent à Paris. +# +# La porte noire, que surmonte un élégant dessus de grille de chapelle +# jésuite en fer forgé, la porte ouverte, du bas de l''escalier, de +# l''entrée du vestibule, du seuil de la maison, le visiteur est accueilli +# par des terres cuites, des bronzes, des dessins, des porcelaines du +# siècle aimable par excellence, mêlés à des objets de l''Extrême-Orient, +# qui se trouvaient faire si bon ménage dans les collections de Madame de +# Pompadour et de tous les _curieux_ et les _curiolets_ du temps. +# +# La vie d''aujourd''hui est une vie de combattivité; elle demande dans +# toutes les carrières une concentration, un effort, un travail, qui, en +# son foyer enferment l''homme, dont l''existence n''est plus extérieure +# comme au XVIIIe siècle, n''est plus papillonnante parmi la société +# depuis ses dix-sept ans jusqu''à sa mort. De notre temps on va bien +# encore dans le monde, mais toute la vie ne s''y dépense plus, et le +# _chez-soi_ a cessé d''être l''hôtel garni où l''on ne faisait que coucher. +# Dans cette vie assise au coin du feu, renfermée, sédentaire, la +# créature humaine, et la première venue, a été poussée à vouloir les +# quatre murs de son _home_ agréables, plaisants, amusants aux yeux; et +# cet entour et ce décor de son intérieur, elle l''a cherché et trouvé +# naturellement dans l''objet d''art pur ou dans l''objet d''art industriel, +# plus accessible au goût de tous. Du même coup, ces habitudes moins +# mondaines amenaient un amoindrissement du rôle de la femme dans la +# pensée masculine; elle n''était plus pour nous l''occupation galante de +# toute notre existence, cette occupation qui était autrefois la carrière +# du plus grand nombre, et, à la suite de cette modification dans les +# mœurs, il arrivait ceci: c''est que l''intérêt de l''homme, s''en allant +# de l''être charmant, se reportait en grande partie sur les jolis objets +# inanimés dont la passion revêt un peu de la nature et du caractère +# de l''amour. Au XVIIIe siècle, il n''y a pas de _bibeloteurs_ jeunes: +# c''est là la différence des deux siècles. Pour notre génération, la +# _bricabracomanie_ n''est qu''un bouche-trou de la femme qui ne possède +# plus l''imagination de l''homme, et j''ai fait à mon égard cette remarque, +# que, lorsque par hasard mon cœur s''est trouvé occupé, l''objet d''art ne +# m''était de rien. +# +# Oui, cette passion devenue générale, ce plaisir solitaire, auquel se +# livre presque toute une nation, doit son développement au vide, à +# l''ennui du cœur, et aussi, il faut le reconnaître, à la tristesse +# des jours actuels, à l''incertitude des lendemains, à l''enfantement, +# les pieds devant, de la société nouvelle, à des soucis et à des +# préoccupations qui poussent, comme à la veille d''un déluge, les désirs +# et les envies à se donner la jouissance immédiate de tout ce qui les +# charme, les séduit, les tente: l''oubli du moment dans l''assouvissement +# artistique. 
+# +# Ce sont ces causes, et incontestablement l''éducation de l''œil des +# gens du XIXe siècle, et encore un sentiment tout nouveau, la tendresse +# presque humaine pour les _choses_, qui font, à l''heure qu''il est, de +# presque tout le monde, des collectionneurs et de moi en particulier le +# plus passionné de tous les collectionneurs. +# +# Un riant pavé en marbre blanc et en marbre rouge du Languedoc, avec, +# pour revêtement aux murs et au plafond, un cuir moderne peuplé de +# perroquets fantastiques dorés et peints sur un fond vert d''eau. +# +# Sur ce cuir, dans un désordre cherché, dans un pittoresque +# d''antichambre et d''atelier, toutes sortes de choses voyantes et +# claquantes, de brillants cuivres découpés, des poteries dorées, des +# broderies du Japon et encore des objets bizarres, inattendus, étonnant +# par leur originalité, leur exotisme, et vis-à-vis d''un certain nombre +# desquels je me fais un peu l''effet du bon Père Buffier quand il disait: +# «Voilà des choses que je ne sais pas, il faut que je fasse un livre +# dessus.» +# +# Ça, une petite jardinière à suspension, fabriquée d''une coloquinte +# excentrique, dont la tige tournante et recroquevillée est une tige de +# bronze qui a la flexibilité d''une liane; cette grande planchette fruste +# de bois, toute parcourue des tortils d''un feuillage de lierre, exécuté +# en nacre et en écaille: le porte-éventail qui tient dans l''appartement +# l''éventail ouvert contre le mur; cette petite boule de porcelaine +# jaune impérial si délicatement treillagée: la cage au grillon ou à +# la mouche bourdonnante, que le Chinois aime suspendre au chevet de +# son lit; et cette plaque de faïence figurant une branche de pêcher en +# fleur, modelée à jour dans un cadre de bois en forme d''écran, vous +# représente la décoration de l''angle religieux et mystique d''une chambre +# de prostituée de maison de thé, l''espèce de tableau d''autel devant +# lequel elle place une fleur dans un vase. +# +# Des broderies du Japon, ai-je dit plus haut, c''est là, dans leurs +# cadres de bambous, la riche, la splendide, l''_éclairante_ décoration +# des murs du vestibule et un peu de toute la maison. Ces carrés de soie +# brodés appelés _fusha_ ou _foukousa_ font la chatoyante couverture +# sous laquelle on a l''habitude, dans l''Empire du Lever du Soleil, +# d''envoyer tout présent quelconque, et le plus minime, fût-il même de +# deux œufs[1]. Les anciens _foukousas_ fabriqués à Kioto[2] sont des +# produits d''un art tout particulier au Japon, et auxquels l''Europe +# ne peut rien opposer: de la peinture, de vrais tableaux composés +# et exécutés en soie par un brodeur, où sur les fonds aux adorables +# nuances, et telles qu''en donne le satin ou le crêpe, un oiseau, un +# poisson, une fleur se détache dans le haut relief d''une broderie. +# Et rien là dedans du travail d''un art mécanique, du dessin bête de +# vieille fille de nos broderies à nous, mais des silhouettes d''êtres +# pleins de vie, avec leurs pattes d''oiseau d''un si grand style, avec +# leurs nageoires de poisson d''un si puissant contournement. Quelquefois +# des parties peintes, peintes à l''encre de Chine, s''associent de la +# manière la plus heureuse à la broderie. Je connais, chez Mme Auguste +# Sichel, une fusée de fleurs brodée dans un vase en sparterie peint ou +# imprimé, qui est bien la plus harmonieuse chose qu''il soit possible +# de voir. M. 
de Nittis a fait un écran, d''un admirable et singulier +# carré, où deux grues, brodées en noir sur un fond rose saumoné, ont, +# comme accompagnement et adoucissement de la broderie, des demi-teintes +# doucement lavées d''encre de Chine sur l''étoffe enchanteresse. Et dans +# ce vestibule, il y a, sur un fond lilas, des carpes nageant au milieu +# de branchages de presle brodées en or, et dont le ventre apparaît comme +# argenté par un reflet de bourbe: un effet obtenu par une réserve au +# milieu du fond tout teinté et obscuré d''encre de Chine. Il est même un +# certain nombre de foukousas absolument peints. J''ai coloriée, sur un +# crêpe gris, dans l''orbe d''un soleil rouge comme du feu, l''échancrure +# pittoresque d''un passage de sept grues, exécuté avec la science que les +# Japonais possèdent du vol de l''échassier. J''ai encore, jetées sur un +# fond maïs, sans aucun détail de terrain, deux grandes grues blanches, +# à la petite crête rougie de vermillon, au cou, aux pattes, à la queue, +# teintés d''encre de Chine. Et ne vous étonnez pas de rencontrer si +# souvent sur les broderies la grue, cet oiseau qui apparaît dans le +# haut du ciel aux Japonais comme un messager céleste, et qu''ils saluent +# de l''appellation: _O Tsouri Sama_, Sa Seigneurie la Grue. +# +# [1] Il n''est guère besoin de dire que le carré est toujours +# rapporté à son maître par le porteur du présent. +# +# [2] Les foukousas modernes seraient aujourd''hui fabriqués à +# Togané, d''où on les expédierait à Yedo. +# ' +# ); +# commit; +# set count off; +# set heading off; +# set list off; +# select 'All OK.' from rdb$database; +# ''' +# #------------------------------------------- +# f_init_ddl=open( os.path.join(context['temp_directory'],'tmp_check_ddl.sql'), 'w') +# f_init_ddl.write(sql_ddl) +# flush_and_close( f_init_ddl ) +# +# f_init_log = open( os.path.join(context['temp_directory'],'tmp_check_ddl.log'), 'w') +# f_init_err = open( os.path.join(context['temp_directory'],'tmp_check_ddl.err'), 'w') +# subprocess.call( [ context['isql_path'], dsn, '-ch', 'utf8', '-i', f_init_ddl.name ], stdout = f_init_log,stderr = f_init_err) +# flush_and_close( f_init_log ) +# flush_and_close( f_init_err ) +# +# f_meta_log1 = open( os.path.join(context['temp_directory'],'tmp_initial_meta.sql'), 'w') +# f_meta_err1 = open( os.path.join(context['temp_directory'],'tmp_initial_meta.err'), 'w') +# subprocess.call( [ context['isql_path'], dsn, '-x', '-ch', 'utf8' ], stdout = f_meta_log1, stderr = f_meta_err1) +# flush_and_close( f_meta_log1 ) +# flush_and_close( f_meta_err1 ) +# +# +# # Backup with '-ZIP' command switch +# ######## +# +# f_backup_log = open( os.path.join(context['temp_directory'],'tmp_backup.log'), 'w') +# f_backup_err = open( os.path.join(context['temp_directory'],'tmp_backup.err'), 'w') +# subprocess.call( [ context['gbak_path'], '-b', '-zip', dsn, tmpfbk ], stdout = f_backup_log, stderr = f_backup_err) +# flush_and_close( f_backup_log ) +# flush_and_close( f_backup_err ) +# +# # Restore: +# ########## +# +# f_restore_log = open( os.path.join(context['temp_directory'],'tmp_restore.log'), 'w') +# f_restore_err = open( os.path.join(context['temp_directory'],'tmp_restore.err'), 'w') +# subprocess.call( [ context['gbak_path'], '-rep', tmpfbk, 'localhost:' + tmpfdb ], stdout = f_restore_log, stderr = f_restore_err) +# flush_and_close( f_restore_log ) +# flush_and_close( f_restore_err ) +# +# # Validate restored database: +# ########## +# +# f_validate_log = open( 
os.path.join(context['temp_directory'],'tmp_validate.log'), 'w') +# f_validate_err = open( os.path.join(context['temp_directory'],'tmp_validate.err'), 'w') +# subprocess.call( [ context['gfix_path'], '-v', '-full', 'localhost:' + tmpfdb ], stdout = f_validate_log, stderr = f_validate_err) +# flush_and_close( f_validate_log ) +# flush_and_close( f_validate_err ) +# +# f_meta_log2 = open( os.path.join(context['temp_directory'],'tmp_restored_meta.sql'), 'w') +# f_meta_err2 = open( os.path.join(context['temp_directory'],'tmp_restored_meta.err'), 'w') +# subprocess.call( [ context['isql_path'], dsn, '-x', '-ch', 'utf8' ], stdout = f_meta_log2, stderr = f_meta_err2) +# flush_and_close( f_meta_log2 ) +# flush_and_close( f_meta_err2 ) +# +# +# # Compare extracted metadata: +# ######### +# +# f_meta_log1 = open( f_meta_log1.name,'r' ) +# f_meta_log2 = open( f_meta_log2.name,'r') +# +# f_diff_txt=open( os.path.join(context['temp_directory'],'tmp_meta_diff.txt'), 'w') +# f_diff_txt.write( ''.join( difflib.unified_diff( f_meta_log1.readlines(), f_meta_log2.readlines() ) ) ) +# flush_and_close( f_diff_txt ) +# +# flush_and_close( f_meta_log1 ) +# flush_and_close( f_meta_log2 ) +# +# # Check: all files from following set must be EMPTY: +# ######## +# +# f_list = set( (f_init_err, f_meta_err1, f_meta_err2, f_backup_err, f_restore_err, f_validate_log, f_validate_err, f_diff_txt) ) +# for x in f_list: +# with open(x.name, 'r') as f: +# for line in f: +# if line.split(): +# print('UNEXPECTED CONTENT in '+ x.name + ': '+line) +# +# +# ###################################################################### +# # Cleanup: +# f_list |= set( ( f_init_ddl, f_init_log, f_meta_log1, f_meta_log2, f_backup_log, f_restore_log, tmpfbk, tmpfdb) ) +# cleanup( f_list ) +# +# ----------------------------------- diff --git a/tests/functional/view/create/test_01.py b/tests/functional/view/create/test_01.py index a2a2e968..39ff5f16 100644 --- a/tests/functional/view/create/test_01.py +++ b/tests/functional/view/create/test_01.py @@ -1,43 +1,35 @@ #coding:utf-8 -# -# id: functional.view.create.01 -# title: CREATE VIEW -# decription: CREATE VIEW -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.view.create.create_view_01 + +""" +ID: view.create-01 +TITLE: CREATE VIEW +DESCRIPTION: +FBTEST: functional.view.create.01 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +init_script = """CREATE TABLE tb(id INT); +commit; +""" -substitutions_1 = [] +db = db_factory(init=init_script) -init_script_1 = """CREATE TABLE tb(id INT); -commit;""" +test_script = """CREATE VIEW test AS SELECT * FROM tb; +SHOW VIEW test; +""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +act = isql_act('db', test_script) -test_script_1 = """CREATE VIEW test AS SELECT * FROM tb; -SHOW VIEW test;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ID INTEGER Nullable +expected_stdout = """ID INTEGER Nullable View Source: ==== ====== -SELECT * FROM tb""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +SELECT * FROM tb +""" +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git 
a/tests/functional/view/create/test_02.py b/tests/functional/view/create/test_02.py index 84abe546..eb0c5261 100644 --- a/tests/functional/view/create/test_02.py +++ b/tests/functional/view/create/test_02.py @@ -1,44 +1,36 @@ #coding:utf-8 -# -# id: functional.view.create.02 -# title: CREATE VIEW -# decription: CREATE VIEW -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# tracker_id: -# min_versions: [] -# versions: 2.5 -# qmid: functional.view.create.create_view_02 + +""" +ID: view.create-02 +TITLE: CREATE VIEW +DESCRIPTION: +FBTEST: functional.view.create.02 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.5 -# resources: None +init_script = """CREATE TABLE tb(id INT); +commit; +""" -substitutions_1 = [] +db = db_factory(init=init_script) -init_script_1 = """CREATE TABLE tb(id INT); -commit;""" +test_script = """CREATE VIEW test (id,num) AS SELECT id,5 FROM tb; +SHOW VIEW test; +""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +act = isql_act('db', test_script) -test_script_1 = """CREATE VIEW test (id,num) AS SELECT id,5 FROM tb; -SHOW VIEW test;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stdout_1 = """ID INTEGER Nullable +expected_stdout = """ID INTEGER Nullable NUM INTEGER Expression View Source: ==== ====== -SELECT id,5 FROM tb""" - -@pytest.mark.version('>=2.5') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +SELECT id,5 FROM tb +""" +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/view/create/test_03.py b/tests/functional/view/create/test_03.py index 86444d88..de576447 100644 --- a/tests/functional/view/create/test_03.py +++ b/tests/functional/view/create/test_03.py @@ -1,46 +1,38 @@ #coding:utf-8 -# -# id: functional.view.create.03 -# title: CREATE VIEW - bad number of columns -# decription: CREATE VIEW - bad number of columns -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# tracker_id: -# min_versions: [] -# versions: 3.0 -# qmid: functional.view.create.create_view_03 + +""" +ID: view.create-03 +TITLE: CREATE VIEW - bad number of columns +DESCRIPTION: +FBTEST: functional.view.create.03 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +init_script = """CREATE TABLE tb(id INT); +commit; +""" -substitutions_1 = [] +db = db_factory(init=init_script) -init_script_1 = """CREATE TABLE tb(id INT); -commit;""" +test_script = """CREATE VIEW test (id,num,text) AS SELECT id,5 FROM tb; +SHOW VIEW test; +""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +act = isql_act('db', test_script) -test_script_1 = """CREATE VIEW test (id,num,text) AS SELECT id,5 FROM tb; -SHOW VIEW test;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stderr_1 = """Statement failed, SQLSTATE = 07002 +expected_stderr = """Statement failed, SQLSTATE = 07002 unsuccessful metadata update -CREATE VIEW TEST failed -SQL error code = -607 -Invalid command -number of columns does not match select list -There is no view TEST in this database""" +There is no view TEST in this database +""" @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert 
act_1.clean_stderr == act_1.clean_expected_stderr - +def test_1(act: Action): + act.expected_stderr = expected_stderr + act.execute() + assert act.clean_stderr == act.clean_expected_stderr diff --git a/tests/functional/view/create/test_04.py b/tests/functional/view/create/test_04.py index ad11be7d..6c2b34e7 100644 --- a/tests/functional/view/create/test_04.py +++ b/tests/functional/view/create/test_04.py @@ -1,45 +1,37 @@ #coding:utf-8 -# -# id: functional.view.create.04 -# title: CREATE VIEW - bad number of columns -# decription: CREATE VIEW - bad number of columns -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# tracker_id: -# min_versions: [] -# versions: 3.0 -# qmid: functional.view.create.create_view_04 + +""" +ID: view.create-04 +TITLE: CREATE VIEW - bad number of columns +DESCRIPTION: +FBTEST: functional.view.create.04 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +init_script = """CREATE TABLE tb(id INT); +""" -substitutions_1 = [] +db = db_factory(init=init_script) -init_script_1 = """CREATE TABLE tb(id INT);""" +test_script = """CREATE VIEW test (id) AS SELECT id,5 FROM tb; +SHOW VIEW test; +""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +act = isql_act('db', test_script) -test_script_1 = """CREATE VIEW test (id) AS SELECT id,5 FROM tb; -SHOW VIEW test;""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stderr_1 = """Statement failed, SQLSTATE = 07002 +expected_stderr = """Statement failed, SQLSTATE = 07002 unsuccessful metadata update -CREATE VIEW TEST failed -SQL error code = -607 -Invalid command -number of columns does not match select list -There is no view TEST in this database""" +There is no view TEST in this database +""" @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - +def test_1(act: Action): + act.expected_stderr = expected_stderr + act.execute() + assert act.clean_stderr == act.clean_expected_stderr diff --git a/tests/functional/view/create/test_05.py b/tests/functional/view/create/test_05.py index c7298ab0..dcc6f8b4 100644 --- a/tests/functional/view/create/test_05.py +++ b/tests/functional/view/create/test_05.py @@ -1,47 +1,38 @@ #coding:utf-8 -# -# id: functional.view.create.05 -# title: CREATE VIEW -# decription: CREATE VIEW -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# INSERT -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.view.create.create_view_05 + +""" +ID: view.create-05 +TITLE: CREATE VIEW +DESCRIPTION: +FBTEST: functional.view.create.05 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE tb(id INT); +init_script = """CREATE TABLE tb(id INT); INSERT INTO tb VALUES(3); INSERT INTO tb VALUES(10); -COMMIT;""" +COMMIT; +""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """CREATE VIEW test (id,num) AS SELECT id,5 FROM tb; -SELECT * FROM test;""" +test_script = """CREATE VIEW test (id,num) AS SELECT id,5 FROM tb; +SELECT * FROM test; +""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ ID NUM +expected_stdout = """ ID NUM ============ ============ 3 5 -10 5""" - 
-@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +10 5 +""" +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/view/create/test_06.py b/tests/functional/view/create/test_06.py index 2eddc51f..b7ae51be 100644 --- a/tests/functional/view/create/test_06.py +++ b/tests/functional/view/create/test_06.py @@ -1,45 +1,37 @@ #coding:utf-8 -# -# id: functional.view.create.06 -# title: CREATE VIEW - updateable -# decription: CREATE VIEW - updateable -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.view.create.create_view_06 + +""" +ID: view.create-06 +TITLE: CREATE VIEW - updateable +DESCRIPTION: +FBTEST: functional.view.create.06 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +init_script = """CREATE TABLE tb(id INT); +commit; +""" -substitutions_1 = [] +db = db_factory(init=init_script) -init_script_1 = """CREATE TABLE tb(id INT); -commit;""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """CREATE VIEW test (id) AS SELECT id FROM tb; +test_script = """CREATE VIEW test (id) AS SELECT id FROM tb; INSERT INTO test VALUES(2); COMMIT; -SELECT * FROM test;""" +SELECT * FROM test; +""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ ID +expected_stdout = """ ID ============ -2""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +2 +""" +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/view/create/test_07.py b/tests/functional/view/create/test_07.py index f95d76bf..df918689 100644 --- a/tests/functional/view/create/test_07.py +++ b/tests/functional/view/create/test_07.py @@ -1,44 +1,36 @@ #coding:utf-8 -# -# id: functional.view.create.07 -# title: CREATE VIEW - updateable WITH CHECK OPTION -# decription: CREATE VIEW - updateable WITH CHECK OPTION -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# tracker_id: -# min_versions: [] -# versions: 1.0 -# qmid: functional.view.create.create_view_07 + +""" +ID: view.create-07 +TITLE: CREATE VIEW - updateable WITH CHECK OPTION +DESCRIPTION: +FBTEST: functional.view.create.07 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 1.0 -# resources: None +init_script = """CREATE TABLE tb(id INT); +""" -substitutions_1 = [] +db = db_factory(init=init_script) -init_script_1 = """CREATE TABLE tb(id INT);""" - -db_1 = db_factory(sql_dialect=3, init=init_script_1) - -test_script_1 = """CREATE VIEW test (id) AS SELECT id FROM tb WHERE id<10 WITH CHECK OPTION; +test_script = """CREATE VIEW test (id) AS SELECT id FROM tb WHERE id<10 WITH CHECK OPTION; INSERT INTO test VALUES(2); COMMIT; -SELECT * FROM test;""" +SELECT * FROM test; +""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """ ID +expected_stdout = """ 
ID ============ -2""" - -@pytest.mark.version('>=1.0') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +2 +""" +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/functional/view/create/test_08.py b/tests/functional/view/create/test_08.py index e8d6df3b..70e67319 100644 --- a/tests/functional/view/create/test_08.py +++ b/tests/functional/view/create/test_08.py @@ -1,42 +1,34 @@ #coding:utf-8 -# -# id: functional.view.create.08 -# title: CREATE VIEW - updateable WITH CHECK OPTION -# decription: CREATE VIEW - updateable WITH CHECK OPTION -# -# Dependencies: -# CREATE DATABASE -# CREATE TABLE -# tracker_id: -# min_versions: [] -# versions: 3.0 -# qmid: functional.view.create.create_view_08 + +""" +ID: view.create-08 +TITLE: CREATE VIEW - updateable WITH CHECK OPTION +DESCRIPTION: +FBTEST: functional.view.create.08 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 3.0 -# resources: None +init_script = """CREATE TABLE tb(id INT); +commit; +""" -substitutions_1 = [('-At trigger.*', '')] +db = db_factory(init=init_script) -init_script_1 = """CREATE TABLE tb(id INT); -commit;""" +test_script = """CREATE VIEW test (id) AS SELECT id FROM tb WHERE id<10 WITH CHECK OPTION; +INSERT INTO test VALUES(10); +""" -db_1 = db_factory(sql_dialect=3, init=init_script_1) +act = isql_act('db', test_script, substitutions=[('-At trigger.*', '')]) -test_script_1 = """CREATE VIEW test (id) AS SELECT id FROM tb WHERE id<10 WITH CHECK OPTION; -INSERT INTO test VALUES(10);""" - -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) - -expected_stderr_1 = """Statement failed, SQLSTATE = 23000 +expected_stderr = """Statement failed, SQLSTATE = 23000 Operation violates CHECK constraint on view or table TEST --At trigger 'CHECK_1'""" +-At trigger 'CHECK_1' +""" @pytest.mark.version('>=3.0') -def test_1(act_1: Action): - act_1.expected_stderr = expected_stderr_1 - act_1.execute() - assert act_1.clean_stderr == act_1.clean_expected_stderr - +def test_1(act: Action): + act.expected_stderr = expected_stderr + act.execute() + assert act.clean_stderr == act.clean_expected_stderr diff --git a/tests/functional/view/create/test_10.py b/tests/functional/view/create/test_10.py index 1545340f..eafde7d0 100644 --- a/tests/functional/view/create/test_10.py +++ b/tests/functional/view/create/test_10.py @@ -1,42 +1,38 @@ #coding:utf-8 -# -# id: functional.view.create.10 -# title: CREATE VIEW as SELECT .... -# decription: Create view without field list -# tracker_id: CORE-831 -# min_versions: [] -# versions: 2.1 -# qmid: functional.view.create.create_view_10 + +""" +ID: view.create-09 +TITLE: CREATE VIEW as SELECT .... 
+DESCRIPTION: + Create view without field list +FBTEST: functional.view.create.10 +""" import pytest -from firebird.qa import db_factory, isql_act, Action +from firebird.qa import * -# version: 2.1 -# resources: None - -substitutions_1 = [] - -init_script_1 = """CREATE TABLE T1 (ID INTEGER, NAME VARCHAR(10)); +init_script = """CREATE TABLE T1 (ID INTEGER, NAME VARCHAR(10)); COMMIT; """ -db_1 = db_factory(sql_dialect=3, init=init_script_1) +db = db_factory(init=init_script) -test_script_1 = """CREATE VIEW V1 AS SELECT ID AS VID, NAME FROM T1; +test_script = """CREATE VIEW V1 AS SELECT ID AS VID, NAME FROM T1; COMMIT; -SHOW VIEW V1;""" +SHOW VIEW V1; +""" -act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) +act = isql_act('db', test_script) -expected_stdout_1 = """VID INTEGER Nullable +expected_stdout = """VID INTEGER Nullable NAME VARCHAR(10) Nullable View Source: ==== ====== -SELECT ID AS VID, NAME FROM T1""" - -@pytest.mark.version('>=2.1') -def test_1(act_1: Action): - act_1.expected_stdout = expected_stdout_1 - act_1.execute() - assert act_1.clean_stdout == act_1.clean_expected_stdout +SELECT ID AS VID, NAME FROM T1 +""" +@pytest.mark.version('>=3.0') +def test_1(act: Action): + act.expected_stdout = expected_stdout + act.execute() + assert act.clean_stdout == act.clean_expected_stdout diff --git a/tests/test_full_join_push_where_predicate.py b/tests/test_full_join_push_where_predicate.py index f9ec1258..27436acb 100644 --- a/tests/test_full_join_push_where_predicate.py +++ b/tests/test_full_join_push_where_predicate.py @@ -1,18 +1,18 @@ #coding:utf-8 """ -ID: full-join-push-where-predicate +ID: issue.full-join-push-where-predicate TITLE: WHERE-filter must be applied after FULL JOIN result DESCRIPTION: See (rus): https://www.sql.ru/forum/1326682/dva-cte-ih-full-join-i-uslovie-daut-nekorrektnyy-rezultat - Confirmed bug on 2.5.9.27151. - Checked on 3.0.6.33322, 4.0.0.2073 -- all fine. + Confirmed bug on 2.5.9.27151. + Checked on 3.0.6.33322, 4.0.0.2073 -- all fine. +FBTEST: full_join_push_where_predicate """ import pytest from firebird.qa import * - db = db_factory() test_script = """ @@ -51,7 +51,7 @@ expected_stdout = """ T2_F1 d """ -@pytest.mark.version('>=3') +@pytest.mark.version('>=3.0') def test_1(act: Action): act.expected_stdout = expected_stdout act.execute()