6
0
mirror of https://github.com/FirebirdSQL/firebird-qa.git synced 2025-01-22 13:33:07 +01:00

More python tests

This commit is contained in:
Pavel Císař 2021-11-12 18:29:54 +01:00
parent c893f5c946
commit e7b07eade0
13 changed files with 1280 additions and 696 deletions

View File

@ -2,24 +2,24 @@
#
# id: bugs.core_1746
# title: Expression index can be created while doing inserts into table
# decription:
# decription:
# We check three cases of Tx setting: WAIT, NO WAIT and LOCK TIMEOUT n.
#
#
# First ISQL session always inserts some number of rows and falls in delay (it is created
# artificially by attempting to insert duplicate key in index in Tx with lock timeout = 7).
#
# Second ISQL is launched in SYNC mode after small delay (3 seconds) and starts transaction
#
# Second ISQL is launched in SYNC mode after small delay (3 seconds) and starts transaction
# with corresponding WAIT/NO WAIT/LOCK TIMEOUT clause.
#
#
# If Tx starts with NO wait or lock timeout then this (2nd) ISQL always MUST FAIL.
#
#
# After 2nd ISQL will finish, we have to wait yet 5 seconds for 1st ISQL will gone.
# Total time of these two delays (3+5=8) must be greater than lock timeout in the script which
# Total time of these two delays (3+5=8) must be greater than lock timeout in the script which
# is running by 1st ISQL (7 seconds).
#
#
# Initial version of this test did use force interruption of both ISQL processes but this was unneeded,
# though it helped to discover some other bug in engine which produced bugcheck - see CORE-5275.
#
#
# Checked on:
# 4.0.0.2164 SS: 37.707s.
# 4.0.0.2119 SS: 37.982s.
@ -27,23 +27,30 @@
# 3.0.7.33356 SS: 36.675s.
# 3.0.7.33356 CS: 37.839s.
# 2.5.9.27150 SC: 35.755s.
#
#
# tracker_id: CORE-1746
# min_versions: ['2.5.6']
# versions: 2.5.6
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
import time
import subprocess
from pathlib import Path
from firebird.qa import db_factory, python_act, Action, temp_file
# version: 2.5.6
# resources: None
substitutions_1 = [('0: CREATE INDEX LOG: RDB_EXPR_BLOB.*', '0: CREATE INDEX LOG: RDB_EXPR_BLOB'), ('BULK_INSERT_START.*', 'BULK_INSERT_START'), ('BULK_INSERT_FINISH.*', 'BULK_INSERT_FINISH'), ('CREATE_INDX_START.*', 'CREATE_INDX_START'), ('AFTER LINE.*', 'AFTER LINE')]
substitutions_1 = [('0: CREATE INDEX LOG: RDB_EXPR_BLOB.*', '0: CREATE INDEX LOG: RDB_EXPR_BLOB'),
('BULK_INSERT_START.*', 'BULK_INSERT_START'),
('BULK_INSERT_FINISH.*', 'BULK_INSERT_FINISH'),
('CREATE_INDX_START.*', 'CREATE_INDX_START'),
('AFTER LINE.*', 'AFTER LINE')]
init_script_1 = """
create or alter procedure sp_ins(n int) as begin end;
recreate table test(x int unique using index test_x, s varchar(10) default 'qwerty' );
set term ^;
@ -79,49 +86,49 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# import os
# import time
# import subprocess
#
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
#
#
# db_conn.close()
#
#
# #--------------------------------------------
#
#
# def flush_and_close(file_handle):
# # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os
#
#
# file_handle.flush()
# if file_handle.mode not in ('r', 'rb'):
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno())
# file_handle.close()
#
#
# #--------------------------------------------
#
#
# def cleanup( f_names_list ):
# global os
# for i in range(len( f_names_list )):
# if os.path.isfile( f_names_list[i]):
# os.remove( f_names_list[i] )
#
#
#
#
# #########################################################
#
#
# # NB-1: value of 'rows_to_add' must have value that will require at least
# # 4...5 seconds for inserting such number of rows
# # NB-2: FB 2.5 makes DML *faster* than 3.0 in single-connection mode!
#
#
# rows_to_add=1000
#
#
# sql_bulk_insert=''' set bail on;
# set list on;
#
#
# -- do NOT use it !! >>> alter sequence g restart with 0; -- gen_id(g,1) will return 0 rather than 1 since 06-aug-2020 on FB 4.x !!
#
#
# delete from test;
# set term ^;
# execute block as
@ -132,9 +139,9 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# ^
# set term ;^
# commit;
#
#
# set transaction lock timeout 7; -- THIS LOCK TIMEOUT SERVES ONLY FOR DELAY, see below auton Tx start.
#
#
# select current_timestamp as bulk_insert_start from rdb$database;
# set term ^;
# execute block as
@ -147,7 +154,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# -- #########################################################
# in autonomous transaction do
# insert into test( x ) values( %(rows_to_add)s ); -- this will cause delay because of duplicate in index
# when any do
# when any do
# begin
# i = gen_id(g,1);
# end
@ -158,37 +165,37 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# commit;
# select current_timestamp as bulk_insert_finish from rdb$database;
# '''
#
#
# sql_create_indx=''' set bail on;
# set list on;
# set blob all;
# select
# iif( gen_id(g,0) > 0 and gen_id(g,0) < 1 + %(rows_to_add)s,
# 'OK, IS RUNNING',
# iif( gen_id(g,0) <=0,
# 'WRONG: not yet started, current gen_id='||gen_id(g,0),
# select
# iif( gen_id(g,0) > 0 and gen_id(g,0) < 1 + %(rows_to_add)s,
# 'OK, IS RUNNING',
# iif( gen_id(g,0) <=0,
# 'WRONG: not yet started, current gen_id='||gen_id(g,0),
# 'WRONG: already finished, rows_to_add='||%(rows_to_add)s ||', current gen_id='||gen_id(g,0)
# )
# ) as inserts_state,
# current_timestamp as create_indx_start
# ) as inserts_state,
# current_timestamp as create_indx_start
# from rdb$database;
# set autoddl off;
# commit;
#
#
# set echo on;
# set transaction %(tx_decl)s;
#
# create index test_%(idx_name)s on test computed by( %(idx_expr)s );
#
# create index test_%(idx_name)s on test computed by( %(idx_expr)s );
# commit;
# set echo off;
#
# select
# iif( gen_id(g,0) >= 1 + %(rows_to_add)s,
# 'OK, FINISHED',
#
# select
# iif( gen_id(g,0) >= 1 + %(rows_to_add)s,
# 'OK, FINISHED',
# 'SOMETHING WRONG: current gen_id=' || gen_id(g,0)||', rows_to_add='||%(rows_to_add)s
# ) as inserts_state
# from rdb$database;
#
#
# set count on;
# select
# rdb$index_name
@ -208,65 +215,69 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# drop index test_%(idx_name)s;
# commit;
# '''
#
#
# tx_param=['WAIT','NO WAIT','LOCK TIMEOUT 1']
#
#
# for i in range(len(tx_param)):
#
#
# #if i >= 2:
# # continue # temply!
#
#
# f_bulk_insert_sql = open( os.path.join(context['temp_directory'],'tmp_1746_ins.sql'), 'w')
# f_bulk_insert_sql.write(sql_bulk_insert % locals() )
# f_bulk_insert_sql.close()
#
#
# tx_decl=tx_param[i]
# idx_name=tx_decl.replace(' ','_')
# idx_expr="'"+idx_name+"'|| s"
#
#
# f_create_indx_sql = open( os.path.join(context['temp_directory'],'tmp_1746_idx_%s.sql' % str(i) ), 'w')
# f_create_indx_sql.write( sql_create_indx % locals() )
# f_create_indx_sql.close()
#
#
# f_bulk_insert_log = open( os.path.join(context['temp_directory'],'tmp_1746_ins_%s.log' % str(i) ), 'w')
#
#
# # This will insert rows and then stay in pause 10 seconds:
# p_bulk_insert=subprocess.Popen( [ context['isql_path'], dsn, "-q", "-i", f_bulk_insert_sql.name ],
# stdout = f_bulk_insert_log,
# stderr = subprocess.STDOUT
# )
#
#
# # 3.0 Classic: seems that it requires at least 2 seconds for ISQL be loaded into memory.
# time.sleep(3)
#
#
# f_create_indx_log = open( os.path.join(context['temp_directory'],'tmp_1746_idx_%s.log' % str(i) ), 'w')
#
#
# # This will wait until first ISQL finished:
# subprocess.call( [ context['isql_path'], dsn, "-n", "-q", "-i", f_create_indx_sql.name ],
# stdout = f_create_indx_log,
# stderr = subprocess.STDOUT
# )
#
#
# time.sleep(7) # NB: this delay plus previous (3+5=8) must be GREATER than lock timeout in <sql_bulk_insert>
#
#
# p_bulk_insert.terminate()
# flush_and_close( f_bulk_insert_log )
# flush_and_close( f_create_indx_log )
#
#
#
#
# with open( f_bulk_insert_log.name,'r') as f:
# for line in f:
# if line.split():
# print( str(i)+': BULK INSERTS LOG: '+line.strip().upper() )
#
#
# with open( f_create_indx_log.name,'r') as f:
# for line in f:
# if line.split():
# print( str(i)+': CREATE INDEX LOG: '+line.strip().upper() )
#
#
# cleanup( [i.name for i in (f_bulk_insert_sql, f_create_indx_sql, f_bulk_insert_log, f_create_indx_log)] )
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
tmp_file_bi_in = temp_file('bulk_insert.sql')
tmp_file_bi_out = temp_file('bulk_insert.out')
expected_stdout_1 = """
0: BULK INSERTS LOG: BULK_INSERT_START
@ -274,7 +285,7 @@ expected_stdout_1 = """
0: CREATE INDEX LOG: INSERTS_STATE OK, IS RUNNING
0: CREATE INDEX LOG: CREATE_INDX_START
0: CREATE INDEX LOG: SET TRANSACTION WAIT;
0: CREATE INDEX LOG: CREATE INDEX TEST_WAIT ON TEST COMPUTED BY( 'WAIT'|| S );
0: CREATE INDEX LOG: CREATE INDEX TEST_WAIT ON TEST COMPUTED BY('WAIT'|| S);
0: CREATE INDEX LOG: COMMIT;
0: CREATE INDEX LOG: SET ECHO OFF;
0: CREATE INDEX LOG: INSERTS_STATE OK, FINISHED
@ -282,7 +293,7 @@ expected_stdout_1 = """
0: CREATE INDEX LOG: RDB$UNIQUE_FLAG 0
0: CREATE INDEX LOG: RDB$INDEX_INACTIVE 0
0: CREATE INDEX LOG: RDB_EXPR_BLOB
0: CREATE INDEX LOG: ( 'WAIT'|| S )
0: CREATE INDEX LOG: ('WAIT'|| S)
0: CREATE INDEX LOG: RECORDS AFFECTED: 1
0: CREATE INDEX LOG: SET PLAN ON;
0: CREATE INDEX LOG: SELECT 1 FROM TEST WHERE 'WAIT'|| S > '' ROWS 0;
@ -295,31 +306,166 @@ expected_stdout_1 = """
1: CREATE INDEX LOG: INSERTS_STATE OK, IS RUNNING
1: CREATE INDEX LOG: CREATE_INDX_START
1: CREATE INDEX LOG: SET TRANSACTION NO WAIT;
1: CREATE INDEX LOG: CREATE INDEX TEST_NO_WAIT ON TEST COMPUTED BY( 'NO_WAIT'|| S );
1: CREATE INDEX LOG: CREATE INDEX TEST_NO_WAIT ON TEST COMPUTED BY('NO_WAIT'|| S);
1: CREATE INDEX LOG: COMMIT;
1: CREATE INDEX LOG: STATEMENT FAILED, SQLSTATE = 40001
1: CREATE INDEX LOG: LOCK CONFLICT ON NO WAIT TRANSACTION
1: CREATE INDEX LOG: -UNSUCCESSFUL METADATA UPDATE
1: CREATE INDEX LOG: -OBJECT TABLE "TEST" IS IN USE
1: CREATE INDEX LOG: AFTER LINE
2: BULK INSERTS LOG: BULK_INSERT_START
2: BULK INSERTS LOG: BULK_INSERT_FINISH
2: CREATE INDEX LOG: INSERTS_STATE OK, IS RUNNING
2: CREATE INDEX LOG: CREATE_INDX_START
2: CREATE INDEX LOG: SET TRANSACTION LOCK TIMEOUT 1;
2: CREATE INDEX LOG: CREATE INDEX TEST_LOCK_TIMEOUT_1 ON TEST COMPUTED BY( 'LOCK_TIMEOUT_1'|| S );
2: CREATE INDEX LOG: CREATE INDEX TEST_LOCK_TIMEOUT_1 ON TEST COMPUTED BY('LOCK_TIMEOUT_1'|| S);
2: CREATE INDEX LOG: COMMIT;
2: CREATE INDEX LOG: STATEMENT FAILED, SQLSTATE = 40001
2: CREATE INDEX LOG: LOCK TIME-OUT ON WAIT TRANSACTION
2: CREATE INDEX LOG: -UNSUCCESSFUL METADATA UPDATE
2: CREATE INDEX LOG: -OBJECT TABLE "TEST" IS IN USE
2: CREATE INDEX LOG: AFTER LINE
"""
"""
@pytest.mark.version('>=2.5.6')
@pytest.mark.xfail
def test_1(db_1):
pytest.fail("Test not IMPLEMENTED")
def test_1(act_1: Action, tmp_file_bi_in: Path, tmp_file_bi_out: Path, capsys):
    """Check that an expression index can be created while inserts run in another attachment.

    Scenario (repeated for three Tx modes: WAIT, NO WAIT, LOCK TIMEOUT 1):
    a first ISQL session inserts rows and then stalls (it attempts to insert a
    duplicate key inside a transaction started with ``lock timeout 7``); a
    second session then tries to CREATE a computed-by index on the same table.
    With NO WAIT / LOCK TIMEOUT the second session MUST fail with a lock
    conflict; with WAIT it must succeed after the first session is gone.
    Output of both sessions is collected, tagged per iteration and compared
    against ``expected_stdout_1``.
    """
    # NB-1: value of 'rows_to_add' must have value that will require at least
    #       4...5 seconds for inserting such number of rows
    # NB-2: FB 2.5 makes DML *faster* than 3.0 in single-connection mode!
    rows_to_add = 1000
    # Script for the FIRST (bulk-insert) session; written to a temp file so it
    # can be fed to an asynchronously launched ISQL process.
    tmp_file_bi_in.write_text(f'''
set bail on;
set list on;
-- do NOT use it !! >>> alter sequence g restart with 0; -- gen_id(g,1) will return 0 rather than 1 since 06-aug-2020 on FB 4.x !!
delete from test;
set term ^;
execute block as
    declare c bigint;
begin
    c = gen_id(g, -gen_id(g, 0)); -- restart sequence
end
^
set term ;^
commit;
set transaction lock timeout 7; -- THIS LOCK TIMEOUT SERVES ONLY FOR DELAY, see below auton Tx start.
select current_timestamp as bulk_insert_start from rdb$database;
set term ^;
execute block as
    declare i int;
begin
    execute procedure sp_ins({rows_to_add});
    begin
        -- #########################################################
        -- ####################### D E L A Y #####################
        -- #########################################################
        in autonomous transaction do
        insert into test( x ) values({rows_to_add}); -- this will cause delay because of duplicate in index
        when any do
        begin
            i = gen_id(g,1);
        end
    end
end
^
set term ;^
commit;
select current_timestamp as bulk_insert_finish from rdb$database;
''')
    tx_param = ['WAIT', 'NO WAIT', 'LOCK TIMEOUT 1']
    #
    for i, tx_decl in enumerate(tx_param):
        idx_name = tx_decl.replace(' ', '_')
        idx_expr = "'" + idx_name + "'|| s"
        # Script for the SECOND session: reports the state of the generator
        # (proves that bulk insert is still running), then tries to create the
        # computed index under the transaction mode being tested.
        sql_create_indx = f'''
set bail on;
set list on;
set blob all;
select
    iif( gen_id(g,0) > 0 and gen_id(g,0) < 1 + {rows_to_add},
        'OK, IS RUNNING',
        iif( gen_id(g,0) <=0,
            'WRONG: not yet started, current gen_id='||gen_id(g,0),
            'WRONG: already finished, rows_to_add='|| {rows_to_add} ||', current gen_id='||gen_id(g,0)
        )
    ) as inserts_state,
    current_timestamp as create_indx_start
from rdb$database;
set autoddl off;
commit;
set echo on;
set transaction {tx_decl};
create index test_{idx_name} on test computed by({idx_expr});
commit;
set echo off;
select
    iif( gen_id(g,0) >= 1 + {rows_to_add},
        'OK, FINISHED',
        'SOMETHING WRONG: current gen_id=' || gen_id(g,0)||', rows_to_add='|| {rows_to_add}
    ) as inserts_state
from rdb$database;
set count on;
select
    rdb$index_name
    ,coalesce(rdb$unique_flag,0) as rdb$unique_flag
    ,coalesce(rdb$index_inactive,0) as rdb$index_inactive
    ,rdb$expression_source as rdb_expr_blob
from rdb$indices ri
where ri.rdb$index_name = upper( 'test_{idx_name}' )
;
set count off;
set echo on;
set plan on;
select 1 from test where {idx_expr} > '' rows 0;
set plan off;
set echo off;
commit;
drop index test_{idx_name};
commit;
'''
        with open(tmp_file_bi_out, mode='w') as f_bulk_insert_log:
            # This will insert rows and then stay in pause 10 seconds:
            p_bulk_insert = subprocess.Popen([act_1.vars['isql'], act_1.db.dsn,
                                              '-user', act_1.db.user,
                                              '-password', act_1.db.password,
                                              '-q', '-i', str(tmp_file_bi_in)],
                                             stdout=f_bulk_insert_log,
                                             stderr=subprocess.STDOUT
                                             )
            # 3.0 Classic: seems that it requires at least 2 seconds for ISQL be loaded into memory.
            time.sleep(3)
            # This will wait until first ISQL finished.
            # Stderr is intentionally not checked here: for NO WAIT / LOCK TIMEOUT
            # the CREATE INDEX is EXPECTED to fail and its error text is captured below.
            act_1.expected_stderr = 'DISABLED'
            act_1.isql(switches=['-q', '-n'], input=sql_create_indx)
            time.sleep(7)  # NB: this delay plus previous (3+5=8) must be GREATER than lock timeout in <sql_bulk_insert>
            p_bulk_insert.terminate()
        # 'with' block closed: the bulk-insert log is flushed and safe to read.
        bulk_insert_log = tmp_file_bi_out.read_text()
        create_indx_log = act_1.stdout + act_1.stderr
        for line in bulk_insert_log.splitlines():
            if line.split():
                print(str(i) + ': BULK INSERTS LOG: ' + line.strip().upper())
        for line in create_indx_log.splitlines():
            if line.split():
                print(str(i) + ': CREATE INDEX LOG: ' + line.strip().upper())
    # Checks
    act_1.reset()
    act_1.stdout = capsys.readouterr().out
    act_1.expected_stdout = expected_stdout_1
    assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@ -2,19 +2,19 @@
#
# id: bugs.core_1760
# title: Support hex numeric and string literals
# decription:
# decription:
# See doc\\sql.extensions\\README.hex_literals.txt
#
#
# REFACTORED 27.02.2020:
# 1) all SQL code was moved into separate file: $files_location/core_1760.sql because it is common for all major FB versions;
# 2) added examples from https://firebirdsql.org/refdocs/langrefupd25-bigint.html (see core_1760.sql);
# 3) added check for output datatypes (sqlda_display).
#
#
# Checked on:
# 4.0.0.1789 SS: 1.458s.
# 3.0.6.33259 SS: 0.805s.
# 2.5.9.27149 SC: 0.397s.
#
#
# tracker_id: CORE-1760
# min_versions: ['2.5.0']
# versions: 3.0
@ -32,164 +32,224 @@ init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1
test_script_1 = """
set list on;
-- binary literal ::= { x | X } <quote> [ { <hexit> <hexit> }... ] <quote>
select x'1' from rdb$database; -- raises: token unknown because length is odd
select x'11' from rdb$database; -- must raise: token unknown because length is odd
select x'0123456789' from rdb$database;
select x'01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789' from rdb$database;
-- must raise: token unknown because last char is not hexit
select x'0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678x' from rdb$database;
select uuid_to_char(x'BA1749B583BF9146B360F54E25FE583E') from rdb$database;
-- ##############################################################################
-- Numeric literal: { 0x | 0X } <hexit> [ <hexit>... ]
-- https://firebirdsql.org/refdocs/langrefupd25-bigint.html
recreate view v_test as
select
+-0x1 "-1(a)"
,-+-0xf "+15"
,0x7FFF "32767"
,0x8000 "32768"
,0xFFFF "65535"
,0x10000 "65536(a)"
,0x000000000010000 "65536(b)"
,0x80000000 "-2147483648"
,0x080000000 "+2147483648(a)"
,0x000000080000000 "+2147483648(b)"
,0XFFFFFFFF "-1(b)"
,0X0FFFFFFFF "+4294967295"
,0x100000000 "+4294967296(a)"
,0x0000000100000000 "+4294967296(b)"
,0X7FFFFFFFFFFFFFFF "9223372036854775807"
,0x8000000000000000 "-9223372036854775808"
,0x8000000000000001 "-9223372036854775807"
,0x8000000000000002 "-9223372036854775806"
,0xffffffffffffffff "-1(c)"
from rdb$database;
select * from v_test;
-- If the number of <hexit> is greater than 8, the constant data type is a signed BIGINT
-- If it's less or equal than 8, the data type is a signed INTEGER
set sqlda_display on;
select * from v_test rows 0;
set sqlda_display off;
"""
#---
#
#
# import os
# import sys
# import subprocess
# from fdb import services
#
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
#
#
# #--------------------------------------------
#
#
# def flush_and_close(file_handle):
# # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os
#
#
# file_handle.flush()
# if file_handle.mode not in ('r', 'rb'):
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno())
# file_handle.close()
#
#
# #--------------------------------------------
#
#
# def cleanup( f_names_list ):
# global os
# for i in range(len( f_names_list )):
# if os.path.isfile( f_names_list[i]):
# os.remove( f_names_list[i] )
# #--------------------------------------------
#
#
# db_conn.close()
#
#
# sql_chk = os.path.join(context['files_location'],'core_1760.sql')
#
#
# f_sql_log = open( os.path.join(context['temp_directory'],'tmp_core_1760.log'), 'w', buffering = 0)
# f_sql_err = open( os.path.join(context['temp_directory'],'tmp_core_1760.err'), 'w', buffering = 0)
#
#
# subprocess.call( [ context['isql_path'], dsn, '-q', '-i', sql_chk ], stdout = f_sql_log, stderr = f_sql_err)
#
#
# flush_and_close( f_sql_log )
# flush_and_close( f_sql_err )
#
#
# for f in (f_sql_log, f_sql_err):
# with open( f.name,'r') as g:
# for line in g:
# if line.strip():
# print( ('STDOUT: ' if f == f_sql_log else 'STDERR: ') + line )
#
#
# cleanup( (f_sql_log.name, f_sql_err.name) )
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
STDOUT: CONSTANT 11
STDOUT: CONSTANT 0123456789
STDOUT: CONSTANT 01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
STDOUT: UUID_TO_CHAR BA1749B5-83BF-9146-B360-F54E25FE583E
STDOUT: -1(a) -1
STDOUT: +15 15
STDOUT: 32767 32767
STDOUT: 32768 32768
STDOUT: 65535 65535
STDOUT: 65536(a) 65536
STDOUT: 65536(b) 65536
STDOUT: -2147483648 -2147483648
STDOUT: +2147483648(a) 2147483648
STDOUT: +2147483648(b) 2147483648
STDOUT: -1(b) -1
STDOUT: +4294967295 4294967295
STDOUT: +4294967296(a) 4294967296
STDOUT: +4294967296(b) 4294967296
STDOUT: 9223372036854775807 9223372036854775807
STDOUT: -9223372036854775808 -9223372036854775808
STDOUT: -9223372036854775807 -9223372036854775807
STDOUT: -9223372036854775806 -9223372036854775806
STDOUT: -1(c) -1
STDOUT: INPUT message field count: 0
STDOUT: OUTPUT message field count: 19
STDOUT: 01: sqltype: 496 LONG Nullable scale: 0 subtype: 0 len: 4
STDOUT: : name: -1(a) alias: -1(a)
STDOUT: : table: V_TEST owner: SYSDBA
STDOUT: 02: sqltype: 496 LONG Nullable scale: 0 subtype: 0 len: 4
STDOUT: : name: +15 alias: +15
STDOUT: : table: V_TEST owner: SYSDBA
STDOUT: 03: sqltype: 496 LONG Nullable scale: 0 subtype: 0 len: 4
STDOUT: : name: 32767 alias: 32767
STDOUT: : table: V_TEST owner: SYSDBA
STDOUT: 04: sqltype: 496 LONG Nullable scale: 0 subtype: 0 len: 4
STDOUT: : name: 32768 alias: 32768
STDOUT: : table: V_TEST owner: SYSDBA
STDOUT: 05: sqltype: 496 LONG Nullable scale: 0 subtype: 0 len: 4
STDOUT: : name: 65535 alias: 65535
STDOUT: : table: V_TEST owner: SYSDBA
STDOUT: 06: sqltype: 496 LONG Nullable scale: 0 subtype: 0 len: 4
STDOUT: : name: 65536(a) alias: 65536(a)
STDOUT: : table: V_TEST owner: SYSDBA
STDOUT: 07: sqltype: 580 INT64 Nullable scale: 0 subtype: 0 len: 8
STDOUT: : name: 65536(b) alias: 65536(b)
STDOUT: : table: V_TEST owner: SYSDBA
STDOUT: 08: sqltype: 496 LONG Nullable scale: 0 subtype: 0 len: 4
STDOUT: : name: -2147483648 alias: -2147483648
STDOUT: : table: V_TEST owner: SYSDBA
STDOUT: 09: sqltype: 580 INT64 Nullable scale: 0 subtype: 0 len: 8
STDOUT: : name: +2147483648(a) alias: +2147483648(a)
STDOUT: : table: V_TEST owner: SYSDBA
STDOUT: 10: sqltype: 580 INT64 Nullable scale: 0 subtype: 0 len: 8
STDOUT: : name: +2147483648(b) alias: +2147483648(b)
STDOUT: : table: V_TEST owner: SYSDBA
STDOUT: 11: sqltype: 496 LONG Nullable scale: 0 subtype: 0 len: 4
STDOUT: : name: -1(b) alias: -1(b)
STDOUT: : table: V_TEST owner: SYSDBA
STDOUT: 12: sqltype: 580 INT64 Nullable scale: 0 subtype: 0 len: 8
STDOUT: : name: +4294967295 alias: +4294967295
STDOUT: : table: V_TEST owner: SYSDBA
STDOUT: 13: sqltype: 580 INT64 Nullable scale: 0 subtype: 0 len: 8
STDOUT: : name: +4294967296(a) alias: +4294967296(a)
STDOUT: : table: V_TEST owner: SYSDBA
STDOUT: 14: sqltype: 580 INT64 Nullable scale: 0 subtype: 0 len: 8
STDOUT: : name: +4294967296(b) alias: +4294967296(b)
STDOUT: : table: V_TEST owner: SYSDBA
STDOUT: 15: sqltype: 580 INT64 Nullable scale: 0 subtype: 0 len: 8
STDOUT: : name: 9223372036854775807 alias: 9223372036854775807
STDOUT: : table: V_TEST owner: SYSDBA
STDOUT: 16: sqltype: 580 INT64 Nullable scale: 0 subtype: 0 len: 8
STDOUT: : name: -9223372036854775808 alias: -9223372036854775808
STDOUT: : table: V_TEST owner: SYSDBA
STDOUT: 17: sqltype: 580 INT64 Nullable scale: 0 subtype: 0 len: 8
STDOUT: : name: -9223372036854775807 alias: -9223372036854775807
STDOUT: : table: V_TEST owner: SYSDBA
STDOUT: 18: sqltype: 580 INT64 Nullable scale: 0 subtype: 0 len: 8
STDOUT: : name: -9223372036854775806 alias: -9223372036854775806
STDOUT: : table: V_TEST owner: SYSDBA
STDOUT: 19: sqltype: 580 INT64 Nullable scale: 0 subtype: 0 len: 8
STDOUT: : name: -1(c) alias: -1(c)
STDOUT: : table: V_TEST owner: SYSDBA
CONSTANT 11
CONSTANT 0123456789
CONSTANT 01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
UUID_TO_CHAR BA1749B5-83BF-9146-B360-F54E25FE583E
-1(a) -1
+15 15
32767 32767
32768 32768
65535 65535
65536(a) 65536
65536(b) 65536
-2147483648 -2147483648
+2147483648(a) 2147483648
+2147483648(b) 2147483648
-1(b) -1
+4294967295 4294967295
+4294967296(a) 4294967296
+4294967296(b) 4294967296
9223372036854775807 9223372036854775807
-9223372036854775808 -9223372036854775808
-9223372036854775807 -9223372036854775807
-9223372036854775806 -9223372036854775806
-1(c) -1
INPUT message field count: 0
OUTPUT message field count: 19
01: sqltype: 496 LONG Nullable scale: 0 subtype: 0 len: 4
: name: -1(a) alias: -1(a)
: table: V_TEST owner: SYSDBA
02: sqltype: 496 LONG Nullable scale: 0 subtype: 0 len: 4
: name: +15 alias: +15
: table: V_TEST owner: SYSDBA
03: sqltype: 496 LONG Nullable scale: 0 subtype: 0 len: 4
: name: 32767 alias: 32767
: table: V_TEST owner: SYSDBA
04: sqltype: 496 LONG Nullable scale: 0 subtype: 0 len: 4
: name: 32768 alias: 32768
: table: V_TEST owner: SYSDBA
05: sqltype: 496 LONG Nullable scale: 0 subtype: 0 len: 4
: name: 65535 alias: 65535
: table: V_TEST owner: SYSDBA
06: sqltype: 496 LONG Nullable scale: 0 subtype: 0 len: 4
: name: 65536(a) alias: 65536(a)
: table: V_TEST owner: SYSDBA
07: sqltype: 580 INT64 Nullable scale: 0 subtype: 0 len: 8
: name: 65536(b) alias: 65536(b)
: table: V_TEST owner: SYSDBA
08: sqltype: 496 LONG Nullable scale: 0 subtype: 0 len: 4
: name: -2147483648 alias: -2147483648
: table: V_TEST owner: SYSDBA
09: sqltype: 580 INT64 Nullable scale: 0 subtype: 0 len: 8
: name: +2147483648(a) alias: +2147483648(a)
: table: V_TEST owner: SYSDBA
10: sqltype: 580 INT64 Nullable scale: 0 subtype: 0 len: 8
: name: +2147483648(b) alias: +2147483648(b)
: table: V_TEST owner: SYSDBA
11: sqltype: 496 LONG Nullable scale: 0 subtype: 0 len: 4
: name: -1(b) alias: -1(b)
: table: V_TEST owner: SYSDBA
12: sqltype: 580 INT64 Nullable scale: 0 subtype: 0 len: 8
: name: +4294967295 alias: +4294967295
: table: V_TEST owner: SYSDBA
13: sqltype: 580 INT64 Nullable scale: 0 subtype: 0 len: 8
: name: +4294967296(a) alias: +4294967296(a)
: table: V_TEST owner: SYSDBA
14: sqltype: 580 INT64 Nullable scale: 0 subtype: 0 len: 8
: name: +4294967296(b) alias: +4294967296(b)
: table: V_TEST owner: SYSDBA
15: sqltype: 580 INT64 Nullable scale: 0 subtype: 0 len: 8
: name: 9223372036854775807 alias: 9223372036854775807
: table: V_TEST owner: SYSDBA
16: sqltype: 580 INT64 Nullable scale: 0 subtype: 0 len: 8
: name: -9223372036854775808 alias: -9223372036854775808
: table: V_TEST owner: SYSDBA
17: sqltype: 580 INT64 Nullable scale: 0 subtype: 0 len: 8
: name: -9223372036854775807 alias: -9223372036854775807
: table: V_TEST owner: SYSDBA
18: sqltype: 580 INT64 Nullable scale: 0 subtype: 0 len: 8
: name: -9223372036854775806 alias: -9223372036854775806
: table: V_TEST owner: SYSDBA
19: sqltype: 580 INT64 Nullable scale: 0 subtype: 0 len: 8
: name: -1(c) alias: -1(c)
: table: V_TEST owner: SYSDBA
"""
STDERR: Statement failed, SQLSTATE = 42000
STDERR: Dynamic SQL Error
STDERR: -SQL error code = -104
STDERR: -Token unknown - line 1, column 9
STDERR: -'1'
expected_stderr_1 = """
Statement failed, SQLSTATE = 42000
Dynamic SQL Error
-SQL error code = -104
-Token unknown - line 1, column 9
-'1'
STDERR: Statement failed, SQLSTATE = 42000
STDERR: Dynamic SQL Error
STDERR: -SQL error code = -104
STDERR: -Token unknown - line 1, column 9
STDERR: -'0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678x'
"""
Statement failed, SQLSTATE = 42000
Dynamic SQL Error
-SQL error code = -104
-Token unknown - line 1, column 9
-'0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678x'
"""
@pytest.mark.version('>=3.0')
@pytest.mark.xfail
def test_1(db_1):
pytest.fail("Test not IMPLEMENTED")
def test_1(act_1: Action):
    """Run the hex numeric/string literal script and compare both output streams.

    The script (``test_script_1``) checks binary literals (x'..'), hex numeric
    literals (0x..) and their resulting SQL datatypes (sqlda_display); some
    statements are EXPECTED to fail, hence stderr is verified as well.
    """
    act_1.expected_stdout = expected_stdout_1
    act_1.expected_stderr = expected_stderr_1
    act_1.execute()
    assert act_1.clean_stdout == act_1.clean_expected_stdout
    assert act_1.clean_stderr == act_1.clean_expected_stderr

View File

@ -2,10 +2,10 @@
#
# id: bugs.core_1845
# title: Some standard calls show server installation directory to regular users
# decription:
# decription:
# Instead of usage 'resource:test_user' (as it was before) we create every time this test run user TMP$C1845
# and make test connect to database with login = this user in order to check ability to make attach.
# Then we do subsequent run of FBSVCMGR utility with passing ONE of following options from 'Information requests'
# Then we do subsequent run of FBSVCMGR utility with passing ONE of following options from 'Information requests'
# group:
# info_server_version
# info_implementation
@ -17,19 +17,20 @@
# info_version
# NOTE: option 'info_capabilities' was introduces only in 3.0. Its output differs on Classic vs SS and SC.
# Currently this option is NOT passed to fbsvcmgr.
#
#
# tracker_id: CORE-1845
# min_versions: ['2.5.0']
# versions: 2.5
# qmid: bugs.core_1845
import pytest
from firebird.qa import db_factory, isql_act, Action
from firebird.qa import db_factory, python_act, Action, user_factory, User
from firebird.driver import DatabaseError
# version: 2.5
# resources: None
substitutions_1 = [('SERVER VERSION:.*', 'SERVER VERSION:'), ('SERVER IMPLEMENTATION:.*', 'SERVER IMPLEMENTATION:'), ('SERVICE MANAGER VERSION:.*', 'SERVICE MANAGER VERSION:'), ('Statement failed, SQLSTATE = HY000', ''), ('record not found for user.*', '')]
substitutions_1 = []
init_script_1 = """"""
@ -37,47 +38,47 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1
#---
#
#
#
#
# # Refactored 05-JAN-2016: removed dependency on recource 'test_user' because this lead to:
# # UNTESTED: bugs.core_1845
# # Add new user
# # Unexpected stderr stream received from GSEC.
# # (i.e. test remained in state "Untested" because of internal error in gsec while creating user 'test' from resource).
# # Checked on WI-V2.5.5.26952 (SC), WI-V3.0.0.32266 (SS/SC/CS).
#
#
# import os
# import subprocess
# import time
#
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
#
#
# #--------------------------------------------
#
#
# def flush_and_close(file_handle):
# # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os
#
#
# file_handle.flush()
# if file_handle.mode not in ('r', 'rb'):
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno())
# file_handle.close()
#
#
# #--------------------------------------------
#
#
# def cleanup( f_names_list ):
# global os
# for i in range(len( f_names_list )):
# if os.path.isfile( f_names_list[i]):
# os.remove( f_names_list[i] )
#
#
# #--------------------------------------------
#
#
# sql_create_user=''' drop user tmp$c1845;
# commit;
# create user tmp$c1845 password 'QweRtyUi';
@ -87,16 +88,16 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# select current_user who_am_i from rdb$database;
# quit;
# ''' % dsn
#
#
# sqllog=open( os.path.join(context['temp_directory'],'tmp_user_1845.log'), 'w')
# sqllog.close()
# runProgram('isql',[dsn,'-user',user_name,'-pas',user_password,'-q','-m', '-o', sqllog.name], sql_create_user)
#
#
#
#
# fn_log=open( os.path.join(context['temp_directory'],'tmp_fbsvc_1845.log'), 'w')
#
#
# svc_list=["info_server_version","info_implementation","info_user_dbpath","info_get_env","info_get_env_lock","info_get_env_msg","info_svr_db_info","info_version"]
#
#
# for i in range(len(svc_list)):
# fn_log.write("Check service '"+svc_list[i]+"':")
# fn_log.write("\\n")
@ -106,58 +107,49 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# ,stderr=fn_log
# )
# fn_log.write("\\n")
#
#
# flush_and_close( fn_log )
#
#
# # CLEANUP: drop user that was temp-ly created for this test:
# ##########
# runProgram('isql', [dsn, '-q','-m', '-o', sqllog.name], 'drop user tmp$c1845; commit;')
#
#
# # Check content of files: 1st shuld contain name of temply created user, 2nd should be with error during get FB log:
#
#
# with open( sqllog.name,'r') as f:
# print(f.read())
#
#
# # Print output of fbsvcmgr but: 1) remove exceessive whitespaces from lines; 2) transform text to uppercase
# # (in order to reduce possibility of mismatches in case of minor changes that can occur in future versions of fbsvcmgr)
#
#
# with open( fn_log.name,'r') as f:
# for line in f:
# print( ' '.join(line.split()).upper() )
#
# # Do not remove this pause: on Windows closing of handles can take some (small) time.
#
# # Do not remove this pause: on Windows closing of handles can take some (small) time.
# # Otherwise Windows(32) access error can raise here.
# time.sleep(1)
#
#
# cleanup( (sqllog.name, fn_log.name) )
#
#
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
WHO_AM_I TMP$C1845
CHECK SERVICE 'INFO_SERVER_VERSION':
SERVER VERSION: WI-V3.0.0.32266 FIREBIRD 3.0 RELEASE CANDIDATE 2
CHECK SERVICE 'INFO_IMPLEMENTATION':
SERVER IMPLEMENTATION: FIREBIRD/WINDOWS/INTEL/I386
CHECK SERVICE 'INFO_USER_DBPATH':
SERVICE ISC_INFO_SVC_USER_DBPATH REQUIRES SYSDBA PERMISSIONS. REATTACH TO THE SERVICE MANAGER USING THE SYSDBA ACCOUNT.
CHECK SERVICE 'INFO_GET_ENV':
SERVICE ISC_INFO_SVC_GET_ENV REQUIRES SYSDBA PERMISSIONS. REATTACH TO THE SERVICE MANAGER USING THE SYSDBA ACCOUNT.
CHECK SERVICE 'INFO_GET_ENV_LOCK':
SERVICE ISC_INFO_SVC_GET_ENV REQUIRES SYSDBA PERMISSIONS. REATTACH TO THE SERVICE MANAGER USING THE SYSDBA ACCOUNT.
CHECK SERVICE 'INFO_GET_ENV_MSG':
SERVICE ISC_INFO_SVC_GET_ENV REQUIRES SYSDBA PERMISSIONS. REATTACH TO THE SERVICE MANAGER USING THE SYSDBA ACCOUNT.
CHECK SERVICE 'INFO_SVR_DB_INFO':
SERVICE ISC_INFO_SVC_SVR_DB_INFO REQUIRES SYSDBA PERMISSIONS. REATTACH TO THE SERVICE MANAGER USING THE SYSDBA ACCOUNT.
CHECK SERVICE 'INFO_VERSION':
SERVICE MANAGER VERSION: 2
"""
act_1 = python_act('db_1', substitutions=substitutions_1)
user_1 = user_factory(name='TMP$C1845', password='QweRtyUi')
@pytest.mark.version('>=2.5')
@pytest.mark.xfail
def test_1(db_1):
pytest.fail("Test not IMPLEMENTED")
def test_1(act_1: Action, user_1: User):
    """Verify that SYSDBA-only service information items are refused to a
    non-privileged user.

    Connects to the service manager as the temporary user and checks that
    each restricted info property raises DatabaseError with the
    'requires SYSDBA permissions' message (mirrors the original fbsvcmgr
    checks: user_dbpath, get_env, get_env_lock, get_env_msg, svr_db_info).
    """
    # Service info properties that must require SYSDBA permissions.
    # Looping removes five copy-pasted pytest.raises stanzas.
    restricted_items = (
        'security_database',    # isc_info_svc_user_dbpath
        'home_directory',       # isc_info_svc_get_env
        'lock_directory',       # isc_info_svc_get_env_lock
        'message_directory',    # isc_info_svc_get_env_msg
        'attached_databases',   # isc_info_svc_svr_db_info
    )
    with act_1.connect_server(user=user_1.name, password=user_1.password) as srv:
        for item in restricted_items:
            with pytest.raises(DatabaseError, match='.*requires SYSDBA permissions.*'):
                # Attribute access triggers the service query; print mirrors
                # the original script's output attempt.
                print(getattr(srv.info, item))

View File

@ -2,8 +2,8 @@
#
# id: bugs.core_1865
# title: BLR error on restore database with computed by Field
# decription:
# Confirmed bug on WI-V2.0.0.12724: it was unable to restore DB with "-o" command switch ("-one_at_a_time"):
# decription:
# Confirmed bug on WI-V2.0.0.12724: it was unable to restore DB with "-o" command switch ("-one_at_a_time"):
# got errors that are specified in the ticket.
# No errors on WI-V2.1.0.17798, and also on:
# 2.5.9.27107: OK, 1.953s.
@ -12,56 +12,64 @@
# NB-1: old versions of FB did restore with redirection all messages to STDERR, w/o STDOUT. For this reason we store
# all output to file and then check whether this file contains at least one line with phrase "ERROR:".
# NB-2: could _NOT_ reproduce without use "-o" command switch!
#
#
# tracker_id: CORE-1865
# min_versions: ['2.0.0']
# versions: 2.0
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
from io import BytesIO
from firebird.qa import db_factory, python_act, Action
from firebird.driver import SrvRestoreFlag
# version: 2.0
# resources: None
substitutions_1 = []
init_script_1 = """"""
init_script_1 = """
create table tmain(id int);
create table tdetl( id int, pid int, cost numeric(12,2) );
alter table tmain
add dsum2 computed by ( (select sum(cost) from tdetl d where d.pid = tmain.id) ) ;
commit;
"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1
#---
#
#
# import os
# import time
# import subprocess
#
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
#
#
# db_conn.close()
#
#
# sql_ddl='''
# create table tmain(id int);
# create table tdetl( id int, pid int, cost numeric(12,2) );
# alter table tmain
# alter table tmain
# add dsum2 computed by ( (select sum(cost) from tdetl d where d.pid = tmain.id) )
# ;
# commit;
# '''
# runProgram('isql', [ dsn, '-q' ], sql_ddl)
#
#
# tmpfbk='$(DATABASE_LOCATION)'+'core_1865.fbk'
# tmpfdb='$(DATABASE_LOCATION)'+'tmp_check_1865.fdb'
#
#
# runProgram( 'gbak', [ '-b', dsn, tmpfbk ] )
#
#
# f_restore_log=open( os.path.join(context['temp_directory'],'tmp_check_1865.log'), 'w')
# subprocess.call( [ context['gbak_path'], '-rep', '-o', '-v', tmpfbk, 'localhost:' + tmpfdb], stdout = f_restore_log, stderr=subprocess.STDOUT)
# f_restore_log.close()
# time.sleep(1)
#
#
# # should be empty:
# ##################
# with open( f_restore_log.name,'r') as f:
@ -69,19 +77,23 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# if line.split():
# if 'ERROR:'.lower() in line.lower():
# print('UNEXPECTED ERROR ON RESTORE: '+line)
#
#
# os.remove(f_restore_log.name)
# os.remove(tmpfdb)
# os.remove(tmpfbk)
#
#
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
@pytest.mark.version('>=2.0')
@pytest.mark.xfail
def test_1(db_1):
pytest.fail("Test not IMPLEMENTED")
def test_1(act_1: Action):
    """Back up the test database into memory, then restore it over itself
    with -one_at_a_time semantics (CORE-1865).

    The restore call itself is the assertion: any exception fails the test.
    """
    db_file = str(act_1.db.db_path)
    backup_stream = BytesIO()
    with act_1.connect_server() as srv:
        srv.database.local_backup(database=db_file, backup_stream=backup_stream)
        # Rewind so the restore reads the stream from the beginning.
        backup_stream.seek(0)
        restore_flags = SrvRestoreFlag.ONE_AT_A_TIME | SrvRestoreFlag.REPLACE
        srv.database.local_restore(backup_stream=backup_stream, database=db_file,
                                   flags=restore_flags)

View File

@ -9,7 +9,7 @@
# qmid: bugs.core_1926
import pytest
from firebird.qa import db_factory, isql_act, Action
from firebird.qa import db_factory, python_act, Action
# version: 2.1.2
# resources: None
@ -37,14 +37,21 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# print (j-i)
# con_detail.commit()
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """1
"""
act_1 = python_act('db_1', substitutions=substitutions_1)
@pytest.mark.version('>=2.1.2')
@pytest.mark.xfail
def test_1(db_1):
pytest.fail("Test not IMPLEMENTED")
def test_1(act_1: Action):
    """Check that one commit consumes exactly one transaction id (CORE-1926).

    A second attachment samples MON$NEXT_TRANSACTION twice, with a commit in
    between each read; the two values must differ by exactly 1.
    """
    with act_1.db.connect() as main_con:
        main_cur = main_con.cursor()
        main_cur.execute('SELECT 1 FROM RDB$DATABASE')
        with act_1.db.connect() as monitor_con:
            monitor_con.begin()
            monitor_cur = monitor_con.cursor()

            def next_transaction():
                # Read the monitoring counter, then commit so the next read
                # sees a fresh MON$ snapshot.
                monitor_cur.execute("select MON$NEXT_TRANSACTION from MON$DATABASE")
                value = monitor_cur.fetchone()[0]
                monitor_con.commit()
                return value

            tx_before = next_transaction()
            tx_after = next_transaction()
            assert tx_after - tx_before == 1

View File

@ -2,38 +2,39 @@
#
# id: bugs.core_1972
# title: Non-SYSDBA user can change FW mode of database.
# decription:
# decription:
# We create common user using Services API and try to establish TWO subsequent connections under his login:
# 1) with flag 'forced_write' = 1 and then
# 2) with flag 'no_reserve' = 1.
# Note: both values of these flags have to be equal 1 because of some specifics of DPB building inside fdb driver:
# 1) with flag 'forced_write' = 1 and then
# 2) with flag 'no_reserve' = 1.
# Note: both values of these flags have to be equal 1 because of some specifics of DPB building inside fdb driver:
# value 0 means 'nothing to add', so no error will be raised in this case (and we DO expect error in this ticket).
# In WI-V2.1.1.17910 one may to specify *ANY* values of these flags and NO error will be raised.
# Fortunately, actual DB state also was not changed.
#
#
# Starting from WI-V2.1.2.18118 attempt to specify non-zero flag leads to runtime exception with SQLCODE=-901
# ("unable to perform: you must be either SYSDBA or owner...")
# See also: https://firebirdsql.org/rlsnotesh/rnfb210-apiods.html
#
#
# Additional filtering of output is required because of different error message in 4.0: it checks whether current user
# has grant of role with system privilege 'CHANGE_HEADER_SETTINGS'.
# has grant of role with system privilege 'CHANGE_HEADER_SETTINGS'.
# If no then message will be "System privilege CHANGE_HEADER_SETTINGS is missing" (differ from older FB versions).
# If yes then DB header is allowed to be change and NO ERROR at all will be raised on attempt to establish such connections.
# For that reason it was decided to completely suppress output of error detalization ("you must be either SYSDBA" or
# For that reason it was decided to completely suppress output of error detalization ("you must be either SYSDBA" or
# "System privilege CHANGE_HEADER_SETTINGS is missing") and to display only line with SQLCODE.
#
#
# Checked on 2.1.2.18118, and also on:
# 2.5.9.27107: OK, 0.328s.
# 3.0.4.32924: OK, 2.078s.
# 4.0.0.916: OK, 1.079s.
#
#
# tracker_id: CORE-1972
# min_versions: ['2.1.1']
# versions: 2.1.1
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
from firebird.qa import db_factory, python_act, Action, user_factory, User
from firebird.driver import driver_config, connect
# version: 2.1.1
# resources: None
@ -46,21 +47,21 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1
#---
#
#
# import os
# import fdb
# from fdb import services
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
#
#
# # Obtain engine version:
# engine = str(db_conn.engine_version) # convert to text because 'float' object has no attribute 'startswith'
# db_conn.close()
#
#
# FB_PORT= '' # '/3212'
# A_USER = 'TMP$C1972'
# A_PSWD = '123'
#
#
# con=None
# try:
# con = services.connect(host='localhost'+FB_PORT, user='SYSDBA', password='masterkey')
@ -72,7 +73,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# if con:
# con.close()
# #-------------------------------------------------------------------------------------------
#
#
# # 1. Try to specifying 'force_write' flag: no errors and NO changes in 2.1.1; error in 2.1.2 and above:
# try:
# print( 'Trying_to_establish connection with specifying force_write' )
@ -87,7 +88,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# finally:
# if con:
# con.close()
#
#
# # 2. Try to specifying 'no_reserve' flag: no errors and NO changes in 2.1.1; error in 2.1.2 and above:
# try:
# print( 'Trying_to_establish connection with specifying no_reserve' )
@ -102,7 +103,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# finally:
# if con:
# con.close()
#
#
# #-------------------------------------------------------------------------------------------
# try:
# con = services.connect(host='localhost' + FB_PORT, user='SYSDBA', password='masterkey')
@ -111,29 +112,44 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# finally:
# if con:
# con.close()
#
#
# print('Successfully finished script')
#
#
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
expected_stdout_1 = """
Successfully added non-privileged user
Trying_to_establish connection with specifying force_write
- SQLCODE: -901
Trying_to_establish connection with specifying no_reserve
- SQLCODE: -901
Successfully removed non-privileged user
Successfully finished script
"""
user_1 = user_factory(name='TMP$C1972', password='123')
@pytest.mark.version('>=2.1.1')
@pytest.mark.xfail
def test_1(db_1):
def test_1(act_1: Action, user_1: User):
    """Placeholder: CORE-1972 cannot be ported to the new firebird-driver.

    The driver passes `forced_writes`/`reserve_space` to the DPB only for
    create_database(), not for connect(), so the ticket scenario (changing
    FW mode on connect as non-SYSDBA) is not reproducible here.
    """
    pytest.fail("Test not IMPLEMENTED")
    # NOTE(review): everything below pytest.fail() is unreachable dead code,
    # kept as a sketch of the attempted implementation.
    # This test is not possible to implement with new Python driver as it does not
    # allow to specify `forced_writes` or `reserve_space` options on connect() as tested
    # configuration options are passed to DPB only for create_database()!
    # This is intentional change in firebird-driver from fdb, as using these DPB options
    # on connect has side-effects (changes database option) which was considered dangerous.
    #
    # 1. Try to specifying 'force_write' flag: no errors and NO changes in 2.1.1; error in 2.1.2 and above
    act_1.db._make_config(user=user_1.name, password=user_1.password)
    db_conf = driver_config.get_database('pytest')
    db_conf.forced_writes.value = True
    # NOTE(review): pytest.raises() is called without an expected exception
    # type; if this sketch is ever revived it must name one, e.g.
    # pytest.raises(DatabaseError), otherwise the call itself raises TypeError.
    with pytest.raises():
        connect('pytest')
    # 2. Try to specifying 'no_reserve' flag: no errors and NO changes in 2.1.1; error in 2.1.2 and above

View File

@ -2,7 +2,7 @@
#
# id: bugs.core_1999
# title: TimeStamp in the every line output gbak.exe utility
# description:
# description:
# Database for this test was created beforehand and filled-up with all possible kind of objects:
# domain, table, view, standalone procedure & function, package, trigger, sequence, exception and role.
# Then backup was created for this DB and it was packed into .zip archive - see files/core_1999_nn.zip.
@ -14,7 +14,7 @@
# NB.
# Utility fbsvcmgr in 2.5.5 was not able to produce output with statistics (i.e. "res_stat tdrw") until commit #62537
# (see: http://sourceforge.net/p/firebird/code/62537 ).
#
#
# 28.10.2019. Checked on:
# 4.0.0.1635 SS: 3.495s.
# 4.0.0.1633 CS: 3.982s.
@ -22,20 +22,21 @@
# 3.0.5.33178 CS: 4.538s.
# 2.5.9.27119 SS: 1.972s.
# 2.5.9.27146 SC: 1.540s.
#
#
# 13.04.2021: removed code for 2.5.x, changed platform to 'All', replaced path to FB utilities with 'context[...]'.
# Checked on:
# Windows: 3.0.8.33445, 4.0.0.2416
# Linux: 3.0.8.33426, 4.0.0.2416
#
#
#
#
# tracker_id: CORE-1999
# min_versions: ['3.0.5']
# versions: 3.0.5
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
from firebird.qa import db_factory, python_act, Action, temp_file
from firebird.driver import SrvBackupFlag, SrvRestoreFlag
# version: 3.0.5
# resources: None
@ -52,28 +53,28 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# import zipfile
# import time
# import subprocess
#
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
# db_conn.close()
#
#
# #--------------------------------------------
#
#
# def flush_and_close( file_handle ):
# # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os
#
#
# file_handle.flush()
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno())
# file_handle.close()
#
#
# #--------------------------------------------
#
#
# def cleanup( f_names_list ):
# global os
# for i in range(len( f_names_list )):
@ -85,21 +86,21 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
# print('type(f_names_list[i])=',type(f_names_list[i]))
# del_name = None
#
#
# if del_name and os.path.isfile( del_name ):
# os.remove( del_name )
#
#
# #--------------------------------------------
#
#
# zf = zipfile.ZipFile( os.path.join(context['files_location'],'core_1999_30.zip') )
# zf.extractall( context['temp_directory'] )
# zf.close()
#
#
# # Result: core_1999_30.fbk is extracted into context['temp_directory']
#
#
# tmpres='$(DATABASE_LOCATION)tmp_core_1999_30.fdb'
# tmpbkp='$(DATABASE_LOCATION)tmp_core_1999_30.fbk'
#
#
# f_restore=open( os.path.join(context['temp_directory'],'tmp_restore_1999_30.log'), 'w')
# subprocess.check_call( [ context['fbsvcmgr_path']
# ,"localhost:service_mgr"
@ -113,9 +114,9 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# stdout=f_restore, stderr=subprocess.STDOUT
# )
# flush_and_close( f_restore )
#
#
# # Result: database file 'tmp_core_1999_30.fdb' should be created after this restoring, log in 'tmp_restore_1999_30.log'
#
#
# f_backup=open( os.path.join(context['temp_directory'],'tmp_backup_1999_30.log'), 'w')
# subprocess.check_call( [ context['fbsvcmgr_path']
# ,"localhost:service_mgr"
@ -128,13 +129,13 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# stdout=f_backup, stderr=subprocess.STDOUT
# )
# flush_and_close( f_backup )
#
#
# # Result: backup file 'tmp_core_1999_30.fbk' should be replaced after this backup, log in 'tmp_backup_1999_30.log'
#
#
#
#
# # Sample of backup log with statistics:
# # -------------------------------------
# # gbak: time delta reads writes
# # gbak: time delta reads writes
# # gbak: 0.019 0.019 43 0 readied database . . .fdb for backup
# # gbak: 0.019 0.000 0 0 creating file . . ..fbk
# # gbak: 0.020 0.000 0 0 starting transaction
@ -144,33 +145,33 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# # . . .
# # gbak: 0.847 0.109 2 0 closing file, committing, and finishing. 1105920 bytes written
# # gbak: 0.847 0.000 802 2 total statistics
#
#
# rows_without_stat=0
# with open(f_backup.name, 'r') as f:
# for line in f:
# tokens=line.split()
# if not ( tokens[1].replace('.','',1).replace(',','',1).isdigit() and tokens[2].replace('.','',1).replace(',','',1).isdigit() and tokens[3].replace('.','',1).replace(',','',1).isdigit() and tokens[4].replace('.','',1).replace(',','',1).isdigit() ):
# rows_without_stat = rows_without_stat + 1
#
#
# print("bkp: rows_without_stat="+str(rows_without_stat))
#
#
# # Sample of restore log with statistics:
# # -------------------------------------
# # gbak: time delta reads writes
# # gbak: 0.000 0.000 0 0 opened file ....fbk
# # gbak: 0.004 0.003 0 0 transportable backup -- data in XDR format
# # gbak: 0.004 0.000 0 0 backup file is compressed
# # gbak: 0.004 0.000 0 0 backup version is 10
# # gbak: 0.275 0.270 0 711 created database ....fdb, page_size 4096 bytes
# # gbak: 0.277 0.002 0 2 started transaction
# # gbak: 0.278 0.001 0 0 restoring domain RDB$11
# # gbak: time delta reads writes
# # gbak: 0.000 0.000 0 0 opened file ....fbk
# # gbak: 0.004 0.003 0 0 transportable backup -- data in XDR format
# # gbak: 0.004 0.000 0 0 backup file is compressed
# # gbak: 0.004 0.000 0 0 backup version is 10
# # gbak: 0.275 0.270 0 711 created database ....fdb, page_size 4096 bytes
# # gbak: 0.277 0.002 0 2 started transaction
# # gbak: 0.278 0.001 0 0 restoring domain RDB$11
# # . . .
# # gbak: 1.987 0.000 0 31 fixing system generators
# # gbak: 2.016 0.029 0 10 finishing, closing, and going home
# # gbak: 2.017 0.000 0 1712 total statistics
# # gbak:adjusting the ONLINE and FORCED WRITES flags
#
#
# # gbak: 1.987 0.000 0 31 fixing system generators
# # gbak: 2.016 0.029 0 10 finishing, closing, and going home
# # gbak: 2.017 0.000 0 1712 total statistics
# # gbak:adjusting the ONLINE and FORCED WRITES flags
#
#
# rows_without_stat=0
# with open(f_restore.name, 'r') as f:
# for line in f:
@ -185,39 +186,70 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# tokens[4].replace('.','',1).replace(',','',1).isdigit()
# ):
# rows_without_stat = rows_without_stat + 1
#
#
# print("res: rows_without_stat="+str(rows_without_stat))
#
#
# # Backup log should contain SINGLE row without statistics, in its header (1st line):
# # gbak: time delta reads writes
#
# # gbak: time delta reads writes
#
# # Restore log should contain TWO rows without statistics, first and last:
# # gbak: time delta reads writes
# # gbak:adjusting the ONLINE and FORCED WRITES flags
#
# # gbak: time delta reads writes
# # gbak:adjusting the ONLINE and FORCED WRITES flags
#
# #####################################################################
# # Cleanup:
#
# # do NOT remove this pause otherwise some of logs will not be enable for deletion and test will finish with
#
# # do NOT remove this pause otherwise some of logs will not be enable for deletion and test will finish with
# # Exception raised while executing Python test script. exception: WindowsError: 32
# time.sleep(1)
#
#
# # CLEANUP
# #########
# cleanup( (f_backup, f_restore, tmpbkp, tmpres ) )
#
#
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
bkp: rows_without_stat=1
res: rows_without_stat=2
"""
act_1 = python_act('db_1', substitutions=substitutions_1)
file_1 = temp_file('pytest-run.fbk')
@pytest.mark.version('>=3.0.5')
@pytest.mark.xfail
def test_1(db_1):
pytest.fail("Test not IMPLEMENTED")
def _count_rows_without_stat(log_lines):
    """Count gbak log lines that do NOT carry run-time statistics.

    With verbose backup/restore and stats='TDWR', gbak prefixes most lines
    with four numeric columns (time, delta, reads, writes). Header/footer
    lines lack them; this helper counts such lines.
    """
    def _looks_numeric(token):
        # Accept a single decimal separator, either '.' or ',' (locale dependent).
        return token.replace('.', '', 1).replace(',', '', 1).isdigit()

    rows_without_stat = 0
    for line in log_lines:
        tokens = line.split()
        # A statistics line has at least 'gbak:' plus four numeric columns;
        # shorter lines cannot carry statistics (guard avoids IndexError).
        if len(tokens) < 5 or not all(_looks_numeric(t) for t in tokens[1:5]):
            rows_without_stat += 1
    return rows_without_stat


def test_1(act_1: Action, file_1):
    """Restore the pre-built core1999_30.fbk, then back the database up again,
    both verbose with stats='TDWR', and verify the per-line statistics columns
    are present (CORE-1999).
    """
    src_backup = act_1.vars['backups'] / 'core1999_30.fbk'
    with act_1.connect_server() as srv:
        srv.database.restore(database=str(act_1.db.db_path), backup=str(src_backup),
                             flags=SrvRestoreFlag.REPLACE,
                             verbose=True, stats='TDWR')
        restore_log = srv.readlines()
        srv.database.backup(database=str(act_1.db.db_path), backup=str(file_1),
                            verbose=True, stats='TDWR')
        backup_log = srv.readlines()
    # Backup log should contain a SINGLE row without statistics, its header:
    #   gbak: time     delta  reads  writes
    assert _count_rows_without_stat(backup_log) == 1
    # Restore log should contain TWO rows without statistics, first and last:
    #   gbak: time     delta  reads  writes
    #   gbak:adjusting the ONLINE and FORCED WRITES flags
    assert _count_rows_without_stat(restore_log) == 2

View File

@ -2,7 +2,7 @@
#
# id: bugs.core_2004
# title: ALTER USER XXX INACTIVE
# decription:
# description:
# We create two users ('foo' and 'bar') and make them immediatelly INACTIVE.
# One of them has been granted with RDB$ADMIN role, so he will be able to manage of other user access.
# Then we chek then connect for one of these users (e.g., 'foo') is unable because of his inactive status.
@ -11,30 +11,31 @@
# * create and immediatelly drop new user ('rio');
# * change state of other existing user ('bar') to active.
# Finally, we check that user 'bar' really can connect now (after he was allowed to do this by 'foo').
#
#
# ::: NB :::
# FB config parameters AuthClient and UserManager must contain 'Srp' plugin in their values.
#
#
# Checked on Super and Classic:
# 3.0.4.32924: OK, 3.234s.
# 4.0.0.918: OK, 5.063s.
#
#
# tracker_id: CORE-2004
# min_versions: ['3.0.0']
# versions: 3.0
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
from firebird.qa import db_factory, python_act, Action, user_factory, User
# version: 3.0
# resources: None
substitutions_1 = [('Use CONNECT or CREATE DATABASE.*', ''), ('.*After line.*', '')]
substitutions_1 = [('Use CONNECT or CREATE DATABASE.*', ''),
('.*After line.*', '')]
init_script_1 = """
create or alter view v_check as
select s.sec$user_name, s.sec$active, s.sec$plugin
select s.sec$user_name, s.sec$active, s.sec$plugin
from rdb$database r
left join sec$users s on lower(s.sec$user_name) in (lower('tmp$c2004_foo'), lower('tmp$c2004_bar'), lower('tmp$c2004_rio'))
;
@ -44,39 +45,39 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1
#---
#
#
# import os
# import sys
# import time
# import subprocess
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
#
#
# #--------------------------------------------
#
#
# def flush_and_close(file_handle):
# # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os
#
#
# file_handle.flush()
# if file_handle.mode not in ('r', 'rb'):
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno())
# file_handle.close()
#
#
# #--------------------------------------------
#
#
# def cleanup( f_names_list ):
# global os
# for i in range(len( f_names_list )):
# if os.path.isfile( f_names_list[i]):
# os.remove( f_names_list[i] )
#
#
# #--------------------------------------------
#
#
# db_conn.close()
# db_name=dsn
# sql_txt='''
@ -84,135 +85,198 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# commit;
# connect '%(db_name)s' user SYSDBA password 'masterkey';
# create or alter user tmp$c2004_foo password '123' inactive using plugin Srp grant admin role;
#
#
# -- NB: currently it seems strange that one need to grant rdb$admin to 'foo'
# -- For what reason this role need to be added if 'foo' does his actions only in security_db ?
# -- Sent letter to dimitr and alex, 10-mar-18 16:00
# grant rdb$admin to tmp$c2004_foo;
#
#
# create or alter user tmp$c2004_bar password '456' inactive using plugin Srp;
# commit;
#
#
# set count on;
# select 'init_state' as msg, v.* from v_check v;
# set count off;
#
#
# select 'try to connect as INACTIVE users' as msg from rdb$database;
# commit;
#
#
# connect '%(db_name)s' user tmp$c2004_foo password '123'; -- should fail
# select current_user as who_am_i from rdb$database;
# rollback;
#
#
# connect '%(db_name)s' user tmp$c2004_bar password '456'; -- should fail
# select current_user as who_am_i from rdb$database;
# rollback;
#
#
# connect '%(db_name)s' user SYSDBA password 'masterkey';
#
#
#
#
# -- NB: following "alter user" statement must contain "using plugin Srp" clause
# -- otherwise get:
# -- Statement failed, SQLSTATE = HY000
# -- record not found for user: TMP$C2004_BAR
#
#
# alter user tmp$c2004_foo active using plugin Srp;
# select 'try to connect as user FOO which was just set as active by SYSDBA.' as msg from rdb$database;
# commit;
#
#
# connect '%(db_name)s' user tmp$c2004_foo password '123' role 'RDB$ADMIN'; -- should pass
# select current_user as who_am_i, current_role as whats_my_role from rdb$database;
#
#
#
#
# -- should pass because foo has admin role:
# create or alter user tmp$c2004_rio password '123' using plugin Srp;
# drop user tmp$c2004_rio using plugin Srp;
#
#
# -- should pass because foo has admin role:
# alter user tmp$c2004_bar active using plugin Srp;
# select 'try to connect as user BAR which was just set as active by FOO.' as msg from rdb$database;
# commit;
#
#
# connect '%(db_name)s' user tmp$c2004_bar password '456'; -- should pass
# select current_user as who_am_i from rdb$database;
# commit;
#
#
#
#
# connect '%(db_name)s' user SYSDBA password 'masterkey';
# select 'try to drop both non-privileged users by SYSDBA.' as msg from rdb$database;
# drop user tmp$c2004_foo using plugin Srp;
# drop user tmp$c2004_bar using plugin Srp;
# commit;
# set count on;
#
#
# select 'final_state' as msg, v.* from v_check v;
# set count off;
# ''' % locals()
#
#
#
#
# f_isql_run=open( os.path.join(context['temp_directory'],'tmp_check_2004.sql'), 'w')
# f_isql_run.write( sql_txt )
# f_isql_run.close()
#
#
# f_isql_log=open( os.path.join(context['temp_directory'],'tmp_check_2004.log'), 'w')
# f_isql_err=open( os.path.join(context['temp_directory'],'tmp_check_2004.err'), 'w')
#
#
# subprocess.call( [ context['isql_path'], '-q', '-i', f_isql_run.name], stdout = f_isql_log, stderr=f_isql_err)
#
#
# flush_and_close( f_isql_log )
# flush_and_close( f_isql_err )
#
#
# with open(f_isql_log.name,'r') as f:
# for line in f:
# if line.rstrip().split():
# print( 'STDLOG: ', line )
#
#
# with open(f_isql_err.name,'r') as f:
# for line in f:
# if line.rstrip().split():
# print( 'STDERR: ', line )
#
#
# # Cleanup:
# ##########
# time.sleep(1)
# cleanup( [i.name for i in (f_isql_run, f_isql_log, f_isql_err)] )
#
#
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
user_1 = user_factory(name='tmp$c2004_foo', password='123')
user_2 = user_factory(name='tmp$c2004_bar', password='456')
expected_stdout_1 = """
STDLOG: MSG init_state
STDLOG: SEC$USER_NAME TMP$C2004_FOO
STDLOG: SEC$ACTIVE <false>
STDLOG: SEC$PLUGIN Srp
STDLOG: MSG init_state
STDLOG: SEC$USER_NAME TMP$C2004_BAR
STDLOG: SEC$ACTIVE <false>
STDLOG: SEC$PLUGIN Srp
STDLOG: Records affected: 2
STDLOG: MSG try to connect as INACTIVE users
STDLOG: MSG try to connect as user FOO which was just set as active by SYSDBA.
STDLOG: WHO_AM_I TMP$C2004_FOO
STDLOG: WHATS_MY_ROLE RDB$ADMIN
STDLOG: MSG try to connect as user BAR which was just set as active by FOO.
STDLOG: WHO_AM_I TMP$C2004_BAR
STDLOG: MSG try to drop both non-privileged users by SYSDBA.
STDLOG: MSG final_state
STDLOG: SEC$USER_NAME <null>
STDLOG: SEC$ACTIVE <null>
STDLOG: SEC$PLUGIN <null>
STDLOG: Records affected: 1
STDERR: Statement failed, SQLSTATE = 28000
STDERR: Your user name and password are not defined. Ask your database administrator to set up a Firebird login.
STDERR: Statement failed, SQLSTATE = 28000
STDERR: Your user name and password are not defined. Ask your database administrator to set up a Firebird login.
STDERR: After line 19 in file C:\\MIX irebird\\QA bt-repo mp mp_check_2004.sql
"""
MSG init_state
SEC$USER_NAME TMP$C2004_FOO
SEC$ACTIVE <false>
SEC$PLUGIN Srp
MSG init_state
SEC$USER_NAME TMP$C2004_BAR
SEC$ACTIVE <false>
SEC$PLUGIN Srp
Records affected: 2
MSG try to connect as INACTIVE users
MSG try to connect as user FOO which was just set as active by SYSDBA.
WHO_AM_I TMP$C2004_FOO
WHATS_MY_ROLE RDB$ADMIN
MSG try to connect as user BAR which was just set as active by FOO.
WHO_AM_I TMP$C2004_BAR
"""
expected_stderr_1 = """
Statement failed, SQLSTATE = 28000
Your user name and password are not defined. Ask your database administrator to set up a Firebird login.
Statement failed, SQLSTATE = 28000
Your user name and password are not defined. Ask your database administrator to set up a Firebird login.
After line 19 in file tmp_check_2004.sql
"""
@pytest.mark.version('>=3.0')
@pytest.mark.xfail
def test_1(db_1):
pytest.fail("Test not IMPLEMENTED")
def test_1(act_1: Action, user_1: User, user_2: User):
    """Check ALTER USER ... ACTIVE/INACTIVE handling (CORE-2004).

    Creates two inactive users (the fixtures pre-create the accounts), checks
    that inactive users cannot connect, then activates 'foo' as SYSDBA,
    lets 'foo' (holding RDB$ADMIN) create/drop a third user and activate
    'bar', and finally checks that 'bar' can connect.  Expected stdout/stderr
    (including the two 'user name and password are not defined' failures)
    are compared against the module-level expectations.
    """
    # NOTE: the SQL text below is executed verbatim by isql; do not edit it
    # except in lockstep with expected_stdout_1 / expected_stderr_1.
    act_1.script = f'''
    set list on;
    commit;
    create or alter user tmp$c2004_foo password '123' inactive using plugin Srp grant admin role;
    -- NB: currently it seems strange that one need to grant rdb$admin to 'foo'
    -- For what reason this role need to be added if 'foo' does his actions only in security_db ?
    -- Sent letter to dimitr and alex, 10-mar-18 16:00
    grant rdb$admin to tmp$c2004_foo;
    create or alter user tmp$c2004_bar password '456' inactive using plugin Srp;
    commit;
    set count on;
    select 'init_state' as msg, v.* from v_check v;
    set count off;
    select 'try to connect as INACTIVE users' as msg from rdb$database;
    commit;
    connect '{act_1.db.dsn}' user tmp$c2004_foo password '123'; -- should fail
    select current_user as who_am_i from rdb$database;
    rollback;
    connect '{act_1.db.dsn}' user tmp$c2004_bar password '456'; -- should fail
    select current_user as who_am_i from rdb$database;
    rollback;
    connect '{act_1.db.dsn}' user SYSDBA password 'masterkey';
    -- NB: following "alter user" statement must contain "using plugin Srp" clause
    -- otherwise get:
    -- Statement failed, SQLSTATE = HY000
    -- record not found for user: TMP$C2004_BAR
    alter user tmp$c2004_foo active using plugin Srp;
    select 'try to connect as user FOO which was just set as active by SYSDBA.' as msg from rdb$database;
    commit;
    connect '{act_1.db.dsn}' user tmp$c2004_foo password '123' role 'RDB$ADMIN'; -- should pass
    select current_user as who_am_i, current_role as whats_my_role from rdb$database;
    -- should pass because foo has admin role:
    create or alter user tmp$c2004_rio password '123' using plugin Srp;
    drop user tmp$c2004_rio using plugin Srp;
    -- should pass because foo has admin role:
    alter user tmp$c2004_bar active using plugin Srp;
    select 'try to connect as user BAR which was just set as active by FOO.' as msg from rdb$database;
    commit;
    connect '{act_1.db.dsn}' user tmp$c2004_bar password '456'; -- should pass
    select current_user as who_am_i from rdb$database;
    commit;
    '''
    act_1.expected_stdout = expected_stdout_1
    act_1.expected_stderr = expected_stderr_1
    # Both streams are verified: the connect failures land on stderr.
    act_1.execute()
    assert act_1.clean_stderr == act_1.clean_expected_stderr
    assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@ -2,12 +2,12 @@
#
# id: bugs.core_2017
# title: I/O statistics for stored procedures are not accounted in monitoring tables
# decription:
# decription:
# We open TWO cursors within the same attachments and:
# 1) make query to procedure inside cursor-1 (trivial count from table there);
# 2) ask MON$ tables inside cur-2 with aquiring IO statistics (fetches) for cur-1 statement.
# Number of fetches should be not less then 202400 - see results for 2.1.x, 2.5.x and 3.0 below.
#
#
# 17.12.2016 NOTE. Value of fetches in 3.0.2 and 4.0.0 was significantly reduced (~ twice) since ~25-nov-2016
# See results for: 4.0.0.459 and 3.0.2.32641
# Possible reason:
@ -15,15 +15,15 @@
# https://github.com/FirebirdSQL/firebird/commit/dac882c97e2642e260abef475de75c490c5e4bc7
# "Introduced small per-relation cache of physical numbers of data pages.
# It allows to reduce number of pointer page fetches and improves performance."
#
#
#
#
# tracker_id: CORE-2017
# min_versions: ['2.5.7']
# versions: 2.5.7
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
from firebird.qa import db_factory, python_act, Action
# version: 2.5.7
# resources: None
@ -65,46 +65,46 @@ db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)
# test_script_1
#---
#
#
#
#
# # Obtain engine version:
# engine = str(db_conn.engine_version) # convert to text because 'float' object has no attribute 'startswith'
#
#
# stt1 = db_conn.cursor()
# stt2 = db_conn.cursor()
#
#
# sp_query = "select * from sp_test"
# stt2.execute(sp_query)
# for row in stt2:
# pass
#
#
# # do NOT!! >>> con1.commit()
#
#
# # Threshold: minimal value of fetches that should be reflected in mon$tables.
#
#
# MIN_FETCHES = 202400 if engine.startswith('2.5') else 104500
#
#
# sql_io='''
# select
# select
# -- i.mon$page_fetches
# --,m.mon$sql_text
# --,m.mon$sql_text
# --,rdb$get_context('SYSTEM','ENGINE_VERSION')
# iif( i.mon$page_fetches > %(MIN_FETCHES)s, 'IO statistics for procedure is OK',
# iif( i.mon$page_fetches > %(MIN_FETCHES)s, 'IO statistics for procedure is OK',
# 'Strange low value for fetches: ' || i.mon$page_fetches
# ) as fetches_result
# from rdb$database r
# from rdb$database r
# left join mon$statements m on
# m.mon$sql_text containing '%(sp_query)s'
# and m.mon$sql_text NOT containing 'mon$statements'
# left join mon$io_stats i on
# left join mon$io_stats i on
# m.mon$stat_id = i.mon$stat_id and i.mon$stat_group = 3
# ;
# ''' % locals()
# stt1.execute(sql_io)
#
#
# for row in stt1:
# print(row[0])
#
#
# # (0, 'select * from sp_test', '2.1.0')
# # (0, 'select * from sp_test', '2.1.1')
# # (202472, 'select * from sp_test', '2.1.2')
@ -117,15 +117,37 @@ db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)
# # BEFORE 3.0.2.32641: (202472, 'select * from sp_test', '3.0.0') // after: 104942
# # BEFORE 4.0.0.459: (202472, 'select * from sp_test', '4.0.0') // after: 104942
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
act_1 = python_act('db_1', substitutions=substitutions_1)
expected_stdout_1 = """
IO statistics for procedure is OK
"""
IO statistics for procedure is OK
"""
@pytest.mark.version('>=2.5.7')
@pytest.mark.xfail
def test_1(db_1):
pytest.fail("Test not IMPLEMENTED")
def test_1(act_1: Action, capsys):
    """Check that I/O statistics for a stored procedure are reflected in MON$ tables.

    Two cursors share one attachment: the first runs the procedure query and
    fully consumes it; the second then asks the monitoring tables for the page
    fetches accumulated by that statement.  The fetch count must exceed the
    104500 threshold (see ticket history in the header comment).
    """
    proc_query = 'select * from sp_test'
    mon_query = '''
        select
            iif( i.mon$page_fetches > 104500, 'IO statistics for procedure is OK',
                 'Strange low value for fetches: ' || i.mon$page_fetches
               ) as fetches_result
        from rdb$database r
        left join mon$statements m on
            m.mon$sql_text containing 'select * from sp_test'
            and m.mon$sql_text NOT containing 'mon$statements'
        left join mon$io_stats i on
            m.mon$stat_id = i.mon$stat_id and i.mon$stat_group = 3
        ;
    '''
    act_1.expected_stdout = expected_stdout_1
    with act_1.db.connect() as con:
        mon_cursor = con.cursor()
        proc_cursor = con.cursor()
        # Run the procedure query and drain every row so that its fetch
        # statistics are fully accumulated before we look at mon$ tables.
        proc_cursor.execute(proc_query)
        proc_cursor.fetchall()
        # NOTE: deliberately no commit here - the statement must still be
        # present in mon$statements when the monitoring query runs.
        mon_cursor.execute(mon_query)
        for record in mon_cursor:
            print(record[0])
    act_1.stdout = capsys.readouterr().out
    assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@ -2,12 +2,12 @@
#
# id: bugs.core_2026
# title: Problem with a read-only marked database
# decription:
# Since FB 2.1 engine performs transliteraion of blobs between character sets.
# In this case system blob, stored in UNICODE_FSS, transliterated into connection charset.
# To do this, temporary blob is created. Engine didn't support temporary blobs creation in
# decription:
# Since FB 2.1 engine performs transliteraion of blobs between character sets.
# In this case system blob, stored in UNICODE_FSS, transliterated into connection charset.
# To do this, temporary blob is created. Engine didn't support temporary blobs creation in
# read-only databases since read-only databases was introduced
#
#
# tracker_id: CORE-2026
# min_versions: ['2.5.0']
# versions: 2.5
@ -15,6 +15,7 @@
import pytest
from firebird.qa import db_factory, isql_act, Action
from firebird.driver import DbAccessMode
# version: 2.5
# resources: None
@ -30,45 +31,67 @@ db_1 = db_factory(charset='ISO8859_1', sql_dialect=3, init=init_script_1)
# test_script_1
#---
#
#
# import os
#
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
#
#
# db_conn.close()
#
#
# script='''
# set list on;
# set blob all;
# select mon$read_only from mon$database;
# set count on;
# select RDB$FIELD_NAME, rdb$default_source
# from rdb$relation_fields
# select RDB$FIELD_NAME, rdb$default_source
# from rdb$relation_fields
# where rdb$default_source is not null;
# '''
# runProgram('isql',[dsn],script)
# runProgram('gfix',['-mode','read_only',dsn])
# runProgram('isql',['-ch','iso8859_1',dsn],script)
#
#
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
MON$READ_ONLY 0
RDB$FIELD_NAME X
default 0
Records affected: 1
MON$READ_ONLY 1
RDB$FIELD_NAME X
default 0
Records affected: 1
"""
test_script_1 = """
set list on;
set blob all;
select mon$read_only from mon$database;
set count on;
select RDB$FIELD_NAME, rdb$default_source from rdb$relation_fields
where rdb$default_source is not null;
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1_a = """
MON$READ_ONLY 0
RDB$FIELD_NAME X
default 0
Records affected: 1
"""
expected_stdout_1_b = """
MON$READ_ONLY 1
RDB$FIELD_NAME X
default 0
Records affected: 1
"""
@pytest.mark.version('>=2.5')
@pytest.mark.xfail
def test_1(db_1):
pytest.fail("Test not IMPLEMENTED")
def test_1(act_1: Action):
    """Verify blob transliteration works on a read-only database.

    Phase 1 runs the check script against the database while it is still
    read/write; the database is then switched to read-only through the
    services API and the same script is replayed via isql with an explicit
    iso8859_1 connection charset (forcing system-blob transliteration).
    """
    # Phase 1: database is still in read/write mode.
    act_1.expected_stdout = expected_stdout_1_a
    act_1.execute()
    assert act_1.clean_stdout == act_1.clean_expected_stdout
    # Flip the database into read-only mode via the services API.
    db_path = str(act_1.db.db_path)
    with act_1.connect_server() as srv:
        srv.database.set_access_mode(database=db_path, mode=DbAccessMode.READ_ONLY)
    # Phase 2: same script, now against the read-only database.
    act_1.reset()
    act_1.expected_stdout = expected_stdout_1_b
    act_1.isql(switches=[], charset='iso8859_1', input=act_1.script)
    assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@ -2,14 +2,15 @@
#
# id: bugs.core_2115
# title: Query plan is missing for the long query
# decription:
# decription:
# tracker_id: CORE-2115
# min_versions: []
# versions: 3.0
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
from zipfile import Path
from firebird.qa import db_factory, python_act, Action
# version: 3.0
# resources: None
@ -22,34 +23,34 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1
#---
#
#
# import os
# import sys
# import subprocess
# import zipfile
# import filecmp
#
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
#
#
# db_conn.close()
#
#
# #-----------------------------------
#
#
# def flush_and_close(file_handle):
# # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os
#
#
# file_handle.flush()
# os.fsync(file_handle.fileno())
#
#
# file_handle.close()
#
#
# #--------------------------------------------
#
#
# def cleanup( f_names_list ):
# global os
# for i in range(len( f_names_list )):
@ -57,35 +58,35 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# os.remove( f_names_list[i] )
# if os.path.isfile( f_names_list[i]):
# print('ERROR: can not remove file ' + f_names_list[i])
#
#
# #-------------------------------------------
#
#
# zf = zipfile.ZipFile( os.path.join(context['files_location'],'core_2115.zip'), 'r' )
# for name in zf.namelist():
# zo=zf.open(name, 'rU')
# fn=open( os.path.join(context['temp_directory'],name), 'w')
# fn.write( zo.read().replace('\\r','') )
# fn.close()
#
#
# ### do NOT: preserves Carriage Return character from Windows: zf.extractall( context['temp_directory'] )
# zf.close()
#
# # Result: files
#
# # Result: files
# # 1. tmp_core_2115_queries_with_long_plans.sql and
# # 2. tmp_core_2115_check_txt_of_long_plans.log
# # 2. tmp_core_2115_check_txt_of_long_plans.log
# # -- have been extracted into context['temp_directory']
# #
# # These queries were created in AUTOMATED way using following batch scenario:
# #
# # @echo off
# # setlocal enabledelayedexpansion enableextensions
# #
# # set in_list_min_count=50
# #
# # set in_list_min_count=50
# # set in_list_max_count=1500
# #
# #
# # @rem -Implementation limit exceeded
# # @rem -Too many values (more than 1500) in member list to match against
# #
# #
# # set log=%~n0.log
# # set sql=%~n0.sql
# # del %sql% 2>nul
@ -93,7 +94,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# # echo -- GENERATED AUTO, DO NOT EDIT.
# # echo.
# # echo recreate table t234567890123456789012345678901(
# # echo f234567890123456789012345678901 smallint
# # echo f234567890123456789012345678901 smallint
# # echo unique using index x234567890123456789012345678901
# # echo ^);
# # echo commit;
@ -101,25 +102,25 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# # @rem echo set list on; select current_timestamp from rdb$database;
# # echo set planonly;
# # ) >> %sql%
# #
# #
# #
# #
# # for /l %%i in (%in_list_min_count%, 25, %in_list_max_count%) do (
# # @echo Generating query with IN-list of %%i elements.
# # call :make_query %sql% %%i
# # )
# #
# #
# # @echo Done.
# #
# #
# # goto end
# #
# #
# # :make_query
# #
# #
# # setlocal
# # set sql=%1
# # set in_elems=%2
# # set /a k=10000+%in_elems%
# # set suff=!k:~1,4!
# #
# #
# # (
# # echo.
# # echo -- Query with "IN"-list of %in_elems% elements:
@ -128,7 +129,7 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# # echo t__________________________!suff!
# # echo where f234567890123456789012345678901 in (
# # ) >>%sql%
# #
# #
# # set /a k=1
# # set s=
# # for /l %%i in (1,1,%in_elems%) do (
@ -138,55 +139,57 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# # set s=!s!,0
# # )
# # set /a m=!k! %% 50
# #
# #
# # if !m! equ 0 (
# # if .%%i.==.%in_elems%. ( echo !s!>>%sql% ) else ( echo !s!,>>%sql% )
# # set s=
# # )
# # set /a k=!k!+1
# # )
# #
# #
# #
# #
# # (
# # if not .!s!.==.. echo !s!
# # echo ^);
# # ) >>%sql%
# #
# #
# # endlocal & goto:eof
# #
# #
# # :end
#
#
# sql_long_plans_qry=os.path.join(context['temp_directory'],'tmp_core_2115_queries_with_long_plans.sql')
# sql_long_plans_chk=os.path.join(context['temp_directory'],'tmp_core_2115_check_txt_of_long_plans.log')
#
#
# sql_long_plans_res=open( os.path.join(context['temp_directory'],'tmp_core_2115_current_txt_of_long_plans.log'), 'w')
#
#
# subprocess.call( [context["isql_path"], dsn, "-i", sql_long_plans_qry], stdout=sql_long_plans_res, stderr=subprocess.STDOUT )
# flush_and_close( sql_long_plans_res )
#
#
# if filecmp.cmp( sql_long_plans_chk, sql_long_plans_res.name):
# print("Current plans match to original ones.")
# else:
# print("Found at least one MISMATCH with original plans.")
#
#
#
#
# # cleanup
# ##########
#
#
# f_list = [ sql_long_plans_qry, sql_long_plans_chk, sql_long_plans_res.name ]
# cleanup( f_list )
#
#
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
Current plans match to original ones.
"""
act_1 = python_act('db_1', substitutions=substitutions_1)
@pytest.mark.version('>=3.0')
@pytest.mark.xfail
def test_1(db_1):
pytest.fail("Test not IMPLEMENTED")
def test_1(act_1: Action):
    """Run queries with very long plans and compare output to the reference log.

    Both the SQL script and its expected output are stored inside
    core_2115.zip (extracted lazily via zipfile.Path, without unpacking to
    disk).
    """
    zip_file = act_1.vars['files'] / 'core_2115.zip'
    act_1.script = Path(zip_file,
                        at='tmp_core_2115_queries_with_long_plans.sql').read_text()
    act_1.expected_stdout = Path(zip_file,
                                 at='tmp_core_2115_check_txt_of_long_plans.log').read_text()
    act_1.execute()
    assert act_1.clean_stdout == act_1.clean_expected_stdout

View File

@ -2,7 +2,7 @@
#
# id: bugs.core_2192
# title: Extend maximum database page size to 32KB
# decription:
# decription:
# We create DB with page_size = 32784, then add two table int it, both with UTF8 fields.
# First table (test_non_coll) has field which is based on trivial text domain.
# Second table (test_collated) has two 'domained' fields and both underlying domains are
@ -15,25 +15,59 @@
# Finally, we:
# 1) check that all error logs are empty;
# 2) compare logs of DML, metadata extraction - they should be identical.
#
#
# Checked on 4.0.0.172, intermediate build based on sources of 10-may-2015 10:44 - works fine.
#
#
# tracker_id: CORE-2192
# min_versions: ['4.0']
# versions: 4.0
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
from firebird.qa import db_factory, python_act, Action
from firebird.driver import DbWriteMode, SrvRestoreFlag
#from difflib import unified_diff
from io import BytesIO
# version: 4.0
# resources: None
substitutions_1 = []
init_script_1 = """"""
test_size = 32768 # -- Ran 1 tests in 4.504s
# test_size = 16384 # -- Ran 1 tests in 2.735s
max_indx1 = int(test_size / 4 - 9)
max_indx6 = int(max_indx1 / 6)
max_indx8 = int(max_indx1 / 8)
db_1 = db_factory(sql_dialect=3, init=init_script_1)
init_script_1 = f"""
set list on;
set bail on;
set echo on;
create sequence g;
commit;
create collation utf8_ci for utf8 from unicode case insensitive;
create collation utf8_ai_ci for utf8 from unicode accent insensitive case insensitive ;
commit;
create domain dm_non_coll as varchar({max_indx1});
create domain dm_collated_ci as varchar({max_indx6}) character set utf8 collate utf8_ci;
create domain dm_collated_ai_ci as varchar({max_indx6}) character set utf8 collate utf8_ai_ci;
commit;
recreate table test_non_coll(
txt_non_coll dm_non_coll
);
recreate table test_collated(
txt_ci dm_collated_ci
,txt_ai_ci dm_collated_ai_ci
);
commit;
create index test_non_coll on test_non_coll(txt_non_coll);
create index test_coll_ci on test_collated(txt_ci);
create index test_coll_ai_ci on test_collated(txt_ai_ci);
commit;
"""
db_1 = db_factory(sql_dialect=3, init=init_script_1, page_size=32784)
# test_script_1
#---
@ -41,12 +75,12 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# import time
# import subprocess
# import difflib
#
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
#
#
# #--------------------------------------------
#
#
# def cleanup( f_names_list ):
# global os
# for i in range(len( f_names_list )):
@ -54,28 +88,28 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# os.remove( f_names_list[i] )
# if os.path.isfile( f_names_list[i]):
# print('ERROR: can not remove file ' + f_names_list[i])
#
#
# #-------------------------------------------
#
#
# db_conn.close()
#
#
# tmpfdb1=os.path.join(context['temp_directory'],'tmp_2192_32k.fdb')
# if os.path.isfile(tmpfdb1):
# os.remove(tmpfdb1)
#
#
# tmpfbk1=os.path.join(context['temp_directory'],'tmp_2192_32k.fbk')
# if os.path.isfile(tmpfbk1):
# os.remove(tmpfbk1)
#
#
#
#
# test_size = 32768 # -- Ran 1 tests in 4.504s
# # test_size = 16384 # -- Ran 1 tests in 2.735s
#
#
# max_indx1 = test_size / 4 - 9
# max_indx6 = max_indx1 / 6
# max_indx8 = max_indx1 / 8
#
#
#
#
# sql_ddl=''' set list on;
# set bail on;
# set echo on;
@ -91,10 +125,10 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# create domain dm_collated_ci as varchar( %(max_indx6)s ) character set utf8 collate utf8_ci;
# create domain dm_collated_ai_ci as varchar( %(max_indx6)s ) character set utf8 collate utf8_ai_ci;
# commit;
# recreate table test_non_coll(
# recreate table test_non_coll(
# txt_non_coll dm_non_coll
# );
# recreate table test_collated(
# recreate table test_collated(
# txt_ci dm_collated_ci
# ,txt_ai_ci dm_collated_ai_ci
# );
@ -104,92 +138,92 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# create index test_coll_ai_ci on test_collated(txt_ai_ci);
# commit;
# ''' % locals()
#
#
# dml_test = ''' --show version;
# delete from test_non_coll;
# delete from test_collated;
# commit;
# set count on;
# insert into test_non_coll(txt_non_coll)
# select
# select
# rpad('', %(max_indx1)s, 'QWERTY' || gen_id(g,1) )
# from
# from
# -- rdb$types rows 10000
# (select 1 i from rdb$types rows 200), (select 1 i from rdb$types rows 5)
# rows 361
# ;
# commit;
#
#
# insert into test_collated(txt_ci, txt_ai_ci)
# select
# select
# rpad('', %(max_indx6)s, 'Ещё Съешь Этих Мягких Французских Булочек Да Выпей Же Чаю')
# ,rpad('', %(max_indx6)s, 'Ещё Французских Булочек Этих Мягких Съешь Да Чаю Выпей Же')
# from
# from
# (select 1 i from rdb$types rows 250), (select 1 i from rdb$types rows 2)
# ;
#
#
# commit;
#
#
# set count off;
# set list on;
# set plan on;
#
#
# select count(*)
# from test_non_coll
# where txt_non_coll starting with 'QWERTY'
#
#
# union all
#
#
# select count(*)
# from test_collated
# where txt_ci starting with 'еЩё'
#
#
# union all
#
#
# select count(*)
# from test_collated
# where txt_ai_ci starting with 'ёЩЕ'
#
#
# union all
#
#
# select count(*)
# from test_collated
# where txt_ci = lower(rpad('', %(max_indx6)s, 'Ещё Съешь Этих Мягких Французских Булочек Да Выпей Же Чаю'))
#
# union all
#
#
# union all
#
# select count(*)
# from test_collated
# where txt_ai_ci = rpad('', %(max_indx6)s, 'Ещё Французских Булочек Этих Мягких Съешь Да Чаю Выпей Же')
# ;
#
#
# select count(*)
# from test_non_coll
# where txt_non_coll like 'QWERTY%%'
#
#
# union all
#
#
# select count(*)
# from test_collated
# where txt_ci like 'еЩё%%'
#
#
# union all
#
#
# select count(*)
# from test_collated
# where txt_ai_ci like 'ёЩЕ%%'
#
#
# union all
#
#
# select count(*)
# from test_collated
# where txt_ci between
# where txt_ci between
# rpad('', %(max_indx6)s, 'ещё Съешь ЭТИХ Мягких Французских Булочек Да Выпей Же Чаю')
# and
# rpad('', %(max_indx6)s, 'ЕЩЁ Съешь Этих МЯГКИХ фРанцузских Булочек Да Выпей Же Чаю')
#
# union all
#
#
# union all
#
# select count(*)
# from test_collated
# where txt_ai_ci between
@ -197,17 +231,17 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# and
# rpad('', %(max_indx6)s, 'ёщё Французских Булочёк Этих Мягких Съёшь Да Чаю Выпёй Жё')
# ;
#
#
# set plan off;
# ''' % locals()
#
#
# f_create_db_32k_sql = open( os.path.join(context['temp_directory'],'tmp_2192_ddl.sql'), 'w')
# f_create_db_32k_sql.write(sql_ddl)
# f_create_db_32k_sql.close()
#
# # 0. CREATE DATABASE
#
# # 0. CREATE DATABASE
# ####################
#
#
# f_create_db_32k_log = open( os.path.join(context['temp_directory'],'tmp_2192_ddl.log'), 'w')
# f_create_db_32k_err = open( os.path.join(context['temp_directory'],'tmp_2192_ddl.err'), 'w')
# subprocess.call( [ context['isql_path'], "-q", "-i", f_create_db_32k_sql.name, "-ch", "utf8" ]
@ -216,97 +250,97 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# )
# f_create_db_32k_log.close()
# f_create_db_32k_err.close()
#
#
# # CHANGE FW to OFF
# ##################
# f_change_fw = open(os.devnull, 'w')
# subprocess.call( [ context['fbsvcmgr_path'], "localhost:service_mgr", "action_properties", "dbname", tmpfdb1, "prp_write_mode", "prp_wm_async" ], stdout = f_change_fw,stderr = subprocess.STDOUT )
# f_change_fw.close()
#
#
# # 1. FIRST RUN DML_TEST
# #######################
#
#
# f_run_dml_sql = open( os.path.join(context['temp_directory'],'tmp_2192_dml.sql'), 'w')
# f_run_dml_sql.write(dml_test)
# f_run_dml_sql.close()
#
#
# f_run_dml_log_1 = open( os.path.join(context['temp_directory'],'tmp_2192_dml_1.log'), 'w')
# subprocess.call( [ context['isql_path'], 'localhost:'+tmpfdb1, "-i", f_run_dml_sql.name, "-ch", "utf8" ]
# ,stdout = f_run_dml_log_1
# ,stderr = subprocess.STDOUT
# )
# f_run_dml_log_1.close()
#
#
# # 2. EXTRACT METADATA-1
# #######################
#
#
# f_extract_meta1_sql = open( os.path.join(context['temp_directory'],'tmp_2192_meta1.log'), 'w')
# subprocess.call( [ context['isql_path'], 'localhost:'+tmpfdb1, "-x" ]
# ,stdout = f_extract_meta1_sql
# ,stderr = subprocess.STDOUT
# )
# f_extract_meta1_sql.close()
#
#
# # 3. VALIDATE DATABASE-1
# ########################
# f_validate_log_1=open( os.path.join(context['temp_directory'],'tmp_2192_validate1.log'), "w")
# f_validate_err_1=open( os.path.join(context['temp_directory'],'tmp_2192_validate1.err'), "w")
#
#
# subprocess.call( [ context['fbsvcmgr_path'],"localhost:service_mgr","action_validate","dbname", tmpfdb1 ],stdout=f_validate_log_1,stderr=f_validate_err_1 )
#
#
# f_validate_log_1.close()
# f_validate_err_1.close()
#
#
#
#
# # 4. TRY TO BACKUP AND RESTORE
# ##############################
#
#
# f_backup_log=open( os.path.join(context['temp_directory'],'tmp_2192_backup.log'), "w")
# f_backup_err=open( os.path.join(context['temp_directory'],'tmp_2192_backup.err'), "w")
#
#
# subprocess.call( [ context['fbsvcmgr_path'],"localhost:service_mgr","action_backup", "verbose","dbname", tmpfdb1, "bkp_file", tmpfbk1],stdout=f_backup_log,stderr=f_backup_err)
#
#
# f_backup_log.close()
# f_backup_err.close()
#
#
# f_restore_log=open( os.path.join(context['temp_directory'],'tmp_2192_restore.log'), "w")
# f_restore_err=open( os.path.join(context['temp_directory'],'tmp_2192_restore.err'), "w")
# subprocess.call( [ context['fbsvcmgr_path'],"localhost:service_mgr",
# "action_restore", "res_replace", "verbose",
# "bkp_file", tmpfbk1,
# "dbname", tmpfdb1
# "bkp_file", tmpfbk1,
# "dbname", tmpfdb1
# ]
# ,stdout=f_restore_log
# ,stderr=f_restore_err
# )
# f_restore_log.close()
# f_restore_err.close()
#
# f_restore_log.close()
# f_restore_err.close()
#
# # 5. EXTRACT METADATA-2
# #######################
#
#
# f_extract_meta2_sql = open( os.path.join(context['temp_directory'],'tmp_2192_meta2.log'), 'w')
# subprocess.call( [ context['isql_path'], 'localhost:'+tmpfdb1, "-x"],stdout = f_extract_meta2_sql,stderr = subprocess.STDOUT)
# f_extract_meta2_sql.close()
#
#
# # 6. AGAIN RUN DML_TEST
# #######################
#
#
# f_run_dml_log_2 = open( os.path.join(context['temp_directory'],'tmp_2192_dml_2.log'), 'w')
# subprocess.call( [ context['isql_path'], 'localhost:'+tmpfdb1, "-i", f_run_dml_sql.name, "-ch", "utf8" ],stdout = f_run_dml_log_2,stderr = subprocess.STDOUT )
# f_run_dml_log_2.close()
#
#
# # 7. VALIDATE DATABASE-2
# ########################
# f_validate_log_2=open( os.path.join(context['temp_directory'],'tmp_2192_validate2.log'), "w")
# f_validate_err_2=open( os.path.join(context['temp_directory'],'tmp_2192_validate2.err'), "w")
#
#
# subprocess.call( [ context['fbsvcmgr_path'],"localhost:service_mgr","action_validate","dbname", tmpfdb1],stdout=f_validate_log_2,stderr=f_validate_err_2)
#
#
# f_validate_log_2.close()
# f_validate_err_2.close()
#
#
#
#
# # 7. CHECKS
# ###########
# # 1) STDERR for: create DB, backup, restore, validation-1 and validation-2 - they all must be EMPTY.
@ -316,48 +350,48 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# f_list.append(f_backup_err)
# f_list.append(f_restore_err)
# f_list.append(f_validate_err_2)
#
#
# for i in range(len(f_list)):
# f_name=f_list[i].name
# if os.path.getsize(f_name) > 0:
# with open( f_name,'r') as f:
# for line in f:
# print("Unexpected STDERR, file "+f_name+": "+line)
#
#
# # 2) diff between dml_1.log and dml_2.log should be EMPTY.
# # 3) diff between meta1.log and meta2.log should be EMPTY.
#
#
# f_diff_txt=open( os.path.join(context['temp_directory'],'tmp_2192_diff.txt'), 'w')
# f_old=[]
# f_new=[]
#
#
# f_old.append(f_extract_meta1_sql) # DDL: what we have BEFORE database backup
# f_old.append(f_run_dml_log_1) # DML: result of querying tables before DB backup
# f_new.append(f_extract_meta2_sql) # DDL: what we have AFTER database restore
# f_new.append(f_run_dml_log_2) # DML: result of querying tables AFTER database restore
#
#
# for i in range(len(f_old)):
# old_file=open(f_old[i].name,'r')
# new_file=open(f_new[i].name,'r')
#
#
# f_diff_txt.write( ''.join( difflib.unified_diff( old_file.readlines(), new_file.readlines() ) ) )
#
#
# old_file.close()
# new_file.close()
#
#
# f_diff_txt.close()
#
#
# # Should be EMPTY:
# ##################
# with open( f_diff_txt.name,'r') as f:
# for line in f:
# print( 'Unexpected diff: '.join(line.split()).upper() )
#
#
# #####################################################################
# # Cleanup:
# ##########
# time.sleep(1)
#
#
# f_list= (
# f_create_db_32k_sql
# ,f_create_db_32k_log
@ -378,18 +412,156 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# ,f_diff_txt
# )
# cleanup( [i.name for i in f_list] )
#
# os.remove(tmpfdb1)
#
# os.remove(tmpfdb1)
# os.remove(tmpfbk1)
#
#
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
test_script_1 = f'''
--show version;
delete from test_non_coll;
delete from test_collated;
commit;
set count on;
insert into test_non_coll(txt_non_coll)
select
rpad('', {max_indx1}, 'QWERTY' || gen_id(g,1) )
from
-- rdb$types rows 10000
(select 1 i from rdb$types rows 200), (select 1 i from rdb$types rows 5)
rows 361
;
commit;
insert into test_collated(txt_ci, txt_ai_ci)
select
rpad('', {max_indx6}, 'Ещё Съешь Этих Мягких Французских Булочек Да Выпей Же Чаю')
,rpad('', {max_indx6}, 'Ещё Французских Булочек Этих Мягких Съешь Да Чаю Выпей Же')
from
(select 1 i from rdb$types rows 250), (select 1 i from rdb$types rows 2)
;
commit;
set count off;
set list on;
set plan on;
select count(*)
from test_non_coll
where txt_non_coll starting with 'QWERTY'
union all
select count(*)
from test_collated
where txt_ci starting with 'еЩё'
union all
select count(*)
from test_collated
where txt_ai_ci starting with 'ёЩЕ'
union all
select count(*)
from test_collated
where txt_ci = lower(rpad('', {max_indx6}, 'Ещё Съешь Этих Мягких Французских Булочек Да Выпей Же Чаю'))
union all
select count(*)
from test_collated
where txt_ai_ci = rpad('', {max_indx6}, 'Ещё Французских Булочек Этих Мягких Съешь Да Чаю Выпей Же')
;
select count(*)
from test_non_coll
where txt_non_coll like 'QWERTY%%'
union all
select count(*)
from test_collated
where txt_ci like 'еЩё%%'
union all
select count(*)
from test_collated
where txt_ai_ci like 'ёЩЕ%%'
union all
select count(*)
from test_collated
where txt_ci between
rpad('', {max_indx6}, 'ещё Съешь ЭТИХ Мягких Французских Булочек Да Выпей Же Чаю')
and
rpad('', {max_indx6}, 'ЕЩЁ Съешь Этих МЯГКИХ фРанцузских Булочек Да Выпей Же Чаю')
union all
select count(*)
from test_collated
where txt_ai_ci between
rpad('', {max_indx6}, 'ёще фРанцузских Булочек Этих Мягких Съешь Да Чаю Выпёй Же')
and
rpad('', {max_indx6}, 'ёщё Французских Булочёк Этих Мягких Съёшь Да Чаю Выпёй Жё')
;
set plan off;
'''
act_1 = python_act('db_1', substitutions=substitutions_1)
@pytest.mark.version('>=4.0')
@pytest.mark.xfail
def test_1(db_1):
pytest.fail("Test not IMPLEMENTED")
def test_1(act_1: Action):
    """Exercise a 32K-page database: DML, metadata extract, validate, backup/restore.

    The same DML script and metadata extraction are performed before a
    local backup and after a restore; both outputs must be identical,
    proving that 32K pages survive the backup/restore cycle.
    """
    # CHANGE FW to OFF (async writes) to speed up the heavy DML below.
    with act_1.connect_server() as srv:
        srv.database.set_write_mode(database=str(act_1.db.db_path), mode=DbWriteMode.ASYNC)
    # 1. FIRST RUN DML_TEST: populate tables and capture query output + plans.
    act_1.script = test_script_1
    act_1.execute()
    run_dml_log_1 = act_1.stdout
    # 2. EXTRACT METADATA-1 (isql -x = extract DDL).
    act_1.reset()
    act_1.isql(switches=['-x'])
    extract_meta1_sql = act_1.stdout
    # 3. VALIDATE DATABASE-1
    # [pcisar] I don't understand the point of validation as the original test does not check
    # that validation passed
    # NOTE(review): validate_log_1 is captured but never asserted on - kept
    # to mirror the original test; consider asserting it is clean.
    with act_1.connect_server() as srv:
        srv.database.validate(database=str(act_1.db.db_path))
        validate_log_1 = srv.readlines()
    # 4. TRY TO BACKUP AND RESTORE (in-memory stream, REPLACE overwrites the DB).
    with act_1.connect_server() as srv:
        backup = BytesIO()
        srv.database.local_backup(database=str(act_1.db.db_path), backup_stream=backup)
        backup.seek(0)
        srv.database.local_restore(backup_stream=backup, database=str(act_1.db.db_path),
                                   flags=SrvRestoreFlag.REPLACE)
        backup.close()
    # 5. EXTRACT METADATA-2 (same extraction, post-restore).
    act_1.reset()
    act_1.isql(switches=['-x'])
    extract_meta2_sql = act_1.stdout
    # 6. AGAIN RUN DML_TEST (same script, post-restore).
    act_1.reset()
    act_1.script = test_script_1
    act_1.execute()
    run_dml_log_2 = act_1.stdout
    # 7. VALIDATE DATABASE-2
    # NOTE(review): validate_log_2 is likewise captured but unused.
    with act_1.connect_server() as srv:
        srv.database.validate(database=str(act_1.db.db_path))
        validate_log_2 = srv.readlines()
    # 8. CHECKS
    # 1) STDERR for: create DB, backup, restore, validation-1 and validation-2 - they all must be EMPTY.
    # [pcisar] This is guaranteed because an exception would be raised on any error.
    # 2) diff between dml_1.log and dml_2.log should be EMPTY.
    assert run_dml_log_1 == run_dml_log_2
    # 3) diff between meta1.log and meta2.log should be EMPTY.
    assert extract_meta1_sql == extract_meta2_sql

View File

@ -2,8 +2,8 @@
#
# id: bugs.core_2197
# title: Add support for -nodbtriggers switch in gbak into services API
# decription:
# We add two database triggers (on connect and on disconnect) and make them do real work only when
# decription:
# We add two database triggers (on connect and on disconnect) and make them do real work only when
# new attachment will be established (see trick with rdb$get_context('USER_SESSION', 'INIT_STATE') ).
# After finish backup we restore database and check that there is no records in 'log' table.
# (if option 'bkp_no_triggers' will be omitted then two records will be in that table).
@ -11,14 +11,16 @@
# 2.5.9.27103: OK, 0.938s.
# 3.0.4.32920: OK, 2.875s.
# 4.0.0.912: OK, 3.328s.
#
#
# tracker_id: CORE-2197
# min_versions: ['2.5.0']
# versions: 2.5
# qmid:
# qmid:
import pytest
from firebird.qa import db_factory, isql_act, Action
from io import BytesIO
from firebird.driver import SrvRestoreFlag, SrvBackupFlag
# version: 2.5
# resources: None
@ -53,19 +55,19 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# import time
# import subprocess
# from subprocess import Popen
#
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
#
#
# db_conn.close()
#
#
# thisdb='$(DATABASE_LOCATION)bugs.core_2197.fdb'
# tmpbkp='$(DATABASE_LOCATION)bugs.core_2197_fbk.tmp'
# tmpres='$(DATABASE_LOCATION)bugs.core_2197_new.tmp'
#
#
#
#
# #---------------------------------------------------------------
#
#
# isql_txt=''' delete from log;
# commit;
# set term ^;
@ -80,60 +82,93 @@ db_1 = db_factory(sql_dialect=3, init=init_script_1)
# --set count on;
# --select * from log;
# '''
#
#
# runProgram('isql',[dsn], isql_txt)
#
#
# f_svc_log=open( os.path.join(context['temp_directory'],'tmp_svc_2197.log'), 'w')
# f_svc_err=open( os.path.join(context['temp_directory'],'tmp_svc_2197.err'), 'w')
#
# subprocess.call( [ context['fbsvcmgr_path'], 'localhost:service_mgr','action_backup',
#
# subprocess.call( [ context['fbsvcmgr_path'], 'localhost:service_mgr','action_backup',
# 'dbname', thisdb, 'bkp_file', tmpbkp,
# 'bkp_no_triggers'
# ],
# ],
# stdout=f_svc_log,stderr=f_svc_err
# )
#
#
# runProgram('isql',[dsn, '-nod'], 'set list on; set count on; select 1, g.* from log g;')
#
# subprocess.call( [ context['fbsvcmgr_path'], 'localhost:service_mgr','action_restore',
# 'bkp_file', tmpbkp, 'dbname', tmpres,
#
# subprocess.call( [ context['fbsvcmgr_path'], 'localhost:service_mgr','action_restore',
# 'bkp_file', tmpbkp, 'dbname', tmpres,
# 'res_replace'
# ],
# ],
# stdout=f_svc_log,stderr=f_svc_err
# )
#
#
# f_svc_log.close()
# f_svc_err.close()
#
#
# runProgram('isql',[dsn, '-nod'], 'set list on; set count on; select 1, g.* from log g;')
#
#
#
#
# #############################################
# # Cleanup.
# f_list=(
# f_list=(
# f_svc_log
# ,f_svc_err
# )
#
#
# for i in range(len(f_list)):
# if os.path.isfile(f_list[i].name):
# os.remove(f_list[i].name)
#
#
# os.remove(tmpbkp)
# os.remove(tmpres)
#
#
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
test_script_1 = '''
delete from log;
commit;
set term ^;
execute block as begin
rdb$set_context('USER_SESSION', 'INIT_STATE','1');
end
^
set term ;^
alter trigger trg_attach active;
alter trigger trg_detach active;
commit;
--set count on;
--select * from log;
'''
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
Records affected: 0
Records affected: 0
"""
"""
@pytest.mark.version('>=2.5')
@pytest.mark.xfail
def test_1(db_1):
pytest.fail("Test not IMPLEMENTED")
def test_1(act_1: Action):
    """Verify that the services-API backup honours the no-db-triggers flag.

    After activating ON CONNECT / ON DISCONNECT triggers, a backup is taken
    with SrvBackupFlag.NO_TRIGGERS and restored over the database; the 'log'
    table must stay empty both before and after the restore (database
    triggers must not have fired during the service attachments).
    """
    check_sql = 'set list on; set count on; select 1, g.* from log g;'
    # Activate the database triggers (act_1.script set up at module level).
    act_1.execute()
    backup = BytesIO()  # in-memory backup image, no temp file needed
    with act_1.connect_server() as srv:
        # Backup WITHOUT firing database triggers - the point of the ticket.
        srv.database.local_backup(database=str(act_1.db.db_path), backup_stream=backup,
                                  flags=SrvBackupFlag.NO_TRIGGERS)
        backup.seek(0)
        # Check 1: log must be empty after the backup service attachment
        # (-nod = isql suppresses DB triggers for its own connection).
        act_1.reset()
        act_1.expected_stdout = expected_stdout_1
        act_1.isql(switches=['-nod'], input=check_sql)
        assert act_1.clean_stdout == act_1.clean_expected_stdout
        # Restore over the original database while srv is still open.
        srv.database.local_restore(backup_stream=backup, database=str(act_1.db.db_path),
                                   flags=SrvRestoreFlag.REPLACE)
    backup.close()
    # Check 2: log must still be empty in the restored database.
    act_1.reset()
    act_1.expected_stdout = expected_stdout_1
    act_1.isql(switches=['-nod'], input=check_sql)
    assert act_1.clean_stdout == act_1.clean_expected_stdout