2021-04-26 20:07:00 +02:00
|
|
|
#coding:utf-8
|
|
|
|
#
|
|
|
|
# id: bugs.core_5441
|
|
|
|
# title: Cache physical numbers of often used data pages to reduce number of fetches of pointer pages
|
2021-12-07 20:09:36 +01:00
|
|
|
# description:
|
2021-04-26 20:07:00 +02:00
|
|
|
# We create table with single field, add several rows and create index.
|
|
|
|
# Number of these rows must be enough to fit all of them in the single data page.
|
|
|
|
# Then we loop and on every iteration query one row, using PLAN INDEX.
|
2021-12-07 20:09:36 +01:00
|
|
|
# Only _first_ iteration should lead to reading PP (and this requires 1 fetch),
|
2021-04-26 20:07:00 +02:00
|
|
|
# but all subsequent ones must read only the DP. This should be reflected in the trace as:
|
|
|
|
# * 4 fetches for 1st statement;
|
|
|
|
# * 3 fetches for statements starting with 2nd.
|
|
|
|
# Distinct number of fetches are accumulated in Python dict, and is displayed finally.
|
|
|
|
# We should have TWO distinct elements in this dict, and numbers in their values must
|
|
|
|
# differ at (N-1), where N = number of rows in the table.
|
2021-12-07 20:09:36 +01:00
|
|
|
#
|
2021-04-26 20:07:00 +02:00
|
|
|
# Checked on:
|
|
|
|
# * WI-T4.0.0.454 (22-nov-2016) -- number of fetches on every iteration is the same and equal to 4;
|
|
|
|
# * WI-T4.0.0.460 (02-dec-2016) -- only at FIRST iter. fetches=4; for all subsequent loops fetches = 3.
|
|
|
|
# This proves that lot of PP scans are avoided and necessary data are taken from cache.
|
2021-12-07 20:09:36 +01:00
|
|
|
#
|
2021-04-26 20:07:00 +02:00
|
|
|
# Also checked on:
|
|
|
|
# WI-V3.0.2.32677 (SS), WI-V3.0.2.32643 (SC/CS), WI-T4.0.0.519 (SS) -- all fine.
|
2021-12-07 20:09:36 +01:00
|
|
|
#
|
|
|
|
#
|
2021-04-26 20:07:00 +02:00
|
|
|
# tracker_id: CORE-5441
|
|
|
|
# min_versions: ['3.0.2']
|
|
|
|
# versions: 3.0.2
|
|
|
|
# qmid: None
|
|
|
|
|
|
|
|
import pytest
|
2021-12-07 20:09:36 +01:00
|
|
|
from firebird.qa import db_factory, python_act, Action
|
2021-04-26 20:07:00 +02:00
|
|
|
|
|
|
|
# version: 3.0.2
|
|
|
|
# resources: None
|
|
|
|
|
|
|
|
substitutions_1 = []

# Database init: a table whose 10 rows all fit on a single data page,
# a PK index on X, and a procedure that queries exactly one row per
# loop iteration (via EXECUTE STATEMENT, resolved by the PK index plan).
# Rows are inserted in random order so the index is actually needed.
init_script_1 = """
recreate table test(x int primary key);
commit;
insert into test(x) select r from (
    select row_number()over() r
    from rdb$types a,rdb$types b
    rows 10
)
order by rand();
commit;

set term ^;
create procedure sp_test as
    declare n int;
    declare c int;
begin
    n = 10;
    while( n > 0 ) do
    begin
        execute statement ( 'select 1 from test where x = ? rows 1' ) ( :n ) into c;
        n = n - 1;
    end
end^
set term ;^
commit;
"""

# page_size=8192 guarantees all 10 rows fit in one data page.
db_1 = db_factory(page_size=8192, sql_dialect=3, init=init_script_1)
|
|
|
|
|
|
|
|
# test_script_1
|
|
|
|
#---
|
2021-12-07 20:09:36 +01:00
|
|
|
#
|
2021-04-26 20:07:00 +02:00
|
|
|
# import os
|
|
|
|
# import subprocess
|
|
|
|
# import time
|
|
|
|
# from fdb import services
|
|
|
|
# from subprocess import Popen
|
2021-12-07 20:09:36 +01:00
|
|
|
#
|
2021-04-26 20:07:00 +02:00
|
|
|
# os.environ["ISC_USER"] = user_name
|
|
|
|
# os.environ["ISC_PASSWORD"] = user_password
|
2021-12-07 20:09:36 +01:00
|
|
|
#
|
2021-04-26 20:07:00 +02:00
|
|
|
# db_conn.close()
|
2021-12-07 20:09:36 +01:00
|
|
|
#
|
2021-04-26 20:07:00 +02:00
|
|
|
# #--------------------------------------------
|
2021-12-07 20:09:36 +01:00
|
|
|
#
|
2021-04-26 20:07:00 +02:00
|
|
|
# def flush_and_close( file_handle ):
|
|
|
|
# # https://docs.python.org/2/library/os.html#os.fsync
|
2021-12-07 20:09:36 +01:00
|
|
|
# # If you're starting with a Python file object f,
|
|
|
|
# # first do f.flush(), and
|
2021-04-26 20:07:00 +02:00
|
|
|
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
|
|
|
|
# global os
|
2021-12-07 20:09:36 +01:00
|
|
|
#
|
2021-04-26 20:07:00 +02:00
|
|
|
# file_handle.flush()
|
|
|
|
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
|
|
|
|
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
|
|
|
|
# os.fsync(file_handle.fileno())
|
|
|
|
# file_handle.close()
|
2021-12-07 20:09:36 +01:00
|
|
|
#
|
2021-04-26 20:07:00 +02:00
|
|
|
# #--------------------------------------------
|
2021-12-07 20:09:36 +01:00
|
|
|
#
|
2021-04-26 20:07:00 +02:00
|
|
|
# def cleanup( f_names_list ):
|
|
|
|
# global os
|
|
|
|
# for i in range(len( f_names_list )):
|
|
|
|
# if type(f_names_list[i]) == file:
|
|
|
|
# del_name = f_names_list[i].name
|
|
|
|
# elif type(f_names_list[i]) == str:
|
|
|
|
# del_name = f_names_list[i]
|
|
|
|
# else:
|
|
|
|
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
|
|
|
|
# del_name = None
|
2021-12-07 20:09:36 +01:00
|
|
|
#
|
2021-04-26 20:07:00 +02:00
|
|
|
# if del_name and os.path.isfile( del_name ):
|
|
|
|
# os.remove( del_name )
|
2021-12-07 20:09:36 +01:00
|
|
|
#
|
2021-04-26 20:07:00 +02:00
|
|
|
# #--------------------------------------------
|
2021-12-07 20:09:36 +01:00
|
|
|
#
|
2021-04-26 20:07:00 +02:00
|
|
|
# txt = '''# Generated auto, do not edit!
|
|
|
|
# database=%[\\\\\\\\/]security?.fdb
|
|
|
|
# {
|
|
|
|
# enabled = false
|
|
|
|
# }
|
|
|
|
# database=%[\\\\\\\\/]bugs.core_5441.fdb
|
|
|
|
# {
|
|
|
|
# enabled = true
|
|
|
|
# time_threshold = 0
|
|
|
|
# include_filter = "%(select % from test where x = ?)%"
|
|
|
|
# log_statement_finish = true
|
2021-12-07 20:09:36 +01:00
|
|
|
#
|
2021-04-26 20:07:00 +02:00
|
|
|
# }
|
|
|
|
# '''
|
2021-12-07 20:09:36 +01:00
|
|
|
#
|
2021-04-26 20:07:00 +02:00
|
|
|
# f_trc_cfg=open( os.path.join(context['temp_directory'],'tmp_trace_5441.cfg'), 'w')
|
|
|
|
# f_trc_cfg.write(txt)
|
|
|
|
# flush_and_close( f_trc_cfg )
|
2021-12-07 20:09:36 +01:00
|
|
|
#
|
2021-04-26 20:07:00 +02:00
|
|
|
# # ##############################################################
|
|
|
|
# # S T A R T T R A C E i n S E P A R A T E P R O C E S S
|
|
|
|
# # ##############################################################
|
2021-12-07 20:09:36 +01:00
|
|
|
#
|
2021-04-26 20:07:00 +02:00
|
|
|
# f_trc_log=open( os.path.join(context['temp_directory'],'tmp_trace_5441.log'), "w")
|
|
|
|
# f_trc_err=open( os.path.join(context['temp_directory'],'tmp_trace_5441.err'), "w")
|
2021-12-07 20:09:36 +01:00
|
|
|
#
|
2021-04-26 20:07:00 +02:00
|
|
|
# p_trace = Popen( [ context['fbsvcmgr_path'], 'localhost:service_mgr', 'action_trace_start' , 'trc_cfg', f_trc_cfg.name],stdout=f_trc_log,stderr=f_trc_err)
|
2021-12-07 20:09:36 +01:00
|
|
|
#
|
2021-04-26 20:07:00 +02:00
|
|
|
# time.sleep(1)
|
2021-12-07 20:09:36 +01:00
|
|
|
#
|
2021-04-26 20:07:00 +02:00
|
|
|
# # ####################################################
|
|
|
|
# # G E T A C T I V E T R A C E S E S S I O N I D
|
|
|
|
# # ####################################################
|
|
|
|
# # Save active trace session info into file for further parsing it and obtain session_id back (for stop):
|
2021-12-07 20:09:36 +01:00
|
|
|
#
|
2021-04-26 20:07:00 +02:00
|
|
|
# f_trc_lst = open( os.path.join(context['temp_directory'],'tmp_trace_5441.lst'), 'w')
|
|
|
|
# subprocess.call([context['fbsvcmgr_path'], 'localhost:service_mgr', 'action_trace_list'], stdout=f_trc_lst)
|
|
|
|
# flush_and_close( f_trc_lst )
|
2021-12-07 20:09:36 +01:00
|
|
|
#
|
2021-04-26 20:07:00 +02:00
|
|
|
# # !!! DO NOT REMOVE THIS LINE !!!
|
|
|
|
# # time.sleep(1)
|
2021-12-07 20:09:36 +01:00
|
|
|
#
|
2021-04-26 20:07:00 +02:00
|
|
|
# trcssn=0
|
|
|
|
# with open( f_trc_lst.name,'r') as f:
|
|
|
|
# for line in f:
|
|
|
|
# i=1
|
|
|
|
# if 'Session ID' in line:
|
|
|
|
# for word in line.split():
|
|
|
|
# if i==3:
|
|
|
|
# trcssn=word
|
|
|
|
# i=i+1
|
|
|
|
# break
|
2021-12-07 20:09:36 +01:00
|
|
|
#
|
2021-04-26 20:07:00 +02:00
|
|
|
# # Result: `trcssn` is ID of active trace session. Now we have to terminate it:
|
2021-12-07 20:09:36 +01:00
|
|
|
#
|
2021-04-26 20:07:00 +02:00
|
|
|
# con1 = fdb.connect(dsn=dsn)
|
|
|
|
# cur1=con1.cursor()
|
2021-12-07 20:09:36 +01:00
|
|
|
#
|
2021-04-26 20:07:00 +02:00
|
|
|
# #############################
|
|
|
|
# # R U N T E S T P R O C
|
|
|
|
# #############################
|
|
|
|
# cur1.callproc('sp_test')
|
|
|
|
# con1.close()
|
2021-12-07 20:09:36 +01:00
|
|
|
#
|
2021-04-26 20:07:00 +02:00
|
|
|
# # Let trace log to be entirely written on disk:
|
|
|
|
# time.sleep(1)
|
2021-12-07 20:09:36 +01:00
|
|
|
#
|
2021-04-26 20:07:00 +02:00
|
|
|
# # ####################################################
|
|
|
|
# # S E N D R E Q U E S T T R A C E T O S T O P
|
|
|
|
# # ####################################################
|
|
|
|
# if trcssn>0:
|
|
|
|
# fn_nul = open(os.devnull, 'w')
|
|
|
|
# subprocess.call([context['fbsvcmgr_path'], 'localhost:service_mgr', 'action_trace_stop','trc_id', trcssn], stdout=fn_nul)
|
|
|
|
# fn_nul.close()
|
2021-12-07 20:09:36 +01:00
|
|
|
#
|
2021-04-26 20:07:00 +02:00
|
|
|
# # 23.02.2021. DELAY FOR AT LEAST 1 SECOND REQUIRED HERE!
|
|
|
|
# # Otherwise trace log can remain empty.
|
|
|
|
# time.sleep(2)
|
2021-12-07 20:09:36 +01:00
|
|
|
#
|
2021-04-26 20:07:00 +02:00
|
|
|
# p_trace.terminate()
|
|
|
|
# flush_and_close( f_trc_log )
|
|
|
|
# flush_and_close( f_trc_err )
|
2021-12-07 20:09:36 +01:00
|
|
|
#
|
2021-04-26 20:07:00 +02:00
|
|
|
# # Following file should be EMPTY:
|
|
|
|
# ################
|
2021-12-07 20:09:36 +01:00
|
|
|
#
|
2021-04-26 20:07:00 +02:00
|
|
|
# f_list=(f_trc_err,)
|
|
|
|
# for i in range(len(f_list)):
|
|
|
|
# f_name=f_list[i].name
|
|
|
|
# if os.path.getsize(f_name) > 0:
|
|
|
|
# with open( f_name,'r') as f:
|
|
|
|
# for line in f:
|
|
|
|
# print("Unexpected STDERR, file "+f_name+": "+line)
|
2021-12-07 20:09:36 +01:00
|
|
|
#
|
2021-04-26 20:07:00 +02:00
|
|
|
# fetches_distinct_amounts={}
|
2021-12-07 20:09:36 +01:00
|
|
|
#
|
2021-04-26 20:07:00 +02:00
|
|
|
# with open( f_trc_log.name,'r') as f:
|
|
|
|
# for line in f:
|
|
|
|
# if 'fetch(es)' in line:
|
|
|
|
# words = line.split()
|
|
|
|
# for k in range(len(words)):
|
|
|
|
# if words[k].startswith('fetch'):
|
|
|
|
# if not words[k-1] in fetches_distinct_amounts:
|
|
|
|
# fetches_distinct_amounts[ words[k-1] ] = 1
|
|
|
|
# else:
|
|
|
|
# fetches_distinct_amounts[ words[k-1] ] += 1
|
2021-12-07 20:09:36 +01:00
|
|
|
#
|
2021-04-26 20:07:00 +02:00
|
|
|
# for k, v in sorted( fetches_distinct_amounts.items() ):
|
|
|
|
# print( ''.join( ('fetches=',k) ), 'occured', v, 'times' )
|
2021-12-07 20:09:36 +01:00
|
|
|
#
|
2021-04-26 20:07:00 +02:00
|
|
|
# # CLEANUP
|
|
|
|
# #########
|
|
|
|
# time.sleep(1)
|
|
|
|
# cleanup( (f_trc_cfg, f_trc_lst, f_trc_log, f_trc_err) )
|
2021-12-07 20:09:36 +01:00
|
|
|
#
|
2021-04-26 20:07:00 +02:00
|
|
|
#---
|
2021-12-07 20:09:36 +01:00
|
|
|
|
|
|
|
act_1 = python_act('db_1', substitutions=substitutions_1)

# Expected counts: only the FIRST lookup must also read the pointer page
# (4 fetches); the remaining nine lookups hit the cached DP number and
# need only 3 fetches each.
# NOTE: 'occured' [sic] must keep this spelling — it is matched verbatim
# against the output printed by the test body.
expected_stdout_1 = """
fetches=3 occured 9 times
fetches=4 occured 1 times
"""

# Trace session config: log only finished statements that match the query
# issued by sp_test, with no time threshold so every execution is logged.
trace_1 = ['time_threshold = 0',
           'log_initfini = false',
           'log_statement_finish = true',
           'include_filter = "%(select % from test where x = ?)%"',
           ]
|
2021-04-26 20:07:00 +02:00
|
|
|
|
2021-12-07 20:09:36 +01:00
|
|
|
@pytest.mark.version('>=3.0.2')
def test_1(act_1: Action, capsys):
    """Run sp_test under trace and verify the per-statement fetch counts.

    Only the first of the 10 indexed lookups should need to read the
    pointer page (4 fetches); the nine subsequent lookups must be served
    from the cached data-page number (3 fetches each).
    """
    with act_1.trace(db_events=trace_1), act_1.db.connect() as con:
        c = con.cursor()
        c.call_procedure('sp_test')
    # Process trace: count occurrences of each distinct fetch amount.
    fetches_distinct_amounts = {}
    for line in act_1.trace_log:
        if 'fetch(es)' in line:
            words = line.split()
            for k, word in enumerate(words):
                if word.startswith('fetch'):
                    # The token immediately before 'fetch(es)' is the amount.
                    amount = words[k - 1]
                    fetches_distinct_amounts[amount] = fetches_distinct_amounts.get(amount, 0) + 1
    for k, v in sorted(fetches_distinct_amounts.items()):
        # 'occured' [sic] must match expected_stdout_1 verbatim.
        print(f'fetches={k} occured {v} times')
    # Check
    act_1.expected_stdout = expected_stdout_1
    act_1.stdout = capsys.readouterr().out
    assert act_1.clean_stdout == act_1.clean_expected_stdout
|