#coding:utf-8
#
# id: bugs.core_3977
# title: DELETE FROM MON$STATEMENTS does not interrupt a longish fetch
# description:
#   Refactored 08-may-2017.
#   Checked on 2.5.8.27061, 3.0.2.32708 and 4.0.0.633 (Classic & Super).
#
#   Refactored 02.11.2019: the restored DB state must be changed to full shutdown in order to make sure that all attachments are gone.
#   Otherwise one may get:
#       Error while dropping database
#       - SQLCODE: -901
#       - lock time-out on wait transaction
#       - object E:\\QA\\FBT-REPO\\TMP\\BUGS.CORE_3977.FDB is in use
#   This applies to 4.0+ SS/SC when ExtConnPoolLifeTime > 0.
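#
#   A minimal sketch of that full-shutdown/online cycle (illustrative only; it simply mirrors the
#   fbsvcmgr calls from the legacy script further below, with <DB_FILE> as a placeholder for the
#   test database path):
#
#       import subprocess
#       # put the database into full shutdown, forcing every remaining attachment out
#       subprocess.call(['fbsvcmgr', 'localhost:service_mgr', 'action_properties',
#                        'prp_shutdown_mode', 'prp_sm_full', 'prp_shutdown_db', '0',
#                        'dbname', '<DB_FILE>'])
#       # bring it back online so that the test harness can drop it
#       subprocess.call(['fbsvcmgr', 'localhost:service_mgr', 'action_properties',
#                        'prp_db_online', 'dbname', '<DB_FILE>'])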
#   Checked on:
#       4.0.0.1639 SS: 5.299s.
#       4.0.0.1633 CS: 6.037s.
#       3.0.5.33183 SS: 4.795s.
#       3.0.5.33178 CS: 5.192s.
#       2.5.9.27119 SS: 3.491s.
#       2.5.9.27146 SC: 3.617s.
#
# tracker_id: CORE-3977
# min_versions: ['2.5.3']
# versions: 2.5.3
# qmid: None

import pytest
import subprocess
import time
from pathlib import Path
from firebird.qa import db_factory, python_act, Action, temp_file

# version: 2.5.3
# resources: None

substitutions_1 = [('^((?!RECORDS AFFECTED:|RESULT_MSG).)*$', '')]
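# NB: the substitution above blanks every output line that does not contain
# 'RECORDS AFFECTED:' or 'RESULT_MSG', so only those lines take part in the comparison.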

init_script_1 = """
create sequence g;
commit;
"""

db_1 = db_factory(sql_dialect=3, init=init_script_1)

# test_script_1
#---
#
#  import os
#  #import sys
#  import subprocess
#  from subprocess import Popen
#  import time
#
#  os.environ["ISC_USER"] = user_name
#  os.environ["ISC_PASSWORD"] = user_password
#
#  # dsn                     localhost/3400:C:\\FBTESTING\\qa\\fbt-repo\\tmp\\bugs.core_NNNN.fdb
#  # db_conn.database_name   C:\\FBTESTING\\QA\\FBT-REPO\\TMP\\BUGS.CORE_NNNN.FDB
#  # $(DATABASE_LOCATION)... C:/FBTESTING/qa/fbt-repo/tmp/bugs.core_NNN.fdb
#
#  this_fdb=db_conn.database_name
#  db_conn.close()
#
#  #---------------------------------------------
#
#  def flush_and_close(file_handle):
#      # https://docs.python.org/2/library/os.html#os.fsync
#      # If you're starting with a Python file object f,
#      # first do f.flush(), and
#      # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
#      global os
#
#      file_handle.flush()
#      if file_handle.mode not in ('r', 'rb'):
#          # otherwise: "OSError: [Errno 9] Bad file descriptor"!
#          os.fsync(file_handle.fileno())
#      file_handle.close()
#
#  #--------------------------------------------
#
#  def cleanup( f_names_list ):
#      global os
#      for i in range(len( f_names_list )):
#          if os.path.isfile( f_names_list[i]):
#              os.remove( f_names_list[i] )
#          if os.path.isfile( f_names_list[i]):
#              print('ERROR: can not remove file ' + f_names_list[i])
#
#  #--------------------------------------------
#
#  # Script for ISQL that will do 'heavy select':
#
#  usr=user_name
#  pwd=user_password
#
#  sql_cmd='''
#      alter sequence g restart with 0;
#      commit;
#
#      set term ^;
#      execute block as
#          declare x int;
#      begin
#          for
#              execute statement ('select gen_id(g,1) from rdb$types,rdb$types,rdb$types')
#                  on external
#                      'localhost:' || rdb$get_context('SYSTEM','DB_NAME')
#                  as user '%(usr)s' password '%(pwd)s'
#              into x
#          do begin
#          end
#      end
#      ^
#      set term ;^
#  ''' % locals()
#
#  f_query_to_be_killedcmd=open( os.path.join(context['temp_directory'],'tmp_isql_3977.sql'), 'w')
#  f_query_to_be_killedcmd.write(sql_cmd)
#  flush_and_close( f_query_to_be_killedcmd )
#
#  ############################################################
#  # Start ISQL in a separate process that runs the 'heavy query'
#
#  f_query_to_be_killedlog=open( os.path.join(context['temp_directory'],'tmp_isql_3977.log'), 'w')
#
#  p_heavy_dml = Popen( [ context['isql_path'] , dsn, "-i", f_query_to_be_killedcmd.name ],
#                       stdout=f_query_to_be_killedlog,
#                       stderr=subprocess.STDOUT
#                     )
#
#  # Here we have to wait, to be sure that ISQL has established its connection and started the DML,
#  # before we issue DELETE FROM MON$STATEMENTS.
#
#  time.sleep(2)
#
#  ######################################
#  # Run a 2nd ISQL and issue the DELETE FROM MON$STATEMENTS command there; the heavy query in the first ISQL should be interrupted shortly after.
#
#  v_mon_sttm_sql='''
#      commit;
#      set list on;
#
#      select *
#      from mon$statements
#      where
#          mon$attachment_id != current_connection
#          and mon$sql_text containing 'gen_id('
#      --order by mon$stat_id
#      ;
#
#      set count on;
#
#      delete from mon$statements
#      where
#          mon$attachment_id != current_connection
#          and mon$sql_text containing 'gen_id('
#      --order by mon$stat_id
#      ;
#      quit;
#  '''
#
#  f_delete_from_mon_sttm_sql=open( os.path.join(context['temp_directory'],'tmp_dels_3977.sql'), 'w')
#  f_delete_from_mon_sttm_sql.write(v_mon_sttm_sql)
#  flush_and_close( f_delete_from_mon_sttm_sql )
#
#  f_delete_from_mon_sttm_log=open( os.path.join(context['temp_directory'],'tmp_dels_3977.log'), 'w')
#
#  subprocess.call( [context['isql_path'], dsn, "-i", f_delete_from_mon_sttm_sql.name ],
#                   stdout=f_delete_from_mon_sttm_log,
#                   stderr=subprocess.STDOUT
#                 )
#
#  f_delete_from_mon_sttm_log.close()
#
#  p_heavy_dml.terminate()
#  flush_and_close( f_query_to_be_killedlog )
#
#  #########################################
#  # Run checking query: what is the resulting value of sequence 'g' ?
#  # (it must be > 0 and < total number of records to be handled).
#
#  sql_cmd='''
#      --set echo on;
#      set list on;
#      set count on;
#      select iif( current_gen > 0 and current_gen < total_rows,
#                  'OK: query was interrupted in the middle point.',
#                  'WRONG! Query to be interrupted '
#                  || iif(current_gen <= 0, 'did not start.', 'already gone, current_gen = '||current_gen )
#                ) as result_msg
#      from (
#          select gen_id(g,0) as current_gen, c.n * c.n * c.n as total_rows
#          from ( select (select count(*) from rdb$types) as n from rdb$database) c
#      );
#  '''
#
#  f_check_result_cmd=open( os.path.join(context['temp_directory'],'tmp_chkr_3977.sql'), 'w')
#  f_check_result_cmd.write(sql_cmd)
#  flush_and_close( f_check_result_cmd )
#
#  f_check_result_log=open( os.path.join(context['temp_directory'],'tmp_chkr_3977.log'), 'w')
#
#  subprocess.call( [context['isql_path'], dsn, "-i", f_check_result_cmd.name ],
#                   stdout=f_check_result_log,
#                   stderr=subprocess.STDOUT
#                 )
#
#  flush_and_close( f_check_result_log )
#
#  with open(f_delete_from_mon_sttm_log.name) as f:
#      for line in f:
#          if not 'EXECUTE STATEMENT' in line.upper():
#              print('DEL FROM MON$STTM: ', ' '.join(line.upper().split()) )
#
#  with open(f_query_to_be_killedlog.name) as f:
#      for line in f:
#          print('QUERY THAT KILLED: ', ' '.join(line.upper().split()) )
#
#  with open(f_check_result_log.name) as f:
#      for line in f:
#          print('CHECK RESULTS LOG: ', ' '.join(line.upper().split()) )
#
#
#  ########################
#  # Cleanup
#  time.sleep(1)
#
#  # 02.11.2019: add full shutdown to forcibly drop all attachments.
#  ##                                   ||||||||||||||||||||||||||||
#  ## ###################################||| FB 4.0+, SS and SC |||##############################
#  ##                                   ||||||||||||||||||||||||||||
#  ## If we check SS or SC and ExtConnPoolLifeTime > 0 (config parameter of FB 4.0+) then the current
#  ## DB (bugs.core_NNNN.fdb) will be 'captured' by the firebird.exe process and the fbt_run utility
#  ## will not be able to drop this database at the final point of the test.
#  ## Moreover, the DB file will be held until all activity in firebird.exe has completed, and AFTER that
#  ## we still have to wait for <ExtConnPoolLifeTime> seconds (discussion and a small test: see
#  ## the letter to hvlad and dimitr 13.10.2019 11:10).
#  ## This means that one needs to kill all connections to prevent an exception on the cleanup phase:
#  ## SQLCODE: -901 / lock time-out on wait transaction / object <this_test_DB> is in use
#  ## #############################################################################################
#
#  f_shutdown_log=open( os.path.join(context['temp_directory'],'tmp_3977_shutdown.log'), 'w')
#
#  subprocess.call( [context['fbsvcmgr_path'], "localhost:service_mgr",
#                    "action_properties", "prp_shutdown_mode", "prp_sm_full", "prp_shutdown_db", "0", "dbname", this_fdb,
#                   ],
#                   stdout = f_shutdown_log,
#                   stderr = subprocess.STDOUT
#                 )
#  subprocess.call( [context['fbsvcmgr_path'], "localhost:service_mgr",
#                    "action_properties", "prp_db_online", "dbname", this_fdb,
#                   ],
#                   stdout = f_shutdown_log,
#                   stderr = subprocess.STDOUT
#                 )
#
#  flush_and_close( f_shutdown_log )
#
#  with open( f_shutdown_log.name,'r') as f:
#      for line in f:
#          if line.split():
#              print( 'UNEXPECTED OUTPUT IN DB SHUTDOWN LOG: ' + (' '.join(line.split()).upper()) )
#
#
#  # CLEANUP
#  #########
#  time.sleep(1)
#  cleanup( [i.name for i in (f_query_to_be_killedcmd, f_query_to_be_killedlog, f_delete_from_mon_sttm_sql, f_delete_from_mon_sttm_log, f_check_result_cmd, f_check_result_log, f_shutdown_log ) ] )
#
#---

act_1 = python_act('db_1', substitutions=substitutions_1)

expected_stdout_1 = """
DEL FROM MON$STTM: RECORDS AFFECTED: 2
CHECK RESULTS LOG: RESULT_MSG OK: QUERY WAS INTERRUPTED IN THE MIDDLE POINT.
CHECK RESULTS LOG: RECORDS AFFECTED: 1
"""

work_script_1 = temp_file('work_script.sql')

@pytest.mark.version('>=2.5.3')
def test_1(act_1: Action, work_script_1: Path, capsys):
    work_script_1.write_text(f"""
    alter sequence g restart with 0;
    commit;

    set term ^;
    execute block as
        declare x int;
    begin
        for
            execute statement ('select gen_id(g,1) from rdb$types,rdb$types,rdb$types')
                on external
                    'localhost:' || rdb$get_context('SYSTEM','DB_NAME')
                as user '{act_1.db.user}' password '{act_1.db.password}'
            into x
        do begin
        end
    end
    ^
    set term ;^
    """)
    # Start ISQL in a separate process that runs the 'heavy query'
    p_work_sql = subprocess.Popen([act_1.vars['isql'], '-i', str(work_script_1),
                                   '-user', act_1.db.user,
                                   '-password', act_1.db.password, act_1.db.dsn],
                                  stderr=subprocess.STDOUT)
    time.sleep(3)
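    # NOTE: the fixed sleep above is only a heuristic. A more robust wait (sketched below and kept
    # commented out; it assumes act_1.db.connect() is available, as in other python_act based tests)
    # would poll MON$STATEMENTS until the heavy query becomes visible:
    #
    #   with act_1.db.connect() as con:
    #       cur = con.cursor()
    #       for _ in range(30):
    #           cur.execute("select count(*) from mon$statements "
    #                       "where mon$attachment_id != current_connection "
    #                       "and mon$sql_text containing 'gen_id('")
    #           if cur.fetchone()[0] > 0:
    #               break
    #           con.commit()      # start a new transaction so the MON$ snapshot is refreshed
    #           time.sleep(0.5)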
    # Run a 2nd ISQL and issue the DELETE FROM MON$STATEMENTS command there; the heavy query in the first ISQL should be interrupted shortly after.
    drop_sql = """
    commit;
    set list on;

    select *
    from mon$statements
    where
        mon$attachment_id != current_connection
        and mon$sql_text containing 'gen_id('
    --order by mon$stat_id
    ;

    set count on;

    delete from mon$statements
    where
        mon$attachment_id != current_connection
        and mon$sql_text containing 'gen_id('
    --order by mon$stat_id
    ;
    quit;
    """
    try:
        act_1.isql(switches=[], input=drop_sql)
        delete_from_mon_sttm_log = act_1.string_strip(act_1.stdout)
    finally:
        p_work_sql.terminate()
    # Run checking query: what is the resulting value of sequence 'g' ?
    # (it must be > 0 and < total number of records to be handled).
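    # The check derives total_rows as n * n * n, where n is the row count of rdb$types, i.e. the
    # size of the triple cross join that the heavy query iterates over.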
    check_sql = """
    --set echo on;
    set list on;
    set count on;
    select iif( current_gen > 0 and current_gen < total_rows,
                'OK: query was interrupted in the middle point.',
                'WRONG! Query to be interrupted '
                || iif(current_gen <= 0, 'did not start.', 'already gone, current_gen = '||current_gen )
              ) as result_msg
    from (
        select gen_id(g,0) as current_gen, c.n * c.n * c.n as total_rows
        from (select (select count(*) from rdb$types) as n from rdb$database) c
    );
    """
    act_1.isql(switches=[], input=check_sql)
    # Report both logs in normalized (upper-cased, whitespace-collapsed) form
    for line in delete_from_mon_sttm_log.splitlines():
        if 'EXECUTE STATEMENT' not in line.upper():
            print('DEL FROM MON$STTM: ', ' '.join(line.upper().split()))
    for line in act_1.string_strip(act_1.stdout).splitlines():
        print('CHECK RESULTS LOG: ', ' '.join(line.upper().split()))
    # Compare captured output with the expected log
    act_1.expected_stdout = expected_stdout_1
    act_1.stdout = capsys.readouterr().out
    assert act_1.clean_stdout == act_1.clean_expected_stdout